1 /*
2 * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
66 */
67 /*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
73
74 #define SIGPROP /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
82 #include <sys/acct.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
85 #include <sys/wait.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
88 #include <sys/stat.h>
89 #include <sys/lock.h>
90 #include <sys/kdebug.h>
91 #include <sys/reason.h>
92
93 #include <sys/mount.h>
94 #include <sys/sysproto.h>
95
96 #include <security/audit/audit.h>
97
98 #include <kern/cpu_number.h>
99
100 #include <sys/vm.h>
101 #include <sys/user.h> /* for coredump */
102 #include <kern/ast.h> /* for APC support */
103 #include <kern/kalloc.h>
104 #include <kern/task.h> /* extern void *get_bsdtask_info(task_t); */
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #include <kern/thread_call.h>
108 #include <kern/policy_internal.h>
109 #include <kern/sync_sema.h>
110
111 #include <mach/exception.h>
112 #include <mach/task.h>
113 #include <mach/thread_act.h>
114 #include <libkern/OSAtomic.h>
115
116 #include <sys/sdt.h>
117 #include <sys/codesign.h>
118 #include <sys/random.h>
119 #include <libkern/section_keywords.h>
120
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124
125 /*
126 * Missing prototypes that Mach should export
127 *
128 * +++
129 */
130 extern int thread_enable_fpe(thread_t act, int onoff);
131 extern kern_return_t get_signalact(task_t, thread_t *, int);
132 extern unsigned int get_useraddr(void);
133 extern boolean_t task_did_exec(task_t task);
134 extern boolean_t task_is_exec_copy(task_t task);
135 extern void vm_shared_region_reslide_stale(boolean_t driverkit);
136
137 /*
138 * ---
139 */
140
141 extern void doexception(int exc, mach_exception_code_t code,
142 mach_exception_subcode_t sub);
143
144 static void stop(proc_t, proc_t);
145 static int cansignal_nomac(proc_t, kauth_cred_t, proc_t, int);
146 int cansignal(proc_t, kauth_cred_t, proc_t, int);
147 int killpg1(proc_t, int, int, int, int);
148 kern_return_t do_bsdexception(int, int, int);
149 void __posix_sem_syscall_return(kern_return_t);
150 char *proc_name_address(void *p);
151
152 static int filt_sigattach(struct knote *kn, struct kevent_qos_s *kev);
153 static void filt_sigdetach(struct knote *kn);
154 static int filt_signal(struct knote *kn, long hint);
155 static int filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev);
156 static int filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev);
157
/* Filter operations backing EVFILT_SIGNAL knotes. */
SECURITY_READ_ONLY_EARLY(struct filterops) sig_filtops = {
	.f_attach = filt_sigattach,
	.f_detach = filt_sigdetach,
	.f_event = filt_signal,
	.f_touch = filt_signaltouch,
	.f_process = filt_signalprocess,
};

/* structures and fns for killpg1 iteration callback and filters */
struct killpg1_filtargs {
	bool posix;             /* true for POSIX kill() semantics */
	proc_t curproc;         /* process issuing the kill */
};

struct killpg1_iterargs {
	proc_t curproc;         /* process issuing the kill */
	kauth_cred_t uc;        /* sender's credential */
	int signum;             /* signal to deliver */
	int nfound;             /* count of matching processes seen */
};
178
179 static int killpg1_allfilt(proc_t p, void * arg);
180 static int killpg1_callback(proc_t p, void * arg);
181
182 static int pgsignal_callback(proc_t p, void * arg);
183 static kern_return_t get_signalthread(proc_t, int, thread_t *);
184
185
186 /* flags for psignal_internal */
187 #define PSIG_LOCKED 0x1
188 #define PSIG_VFORK 0x2
189 #define PSIG_THREAD 0x4
190 #define PSIG_TRY_THREAD 0x8
191
192 static os_reason_t build_signal_reason(int signum, const char *procname);
193 static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason);
194
195 /*
196 * NOTE: Source and target may *NOT* overlap! (target is smaller)
197 */
198 static void
sigaltstack_kern_to_user32(struct kern_sigaltstack * in,struct user32_sigaltstack * out)199 sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
200 {
201 out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
202 out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
203 out->ss_flags = in->ss_flags;
204 }
205
206 static void
sigaltstack_kern_to_user64(struct kern_sigaltstack * in,struct user64_sigaltstack * out)207 sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
208 {
209 out->ss_sp = in->ss_sp;
210 out->ss_size = in->ss_size;
211 out->ss_flags = in->ss_flags;
212 }
213
/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
	/* Copy order is significant (see note above): flags, then size,
	 * then sp, so an overlapping source is never clobbered early. */
	out->ss_flags = in->ss_flags;
	out->ss_size = in->ss_size;
	out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}
static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
	/* Same back-to-front copy order as the user32 variant so that an
	 * overlapping source remains intact while it is read. */
	out->ss_flags = in->ss_flags;
	out->ss_size = (user_size_t)in->ss_size;
	out->ss_sp = (user_addr_t)in->ss_sp;
}
233
/* Convert a kernel sigaction into the 32-bit user layout for copyout. */
static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
	/* This assumes 32 bit __sa_handler is of type sig_t */
	out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
/* Convert a kernel sigaction into the 64-bit user layout for copyout. */
static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
	/* 64-bit __sa_handler is assignment-compatible; no narrowing cast
	 * is needed (the "32 bit" wording of the original comment was a
	 * copy-paste from the user32 variant). */
	out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
250
251 static void
__sigaction_user32_to_kern(struct __user32_sigaction * in,struct __kern_sigaction * out)252 __sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
253 {
254 out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
255 out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
256 out->sa_mask = in->sa_mask;
257 out->sa_flags = in->sa_flags;
258
259 kern_return_t kr;
260 kr = machine_thread_function_pointers_convert_from_user(current_thread(),
261 &out->sa_tramp, 1);
262 assert(kr == KERN_SUCCESS);
263 }
264
265 static void
__sigaction_user64_to_kern(struct __user64_sigaction * in,struct __kern_sigaction * out)266 __sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
267 {
268 out->__sigaction_u.__sa_handler = (user_addr_t)in->__sigaction_u.__sa_handler;
269 out->sa_tramp = (user_addr_t)in->sa_tramp;
270 out->sa_mask = in->sa_mask;
271 out->sa_flags = in->sa_flags;
272
273 kern_return_t kr;
274 kr = machine_thread_function_pointers_convert_from_user(current_thread(),
275 &out->sa_tramp, 1);
276 assert(kr == KERN_SUCCESS);
277 }
278
279 #if SIGNAL_DEBUG
280 void ram_printf(int);
281 int ram_debug = 0;
282 unsigned int rdebug_proc = 0;
/* Trivial SIGNAL_DEBUG-only helper: log its argument to the console. */
void
ram_printf(int x)
{
	printf("x is %d", x);
}
288 #endif /* SIGNAL_DEBUG */
289
290
/*
 * Post a BSD AST on the given thread so it will check for (and deliver)
 * pending signals at the next AST-safe point.
 */
void
signal_setast(thread_t sig_actthread)
{
	act_set_astbsd(sig_actthread);
}
296
297 static int
cansignal_nomac(proc_t src,kauth_cred_t uc_src,proc_t dst,int signum)298 cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
299 {
300 /* you can signal yourself */
301 if (src == dst) {
302 return 1;
303 }
304
305 /* you can't send the init proc SIGKILL, even if root */
306 if (signum == SIGKILL && dst == initproc) {
307 return 0;
308 }
309
310 /* otherwise, root can always signal */
311 if (kauth_cred_issuser(uc_src)) {
312 return 1;
313 }
314
315 /* processes in the same session can send SIGCONT to each other */
316 if (signum == SIGCONT && proc_sessionid(src) == proc_sessionid(dst)) {
317 return 1;
318 }
319
320 #if XNU_TARGET_OS_IOS
321 // Allow debugging of third party drivers on iOS
322 if (proc_is_third_party_debuggable_driver(dst)) {
323 return 1;
324 }
325 #endif /* XNU_TARGET_OS_IOS */
326
327 /* the source process must be authorized to signal the target */
328 {
329 int allowed = 0;
330 kauth_cred_t uc_dst = NOCRED, uc_ref = NOCRED;
331
332 uc_dst = uc_ref = kauth_cred_proc_ref(dst);
333
334 /*
335 * If the real or effective UID of the sender matches the real or saved
336 * UID of the target, allow the signal to be sent.
337 */
338 if (kauth_cred_getruid(uc_src) == kauth_cred_getruid(uc_dst) ||
339 kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) ||
340 kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) ||
341 kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) {
342 allowed = 1;
343 }
344
345 if (uc_ref != NOCRED) {
346 kauth_cred_unref(&uc_ref);
347 uc_ref = NOCRED;
348 }
349
350 return allowed;
351 }
352 }
353
/*
 * Can process `src`, with ucred `uc_src`, send the signal `signum` to process
 * `dst`? The ucred is referenced by the caller so internal fields can be used
 * safely.  Returns 1 if the signal may be sent, 0 otherwise.
 */
int
cansignal(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
{
#if CONFIG_MACF
	/* MAC policy gets first refusal; a nonzero result means "deny". */
	if (mac_proc_check_signal(src, dst, signum)) {
		return 0;
	}
#endif

	return cansignal_nomac(src, uc_src, dst, signum);
}
370
371 /*
372 * <rdar://problem/21952708> Some signals can be restricted from being handled,
373 * forcing the default action for that signal. This behavior applies only to
374 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
375 * bootarg:
376 *
377 * 0 (default): Disallow use of restricted signals. Trying to register a handler
378 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
379 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
380 * 2: Usual POSIX semantics.
381 */
382 static TUNABLE(unsigned, sigrestrict_arg, "sigrestrict", 0);
383
384 #if XNU_PLATFORM_WatchOS
385 static int
sigrestrictmask(void)386 sigrestrictmask(void)
387 {
388 if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
389 return SIGRESTRICTMASK;
390 }
391 return 0;
392 }
393
394 static int
signal_is_restricted(proc_t p,int signum)395 signal_is_restricted(proc_t p, int signum)
396 {
397 if (sigmask(signum) & sigrestrictmask()) {
398 if (sigrestrict_arg == 0 &&
399 task_get_apptype(proc_task(p)) == TASK_APPTYPE_APP_DEFAULT) {
400 return ENOTSUP;
401 } else {
402 return EINVAL;
403 }
404 }
405 return 0;
406 }
407
408 #else
409
/* Non-WatchOS stub: no signals are ever restricted. */
static inline int
signal_is_restricted(proc_t p, int signum)
{
	(void)p;
	(void)signum;
	return 0;
}
417 #endif /* !XNU_PLATFORM_WatchOS */
418
419 /*
420 * Returns: 0 Success
421 * EINVAL
422 * copyout:EFAULT
423 * copyin:EFAULT
424 *
425 * Notes: Uses current thread as a parameter to inform PPC to enable
426 * FPU exceptions via setsigvec(); this operation is not proxy
427 * safe!
428 */
429 /* ARGSUSED */
430 int
sigaction(proc_t p,struct sigaction_args * uap,__unused int32_t * retval)431 sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
432 {
433 struct kern_sigaction vec;
434 struct __kern_sigaction __vec;
435
436 struct kern_sigaction *sa = &vec;
437 struct sigacts *ps = &p->p_sigacts;
438
439 int signum;
440 int bit, error = 0;
441 uint32_t sigreturn_validation = PS_SIGRETURN_VALIDATION_DEFAULT;
442
443 signum = uap->signum;
444 if (signum <= 0 || signum >= NSIG ||
445 signum == SIGKILL || signum == SIGSTOP) {
446 return EINVAL;
447 }
448
449 if (uap->nsa) {
450 if (IS_64BIT_PROCESS(p)) {
451 struct __user64_sigaction __vec64;
452 error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
453 __sigaction_user64_to_kern(&__vec64, &__vec);
454 } else {
455 struct __user32_sigaction __vec32;
456 error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
457 __sigaction_user32_to_kern(&__vec32, &__vec);
458 }
459 if (error) {
460 return error;
461 }
462
463 sigreturn_validation = (__vec.sa_flags & SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP) ?
464 PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED;
465 __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */
466
467 if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
468 if ((error = signal_is_restricted(p, signum))) {
469 if (error == ENOTSUP) {
470 printf("%s(%d): denied attempt to register action for signal %d\n",
471 proc_name_address(p), proc_pid(p), signum);
472 }
473 return error;
474 }
475 }
476 }
477
478 if (uap->osa) {
479 sa->sa_handler = SIGACTION(p, signum);
480 sa->sa_mask = ps->ps_catchmask[signum];
481 bit = sigmask(signum);
482 sa->sa_flags = 0;
483 if ((ps->ps_sigonstack & bit) != 0) {
484 sa->sa_flags |= SA_ONSTACK;
485 }
486 if ((ps->ps_sigintr & bit) == 0) {
487 sa->sa_flags |= SA_RESTART;
488 }
489 if (ps->ps_siginfo & bit) {
490 sa->sa_flags |= SA_SIGINFO;
491 }
492 if (ps->ps_signodefer & bit) {
493 sa->sa_flags |= SA_NODEFER;
494 }
495 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) {
496 sa->sa_flags |= SA_NOCLDSTOP;
497 }
498 if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) {
499 sa->sa_flags |= SA_NOCLDWAIT;
500 }
501
502 if (IS_64BIT_PROCESS(p)) {
503 struct user64_sigaction vec64 = {};
504 sigaction_kern_to_user64(sa, &vec64);
505 error = copyout(&vec64, uap->osa, sizeof(vec64));
506 } else {
507 struct user32_sigaction vec32 = {};
508 sigaction_kern_to_user32(sa, &vec32);
509 error = copyout(&vec32, uap->osa, sizeof(vec32));
510 }
511 if (error) {
512 return error;
513 }
514 }
515
516 if (uap->nsa) {
517 uint32_t old_sigreturn_validation = atomic_load_explicit(
518 &ps->ps_sigreturn_validation, memory_order_relaxed);
519 if (old_sigreturn_validation == PS_SIGRETURN_VALIDATION_DEFAULT) {
520 atomic_compare_exchange_strong_explicit(&ps->ps_sigreturn_validation,
521 &old_sigreturn_validation, sigreturn_validation,
522 memory_order_relaxed, memory_order_relaxed);
523 }
524 error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
525 }
526
527 return error;
528 }
529
530 /* Routines to manipulate bits on all threads */
/*
 * Clear the signal bits in `bit` from every thread's pending set and from
 * the process-wide pending set.  `in_signalstart` indicates the caller
 * already bracketed the operation with proc_signalstart/proc_signalend.
 */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
	struct uthread * uth;

	proc_lock(p);
	if (!in_signalstart) {
		proc_signalstart(p, 1);
	}


	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_siglist &= ~bit;
	}
	p->p_siglist &= ~bit;
	if (!in_signalstart) {
		proc_signalend(p, 1);
	}
	proc_unlock(p);

	return 0;
}
553
554
555 static int
unblock_procsigmask(proc_t p,int bit)556 unblock_procsigmask(proc_t p, int bit)
557 {
558 struct uthread * uth;
559
560 proc_lock(p);
561 proc_signalstart(p, 1);
562
563
564 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
565 uth->uu_sigmask &= ~bit;
566 }
567 p->p_sigmask &= ~bit;
568
569 proc_signalend(p, 1);
570 proc_unlock(p);
571 return 0;
572 }
573
574 static int
block_procsigmask(proc_t p,int bit)575 block_procsigmask(proc_t p, int bit)
576 {
577 struct uthread * uth;
578
579 proc_lock(p);
580 proc_signalstart(p, 1);
581
582
583 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
584 uth->uu_sigmask |= bit;
585 }
586 p->p_sigmask |= bit;
587
588 proc_signalend(p, 1);
589 proc_unlock(p);
590 return 0;
591 }
592
593 int
set_procsigmask(proc_t p,int bit)594 set_procsigmask(proc_t p, int bit)
595 {
596 struct uthread * uth;
597
598 proc_lock(p);
599 proc_signalstart(p, 1);
600
601
602 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
603 uth->uu_sigmask = bit;
604 }
605 p->p_sigmask = bit;
606 proc_signalend(p, 1);
607 proc_unlock(p);
608
609 return 0;
610 }
611
612 /* XXX should be static? */
613 /*
614 * Notes: The thread parameter is used in the PPC case to select the
615 * thread on which the floating point exception will be enabled
616 * or disabled. We can't simply take current_thread(), since
617 * this is called from posix_spawn() on the not currently running
618 * process/thread pair.
619 *
620 * We mark thread as unused to alow compilation without warning
621 * on non-PPC platforms.
622 */
/*
 * Install the disposition in `sa` for `signum` on process `p`, updating
 * the per-signal flag bitmaps in p_sigacts and the process-level
 * catch/ignore sets.  Returns 0 or EINVAL (attempt to catch SIGKILL or
 * SIGSTOP).  `in_sigstart` is forwarded to clear_procsiglist().
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
	struct sigacts *ps = &p->p_sigacts;
	int bit;

	assert(signum < NSIG);

	/* SIGKILL and SIGSTOP may only ever have the default disposition. */
	if ((signum == SIGKILL || signum == SIGSTOP) &&
	    sa->sa_handler != SIG_DFL) {
		return EINVAL;
	}
	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	proc_set_sigact_trampact(p, signum, sa->sa_handler, sa->sa_tramp);
	/* Never allow the uncatchable signals to be masked during delivery. */
	ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
	/* Mirror each sa_flags bit into its per-signal bitmap.  Note the
	 * inverted sense of ps_sigintr: it records the ABSENCE of SA_RESTART. */
	if (sa->sa_flags & SA_SIGINFO) {
		ps->ps_siginfo |= bit;
	} else {
		ps->ps_siginfo &= ~bit;
	}
	if ((sa->sa_flags & SA_RESTART) == 0) {
		ps->ps_sigintr |= bit;
	} else {
		ps->ps_sigintr &= ~bit;
	}
	if (sa->sa_flags & SA_ONSTACK) {
		ps->ps_sigonstack |= bit;
	} else {
		ps->ps_sigonstack &= ~bit;
	}
	if (sa->sa_flags & SA_RESETHAND) {
		ps->ps_sigreset |= bit;
	} else {
		ps->ps_sigreset &= ~bit;
	}
	if (sa->sa_flags & SA_NODEFER) {
		ps->ps_signodefer |= bit;
	} else {
		ps->ps_signodefer &= ~bit;
	}
	/* SIGCHLD options live in p_flag rather than per-signal bitmaps;
	 * SIG_IGN on SIGCHLD implies SA_NOCLDWAIT (POSIX auto-reap). */
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP) {
			OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
		} else {
			OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
		}
		if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) {
			OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
		} else {
			OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
		}
	}

	/*
	 * Set bit in p_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in p_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		clear_procsiglist(p, bit, in_sigstart);
		if (signum != SIGCONT) {
			p->p_sigignore |= bit; /* easier in psignal */
		}
		p->p_sigcatch &= ~bit;
	} else {
		p->p_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL) {
			p->p_sigcatch &= ~bit;
		} else {
			p->p_sigcatch |= bit;
		}
	}
	return 0;
}
702
703 /*
704 * Initialize signal state for process 0;
705 * set to ignore signals that are ignored by default.
706 */
707 void
siginit(proc_t p)708 siginit(proc_t p)
709 {
710 int i;
711
712 for (i = 1; i < NSIG; i++) {
713 if (sigprop[i] & SA_IGNORE && i != SIGCONT) {
714 p->p_sigignore |= sigmask(i);
715 }
716 }
717 }
718
719 /*
720 * Reset signals for an exec of the specified process.
721 */
void
execsigs(proc_t p, thread_t thread)
{
	struct sigacts *ps = &p->p_sigacts;
	int nc, mask;
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * transfer saved signal states from the process
	 * back to the current thread.
	 *
	 * NOTE: We do this without the process locked,
	 * because we are guaranteed to be single-threaded
	 * by this point in exec and the p_siglist is
	 * only accessed by threads inside the process.
	 */
	ut->uu_siglist |= p->p_siglist;
	p->p_siglist = 0;

	/*
	 * Reset caught signals. Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	proc_reset_sigact(p, p->p_sigcatch);
	/* Walk the set of formerly-caught signals, lowest bit first. */
	while (p->p_sigcatch) {
		nc = ffs((unsigned int)p->p_sigcatch);
		mask = sigmask(nc);
		p->p_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			/* Back to default-ignore: mark ignored (except
			 * SIGCONT) and drop any pending instance. */
			if (nc != SIGCONT) {
				p->p_sigignore |= mask;
			}
			ut->uu_siglist &= ~mask;
		}
	}

	/* The new image starts with no sigreturn-validation preference. */
	atomic_store_explicit(&ps->ps_sigreturn_validation,
	    PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed);

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	/* thread */
	ut->uu_sigstk.ss_flags = SA_DISABLE;
	ut->uu_sigstk.ss_size = 0;
	ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
	ut->uu_flag &= ~UT_ALTSTACK;
	/* process */
	ps->ps_sigonstack = 0;
}
776
777 /*
778 * Manipulate signal mask.
779 * Note that we receive new mask, not pointer,
780 * and return old mask as return value;
781 * the library stub does the rest.
782 */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
	int error = 0;
	sigset_t oldmask, nmask;
	user_addr_t omask = uap->omask;
	struct uthread *ut;

	/* Snapshot the calling thread's current mask before any change. */
	ut = current_uthread();
	oldmask = ut->uu_sigmask;

	if (uap->mask == USER_ADDR_NULL) {
		/* just want old mask */
		goto out;
	}
	error = copyin(uap->mask, &nmask, sizeof(sigset_t));
	if (error) {
		goto out;
	}

	/* sigcantmask (SIGKILL|SIGSTOP) can never be blocked; each case
	 * posts an AST so newly unblocked signals get delivered promptly. */
	switch (uap->how) {
	case SIG_BLOCK:
		block_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_UNBLOCK:
		unblock_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		set_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	if (!error && omask != USER_ADDR_NULL) {
		/* NOTE(review): copyout result is deliberately ignored here;
		 * a faulting omask does not fail the call — confirm intended. */
		copyout(&oldmask, omask, sizeof(sigset_t));
	}
	return error;
}
829
/*
 * Report the calling thread's set of pending signals.  Always returns 0;
 * the copyout result is ignored (matching sigprocmask's treatment).
 */
int
sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;
	sigset_t pendlist;

	ut = current_uthread();
	pendlist = ut->uu_siglist;

	if (uap->osv) {
		copyout(&pendlist, uap->osv, sizeof(sigset_t));
	}
	return 0;
}
844
845 /*
846 * Suspend process until signal, providing mask to be set
847 * in the meantime. Note nonstandard calling convention:
848 * libc stub passes mask, not pointer, to save a copyin.
849 */
850
/*
 * Continuation for sigsuspend's tsleep0: resumes the syscall path after
 * the sleep is interrupted.  unix_syscall_return() does not return.
 */
static int
sigcontinue(__unused int error)
{
	// struct uthread *ut = current_uthread();
	unix_syscall_return(EINTR);
}
857
/* Cancellation-point wrapper: test for pthread cancel, then suspend. */
int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval);
}
864
int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;

	ut = current_uthread();

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished. Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* Install the caller-supplied mask (uncatchable bits stripped)
	 * and sleep until a signal wakes us via the continuation. */
	ut->uu_sigmask = (uap->mask & ~sigcantmask);
	(void) tsleep0((caddr_t) p, PPAUSE | PCATCH, "pause", 0, sigcontinue);
	/* always return EINTR rather than ERESTART... */
	return EINTR;
}
886
887
888 int
__disable_threadsignal(__unused proc_t p,__unused struct __disable_threadsignal_args * uap,__unused int32_t * retval)889 __disable_threadsignal(__unused proc_t p,
890 __unused struct __disable_threadsignal_args *uap,
891 __unused int32_t *retval)
892 {
893 struct uthread *uth;
894
895 uth = current_uthread();
896
897 /* No longer valid to have any signal delivered */
898 uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);
899
900 return 0;
901 }
902
/*
 * pthread cancellation point: if the calling thread has a pending cancel
 * (and cancellation is enabled and not already acted upon), abort it.
 * `presyscall` nonzero means we are before syscall work, so return EINTR
 * directly; otherwise abort the thread safely at the next opportunity.
 */
void
__pthread_testcancel(int presyscall)
{
	thread_t self = current_thread();
	struct uthread * uthread;

	uthread = (struct uthread *)get_bsdthread_info(self);

	/* Passing through a cancellation point clears "not a cancel point". */
	uthread->uu_flag &= ~UT_NOTCANCELPT;

	/* Act only when CANCEL is set and neither DISABLE nor CANCELED is. */
	if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
		if (presyscall != 0) {
			unix_syscall_return(EINTR);
			/* NOTREACHED */
		} else {
			thread_abort_safely(self);
		}
	}
}
923
924
925
/*
 * Mark the thread named by uap->thread_port (must belong to the current
 * task) as cancel-pending, and abort it immediately if it is currently
 * at a cancellation point with cancellation enabled.
 */
int
__pthread_markcancel(__unused proc_t p,
    struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
	thread_act_t target_act;
	int error = 0;
	struct uthread *uth;

	target_act = (thread_act_t)port_name_to_thread(uap->thread_port,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);

	if (target_act == THR_ACT_NULL) {
		return ESRCH;
	}

	uth = (struct uthread *)get_bsdthread_info(target_act);

	/* Only the first cancel request has any effect. */
	if ((uth->uu_flag & (UT_CANCEL | UT_CANCELED)) == 0) {
		uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
		if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
		    && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) {
			thread_abort_safely(target_act);
		}
	}

	/* Drop the reference taken by port_name_to_thread(). */
	thread_deallocate(target_act);
	return error;
}
954
955 /* if action =0 ; return the cancellation state ,
956 * if marked for cancellation, make the thread canceled
957 * if action = 1 ; Enable the cancel handling
958 * if action = 2; Disable the cancel handling
959 */
/* if action =0 ; return the cancellation state ,
 * if marked for cancellation, make the thread canceled
 * if action = 1 ; Enable the cancel handling
 * if action = 2; Disable the cancel handling
 */
int
__pthread_canceled(__unused proc_t p,
    struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
	thread_act_t thread;
	struct uthread *uth;
	int action = uap->action;

	thread = current_thread();
	uth = (struct uthread *)get_bsdthread_info(thread);

	switch (action) {
	case 1:
		uth->uu_flag &= ~UT_CANCELDISABLE;
		return 0;
	case 2:
		uth->uu_flag |= UT_CANCELDISABLE;
		return 0;
	case 0:
	default:
		/* Consume a pending cancel: flip CANCEL -> CANCELED and stop
		 * delivering signals to this thread. */
		if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			uth->uu_flag &= ~UT_CANCEL;
			uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
			return 0;
		}
		return EINVAL;
	}
	/* NOTREACHED: every switch arm returns above. */
	return EINVAL;
}
989
990 __attribute__((noreturn))
991 void
__posix_sem_syscall_return(kern_return_t kern_result)992 __posix_sem_syscall_return(kern_return_t kern_result)
993 {
994 int error = 0;
995
996 if (kern_result == KERN_SUCCESS) {
997 error = 0;
998 } else if (kern_result == KERN_ABORTED) {
999 error = EINTR;
1000 } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
1001 error = ETIMEDOUT;
1002 } else {
1003 error = EINVAL;
1004 }
1005 unix_syscall_return(error);
1006 /* does not return */
1007 }
1008
1009 /*
1010 * Returns: 0 Success
1011 * EINTR
1012 * ETIMEDOUT
1013 * EINVAL
1014 * EFAULT if timespec is NULL
1015 */
/* Cancellation-point wrapper around __semwait_signal_nocancel(). */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
    int32_t *retval)
{
	__pthread_testcancel(0);
	return __semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval);
}
1023
/*
 * Wait on a Mach semaphore (optionally signalling a second "mutex"
 * semaphore first), with an optional absolute or relative timeout.
 * Mach results are mapped to 0 / EINTR / ETIMEDOUT / EINVAL; a timeout
 * too large for Mach is truncated and reported as EINTR so userspace
 * can retry.
 */
int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
	kern_return_t kern_result;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if (uap->timeout) {
		ts.tv_sec = (user_time_t)uap->tv_sec;
		ts.tv_nsec = uap->tv_nsec;

		/* mach_timespec_t.tv_sec is 32-bit: clamp oversized
		 * timeouts and remember that we did. */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = (unsigned int)ts.tv_sec;
			then.tv_nsec = (clock_res_t)ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has elapsed, set time to null timespec to bailout rightaway */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				/* Convert absolute deadline to a relative
				 * interval, normalizing a negative nsec. */
				then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec);
				then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec);
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		if (uap->mutex_sem == 0) {
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		} else {
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		}
	} else {
		if (uap->mutex_sem == 0) {
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		} else {
			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
		}
	}

	if (kern_result == KERN_SUCCESS && !truncated_timeout) {
		return 0;
	} else if (kern_result == KERN_SUCCESS && truncated_timeout) {
		return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	} else if (kern_result == KERN_ABORTED) {
		return EINTR;
	} else if (kern_result == KERN_OPERATION_TIMED_OUT) {
		return ETIMEDOUT;
	} else {
		return EINVAL;
	}
}
1091
1092
/*
 * pthread_kill(2) backend: deliver signum to the thread named by a Mach
 * port.  Returns ESRCH for an invalid port or a thread that has opted out
 * of process-wide signal delivery (UT_NO_SIGMASK), EINVAL for a bad signal
 * number, and ENOTSUP for workqueue threads that have not explicitly
 * allowed pthread_kill.  signum == 0 performs the lookup/permission checks
 * without delivering anything.
 */
int
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
    __unused int32_t *retval)
{
	thread_t target_act;
	int error = 0;
	int signum = uap->sig;
	struct uthread *uth;

	/* Takes a thread reference; released via thread_deallocate() below. */
	target_act = (thread_t)port_name_to_thread(uap->thread_port,
	    PORT_INTRANS_OPTIONS_NONE);

	if (target_act == THREAD_NULL) {
		return ESRCH;
	}
	if ((u_int)signum >= NSIG) {
		error = EINVAL;
		goto out;
	}

	uth = (struct uthread *)get_bsdthread_info(target_act);

	if (uth->uu_flag & UT_NO_SIGMASK) {
		error = ESRCH;
		goto out;
	}

	if ((thread_get_tag(target_act) & THREAD_TAG_WORKQUEUE) && !uth->uu_workq_pthread_kill_allowed) {
		error = ENOTSUP;
		goto out;
	}

	if (signum) {
		psignal_uthread(target_act, signum);
	}
out:
	/* Drop the reference taken by port_name_to_thread(). */
	thread_deallocate(target_act);
	return error;
}
1132
1133
1134 int
__pthread_sigmask(__unused proc_t p,struct __pthread_sigmask_args * uap,__unused int32_t * retval)1135 __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
1136 __unused int32_t *retval)
1137 {
1138 user_addr_t set = uap->set;
1139 user_addr_t oset = uap->oset;
1140 sigset_t nset;
1141 int error = 0;
1142 struct uthread *ut;
1143 sigset_t oldset;
1144
1145 ut = current_uthread();
1146 oldset = ut->uu_sigmask;
1147
1148 if (set == USER_ADDR_NULL) {
1149 /* need only old mask */
1150 goto out;
1151 }
1152
1153 error = copyin(set, &nset, sizeof(sigset_t));
1154 if (error) {
1155 goto out;
1156 }
1157
1158 switch (uap->how) {
1159 case SIG_BLOCK:
1160 ut->uu_sigmask |= (nset & ~sigcantmask);
1161 break;
1162
1163 case SIG_UNBLOCK:
1164 ut->uu_sigmask &= ~(nset);
1165 signal_setast(current_thread());
1166 break;
1167
1168 case SIG_SETMASK:
1169 ut->uu_sigmask = (nset & ~sigcantmask);
1170 signal_setast(current_thread());
1171 break;
1172
1173 default:
1174 error = EINVAL;
1175 }
1176 out:
1177 if (!error && oset != USER_ADDR_NULL) {
1178 copyout(&oldset, oset, sizeof(sigset_t));
1179 }
1180
1181 return error;
1182 }
1183
1184 /*
1185 * Returns: 0 Success
1186 * EINVAL
1187 * copyin:EFAULT
1188 * copyout:EFAULT
1189 */
1190 int
__sigwait(proc_t p,struct __sigwait_args * uap,int32_t * retval)1191 __sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
1192 {
1193 __pthread_testcancel(1);
1194 return __sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval);
1195 }
1196
/*
 * sigwait(2) backend: wait until one of the signals in *uap->set is
 * delivered, then (optionally) report its number through uap->sig.
 * If one of the requested signals is already pending on any thread of the
 * process it is consumed immediately; otherwise the caller unblocks the
 * requested set and sleeps until one arrives.
 */
int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;
	struct uthread *uth;
	int error = 0;
	sigset_t mask;
	sigset_t siglist;
	sigset_t sigw = 0;
	int signum;

	ut = current_uthread();

	if (uap->set == USER_ADDR_NULL) {
		return EINVAL;
	}

	error = copyin(uap->set, &mask, sizeof(sigset_t));
	if (error) {
		return error;
	}

	/* SIGKILL/SIGSTOP (sigcantmask) cannot be waited for. */
	siglist = (mask & ~sigcantmask);

	if (siglist == 0) {
		return EINVAL;
	}

	proc_lock(p);

	/* First pass: is a requested signal already pending on some thread? */
	proc_signalstart(p, 1);
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		if ((sigw = uth->uu_siglist & siglist)) {
			break;
		}
	}
	proc_signalend(p, 1);

	if (sigw) {
		/* The signal was pending on a thread */
		goto sigwait1;
	}
	/*
	 * When returning from sigwait, we want
	 * the old mask to be restored after the
	 * signal handler has finished. Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	uth = ut; /* wait for it to be delivered to us */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* NOTE(review): siglist == 0 was already rejected above; this recheck looks unreachable. */
	if (siglist == (sigset_t)0) {
		proc_unlock(p);
		return EINVAL;
	}
	/* SIGKILL and SIGSTOP are not maskable as well */
	ut->uu_sigmask = ~(siglist | sigcantmask);
	ut->uu_sigwait = siglist;

	/* No Continuations for now */
	error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0);

	/* A restartable interruption still means a signal arrived; treat as success. */
	if (error == ERESTART) {
		error = 0;
	}

	sigw = (ut->uu_sigwait & siglist);
	/* Restore the pre-wait signal mask. */
	ut->uu_sigmask = ut->uu_oldmask;
	ut->uu_oldmask = 0;
	ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
	ut->uu_sigwait = 0;
	if (!error) {
		signum = ffs((unsigned int)sigw);
		if (!signum) {
			panic("sigwait with no signal wakeup");
		}
		/* Clear the pending signal in the thread it was delivered */
		uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
		DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

		proc_unlock(p);
		if (uap->sig != USER_ADDR_NULL) {
			error = copyout(&signum, uap->sig, sizeof(int));
		}
	} else {
		proc_unlock(p);
	}

	return error;
}
1292
1293 int
sigaltstack(__unused proc_t p,struct sigaltstack_args * uap,__unused int32_t * retval)1294 sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
1295 {
1296 struct kern_sigaltstack ss;
1297 struct kern_sigaltstack *pstk;
1298 int error;
1299 struct uthread *uth;
1300 int onstack;
1301
1302 uth = current_uthread();
1303
1304 pstk = &uth->uu_sigstk;
1305 if ((uth->uu_flag & UT_ALTSTACK) == 0) {
1306 uth->uu_sigstk.ss_flags |= SA_DISABLE;
1307 }
1308 onstack = pstk->ss_flags & SA_ONSTACK;
1309 if (uap->oss) {
1310 if (IS_64BIT_PROCESS(p)) {
1311 struct user64_sigaltstack ss64 = {};
1312 sigaltstack_kern_to_user64(pstk, &ss64);
1313 error = copyout(&ss64, uap->oss, sizeof(ss64));
1314 } else {
1315 struct user32_sigaltstack ss32 = {};
1316 sigaltstack_kern_to_user32(pstk, &ss32);
1317 error = copyout(&ss32, uap->oss, sizeof(ss32));
1318 }
1319 if (error) {
1320 return error;
1321 }
1322 }
1323 if (uap->nss == USER_ADDR_NULL) {
1324 return 0;
1325 }
1326 if (IS_64BIT_PROCESS(p)) {
1327 struct user64_sigaltstack ss64;
1328 error = copyin(uap->nss, &ss64, sizeof(ss64));
1329 sigaltstack_user64_to_kern(&ss64, &ss);
1330 } else {
1331 struct user32_sigaltstack ss32;
1332 error = copyin(uap->nss, &ss32, sizeof(ss32));
1333 sigaltstack_user32_to_kern(&ss32, &ss);
1334 }
1335 if (error) {
1336 return error;
1337 }
1338 if ((ss.ss_flags & ~SA_DISABLE) != 0) {
1339 return EINVAL;
1340 }
1341
1342 if (ss.ss_flags & SA_DISABLE) {
1343 /* if we are here we are not in the signal handler ;so no need to check */
1344 if (uth->uu_sigstk.ss_flags & SA_ONSTACK) {
1345 return EINVAL;
1346 }
1347 uth->uu_flag &= ~UT_ALTSTACK;
1348 uth->uu_sigstk.ss_flags = ss.ss_flags;
1349 return 0;
1350 }
1351 if (onstack) {
1352 return EPERM;
1353 }
1354 /* The older stacksize was 8K, enforce that one so no compat problems */
1355 #define OLDMINSIGSTKSZ 8*1024
1356 if (ss.ss_size < OLDMINSIGSTKSZ) {
1357 return ENOMEM;
1358 }
1359 uth->uu_flag |= UT_ALTSTACK;
1360 uth->uu_sigstk = ss;
1361 return 0;
1362 }
1363
1364 int
kill(proc_t cp,struct kill_args * uap,__unused int32_t * retval)1365 kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
1366 {
1367 proc_t p;
1368 kauth_cred_t uc = kauth_cred_get();
1369 int posix = uap->posix; /* !0 if posix behaviour desired */
1370
1371 AUDIT_ARG(pid, uap->pid);
1372 AUDIT_ARG(signum, uap->signum);
1373
1374 if ((u_int)uap->signum >= NSIG) {
1375 return EINVAL;
1376 }
1377 if (uap->pid > 0) {
1378 /* kill single process */
1379 if ((p = proc_find(uap->pid)) == NULL) {
1380 if ((p = pzfind(uap->pid)) != NULL) {
1381 /*
1382 * POSIX 1003.1-2001 requires returning success when killing a
1383 * zombie; see Rationale for kill(2).
1384 */
1385 return 0;
1386 }
1387 return ESRCH;
1388 }
1389 AUDIT_ARG(process, p);
1390 if (!cansignal(cp, uc, p, uap->signum)) {
1391 proc_rele(p);
1392 return EPERM;
1393 }
1394 if (uap->signum) {
1395 psignal(p, uap->signum);
1396 }
1397 proc_rele(p);
1398 return 0;
1399 }
1400 switch (uap->pid) {
1401 case -1: /* broadcast signal */
1402 return killpg1(cp, uap->signum, 0, 1, posix);
1403 case 0: /* signal own process group */
1404 return killpg1(cp, uap->signum, 0, 0, posix);
1405 default: /* negative explicit process group */
1406 return killpg1(cp, uap->signum, -(uap->pid), 0, posix);
1407 }
1408 /* NOTREACHED */
1409 }
1410
/*
 * Construct an os_reason_t from userspace-supplied exit-reason data:
 * namespace/code, an optional binary payload, an optional description
 * string, and caller-provided flags (filtered to the user-allowed set).
 * Payload and string are copied into the reason's kcdata buffer.
 *
 * Always returns a reason object on success; on copyin/allocation failure
 * the reason is returned anyway with OS_REASON_FLAG_FAILED_DATA_COPYIN set
 * and an empty buffer, so the caller can still terminate with it.  Returns
 * OS_REASON_NULL only if the reason itself cannot be allocated.  The
 * caller owns the returned reference.
 */
os_reason_t
build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
	os_reason_t exit_reason = OS_REASON_NULL;

	int error = 0;
	int num_items_to_copy = 0;
	uint32_t user_data_to_copy = 0;
	char *reason_user_desc = NULL;
	size_t reason_user_desc_len = 0;

	exit_reason = os_reason_create(reason_namespace, reason_code);
	if (exit_reason == OS_REASON_NULL) {
		printf("build_userspace_exit_reason: failed to allocate exit reason\n");
		return exit_reason;
	}

	exit_reason->osr_flags |= OS_REASON_FLAG_FROM_USERSPACE;

	/*
	 * Only apply flags that are allowed to be passed from userspace.
	 */
	exit_reason->osr_flags |= (reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER);
	if ((reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER) != reason_flags) {
		printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n",
		    reason_flags, reason_namespace, reason_code);
	}

	/* Crash reports are generated unless the caller explicitly opted out. */
	if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) {
		exit_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
	}

	if (payload != USER_ADDR_NULL) {
		if (payload_size == 0) {
			printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n",
			    reason_namespace);
			exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS;
			/* Drop the payload; it cannot be sized. */
			payload = USER_ADDR_NULL;
		} else {
			num_items_to_copy++;

			/* Oversized payloads are truncated, not rejected. */
			if (payload_size > EXIT_REASON_PAYLOAD_MAX_LEN) {
				exit_reason->osr_flags |= OS_REASON_FLAG_PAYLOAD_TRUNCATED;
				payload_size = EXIT_REASON_PAYLOAD_MAX_LEN;
			}

			user_data_to_copy += payload_size;
		}
	}

	if (reason_string != USER_ADDR_NULL) {
		reason_user_desc = (char *)kalloc_data(EXIT_REASON_USER_DESC_MAX_LEN, Z_WAITOK);

		/* Allocation failure here silently drops the description. */
		if (reason_user_desc != NULL) {
			error = copyinstr(reason_string, (void *) reason_user_desc,
			    EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len);

			if (error == 0) {
				num_items_to_copy++;
				user_data_to_copy += reason_user_desc_len;
			} else if (error == ENAMETOOLONG) {
				/* String did not fit: keep the truncated prefix, forcibly terminated. */
				num_items_to_copy++;
				reason_user_desc[EXIT_REASON_USER_DESC_MAX_LEN - 1] = '\0';
				user_data_to_copy += reason_user_desc_len;
			} else {
				exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
				kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
				reason_user_desc = NULL;
				reason_user_desc_len = 0;
			}
		}
	}

	if (num_items_to_copy != 0) {
		uint32_t reason_buffer_size_estimate = 0;
		mach_vm_address_t data_addr = 0;

		reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(num_items_to_copy, user_data_to_copy);

		error = os_reason_alloc_buffer(exit_reason, reason_buffer_size_estimate);
		if (error != 0) {
			printf("build_userspace_exit_reason: failed to allocate signal reason buffer\n");
			goto out_failed_copyin;
		}

		if (reason_user_desc != NULL && reason_user_desc_len != 0) {
			if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
			    EXIT_REASON_USER_DESC,
			    (uint32_t)reason_user_desc_len,
			    &data_addr)) {
				kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr,
				    reason_user_desc, (uint32_t)reason_user_desc_len);
			} else {
				printf("build_userspace_exit_reason: failed to allocate space for reason string\n");
				goto out_failed_copyin;
			}
		}

		if (payload != USER_ADDR_NULL) {
			/* Copy the payload straight from userspace into the kcdata buffer. */
			if (KERN_SUCCESS ==
			    kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
			    EXIT_REASON_USER_PAYLOAD,
			    payload_size,
			    &data_addr)) {
				error = copyin(payload, (void *) data_addr, payload_size);
				if (error) {
					printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", error);
					goto out_failed_copyin;
				}
			} else {
				printf("build_userspace_exit_reason: failed to allocate space for payload data\n");
				goto out_failed_copyin;
			}
		}
	}

	if (reason_user_desc != NULL) {
		kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
		reason_user_desc = NULL;
		reason_user_desc_len = 0;
	}

	return exit_reason;

out_failed_copyin:

	if (reason_user_desc != NULL) {
		kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
		reason_user_desc = NULL;
		reason_user_desc_len = 0;
	}

	/* Keep the reason usable, but flag it and hand back an empty buffer. */
	exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
	os_reason_alloc_buffer(exit_reason, 0);
	return exit_reason;
}
1548
/*
 * Common implementation for terminate_with_payload(2): SIGKILL the target
 * pid with a userspace-built exit reason.  Permission is checked as for
 * sending SIGKILL.  If the target is the caller itself, the kill is pended
 * on the current thread and this does not return to userspace.
 */
static int
terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace,
    uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
	proc_t target_proc = PROC_NULL;
	kauth_cred_t cur_cred = kauth_cred_get();

	os_reason_t signal_reason = OS_REASON_NULL;

	AUDIT_ARG(pid, target_pid);
	if ((target_pid <= 0)) {
		return EINVAL;
	}

	target_proc = proc_find(target_pid);
	if (target_proc == PROC_NULL) {
		return ESRCH;
	}

	AUDIT_ARG(process, target_proc);

	if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL)) {
		proc_rele(target_proc);
		return EPERM;
	}

	if (target_pid != proc_getpid(cur_proc)) {
		/*
		 * FLAG_ABORT should only be set on terminate_with_reason(getpid()) that
		 * was a fallback from an unsuccessful abort_with_reason(). In that case
		 * caller's pid matches the target one. Otherwise remove the flag.
		 */
		reason_flags &= ~((typeof(reason_flags))OS_REASON_FLAG_ABORT);
	}

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
	    proc_getpid(target_proc), reason_namespace,
	    reason_code, 0, 0);

	/* The psignal*_with_reason() callee consumes the reason reference. */
	signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
	    reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID));

	if (target_pid == proc_getpid(cur_proc)) {
		/*
		 * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
		 * return if the thread and/or task are already terminating. Either way, the
		 * current thread won't return to userspace.
		 */
		psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason);
	} else {
		psignal_with_reason(target_proc, SIGKILL, signal_reason);
	}

	proc_rele(target_proc);

	return 0;
}
1607
/*
 * terminate_with_payload(2) syscall entry point: unpack the uap arguments
 * and forward to terminate_with_payload_internal().
 */
int
terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args,
    __unused int32_t *retval)
{
	return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload,
	           args->payload_size, args->reason_string, args->reason_flags);
}
1615
1616 static int
killpg1_allfilt(proc_t p,void * arg)1617 killpg1_allfilt(proc_t p, void * arg)
1618 {
1619 struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
1620
1621 /*
1622 * Don't signal initproc, a system process, or the current process if POSIX
1623 * isn't specified.
1624 */
1625 return proc_getpid(p) > 1 && !(p->p_flag & P_SYSTEM) &&
1626 (kfargp->posix ? true : p != kfargp->curproc);
1627 }
1628
/*
 * Per-process iterator body for killpg1(): count the process as "found" if
 * the caller may signal it, and deliver the signal (when signum != 0) to
 * live processes.  Zombies count toward nfound but receive nothing.
 */
static int
killpg1_callback(proc_t p, void *arg)
{
	struct killpg1_iterargs *kargp = (struct killpg1_iterargs *)arg;
	int signum = kargp->signum;

	if (proc_list_exited(p)) {
		/*
		 * Count zombies as found for the purposes of signalling, since POSIX
		 * 1003.1-2001 sees signalling zombies as successful. If killpg(2) or
		 * kill(2) with pid -1 only finds zombies that can be signalled, it
		 * shouldn't return ESRCH. See the Rationale for kill(2).
		 *
		 * Don't call into MAC -- it's not expecting signal checks for exited
		 * processes.
		 */
		if (cansignal_nomac(kargp->curproc, kargp->uc, p, signum)) {
			kargp->nfound++;
		}
	} else if (cansignal(kargp->curproc, kargp->uc, p, signum)) {
		kargp->nfound++;

		/* signum 0 is a permission probe only. */
		if (signum != 0) {
			psignal(p, signum);
		}
	}

	/* Keep iterating over the remaining processes. */
	return PROC_RETURNED;
}
1658
1659 /*
1660 * Common code for kill process group/broadcast kill.
1661 */
1662 int
killpg1(proc_t curproc,int signum,int pgid,int all,int posix)1663 killpg1(proc_t curproc, int signum, int pgid, int all, int posix)
1664 {
1665 kauth_cred_t uc;
1666 struct pgrp *pgrp;
1667 int error = 0;
1668
1669 uc = kauth_cred_proc_ref(curproc);
1670 struct killpg1_iterargs karg = {
1671 .curproc = curproc, .uc = uc, .nfound = 0, .signum = signum
1672 };
1673
1674 if (all) {
1675 /*
1676 * Broadcast to all processes that the user can signal (pid was -1).
1677 */
1678 struct killpg1_filtargs kfarg = {
1679 .posix = posix, .curproc = curproc
1680 };
1681 proc_iterate(PROC_ALLPROCLIST | PROC_ZOMBPROCLIST, killpg1_callback,
1682 &karg, killpg1_allfilt, &kfarg);
1683 } else {
1684 if (pgid == 0) {
1685 /*
1686 * Send to current the current process' process group.
1687 */
1688 pgrp = proc_pgrp(curproc, NULL);
1689 } else {
1690 pgrp = pgrp_find(pgid);
1691 if (pgrp == NULL) {
1692 error = ESRCH;
1693 goto out;
1694 }
1695 }
1696
1697 pgrp_iterate(pgrp, killpg1_callback, &karg, ^bool (proc_t p) {
1698 if (p == kernproc || p == initproc) {
1699 return false;
1700 }
1701 /* XXX shouldn't this allow signalling zombies? */
1702 return !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB;
1703 });
1704 pgrp_rele(pgrp);
1705 }
1706 error = (karg.nfound > 0 ? 0 : (posix ? EPERM : ESRCH));
1707 out:
1708 kauth_cred_unref(&uc);
1709 return error;
1710 }
1711
1712 /*
1713 * Send a signal to a process group.
1714 */
1715 void
gsignal(int pgid,int signum)1716 gsignal(int pgid, int signum)
1717 {
1718 struct pgrp *pgrp;
1719
1720 if (pgid && (pgrp = pgrp_find(pgid))) {
1721 pgsignal(pgrp, signum, 0);
1722 pgrp_rele(pgrp);
1723 }
1724 }
1725
1726 /*
1727 * Send a signal to a process group. If checkctty is 1,
1728 * limit to members which have a controlling terminal.
1729 */
1730
1731 static int
pgsignal_callback(proc_t p,void * arg)1732 pgsignal_callback(proc_t p, void * arg)
1733 {
1734 int signum = *(int*)arg;
1735
1736 psignal(p, signum);
1737 return PROC_RETURNED;
1738 }
1739
/*
 * Deliver signum to every member of pgrp.  When checkctty is non-zero,
 * only members with a controlling terminal (P_CONTROLT) receive it.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	if (pgrp == PGRP_NULL) {
		return;
	}

	/* Block used as the iteration filter when checkctty is requested. */
	bool (^filter)(proc_t) = ^bool (proc_t p) {
		return p->p_flag & P_CONTROLT;
	};

	pgrp_iterate(pgrp, pgsignal_callback, &signum, checkctty ? filter : NULL);
}
1753
1754
/*
 * Signal the foreground process group of a tty.  Called with the tty lock
 * held; the lock is dropped around pgsignal() (which takes other locks)
 * and re-acquired before returning, so callers must tolerate the tty
 * state changing across this call.
 */
void
tty_pgsignal_locked(struct tty *tp, int signum, int checkctty)
{
	struct pgrp * pg;

	pg = tty_pgrp_locked(tp);
	if (pg != PGRP_NULL) {
		tty_unlock(tp);
		pgsignal(pg, signum, checkctty);
		pgrp_rele(pg);
		tty_lock(tp);
	}
}
1768 /*
1769 * Send a signal caused by a trap to a specific thread.
1770 */
1771 void
threadsignal(thread_t sig_actthread,int signum,mach_exception_code_t code,boolean_t set_exitreason)1772 threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
1773 {
1774 struct uthread *uth;
1775 struct task * sig_task;
1776 proc_t p;
1777 int mask;
1778
1779 if ((u_int)signum >= NSIG || signum == 0) {
1780 return;
1781 }
1782
1783 mask = sigmask(signum);
1784 if ((mask & threadmask) == 0) {
1785 return;
1786 }
1787 sig_task = get_threadtask(sig_actthread);
1788 p = (proc_t)(get_bsdtask_info(sig_task));
1789
1790 uth = get_bsdthread_info(sig_actthread);
1791
1792 proc_lock(p);
1793 if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
1794 proc_unlock(p);
1795 return;
1796 }
1797
1798 uth->uu_siglist |= mask;
1799 uth->uu_code = code;
1800
1801 /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
1802 if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask)
1803 && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) &&
1804 !(mask & stopsigmask) && !(mask & contsigmask)) {
1805 if (uth->uu_exit_reason == OS_REASON_NULL) {
1806 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1807 proc_getpid(p), OS_REASON_SIGNAL, signum, 0, 0);
1808
1809 os_reason_t signal_reason = build_signal_reason(signum, "exc handler");
1810
1811 set_thread_exit_reason(sig_actthread, signal_reason, TRUE);
1812
1813 /* We dropped/consumed the reference in set_thread_exit_reason() */
1814 signal_reason = OS_REASON_NULL;
1815 }
1816 }
1817
1818 proc_unlock(p);
1819
1820 /* mark on process as well */
1821 signal_setast(sig_actthread);
1822 }
1823
/* Called with proc locked */
/*
 * Decorate a SIGSEGV exit reason with shared-region information: note when
 * the faulting address lies within the shared region, and trigger a
 * reslide of the stale shared cache where policy allows (ptrauth targets
 * with resliding unrestricted or platform binaries, and always for
 * DriverKit drivers).
 */
static void
set_thread_extra_flags(task_t task, struct uthread *uth, os_reason_t reason)
{
	extern int vm_shared_region_reslide_restrict;
	boolean_t reslide_shared_region = FALSE;
	boolean_t driver = task_is_driver(task);
	assert(uth != NULL);
	/*
	 * Check whether the userland fault address falls within the shared
	 * region and notify userland if so. This allows launchd to apply
	 * special policies around this fault type.
	 */
	if (reason->osr_namespace == OS_REASON_SIGNAL &&
	    reason->osr_code == SIGSEGV) {
		/* uu_subcode carries the faulting address for SIGSEGV. */
		mach_vm_address_t fault_address = uth->uu_subcode;

#if defined(__arm64__)
		/* Address is in userland, so we hard clear TBI bits to 0 here */
		fault_address = tbi_clear(fault_address);
#endif /* __arm64__ */

		if (fault_address >= SHARED_REGION_BASE &&
		    fault_address <= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
			/*
			 * Always report whether the fault happened within the shared cache
			 * region, but only stale the slide if the resliding is extended
			 * to all processes or if the process faulting is a platform one.
			 */
			reason->osr_flags |= OS_REASON_FLAG_SHAREDREGION_FAULT;

#if __has_feature(ptrauth_calls)
			if (!vm_shared_region_reslide_restrict || csproc_get_platform_binary(current_proc())) {
				reslide_shared_region = TRUE;
			}
#endif /* __has_feature(ptrauth_calls) */
		}

		if (driver) {
			/*
			 * Always reslide the DriverKit shared region if the driver faulted.
			 * The memory cost is acceptable because the DriverKit shared cache is small
			 * and there are relatively few driver processes.
			 */
			reslide_shared_region = TRUE;
		}
	}

	if (reslide_shared_region) {
		vm_shared_region_reslide_stale(driver);
	}
}
1876
/*
 * Attach an exit reason to a thread's uthread, consuming the caller's
 * reference: either the uthread takes ownership (first reason wins) or
 * the reference is dropped.  proc_locked tells us whether the caller
 * already holds the proc lock; if not, we take and release it here.
 */
void
set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked)
{
	struct uthread *targ_uth = get_bsdthread_info(th);
	struct task *targ_task = get_threadtask(th);
	proc_t targ_proc = NULL;

	os_reason_t exit_reason = (os_reason_t)reason;

	if (exit_reason == OS_REASON_NULL) {
		return;
	}

	if (!proc_locked) {
		targ_proc = (proc_t)(get_bsdtask_info(targ_task));

		proc_lock(targ_proc);
	}

	set_thread_extra_flags(targ_task, targ_uth, exit_reason);

	if (targ_uth->uu_exit_reason == OS_REASON_NULL) {
		/* Transfer ownership of the reference to the uthread. */
		targ_uth->uu_exit_reason = exit_reason;
	} else {
		/* The caller expects that we drop a reference on the exit reason */
		os_reason_free(exit_reason);
	}

	if (!proc_locked) {
		assert(targ_proc != NULL);
		proc_unlock(targ_proc);
	}
}
1910
1911 /*
1912 * get_signalthread
1913 *
1914 * Picks an appropriate thread from a process to target with a signal.
1915 *
1916 * Called with proc locked.
1917 * Returns thread with BSD ast set.
1918 *
1919 * We attempt to deliver a proc-wide signal to the first thread in the task.
1920 * This allows single threaded applications which use signals to
1921 * be able to be linked with multithreaded libraries.
1922 */
1923 static kern_return_t
get_signalthread(proc_t p,int signum,thread_t * thr)1924 get_signalthread(proc_t p, int signum, thread_t * thr)
1925 {
1926 struct uthread *uth;
1927 sigset_t mask = sigmask(signum);
1928 bool skip_wqthreads = true;
1929
1930 *thr = THREAD_NULL;
1931
1932
1933 again:
1934 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1935 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
1936 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1937 thread_t th = get_machthread(uth);
1938 if (skip_wqthreads && (thread_get_tag(th) & THREAD_TAG_WORKQUEUE)) {
1939 /* Workqueue threads may be parked in the kernel unable to
1940 * deliver signals for an extended period of time, so skip them
1941 * in favor of pthreads in a first pass. (rdar://50054475). */
1942 } else if (check_actforsig(proc_task(p), th, 1) == KERN_SUCCESS) {
1943 *thr = th;
1944 return KERN_SUCCESS;
1945 }
1946 }
1947 }
1948 if (skip_wqthreads) {
1949 skip_wqthreads = false;
1950 goto again;
1951 }
1952 if (get_signalact(proc_task(p), thr, 1) == KERN_SUCCESS) {
1953 return KERN_SUCCESS;
1954 }
1955
1956 return KERN_FAILURE;
1957 }
1958
/*
 * Build an OS_REASON_SIGNAL exit reason for signum, recording the sender's
 * pid and name (procname overrides the current process's p_name when
 * supplied).  Uses the non-blocking buffer allocator, so it is safe from
 * contexts that cannot block; on buffer-allocation failure the reason is
 * returned without kcdata.  Caller owns the returned reference.
 */
static os_reason_t
build_signal_reason(int signum, const char *procname)
{
	os_reason_t signal_reason = OS_REASON_NULL;
	proc_t sender_proc = current_proc();
	uint32_t reason_buffer_size_estimate = 0, proc_name_length = 0;
	const char *default_sender_procname = "unknown";
	mach_vm_address_t data_addr;
	int ret;

	signal_reason = os_reason_create(OS_REASON_SIGNAL, signum);
	if (signal_reason == OS_REASON_NULL) {
		printf("build_signal_reason: unable to allocate signal reason structure.\n");
		return signal_reason;
	}

	/* Two kcdata items: sender pid and sender process name. */
	reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
	    sizeof(pid_t));

	ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate);
	if (ret != 0) {
		printf("build_signal_reason: unable to allocate signal reason buffer.\n");
		return signal_reason;
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID,
	    sizeof(pid_t), &data_addr)) {
		pid_t pid = proc_getpid(sender_proc);
		kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &pid, sizeof(pid));
	} else {
		printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
	}

	proc_name_length = sizeof(sender_proc->p_name);
	if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME,
	    proc_name_length, &data_addr)) {
		if (procname) {
			/* Copy the caller-supplied name, truncated and NUL-terminated. */
			char truncated_procname[proc_name_length];
			strncpy((char *) &truncated_procname, procname, proc_name_length);
			truncated_procname[proc_name_length - 1] = '\0';

			kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname,
			    (uint32_t)strlen((char *) &truncated_procname));
		} else if (*sender_proc->p_name) {
			kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name,
			    sizeof(sender_proc->p_name));
		} else {
			/* Neither an override nor a process name: record "unknown". */
			kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname,
			    (uint32_t)strlen(default_sender_procname) + 1);
		}
	} else {
		printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
	}

	return signal_reason;
}
2015
2016 /*
2017 * Send the signal to the process. If the signal has an action, the action
2018 * is usually performed by the target process rather than the caller; we add
2019 * the signal to the set of pending signals for the process.
2020 *
2021 * Always drops a reference on a signal_reason if one is provided, whether via
2022 * passing it to a thread or deallocating directly.
2023 *
2024 * Exceptions:
2025 * o When a stop signal is sent to a sleeping process that takes the
2026 * default action, the process is stopped without awakening it.
2027 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2028 * regardless of the signal action (eg, blocked or ignored).
2029 *
2030 * Other ignored signals are discarded immediately.
2031 */
/*
 * psignal_internal -- common backend for every psignal* entry point.
 *
 * 'flavor' bits select how the target task/thread are resolved:
 *   PSIG_VFORK      - deliver in vfork context using the supplied task/thread
 *   PSIG_THREAD     - deliver to the specific 'thread' (p/task derived from it)
 *   PSIG_TRY_THREAD - prefer 'thread' if it can take the signal, else any
 *                     willing thread in 'p'
 *   PSIG_LOCKED     - caller has already called proc_signalstart()
 *
 * Always consumes one reference on 'signal_reason': either an extra ref is
 * taken and handed to the thread via set_thread_exit_reason(), or the
 * caller's ref is dropped at sigout_unlocked.
 */
static void
psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason)
{
	int prop;
	user_addr_t action = USER_ADDR_NULL;
	proc_t sig_proc;
	thread_t sig_thread;
	task_t sig_task;
	int mask;
	struct uthread *uth;
	kern_return_t kret;
	uid_t r_uid;
	proc_t pp;
	kauth_cred_t my_cred;
	char *launchd_exit_reason_desc = NULL;
	boolean_t update_thread_policy = FALSE;

	if ((u_int)signum >= NSIG || signum == 0) {
		panic("psignal: bad signal number %d", signum);
	}

	mask = sigmask(signum);
	prop = sigprop[signum];

#if SIGNAL_DEBUG
	if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
		ram_printf(3);
	}
#endif /* SIGNAL_DEBUG */

	/* catch unexpected initproc kills early for easier debugging */
	if (signum == SIGKILL && p == initproc) {
		if (signal_reason == NULL) {
			panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
			    (p->p_name[0] != '\0' ? p->p_name : "initproc"),
			    ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""));
		} else {
			launchd_exit_reason_desc = exit_reason_get_string_desc(signal_reason);
			panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
			    (p->p_name[0] != '\0' ? p->p_name : "initproc"),
			    ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""),
			    signal_reason->osr_namespace, signal_reason->osr_code,
			    launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
		}
	}

	/*
	 * We will need the task pointer later. Grab it now to
	 * check for a zombie process. Also don't send signals
	 * to kernel internal tasks.
	 */
	if (flavor & PSIG_VFORK) {
		sig_task = task;
		sig_thread = thread;
		sig_proc = p;
	} else if (flavor & PSIG_THREAD) {
		sig_task = get_threadtask(thread);
		sig_thread = thread;
		sig_proc = (proc_t)get_bsdtask_info(sig_task);
	} else if (flavor & PSIG_TRY_THREAD) {
		assert((thread == current_thread()) && (p == current_proc()));
		sig_task = proc_task(p);
		sig_thread = thread;
		sig_proc = p;
	} else {
		/* plain psignal(): target thread chosen later by get_signalthread() */
		sig_task = proc_task(p);
		sig_thread = THREAD_NULL;
		sig_proc = p;
	}

	if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) {
		os_reason_free(signal_reason);
		return;
	}

	if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
		proc_knote(sig_proc, NOTE_SIGNAL | signum);
	}

	if ((flavor & PSIG_LOCKED) == 0) {
		proc_signalstart(sig_proc, 0);
	}

	/* Don't send signals to a process that has ignored them. */
	if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		goto sigout_unlocked;
	}

	/*
	 * The proc_lock prevents the targeted thread from being deallocated
	 * or handling the signal until we're done signaling it.
	 *
	 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
	 *
	 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
	 */
	proc_lock(sig_proc);

	/*
	 * Don't send signals to a process which has already exited and thus
	 * committed to a particular p_xstat exit code.
	 * Additionally, don't abort the process running 'reboot'.
	 */
	if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		goto sigout_locked;
	}

	/*
	 * Resolve the delivery thread and arm the BSD AST on it.  A non-success
	 * kret means no thread can currently take the signal and it is dropped.
	 */
	if (flavor & PSIG_VFORK) {
		action = SIG_DFL;
		act_set_astbsd(sig_thread);
		kret = KERN_SUCCESS;
	} else if (flavor & PSIG_TRY_THREAD) {
		uth = get_bsdthread_info(sig_thread);
		if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
		    (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
		    ((kret = check_actforsig(proc_task(sig_proc), sig_thread, 1)) == KERN_SUCCESS)) {
			/* deliver to specified thread */
		} else {
			/* deliver to any willing thread */
			kret = get_signalthread(sig_proc, signum, &sig_thread);
		}
	} else if (flavor & PSIG_THREAD) {
		/* If successful return with ast set */
		kret = check_actforsig(sig_task, sig_thread, 1);
	} else {
		/* If successful return with ast set */
		kret = get_signalthread(sig_proc, signum, &sig_thread);
	}

	if (kret != KERN_SUCCESS) {
		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
		proc_unlock(sig_proc);
		goto sigout_unlocked;
	}

	uth = get_bsdthread_info(sig_thread);

	/*
	 * If proc is traced, always give parent a chance.
	 */

	if ((flavor & PSIG_VFORK) == 0) {
		if (sig_proc->p_lflag & P_LTRACED) {
			action = SIG_DFL;
		} else {
			/*
			 * If the signal is being ignored,
			 * then we forget about it immediately.
			 * (Note: we don't set SIGCONT in p_sigignore,
			 * and if it is set to SIG_IGN,
			 * action will be SIG_DFL here.)
			 */
			if (sig_proc->p_sigignore & mask) {
				goto sigout_locked;
			}

			if (uth->uu_sigwait & mask) {
				action = KERN_SIG_WAIT;
			} else if (uth->uu_sigmask & mask) {
				action = KERN_SIG_HOLD;
			} else if (sig_proc->p_sigcatch & mask) {
				action = KERN_SIG_CATCH;
			} else {
				action = SIG_DFL;
			}
		}
	}

	/* TODO: p_nice isn't hooked up to the scheduler... */
	if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
	    (sig_proc->p_lflag & P_LTRACED) == 0) {
		sig_proc->p_nice = NZERO;
	}

	/* A continue signal cancels any pending stop signals, and vice versa. */
	if (prop & SA_CONT) {
		uth->uu_siglist &= ~stopsigmask;
	}

	if (prop & SA_STOP) {
		struct pgrp *pg;
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		pg = proc_pgrp(sig_proc, NULL);
		if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
		    action == SIG_DFL) {
			pgrp_rele(pg);
			goto sigout_locked;
		}
		pgrp_rele(pg);
		uth->uu_siglist &= ~contsigmask;
	}

	/* Mark the signal pending on the chosen thread. */
	uth->uu_siglist |= mask;

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
		goto sigout_locked;
	}

	/*
	 * SIGKILL priority twiddling moved here from above because
	 * it needs sig_thread. Could merge it into large switch
	 * below if we didn't care about priority for tracing
	 * as SIGKILL's action is always SIG_DFL.
	 *
	 * TODO: p_nice isn't hooked up to the scheduler...
	 */
	if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
		sig_proc->p_nice = NZERO;
	}

	/*
	 * Process is traced - wake it up (if not already
	 * stopped) so that it can discover the signal in
	 * issig() and stop for the parent.
	 */
	if (sig_proc->p_lflag & P_LTRACED) {
		if (sig_proc->p_stat != SSTOP) {
			goto runlocked;
		} else {
			goto sigout_locked;
		}
	}

	if ((flavor & PSIG_VFORK) != 0) {
		goto runlocked;
	}

	if (action == KERN_SIG_WAIT) {
#if CONFIG_DTRACE
		/*
		 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
		 */
		r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */

		bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));

		uth->t_dtrace_siginfo.si_signo = signum;
		uth->t_dtrace_siginfo.si_pid = proc_getpid(current_proc());
		uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
		uth->t_dtrace_siginfo.si_uid = r_uid;
		uth->t_dtrace_siginfo.si_code = 0;
#endif
		/* Thread is in sigwait(); hand it the signal and wake it. */
		uth->uu_sigwait = mask;
		uth->uu_siglist &= ~mask;
		wakeup(&uth->uu_sigwait);
		/* if it is SIGCONT resume whole process */
		if (prop & SA_CONT) {
			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
			sig_proc->p_contproc = proc_getpid(current_proc());
			(void) task_resume_internal(sig_task);
		}
		goto sigout_locked;
	}

	if (action != SIG_DFL) {
		/*
		 * User wants to catch the signal.
		 * Wake up the thread, but don't un-suspend it
		 * (except for SIGCONT).
		 */
		if (prop & SA_CONT) {
			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
			(void) task_resume_internal(sig_task);
			sig_proc->p_stat = SRUN;
		} else if (sig_proc->p_stat == SSTOP) {
			goto sigout_locked;
		}
		/*
		 * Fill out siginfo structure information to pass to the
		 * signalled process/thread sigaction handler, when it
		 * wakes up. si_code is 0 because this is an ordinary
		 * signal, not a SIGCHLD, and so si_status is the signal
		 * number itself, instead of the child process exit status.
		 * We shift this left because it will be shifted right before
		 * it is passed to user space. kind of ugly to use W_EXITCODE
		 * this way, but it beats defining a new macro.
		 *
		 * Note: Avoid the SIGCHLD recursion case!
		 */
		if (signum != SIGCHLD) {
			r_uid = kauth_getruid();

			sig_proc->si_pid = proc_getpid(current_proc());
			sig_proc->si_status = W_EXITCODE(signum, 0);
			sig_proc->si_uid = r_uid;
			sig_proc->si_code = 0;
		}

		goto runlocked;
	} else {
		/* Default action - varies */
		if (mask & stopsigmask) {
			assert(signal_reason == NULL);
			/*
			 * These are the signals which by default
			 * stop a process.
			 *
			 * Don't clog system with children of init
			 * stopped from the keyboard.
			 */
			if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
				uth->uu_siglist &= ~mask;
				proc_unlock(sig_proc);
				/* siglock still locked, proc_lock not locked */
				psignal_locked(sig_proc, SIGKILL);
				goto sigout_unlocked;
			}

			/*
			 * Stop the task
			 * if task hasn't already been stopped by
			 * a signal.
			 */
			uth->uu_siglist &= ~mask;
			if (sig_proc->p_stat != SSTOP) {
				sig_proc->p_xstat = signum;
				sig_proc->p_stat = SSTOP;
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
				sig_proc->p_lflag &= ~P_LWAITED;
				proc_unlock(sig_proc);

				pp = proc_parentholdref(sig_proc);
				stop(sig_proc, pp);
				if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
					/* Notify the parent via SIGCHLD with CLD_STOPPED siginfo. */
					my_cred = kauth_cred_proc_ref(sig_proc);
					r_uid = kauth_cred_getruid(my_cred);
					kauth_cred_unref(&my_cred);

					proc_lock(sig_proc);
					pp->si_pid = proc_getpid(sig_proc);
					/*
					 * POSIX: sigaction for a stopped child
					 * when sent to the parent must set the
					 * child's signal number into si_status.
					 */
					if (signum != SIGSTOP) {
						pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
					} else {
						pp->si_status = W_EXITCODE(signum, signum);
					}
					pp->si_code = CLD_STOPPED;
					pp->si_uid = r_uid;
					proc_unlock(sig_proc);

					psignal(pp, SIGCHLD);
				}
				if (pp != PROC_NULL) {
					proc_parentdropref(pp, 0);
				}

				goto sigout_unlocked;
			}

			goto sigout_locked;
		}

		/* NOTE(review): probe passes 'p' (PROC_NULL for PSIG_THREAD) rather than sig_proc -- confirm intentional */
		DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);

		switch (signum) {
		/*
		 * Signals ignored by default have been dealt
		 * with already, since their bits are on in
		 * p_sigignore.
		 */

		case SIGKILL:
			/*
			 * Kill signal always sets process running and
			 * unsuspends it.
			 */
			/*
			 * Process will be running after 'run'
			 */
			sig_proc->p_stat = SRUN;
			/*
			 * In scenarios where suspend/resume are racing
			 * the signal we are missing AST_BSD by the time
			 * we get here, set again to avoid races. This
			 * was the scenario with spindump enabled shutdowns.
			 * We would need to cover this approp down the line.
			 */
			act_set_astbsd(sig_thread);
			kret = thread_abort(sig_thread);
			/* Only boost the thread's policy if the abort took effect. */
			update_thread_policy = (kret == KERN_SUCCESS);

			if (uth->uu_exit_reason == OS_REASON_NULL) {
				if (signal_reason == OS_REASON_NULL) {
					KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
					    proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);

					signal_reason = build_signal_reason(signum, NULL);
				}

				/* Extra ref for the thread; our ref is dropped at sigout_unlocked. */
				os_reason_ref(signal_reason);
				set_thread_exit_reason(sig_thread, signal_reason, TRUE);
			}

			goto sigout_locked;

		case SIGCONT:
			/*
			 * Let the process run. If it's sleeping on an
			 * event, it remains so.
			 */
			assert(signal_reason == NULL);
			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
			sig_proc->p_contproc = proc_getpid(sig_proc);
			sig_proc->p_xstat = signum;

			(void) task_resume_internal(sig_task);

			/*
			 * When processing a SIGCONT, we need to check
			 * to see if there are signals pending that
			 * were not delivered because we had been
			 * previously stopped. If that's the case,
			 * we need to thread_abort_safely() to trigger
			 * interruption of the current system call to
			 * cause their handlers to fire. If it's only
			 * the SIGCONT, then don't wake up.
			 */
			if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
				uth->uu_siglist &= ~mask;
				sig_proc->p_stat = SRUN;
				goto runlocked;
			}

			uth->uu_siglist &= ~mask;
			sig_proc->p_stat = SRUN;
			goto sigout_locked;

		default:
			/*
			 * A signal which has a default action of killing
			 * the process, and for which there is no handler,
			 * needs to act like SIGKILL
			 */
			if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
				sig_proc->p_stat = SRUN;
				kret = thread_abort(sig_thread);
				update_thread_policy = (kret == KERN_SUCCESS);

				if (uth->uu_exit_reason == OS_REASON_NULL) {
					if (signal_reason == OS_REASON_NULL) {
						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);

						signal_reason = build_signal_reason(signum, NULL);
					}

					os_reason_ref(signal_reason);
					set_thread_exit_reason(sig_thread, signal_reason, TRUE);
				}

				goto sigout_locked;
			}

			/*
			 * All other signals wake up the process, but don't
			 * resume it.
			 */
			if (sig_proc->p_stat == SSTOP) {
				goto sigout_locked;
			}
			goto runlocked;
		}
	}
	/*NOTREACHED*/

runlocked:
	/*
	 * If we're being traced (possibly because someone attached us
	 * while we were stopped), check for a signal from the debugger.
	 */
	if (sig_proc->p_stat == SSTOP) {
		if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) {
			uth->uu_siglist |= sigmask(sig_proc->p_xstat);
		}

		if ((flavor & PSIG_VFORK) != 0) {
			sig_proc->p_stat = SRUN;
		}
	} else {
		/*
		 * setrunnable(p) in BSD and
		 * Wake up the thread if it is interruptible.
		 */
		sig_proc->p_stat = SRUN;
		if ((flavor & PSIG_VFORK) == 0) {
			thread_abort_safely(sig_thread);
		}
	}

sigout_locked:
	if (update_thread_policy) {
		/*
		 * Update the thread policy to heading to terminate, increase priority if
		 * necessary. This needs to be done before we drop the proc lock because the
		 * thread can take the fatal signal once it's dropped.
		 */
		proc_set_thread_policy(sig_thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
	}

	proc_unlock(sig_proc);

sigout_unlocked:
	/* Drop the caller's reference on the reason (no-op for NULL). */
	os_reason_free(signal_reason);
	if ((flavor & PSIG_LOCKED) == 0) {
		proc_signalend(sig_proc, 0);
	}
}
2553
/* Post 'signum' to process 'p'; delivery thread chosen internally, no exit reason. */
void
psignal(proc_t p, int signum)
{
	psignal_internal(p, NULL, NULL, 0, signum, NULL);
}
2559
/* Like psignal(), but attaches an exit reason; the reference is consumed. */
void
psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, NULL, NULL, 0, signum, signal_reason);
}
2565
/* Convenience wrapper: SIGKILL with an exit reason; the reference is consumed. */
void
psignal_sigkill_with_reason(struct proc *p, struct os_reason *signal_reason)
{
	psignal_internal(p, NULL, NULL, 0, SIGKILL, signal_reason);
}
2571
/* Like psignal(), for callers that have already done proc_signalstart(). */
void
psignal_locked(proc_t p, int signum)
{
	psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum, NULL);
}
2577
/* Post a signal in vfork context with an exit reason; the reference is consumed. */
void
psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, new_task, thread, PSIG_VFORK, signum, signal_reason);
}
2583
/* Post a signal in vfork context (explicit task/thread), no exit reason. */
void
psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
{
	psignal_internal(p, new_task, thread, PSIG_VFORK, signum, NULL);
}
2589
/* Post a signal directly to 'thread'; proc/task are derived from the thread. */
void
psignal_uthread(thread_t thread, int signum)
{
	psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum, NULL);
}
2595
/* same as psignal(), but prefer delivery to 'thread' if possible */
void
psignal_try_thread(proc_t p, thread_t thread, int signum)
{
	psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum, NULL);
}
2602
/* Like psignal_try_thread(), with an exit reason; the reference is consumed. */
void
psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
}
2608
/* Post a signal to a specific thread with an exit reason; the reference is consumed. */
void
psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason);
}
2614
2615 /*
2616 * If the current process has received a signal (should be caught or cause
2617 * termination, should interrupt current syscall), return the signal number.
2618 * Stop signals with default action are processed immediately, then cleared;
2619 * they aren't returned. This is checked after each entry to the system for
2620 * a syscall or trap (though this can usually be done without calling issignal
2621 * by checking the pending signal masks in the CURSIG macro.) The normal call
2622 * sequence is
2623 *
2624 * while (signum = CURSIG(curproc))
2625 * postsig(signum);
2626 */
/*
 * issignal_locked -- scan the current thread's pending signals and return
 * the first one that requires action (deliver), or 0 if none.  Called with
 * the proc lock held; may drop and retake it (and block) while interacting
 * with a debugger.  Stop signals with default action are handled here and
 * cleared rather than returned.
 */
int
issignal_locked(proc_t p)
{
	int signum, mask, prop, sigbits;
	thread_t cur_act;
	struct uthread * ut;
	proc_t pp;
	kauth_cred_t my_cred;
	int retval = 0;
	uid_t r_uid;

	cur_act = current_thread();

#if SIGNAL_DEBUG
	if (rdebug_proc && (p == rdebug_proc)) {
		ram_printf(3);
	}
#endif /* SIGNAL_DEBUG */

	/*
	 * Try to grab the signal lock.
	 */
	if (sig_try_locked(p) <= 0) {
		return 0;
	}

	proc_signalstart(p, 1);

	ut = get_bsdthread_info(cur_act);
	for (;;) {
		/* Pending signals not blocked by the thread's mask. */
		sigbits = ut->uu_siglist & ~ut->uu_sigmask;

		if (p->p_lflag & P_LPPWAIT) {
			sigbits &= ~stopsigmask;
		}
		if (sigbits == 0) { /* no signal to send */
			retval = 0;
			goto out;
		}

		/* Take the lowest-numbered pending signal first. */
		signum = ffs((unsigned int)sigbits);
		mask = sigmask(signum);
		prop = sigprop[signum];

		/*
		 * We should see pending but ignored signals
		 * only if P_LTRACED was on when they were posted.
		 */
		if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
			ut->uu_siglist &= ~mask;
			continue;
		}

		if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
			/*
			 * If traced, deliver the signal to the debugger, and wait to be
			 * released.
			 */
			task_t task;
			p->p_xstat = signum;

			if (p->p_lflag & P_LSIGEXC) {
				/* Debugger attached via Mach exceptions: raise EXC_SOFTWARE. */
				p->sigwait = TRUE;
				p->sigwait_thread = cur_act;
				p->p_stat = SSTOP;
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
				p->p_lflag &= ~P_LWAITED;
				ut->uu_siglist &= ~mask; /* clear the current signal from the pending list */
				proc_signalend(p, 1);
				proc_unlock(p);
				do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
				proc_lock(p);
				proc_signalstart(p, 1);
			} else {
				/* Classic ptrace-style stop: suspend the task, tell the parent. */
				proc_unlock(p);
				my_cred = kauth_cred_proc_ref(p);
				r_uid = kauth_cred_getruid(my_cred);
				kauth_cred_unref(&my_cred);

				pp = proc_parentholdref(p);
				if (pp != PROC_NULL) {
					proc_lock(pp);

					pp->si_pid = proc_getpid(p);
					pp->p_xhighbits = p->p_xhighbits;
					p->p_xhighbits = 0;
					pp->si_status = p->p_xstat;
					pp->si_code = CLD_TRAPPED;
					pp->si_uid = r_uid;

					proc_unlock(pp);
				}

				/*
				 * XXX Have to really stop for debuggers;
				 * XXX stop() doesn't do the right thing.
				 */
				task = proc_task(p);
				task_suspend_internal(task);

				proc_lock(p);
				p->sigwait = TRUE;
				p->sigwait_thread = cur_act;
				p->p_stat = SSTOP;
				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
				p->p_lflag &= ~P_LWAITED;
				ut->uu_siglist &= ~mask;

				proc_signalend(p, 1);
				proc_unlock(p);

				if (pp != PROC_NULL) {
					psignal(pp, SIGCHLD);
					proc_list_lock();
					wakeup((caddr_t)pp);
					proc_parentdropref(pp, 1);
					proc_list_unlock();
				}

				/* Block until the debugger releases us via sigwait. */
				assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
				thread_block(THREAD_CONTINUE_NULL);
				proc_lock(p);
				proc_signalstart(p, 1);
			}

			p->sigwait = FALSE;
			p->sigwait_thread = NULL;
			wakeup((caddr_t)&p->sigwait_thread);

			if (signum == SIGKILL || ut->uu_siglist & sigmask(SIGKILL)) {
				/*
				 * Deliver a pending sigkill even if it's not the current signal.
				 * Necessary for PT_KILL, which should not be delivered to the
				 * debugger, but we can't differentiate it from any other KILL.
				 */
				signum = SIGKILL;
				goto deliver_sig;
			}

			/* We may have to quit. */
			if (thread_should_abort(current_thread())) {
				retval = 0;
				goto out;
			}

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			signum = p->p_xstat;
			if (signum == 0) {
				continue;
			}

			/*
			 * Put the new signal into p_siglist. If the
			 * signal is being masked, look for other signals.
			 */
			mask = sigmask(signum);
			ut->uu_siglist |= mask;
			if (ut->uu_sigmask & mask) {
				continue;
			}
		}

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */

		switch ((long)SIGACTION(p, signum)) {
		case (long)SIG_DFL:
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal. However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				struct pgrp * pg;

				proc_unlock(p);
				pg = proc_pgrp(p, NULL);
				if (p->p_lflag & P_LTRACED ||
				    (pg->pg_jobc == 0 &&
				    prop & SA_TTYSTOP)) {
					proc_lock(p);
					pgrp_rele(pg);
					break; /* ignore signal */
				}
				pgrp_rele(pg);
				if (p->p_stat != SSTOP) {
					proc_lock(p);
					p->p_xstat = signum;
					p->p_stat = SSTOP;
					p->p_lflag &= ~P_LWAITED;
					proc_unlock(p);

					pp = proc_parentholdref(p);
					stop(p, pp);
					if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
						/* Notify the parent with CLD_STOPPED siginfo. */
						my_cred = kauth_cred_proc_ref(p);
						r_uid = kauth_cred_getruid(my_cred);
						kauth_cred_unref(&my_cred);

						proc_lock(pp);
						pp->si_pid = proc_getpid(p);
						pp->si_status = WEXITSTATUS(p->p_xstat);
						pp->si_code = CLD_STOPPED;
						pp->si_uid = r_uid;
						proc_unlock(pp);

						psignal(pp, SIGCHLD);
					}
					if (pp != PROC_NULL) {
						proc_parentdropref(pp, 0);
					}
				}
				proc_lock(p);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break; /* ignore signal */
			} else {
				goto deliver_sig;
			}

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_lflag & P_LTRACED) == 0) {
				printf("issignal\n");
			}
			break; /* ignore signal */

		default:
			/* This signal has an action - deliver it. */
			goto deliver_sig;
		}

		/* If we dropped through, the signal was ignored - remove it from pending list. */
		ut->uu_siglist &= ~mask;
	} /* for(;;) */

	/* NOTREACHED */

deliver_sig:
	ut->uu_siglist &= ~mask;
	retval = signum;

out:
	proc_signalend(p, 1);
	return retval;
}
2891
/* called from _sleep */
/*
 * CURSIG -- non-destructive variant of issignal_locked(): report the first
 * pending, unmasked signal that would require action, without clearing it
 * or changing process state.  Returns 0 when nothing is deliverable.
 */
int
CURSIG(proc_t p)
{
	int signum, mask, prop, sigbits;
	thread_t cur_act;
	struct uthread * ut;
	int retnum = 0;


	cur_act = current_thread();

	ut = get_bsdthread_info(cur_act);

	/* Fast paths: nothing pending, or everything pending is masked. */
	if (ut->uu_siglist == 0) {
		return 0;
	}

	if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) {
		return 0;
	}

	sigbits = ut->uu_siglist & ~ut->uu_sigmask;

	for (;;) {
		if (p->p_lflag & P_LPPWAIT) {
			sigbits &= ~stopsigmask;
		}
		if (sigbits == 0) { /* no signal to send */
			return retnum;
		}

		signum = ffs((unsigned int)sigbits);
		mask = sigmask(signum);
		prop = sigprop[signum];
		sigbits &= ~mask; /* take the signal out */

		/*
		 * We should see pending but ignored signals
		 * only if P_LTRACED was on when they were posted.
		 */
		if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
			continue;
		}

		/* Traced processes report every signal so the debugger can see it. */
		if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
			return signum;
		}

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */

		switch ((long)SIGACTION(p, signum)) {
		case (long)SIG_DFL:
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal. However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				struct pgrp *pg;

				pg = proc_pgrp(p, NULL);

				if (p->p_lflag & P_LTRACED ||
				    (pg->pg_jobc == 0 &&
				    prop & SA_TTYSTOP)) {
					pgrp_rele(pg);
					break; /* == ignore */
				}
				pgrp_rele(pg);
				retnum = signum;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break; /* == ignore */
			} else {
				return signum;
			}
			/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_lflag & P_LTRACED) == 0) {
				printf("issignal\n");
			}
			break; /* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return signum;
		}
	}
	/* NOTREACHED */
}
3003
3004 /*
3005 * Put the argument process into the stopped state and notify the parent
3006 * via wakeup. Signals are handled elsewhere. The process must not be
3007 * on the run queue.
3008 */
/*
 * Put the argument process into the stopped state (clearing P_CONTINUED and
 * suspending its task) and wake the parent so it can observe the stop in
 * wait*().  Signals themselves are handled elsewhere; the process must not
 * be on the run queue.
 */
static void
stop(proc_t p, proc_t parent)
{
	OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
	/* Only wake the parent if it is itself able to run and reap the stop. */
	if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
		proc_list_lock();
		wakeup((caddr_t)parent);
		proc_list_unlock();
	}
	(void) task_suspend_internal(proc_task(p));
}
3020
3021 /*
3022 * Take the action for the specified signal
3023 * from the current set of pending signals.
3024 */
/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Called with the proc lock held for the current process.  Either kills the
 * process (default action, possibly dumping core first) or dispatches to the
 * user's handler via sendsig(), applying the sigaction mask/reset semantics.
 */
void
postsig_locked(int signum)
{
	proc_t p = current_proc();
	struct sigacts *ps = &p->p_sigacts;
	user_addr_t catcher;
	uint32_t code;
	int mask, returnmask;
	struct uthread * ut;
	os_reason_t ut_exit_reason = OS_REASON_NULL;
	int coredump_flags = 0;

#if DIAGNOSTIC
	if (signum == 0) {
		panic("postsig");
	}
	/*
	 * This must be called on master cpu
	 */
	if (cpu_number() != master_cpu) {
		panic("psig not on master");
	}
#endif

	/*
	 * Try to grab the signal lock.
	 */
	if (sig_try_locked(p) <= 0) {
		return;
	}

	proc_signalstart(p, 1);

	ut = current_uthread();
	mask = sigmask(signum);
	/* The signal is now being acted on; clear it from the pending set. */
	ut->uu_siglist &= ~mask;
	catcher = SIGACTION(p, signum);
	if (catcher == SIG_DFL) {
		/*
		 * Default catcher, where the default is to kill
		 * the process. (Other cases were ignored above.)
		 */

		/*
		 * exit_with_reason() below will consume a reference to the thread's exit reason, so we take another
		 * reference so the thread still has one even after we call exit_with_reason(). The thread's reference will
		 * ultimately be destroyed in uthread_cleanup().
		 */
		ut_exit_reason = ut->uu_exit_reason;
		os_reason_ref(ut_exit_reason);

		p->p_acflag |= AXSIG;
		if (sigprop[signum] & SA_CORE) {
			/* Core-dumping signal: attempt the dump before exiting. */
			p->p_sigacts.ps_sig = signum;
			proc_signalend(p, 1);
			proc_unlock(p);
			if (task_is_driver(proc_task(p))) {
				coredump_flags |= COREDUMP_FULLFSYNC;
			}
#if CONFIG_COREDUMP
			if (coredump(p, 0, coredump_flags) == 0) {
				signum |= WCOREFLAG;
			}
#endif
		} else {
			proc_signalend(p, 1);
			proc_unlock(p);
		}

#if CONFIG_DTRACE
		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

		ut->t_dtrace_siginfo.si_signo = signum;
		ut->t_dtrace_siginfo.si_pid = p->si_pid;
		ut->t_dtrace_siginfo.si_uid = p->si_uid;
		ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);

		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
		switch (signum) {
		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
			break;
		default:
			break;
		}


		DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
		    void (*)(void), SIG_DFL);
#endif

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
		    proc_getpid(p), W_EXITCODE(0, signum), 3, 0, 0);

		exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut_exit_reason);

		proc_lock(p);
		return;
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#if DIAGNOSTIC
		if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) {
			log(LOG_WARNING,
			    "postsig: processing masked or ignored signal\n");
		}
#endif

		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause. Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (ut->uu_flag & UT_SAS_OLDMASK) {
			returnmask = ut->uu_oldmask;
			ut->uu_flag &= ~UT_SAS_OLDMASK;
			ut->uu_oldmask = 0;
		} else {
			returnmask = ut->uu_sigmask;
		}
		ut->uu_sigmask |= ps->ps_catchmask[signum];
		if ((ps->ps_signodefer & mask) == 0) {
			ut->uu_sigmask |= mask;
		}
		sigset_t siginfo = ps->ps_siginfo;
		/* SA_RESETHAND semantics: one-shot handlers revert to SIG_DFL. */
		if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
			if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) {
				p->p_sigignore |= mask;
			}
			if (SIGACTION(p, signum) != SIG_DFL) {
				proc_set_sigact(p, signum, SIG_DFL);
			}
			ps->ps_siginfo &= ~mask;
			ps->ps_signodefer &= ~mask;
		}

		/* ps_code is single-use: consume it only if it matches this signal. */
		if (ps->ps_sig != signum) {
			code = 0;
		} else {
			code = ps->ps_code;
			ps->ps_code = 0;
		}
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
		sendsig(p, catcher, signum, returnmask, code, siginfo);
	}
	proc_signalend(p, 1);
}
3177
3178 /*
3179 * Attach a signal knote to the list of knotes for this process.
3180 *
3181 * Signal knotes share the knote list with proc knotes. This
3182 * could be avoided by using a signal-specific knote list, but
3183 * probably isn't worth the trouble.
3184 */
3185
3186 static int
filt_sigattach(struct knote * kn,__unused struct kevent_qos_s * kev)3187 filt_sigattach(struct knote *kn, __unused struct kevent_qos_s *kev)
3188 {
3189 proc_t p = current_proc(); /* can attach only to oneself */
3190
3191 proc_klist_lock();
3192
3193 kn->kn_proc = p;
3194 kn->kn_flags |= EV_CLEAR; /* automatically set */
3195 kn->kn_sdata = 0; /* incoming data is ignored */
3196
3197 KNOTE_ATTACH(&p->p_klist, kn);
3198
3199 proc_klist_unlock();
3200
3201 /* edge-triggered events can't have fired before we attached */
3202 return 0;
3203 }
3204
3205 /*
3206 * remove the knote from the process list, if it hasn't already
3207 * been removed by exit processing.
3208 */
3209
3210 static void
filt_sigdetach(struct knote * kn)3211 filt_sigdetach(struct knote *kn)
3212 {
3213 proc_t p;
3214
3215 proc_klist_lock();
3216 p = kn->kn_proc;
3217 if (p != NULL) {
3218 kn->kn_proc = NULL;
3219 KNOTE_DETACH(&p->p_klist, kn);
3220 }
3221 proc_klist_unlock();
3222 }
3223
3224 /*
3225 * Post an event to the signal filter. Because we share the same list
3226 * as process knotes, we have to filter out and handle only signal events.
3227 *
3228 * We assume that we process fdt_invalidate() before we post the NOTE_EXIT for
3229 * a process during exit. Therefore, since signal filters can only be
3230 * set up "in-process", we should have already torn down the kqueue
3231 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3232 */
3233 static int
filt_signal(struct knote * kn,long hint)3234 filt_signal(struct knote *kn, long hint)
3235 {
3236 if (hint & NOTE_SIGNAL) {
3237 hint &= ~NOTE_SIGNAL;
3238
3239 if (kn->kn_id == (unsigned int)hint) {
3240 kn->kn_hook32++;
3241 }
3242 } else if (hint & NOTE_EXIT) {
3243 panic("filt_signal: detected NOTE_EXIT event");
3244 }
3245
3246 return kn->kn_hook32 != 0;
3247 }
3248
3249 static int
filt_signaltouch(struct knote * kn,struct kevent_qos_s * kev)3250 filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev)
3251 {
3252 #pragma unused(kev)
3253
3254 int res;
3255
3256 proc_klist_lock();
3257
3258 /*
3259 * No data to save - just capture if it is already fired
3260 */
3261 res = (kn->kn_hook32 > 0);
3262
3263 proc_klist_unlock();
3264
3265 return res;
3266 }
3267
3268 static int
filt_signalprocess(struct knote * kn,struct kevent_qos_s * kev)3269 filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev)
3270 {
3271 int res = 0;
3272
3273 /*
3274 * Snapshot the event data.
3275 */
3276
3277 proc_klist_lock();
3278 if (kn->kn_hook32) {
3279 knote_fill_kevent(kn, kev, kn->kn_hook32);
3280 kn->kn_hook32 = 0;
3281 res = 1;
3282 }
3283 proc_klist_unlock();
3284 return res;
3285 }
3286
3287 void
bsd_ast(thread_t thread)3288 bsd_ast(thread_t thread)
3289 {
3290 proc_t p = current_proc();
3291 struct uthread *ut = get_bsdthread_info(thread);
3292 int signum;
3293 static int bsd_init_done = 0;
3294
3295 if (p == NULL) {
3296 return;
3297 }
3298
3299 if (timerisset(&p->p_vtimer_user.it_value)) {
3300 uint32_t microsecs;
3301
3302 task_vtimer_update(proc_task(p), TASK_VTIMER_USER, µsecs);
3303
3304 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
3305 if (timerisset(&p->p_vtimer_user.it_value)) {
3306 task_vtimer_set(proc_task(p), TASK_VTIMER_USER);
3307 } else {
3308 task_vtimer_clear(proc_task(p), TASK_VTIMER_USER);
3309 }
3310
3311 psignal_try_thread(p, thread, SIGVTALRM);
3312 }
3313 }
3314
3315 if (timerisset(&p->p_vtimer_prof.it_value)) {
3316 uint32_t microsecs;
3317
3318 task_vtimer_update(proc_task(p), TASK_VTIMER_PROF, µsecs);
3319
3320 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
3321 if (timerisset(&p->p_vtimer_prof.it_value)) {
3322 task_vtimer_set(proc_task(p), TASK_VTIMER_PROF);
3323 } else {
3324 task_vtimer_clear(proc_task(p), TASK_VTIMER_PROF);
3325 }
3326
3327 psignal_try_thread(p, thread, SIGPROF);
3328 }
3329 }
3330
3331 if (timerisset(&p->p_rlim_cpu)) {
3332 struct timeval tv;
3333
3334 task_vtimer_update(proc_task(p), TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
3335
3336 proc_spinlock(p);
3337 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
3338 tv.tv_sec = 0;
3339 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
3340 proc_spinunlock(p);
3341 } else {
3342 timerclear(&p->p_rlim_cpu);
3343 proc_spinunlock(p);
3344
3345 task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
3346
3347 psignal_try_thread(p, thread, SIGXCPU);
3348 }
3349 }
3350
3351 #if CONFIG_DTRACE
3352 if (ut->t_dtrace_sig) {
3353 uint8_t dt_action_sig = ut->t_dtrace_sig;
3354 ut->t_dtrace_sig = 0;
3355 psignal(p, dt_action_sig);
3356 }
3357
3358 if (ut->t_dtrace_stop) {
3359 ut->t_dtrace_stop = 0;
3360 proc_lock(p);
3361 p->p_dtrace_stop = 1;
3362 proc_unlock(p);
3363 (void)task_suspend_internal(proc_task(p));
3364 }
3365
3366 if (ut->t_dtrace_resumepid) {
3367 proc_t resumeproc = proc_find((int)ut->t_dtrace_resumepid);
3368 ut->t_dtrace_resumepid = 0;
3369 if (resumeproc != PROC_NULL) {
3370 proc_lock(resumeproc);
3371 /* We only act on processes stopped by dtrace */
3372 if (resumeproc->p_dtrace_stop) {
3373 resumeproc->p_dtrace_stop = 0;
3374 proc_unlock(resumeproc);
3375 task_resume_internal(proc_task(resumeproc));
3376 } else {
3377 proc_unlock(resumeproc);
3378 }
3379 proc_rele(resumeproc);
3380 }
3381 }
3382
3383 #endif /* CONFIG_DTRACE */
3384
3385 proc_lock(p);
3386 if (CHECK_SIGNALS(p, current_thread(), ut)) {
3387 while ((signum = issignal_locked(p))) {
3388 postsig_locked(signum);
3389 }
3390 }
3391 proc_unlock(p);
3392
3393 if (!bsd_init_done) {
3394 bsd_init_done = 1;
3395 bsdinit_task();
3396 }
3397 }
3398
3399 /* ptrace set runnable */
/*
 * Mark a traced process runnable again (p_stat = SRUN) and wake any
 * thread sleeping on p->sigwait.  Unless the process delivers signals
 * via Mach exceptions (P_LSIGEXC), also task_release() the task.
 * Only acts on processes with P_LTRACED set.
 */
void
pt_setrunnable(proc_t p)
{
	task_t task;

	task = proc_task(p);

	/* NOTE(review): p_lflag is read here without the proc lock -- confirm callers' locking */
	if (p->p_lflag & P_LTRACED) {
		proc_lock(p);
		p->p_stat = SRUN;
		proc_unlock(p);
		/* NOTE(review): sigwait is also checked after dropping the lock */
		if (p->sigwait) {
			wakeup((caddr_t)&(p->sigwait));
			if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
				task_release(task);
			}
		}
	}
}
3419
3420 kern_return_t
do_bsdexception(int exc,int code,int sub)3421 do_bsdexception(
3422 int exc,
3423 int code,
3424 int sub)
3425 {
3426 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3427
3428 codes[0] = code;
3429 codes[1] = sub;
3430 return bsd_exception(exc, codes, 2);
3431 }
3432
3433 int
proc_pendingsignals(proc_t p,sigset_t mask)3434 proc_pendingsignals(proc_t p, sigset_t mask)
3435 {
3436 struct uthread * uth;
3437 sigset_t bits = 0;
3438
3439 proc_lock(p);
3440 /* If the process is in proc exit return no signal info */
3441 if (p->p_lflag & P_LPEXIT) {
3442 goto out;
3443 }
3444
3445
3446 bits = 0;
3447 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3448 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3449 }
3450 out:
3451 proc_unlock(p);
3452 return bits;
3453 }
3454
3455 int
thread_issignal(proc_t p,thread_t th,sigset_t mask)3456 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3457 {
3458 struct uthread * uth;
3459 sigset_t bits = 0;
3460
3461 proc_lock(p);
3462 uth = (struct uthread *)get_bsdthread_info(th);
3463 if (uth) {
3464 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3465 }
3466 proc_unlock(p);
3467 return bits;
3468 }
3469
3470 /*
3471 * Allow external reads of the sigprop array.
3472 */
3473 int
hassigprop(int sig,int prop)3474 hassigprop(int sig, int prop)
3475 {
3476 return sigprop[sig] & prop;
3477 }
3478
3479 void
pgsigio(pid_t pgid,int sig)3480 pgsigio(pid_t pgid, int sig)
3481 {
3482 proc_t p = PROC_NULL;
3483
3484 if (pgid < 0) {
3485 gsignal(-(pgid), sig);
3486 } else if (pgid > 0 && (p = proc_find(pgid)) != 0) {
3487 psignal(p, sig);
3488 }
3489 if (p != PROC_NULL) {
3490 proc_rele(p);
3491 }
3492 }
3493
/*
 * Acquire the per-process "in signal" state (P_LINSIGNAL), sleeping
 * until any other thread currently signalling the process finishes.
 * Pairs with proc_signalend().  'locked' tells whether the caller
 * already holds the proc lock; if not, it is taken and dropped here.
 */
void
proc_signalstart(proc_t p, int locked)
{
	if (!locked) {
		proc_lock(p);
	}

	/* re-entry by the current holder would deadlock in the wait below */
	if (p->p_signalholder == current_thread()) {
		panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
	}

	/* wait (counted, so proc_signalend knows to wake us) until free */
	p->p_sigwaitcnt++;
	while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) {
		msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
	}
	p->p_sigwaitcnt--;

	p->p_lflag |= P_LINSIGNAL;
	p->p_signalholder = current_thread();
	if (!locked) {
		proc_unlock(p);
	}
}
3517
3518 void
proc_signalend(proc_t p,int locked)3519 proc_signalend(proc_t p, int locked)
3520 {
3521 if (!locked) {
3522 proc_lock(p);
3523 }
3524 p->p_lflag &= ~P_LINSIGNAL;
3525
3526 if (p->p_sigwaitcnt > 0) {
3527 wakeup(&p->p_sigmask);
3528 }
3529
3530 p->p_signalholder = NULL;
3531 if (!locked) {
3532 proc_unlock(p);
3533 }
3534 }
3535
/*
 * Claim process exit for the current thread: record it as the exit
 * thread, then hold and wait the task so other threads quiesce.
 * Called and returns with the proc lock held; the lock is dropped
 * across the (blocking) task_hold/task_wait calls.
 */
void
sig_lock_to_exit(proc_t p)
{
	thread_t self = current_thread();

	p->exit_thread = self;
	proc_unlock(p);

	task_hold(proc_task(p));
	task_wait(proc_task(p), FALSE);

	proc_lock(p);
}
3549
/*
 * Wait until no thread is in sigwait and no thread has claimed exit.
 * Called with the proc lock held.
 *
 * Returns:
 *   1  acquired; proc lock held
 *   0  another thread is exiting the process; proc lock held
 *  -1  current thread was aborted while waiting; proc lock held
 */
int
sig_try_locked(proc_t p)
{
	thread_t self = current_thread();

	while (p->sigwait || p->exit_thread) {
		if (p->exit_thread) {
			return 0;
		}
		/* PDROP: msleep returns with the proc lock released */
		msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
		if (thread_should_abort(self)) {
			/*
			 * Terminate request - clean up.
			 */
			proc_lock(p);
			return -1;
		}
		/* re-take the lock before re-testing the loop condition */
		proc_lock(p);
	}
	return 1;
}
3571