/*
 * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_sig.c    8.7 (Berkeley) 4/18/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define SIGPROP         /* include signal properties table */
#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>
#include <sys/reason.h>

#include <sys/mount.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <kern/cpu_number.h>

#include <sys/vm.h>
#include <sys/user.h>           /* for coredump */
#include <kern/ast.h>           /* for APC support */
#include <kern/kalloc.h>
#include <kern/task.h>          /* extern void *get_bsdtask_info(task_t); */
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/policy_internal.h>
#include <kern/sync_sema.h>

#include <vm/vm_shared_region_xnu.h>

#include <os/log.h>

#include <mach/exception.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <libkern/OSAtomic.h>

#include <sys/sdt.h>
#include <sys/codesign.h>
#include <sys/random.h>
#include <libkern/section_keywords.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

/*
 * Missing prototypes that Mach should export
 *
 * +++
 */
extern int thread_enable_fpe(thread_t act, int onoff);
extern kern_return_t get_signalact(task_t, thread_t *, int);
extern unsigned int get_useraddr(void);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_exec_copy(task_t task);

/*
 * ---
 */

extern void doexception(int exc, mach_exception_code_t code,
    mach_exception_subcode_t sub);

static void stop(proc_t, proc_t);
bool cansignal_nomac(proc_t, kauth_cred_t, proc_t, int);
bool cansignal(proc_t, kauth_cred_t, proc_t, int);
int killpg1(proc_t, int, int, int, int);
kern_return_t do_bsdexception(int, int, int);
void __posix_sem_syscall_return(kern_return_t);
char *proc_name_address(void *p);

static int filt_sigattach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_sigdetach(struct knote *kn);
static int filt_signal(struct knote *kn, long hint);
static int filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev);

SECURITY_READ_ONLY_EARLY(struct filterops) sig_filtops = {
    .f_attach = filt_sigattach,
    .f_detach = filt_sigdetach,
    .f_event = filt_signal,
    .f_touch = filt_signaltouch,
    .f_process = filt_signalprocess,
};
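
/*
 * Userspace sketch of the filter these callbacks back (standard
 * kqueue(2)/EVFILT_SIGNAL usage; the filter coexists with normal signal
 * disposition and reports how many times the signal was generated):
 *
 *      int kq = kqueue();
 *      struct kevent kev, out;
 *      EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);     // register
 *      kevent(kq, NULL, 0, &out, 1, NULL);     // blocks until SIGUSR1 arrives
 */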

/* structures and functions for the killpg1 iteration callback and filters */
struct killpg1_filtargs {
    bool posix;
    proc_t curproc;
};

struct killpg1_iterargs {
    proc_t curproc;
    kauth_cred_t uc;
    int signum;
    int nfound;
};

static int killpg1_allfilt(proc_t p, void * arg);
static int killpg1_callback(proc_t p, void * arg);

static int pgsignal_callback(proc_t p, void * arg);
static kern_return_t get_signalthread(proc_t, int, thread_t *);


/* flags for psignal_internal */
#define PSIG_LOCKED     0x1
#define PSIG_VFORK      0x2
#define PSIG_THREAD     0x4
#define PSIG_TRY_THREAD 0x8

static os_reason_t build_signal_reason(int signum, const char *procname);
static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason);

/*
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
{
    out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
    out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
    out->ss_flags = in->ss_flags;
}

static void
sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
{
    out->ss_sp = in->ss_sp;
    out->ss_size = in->ss_size;
    out->ss_flags = in->ss_flags;
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
    out->ss_flags = in->ss_flags;
    out->ss_size = in->ss_size;
    out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}
static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
    out->ss_flags = in->ss_flags;
    out->ss_size = (user_size_t)in->ss_size;
    out->ss_sp = (user_addr_t)in->ss_sp;
}
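
/*
 * Why the "copy back to front" note above suffices: a sketch, assuming the
 * structs lay out ss_sp, ss_size, ss_flags in that order and that an
 * overlapping source shares the target's base address.  The narrower user32
 * image occupies the low bytes; copying the highest-offset field first means
 * each store only clobbers source bytes that have already been read, so an
 * aliased in/out pair is never corrupted.
 */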

static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
    /* This assumes the 32-bit __sa_handler is of type sig_t */
    out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}
static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
    /* The 64-bit __sa_handler is wide enough to copy through without truncation */
    out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}

static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
    out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
    out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;

    kern_return_t kr;
    kr = machine_thread_function_pointers_convert_from_user(current_thread(),
        &out->sa_tramp, 1);
    assert(kr == KERN_SUCCESS);
}

static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
    out->__sigaction_u.__sa_handler = (user_addr_t)in->__sigaction_u.__sa_handler;
    out->sa_tramp = (user_addr_t)in->sa_tramp;
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;

    kern_return_t kr;
    kr = machine_thread_function_pointers_convert_from_user(current_thread(),
        &out->sa_tramp, 1);
    assert(kr == KERN_SUCCESS);
}

#if SIGNAL_DEBUG
void ram_printf(int);
int ram_debug = 0;
unsigned int rdebug_proc = 0;
void
ram_printf(int x)
{
    printf("x is %d", x);
}
#endif /* SIGNAL_DEBUG */


void
signal_setast(thread_t sig_actthread)
{
    act_set_astbsd(sig_actthread);
}

bool
cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
{
    /* you can signal yourself */
    if (src == dst) {
        return true;
    }

    /*
     * You can't signal the initproc, even if root.
     * Note that this still permits the kernel itself to signal initproc directly,
     * e.g. SIGCHLD when reparenting or SIGTERM at shutdown, because those are
     * not considered to originate from a user process, so the cansignal()
     * check isn't performed.
     */
    if (dst == initproc) {
        return false;
    }

    /* otherwise, root can always signal */
    if (kauth_cred_issuser(uc_src)) {
        return true;
    }

    /* processes in the same session can send SIGCONT to each other */
    if (signum == SIGCONT && proc_sessionid(src) == proc_sessionid(dst)) {
        return true;
    }

#if XNU_TARGET_OS_IOS
    // Allow debugging of third party drivers on iOS
    if (proc_is_third_party_debuggable_driver(dst)) {
        return true;
    }
#endif /* XNU_TARGET_OS_IOS */

    /* the source process must be authorized to signal the target */
    {
        bool allowed = false;
        kauth_cred_t uc_dst = NOCRED, uc_ref = NOCRED;

        uc_dst = uc_ref = kauth_cred_proc_ref(dst);

        /*
         * If the real or effective UID of the sender matches the real or saved
         * UID of the target, allow the signal to be sent.
         */
        if (kauth_cred_getruid(uc_src) == kauth_cred_getruid(uc_dst) ||
            kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) ||
            kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) ||
            kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) {
            allowed = true;
        }

        if (uc_ref != NOCRED) {
            kauth_cred_unref(&uc_ref);
            uc_ref = NOCRED;
        }

        return allowed;
    }
}
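
/*
 * Worked example of the UID matrix above (hypothetical IDs): a sender with
 * ruid 501 and euid 502 may signal a target whose ruid is 501 (ruid==ruid)
 * or whose saved UID is 502 (euid==svuid), but not a target whose real and
 * saved UIDs are both 503 -- the target's effective UID never enters into it.
 */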
362
363 /*
364 * Can process `src`, with ucred `uc_src`, send the signal `signum` to process
365 * `dst`? The ucred is referenced by the caller so internal fileds can be used
366 * safely.
367 */
368 bool
cansignal(proc_t src,kauth_cred_t uc_src,proc_t dst,int signum)369 cansignal(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
370 {
371 #if CONFIG_MACF
372 struct proc_ident dst_ident = proc_ident_with_policy(dst, IDENT_VALIDATION_PROC_MAY_EXEC | IDENT_VALIDATION_PROC_MAY_EXIT);
373 if (mac_proc_check_signal(src, NULL, &dst_ident, signum)) {
374 return false;
375 }
376 #endif
377
378 return cansignal_nomac(src, uc_src, dst, signum);
379 }
380
381 /*
382 * <rdar://problem/21952708> Some signals can be restricted from being handled,
383 * forcing the default action for that signal. This behavior applies only to
384 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
385 * bootarg:
386 *
387 * 0 (default): Disallow use of restricted signals. Trying to register a handler
388 * returns ENOTSUP, which userspace may use to take special action (e.g. abort).
389 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
390 * 2: Usual POSIX semantics.
391 */
392 static TUNABLE(unsigned, sigrestrict_arg, "sigrestrict", 0);
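
/*
 * Userspace sketch of the sigrestrict=0 contract described above (assumes
 * `signum` is in SIGRESTRICTMASK, the caller is a non-root app, and
 * `my_handler` is a hypothetical handler):
 *
 *      struct sigaction sa = { .sa_handler = my_handler };
 *      if (sigaction(signum, &sa, NULL) == -1 && errno == ENOTSUP) {
 *          abort();    // registration denied by policy; take special action
 *      }
 */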

#if XNU_PLATFORM_WatchOS
static int
sigrestrictmask(void)
{
    if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
        return SIGRESTRICTMASK;
    }
    return 0;
}

static int
signal_is_restricted(proc_t p, int signum)
{
    if (sigmask(signum) & sigrestrictmask()) {
        if (sigrestrict_arg == 0 && task_is_app(proc_task(p))) {
            return ENOTSUP;
        } else {
            return EINVAL;
        }
    }
    return 0;
}

#else

static inline int
signal_is_restricted(proc_t p, int signum)
{
    (void)p;
    (void)signum;
    return 0;
}
#endif /* !XNU_PLATFORM_WatchOS */

/*
 * Returns:    0           Success
 *             EINVAL
 *     copyout:EFAULT
 *     copyin:EFAULT
 *
 * Notes:      Uses current thread as a parameter to inform PPC to enable
 *             FPU exceptions via setsigvec(); this operation is not proxy
 *             safe!
 */
/* ARGSUSED */
int
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
{
    struct kern_sigaction vec;
    struct __kern_sigaction __vec;

    struct kern_sigaction *sa = &vec;
    struct sigacts *ps = &p->p_sigacts;

    int signum;
    int bit, error = 0;
    uint32_t sigreturn_validation = PS_SIGRETURN_VALIDATION_DEFAULT;

    signum = uap->signum;
    if (signum <= 0 || signum >= NSIG ||
        signum == SIGKILL || signum == SIGSTOP) {
        return EINVAL;
    }

    if (uap->nsa) {
        if (IS_64BIT_PROCESS(p)) {
            struct __user64_sigaction __vec64;
            error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
            __sigaction_user64_to_kern(&__vec64, &__vec);
        } else {
            struct __user32_sigaction __vec32;
            error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
            __sigaction_user32_to_kern(&__vec32, &__vec);
        }
        if (error) {
            return error;
        }

        sigreturn_validation = (__vec.sa_flags & SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP) ?
            PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED;
        __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */

        if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
            if ((error = signal_is_restricted(p, signum))) {
                if (error == ENOTSUP) {
                    printf("%s(%d): denied attempt to register action for signal %d\n",
                        proc_name_address(p), proc_pid(p), signum);
                }
                return error;
            }
        }
    }

    if (uap->osa) {
        sa->sa_handler = SIGACTION(p, signum);
        sa->sa_mask = ps->ps_catchmask[signum];
        bit = sigmask(signum);
        sa->sa_flags = 0;
        if ((ps->ps_sigonstack & bit) != 0) {
            sa->sa_flags |= SA_ONSTACK;
        }
        if ((ps->ps_sigintr & bit) == 0) {
            sa->sa_flags |= SA_RESTART;
        }
        if (ps->ps_siginfo & bit) {
            sa->sa_flags |= SA_SIGINFO;
        }
        if (ps->ps_signodefer & bit) {
            sa->sa_flags |= SA_NODEFER;
        }
        if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) {
            sa->sa_flags |= SA_NOCLDSTOP;
        }
        if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) {
            sa->sa_flags |= SA_NOCLDWAIT;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user64_sigaction vec64 = {};
            sigaction_kern_to_user64(sa, &vec64);
            error = copyout(&vec64, uap->osa, sizeof(vec64));
        } else {
            struct user32_sigaction vec32 = {};
            sigaction_kern_to_user32(sa, &vec32);
            error = copyout(&vec32, uap->osa, sizeof(vec32));
        }
        if (error) {
            return error;
        }
    }

    if (uap->nsa) {
        uint32_t old_sigreturn_validation = atomic_load_explicit(
            &ps->ps_sigreturn_validation, memory_order_relaxed);
        if (old_sigreturn_validation == PS_SIGRETURN_VALIDATION_DEFAULT) {
            atomic_compare_exchange_strong_explicit(&ps->ps_sigreturn_validation,
                &old_sigreturn_validation, sigreturn_validation,
                memory_order_relaxed, memory_order_relaxed);
        }
        error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
    }

    return error;
}

/* Routines to manipulate bits on all threads */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
    struct uthread * uth;

    proc_lock(p);
    if (!in_signalstart) {
        proc_signalstart(p, 1);
    }


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_siglist &= ~bit;
    }
    p->p_siglist &= ~bit;
    if (!in_signalstart) {
        proc_signalend(p, 1);
    }
    proc_unlock(p);

    return 0;
}


static int
unblock_procsigmask(proc_t p, int bit)
{
    struct uthread * uth;

    proc_lock(p);
    proc_signalstart(p, 1);


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask &= ~bit;
    }
    p->p_sigmask &= ~bit;

    proc_signalend(p, 1);
    proc_unlock(p);
    return 0;
}

static int
block_procsigmask(proc_t p, int bit)
{
    struct uthread * uth;

    proc_lock(p);
    proc_signalstart(p, 1);


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask |= bit;
    }
    p->p_sigmask |= bit;

    proc_signalend(p, 1);
    proc_unlock(p);
    return 0;
}

int
set_procsigmask(proc_t p, int bit)
{
    struct uthread * uth;

    proc_lock(p);
    proc_signalstart(p, 1);


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask = bit;
    }
    p->p_sigmask = bit;
    proc_signalend(p, 1);
    proc_unlock(p);

    return 0;
}

/* XXX should be static? */
/*
 * Notes:      The thread parameter is used in the PPC case to select the
 *             thread on which the floating point exception will be enabled
 *             or disabled.  We can't simply take current_thread(), since
 *             this is called from posix_spawn() on the not currently running
 *             process/thread pair.
 *
 *             We mark the thread parameter as unused to allow compilation
 *             without warning on non-PPC platforms.
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
    struct sigacts *ps = &p->p_sigacts;
    int bit;

    assert(signum < NSIG);

    if ((signum == SIGKILL || signum == SIGSTOP) &&
        sa->sa_handler != SIG_DFL) {
        return EINVAL;
    }
    bit = sigmask(signum);
    /*
     * Change setting atomically.
     */
    proc_set_sigact_trampact(p, signum, sa->sa_handler, sa->sa_tramp);
    ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
    if (sa->sa_flags & SA_SIGINFO) {
        ps->ps_siginfo |= bit;
    } else {
        ps->ps_siginfo &= ~bit;
    }
    if ((sa->sa_flags & SA_RESTART) == 0) {
        ps->ps_sigintr |= bit;
    } else {
        ps->ps_sigintr &= ~bit;
    }
    if (sa->sa_flags & SA_ONSTACK) {
        ps->ps_sigonstack |= bit;
    } else {
        ps->ps_sigonstack &= ~bit;
    }
    if (sa->sa_flags & SA_RESETHAND) {
        ps->ps_sigreset |= bit;
    } else {
        ps->ps_sigreset &= ~bit;
    }
    if (sa->sa_flags & SA_NODEFER) {
        ps->ps_signodefer |= bit;
    } else {
        ps->ps_signodefer &= ~bit;
    }
    if (signum == SIGCHLD) {
        if (sa->sa_flags & SA_NOCLDSTOP) {
            OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
        } else {
            OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
        }
        if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) {
            OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
        } else {
            OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
        }
    }

    /*
     * Set bit in p_sigignore for signals that are set to SIG_IGN,
     * and for signals set to SIG_DFL where the default is to ignore.
     * However, don't put SIGCONT in p_sigignore,
     * as we have to restart the process.
     */
    if (sa->sa_handler == SIG_IGN ||
        (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
        clear_procsiglist(p, bit, in_sigstart);
        if (signum != SIGCONT) {
            p->p_sigignore |= bit;      /* easier in psignal */
        }
        p->p_sigcatch &= ~bit;
    } else {
        p->p_sigignore &= ~bit;
        if (sa->sa_handler == SIG_DFL) {
            p->p_sigcatch &= ~bit;
        } else {
            p->p_sigcatch |= bit;
        }
    }
    return 0;
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(proc_t p)
{
    int i;

    for (i = 1; i < NSIG; i++) {
        if (sigprop[i] & SA_IGNORE && i != SIGCONT) {
            p->p_sigignore |= sigmask(i);
        }
    }
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(proc_t p, thread_t thread)
{
    struct sigacts *ps = &p->p_sigacts;
    int nc, mask;
    struct uthread *ut;

    ut = (struct uthread *)get_bsdthread_info(thread);

    /*
     * transfer saved signal states from the process
     * back to the current thread.
     *
     * NOTE: We do this without the process locked,
     * because we are guaranteed to be single-threaded
     * by this point in exec and the p_siglist is
     * only accessed by threads inside the process.
     */
    ut->uu_siglist |= p->p_siglist;
    p->p_siglist = 0;

    /*
     * Reset caught signals.  Held signals remain held
     * through p_sigmask (unless they were caught,
     * and are now ignored by default).
     */
    proc_reset_sigact(p, p->p_sigcatch);
    while (p->p_sigcatch) {
        nc = ffs((unsigned int)p->p_sigcatch);
        mask = sigmask(nc);
        p->p_sigcatch &= ~mask;
        if (sigprop[nc] & SA_IGNORE) {
            if (nc != SIGCONT) {
                p->p_sigignore |= mask;
            }
            ut->uu_siglist &= ~mask;
        }
    }

    atomic_store_explicit(&ps->ps_sigreturn_validation,
        PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed);

    /*
     * Reset stack state to the user stack.
     * Clear set of signals caught on the signal stack.
     */
    /* thread */
    ut->uu_sigstk.ss_flags = SA_DISABLE;
    ut->uu_sigstk.ss_size = 0;
    ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
    ut->uu_flag &= ~UT_ALTSTACK;
    /* process */
    ps->ps_sigonstack = 0;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
    int error = 0;
    sigset_t oldmask, nmask;
    user_addr_t omask = uap->omask;
    struct uthread *ut;

    ut = current_uthread();
    oldmask = ut->uu_sigmask;

    if (uap->mask == USER_ADDR_NULL) {
        /* just want old mask */
        goto out;
    }
    error = copyin(uap->mask, &nmask, sizeof(sigset_t));
    if (error) {
        goto out;
    }

    switch (uap->how) {
    case SIG_BLOCK:
        block_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    case SIG_UNBLOCK:
        unblock_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    case SIG_SETMASK:
        set_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    default:
        error = EINVAL;
        break;
    }
out:
    if (!error && omask != USER_ADDR_NULL) {
        copyout(&oldmask, omask, sizeof(sigset_t));
    }
    return error;
}
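
/*
 * The three `how` modes above implement the standard POSIX set algebra.
 * A minimal userspace sketch:
 *
 *      sigset_t s;
 *      sigemptyset(&s);
 *      sigaddset(&s, SIGUSR1);
 *      sigprocmask(SIG_BLOCK, &s, NULL);       // mask |= s
 *      sigprocmask(SIG_UNBLOCK, &s, NULL);     // mask &= ~s
 *      sigprocmask(SIG_SETMASK, &s, NULL);     // mask = s
 */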

int
sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;
    sigset_t pendlist;

    ut = current_uthread();
    pendlist = ut->uu_siglist;

    if (uap->osv) {
        copyout(&pendlist, uap->osv, sizeof(sigset_t));
    }
    return 0;
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */

static int
sigcontinue(__unused int error)
{
    // struct uthread *ut = current_uthread();
    unix_syscall_return(EINTR);
}

int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval);
}

int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;

    ut = current_uthread();

    /*
     * When returning from sigpause, we want
     * the old mask to be restored after the
     * signal handler has finished.  Thus, we
     * save it here and mark the sigacts structure
     * to indicate this.
     */
    ut->uu_oldmask = ut->uu_sigmask;
    ut->uu_flag |= UT_SAS_OLDMASK;
    ut->uu_sigmask = (uap->mask & ~sigcantmask);
    (void) tsleep0((caddr_t) p, PPAUSE | PCATCH, "pause", 0, sigcontinue);
    /* always return EINTR rather than ERESTART... */
    return EINTR;
}
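
/*
 * The save/restore of uu_sigmask above is what makes the classic userspace
 * pattern race-free (sketch; `done` is a hypothetical volatile sig_atomic_t
 * set by the handler):
 *
 *      sigset_t block, old;
 *      sigemptyset(&block);
 *      sigaddset(&block, SIGCHLD);
 *      sigprocmask(SIG_BLOCK, &block, &old);
 *      while (!done)
 *          sigsuspend(&old);   // atomically unblock and wait
 *      sigprocmask(SIG_SETMASK, &old, NULL);
 */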


int
__disable_threadsignal(__unused proc_t p,
    __unused struct __disable_threadsignal_args *uap,
    __unused int32_t *retval)
{
    struct uthread *uth;

    uth = current_uthread();

    /* No longer valid to have any signal delivered */
    uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);

    return 0;
}

void
__pthread_testcancel(int presyscall)
{
    thread_t self = current_thread();
    struct uthread * uthread;

    uthread = (struct uthread *)get_bsdthread_info(self);


    uthread->uu_flag &= ~UT_NOTCANCELPT;

    if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
        if (presyscall != 0) {
            unix_syscall_return(EINTR);
            /* NOTREACHED */
        } else {
            thread_abort_safely(self);
        }
    }
}



int
__pthread_markcancel(__unused proc_t p,
    struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
    thread_act_t target_act;
    int error = 0;
    struct uthread *uth;

    target_act = (thread_act_t)port_name_to_thread(uap->thread_port,
        PORT_INTRANS_THREAD_IN_CURRENT_TASK);

    if (target_act == THR_ACT_NULL) {
        return ESRCH;
    }

    uth = (struct uthread *)get_bsdthread_info(target_act);

    if ((uth->uu_flag & (UT_CANCEL | UT_CANCELED)) == 0) {
        uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
        if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
            && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) {
            thread_abort_safely(target_act);
        }
    }

    thread_deallocate(target_act);
    return error;
}

/*
 * if action == 0: return the cancellation state; if the thread is marked
 *   for cancellation, transition it to canceled
 * if action == 1: enable cancel handling
 * if action == 2: disable cancel handling
 */
int
__pthread_canceled(__unused proc_t p,
    struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
    thread_act_t thread;
    struct uthread *uth;
    int action = uap->action;

    thread = current_thread();
    uth = (struct uthread *)get_bsdthread_info(thread);

    switch (action) {
    case 1:
        uth->uu_flag &= ~UT_CANCELDISABLE;
        return 0;
    case 2:
        uth->uu_flag |= UT_CANCELDISABLE;
        return 0;
    case 0:
    default:
        if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
            uth->uu_flag &= ~UT_CANCEL;
            uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
            return 0;
        }
        return EINVAL;
    }
    return EINVAL;
}

__attribute__((noreturn))
void
__posix_sem_syscall_return(kern_return_t kern_result)
{
    int error = 0;

    if (kern_result == KERN_SUCCESS) {
        error = 0;
    } else if (kern_result == KERN_ABORTED) {
        error = EINTR;
    } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
        error = ETIMEDOUT;
    } else {
        error = EINVAL;
    }
    unix_syscall_return(error);
    /* does not return */
}

/*
 * Returns:    0           Success
 *             EINTR
 *             ETIMEDOUT
 *             EINVAL
 *             EFAULT if timespec is NULL
 */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
    int32_t *retval)
{
    __pthread_testcancel(0);
    return __semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval);
}

int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
    kern_return_t kern_result;
    mach_timespec_t then;
    struct timespec now;
    struct user_timespec ts;
    boolean_t truncated_timeout = FALSE;

    if (uap->timeout) {
        ts.tv_sec = (user_time_t)uap->tv_sec;
        ts.tv_nsec = uap->tv_nsec;

        if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
            ts.tv_sec = 0xFFFFFFFF;
            ts.tv_nsec = 0;
            truncated_timeout = TRUE;
        }

        if (uap->relative) {
            then.tv_sec = (unsigned int)ts.tv_sec;
            then.tv_nsec = (clock_res_t)ts.tv_nsec;
        } else {
            nanotime(&now);

            /* if the deadline has already passed, use a zero timespec to bail out right away */
            if (now.tv_sec == ts.tv_sec ?
                now.tv_nsec > ts.tv_nsec :
                now.tv_sec > ts.tv_sec) {
                then.tv_sec = 0;
                then.tv_nsec = 0;
            } else {
                then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec);
                then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec);
                if (then.tv_nsec < 0) {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
            }
        }

        if (uap->mutex_sem == 0) {
            kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        } else {
            kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        }
    } else {
        if (uap->mutex_sem == 0) {
            kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
        } else {
            kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
        }
    }

    if (kern_result == KERN_SUCCESS && !truncated_timeout) {
        return 0;
    } else if (kern_result == KERN_SUCCESS && truncated_timeout) {
        return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
    } else if (kern_result == KERN_ABORTED) {
        return EINTR;
    } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
        return ETIMEDOUT;
    } else {
        return EINVAL;
    }
}
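
/*
 * Worked example of the absolute-deadline conversion above: with
 * now = {100s, 900000000ns} and ts = {102s, 200000000ns}, the raw
 * difference is {2s, -700000000ns}; the borrow step then yields the
 * relative timeout {1s, 300000000ns}.
 */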


int
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
    __unused int32_t *retval)
{
    thread_t target_act;
    int error = 0;
    int signum = uap->sig;
    struct uthread *uth;

    target_act = (thread_t)port_name_to_thread(uap->thread_port,
        PORT_INTRANS_OPTIONS_NONE);

    if (target_act == THREAD_NULL) {
        return ESRCH;
    }
    if ((u_int)signum >= NSIG) {
        error = EINVAL;
        goto out;
    }

    uth = (struct uthread *)get_bsdthread_info(target_act);

    if (uth->uu_flag & UT_NO_SIGMASK) {
        error = ESRCH;
        goto out;
    }

    /*
     * workq threads must have kills enabled through either
     * BSDTHREAD_CTL_WORKQ_ALLOW_KILL or BSDTHREAD_CTL_WORKQ_ALLOW_SIGMASK
     */
    if (((thread_get_tag(target_act) & THREAD_TAG_WORKQUEUE) &&
        !(uth->uu_workq_pthread_kill_allowed || p->p_workq_allow_sigmask)) ||
        (thread_get_tag(target_act) & THREAD_TAG_AIO_WORKQUEUE)) {
        error = ENOTSUP;
        goto out;
    }

    if (signum) {
        psignal_uthread(target_act, signum);
    }
out:
    thread_deallocate(target_act);
    return error;
}


int
__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
    __unused int32_t *retval)
{
    user_addr_t set = uap->set;
    user_addr_t oset = uap->oset;
    sigset_t nset;
    int error = 0;
    struct uthread *ut;
    sigset_t oldset;

    ut = current_uthread();
    oldset = ut->uu_sigmask;

    if (set == USER_ADDR_NULL) {
        /* need only old mask */
        goto out;
    }

    error = copyin(set, &nset, sizeof(sigset_t));
    if (error) {
        goto out;
    }

    switch (uap->how) {
    case SIG_BLOCK:
        ut->uu_sigmask |= (nset & ~sigcantmask);
        break;

    case SIG_UNBLOCK:
        ut->uu_sigmask &= ~(nset);
        signal_setast(current_thread());
        break;

    case SIG_SETMASK:
        ut->uu_sigmask = (nset & ~sigcantmask);
        signal_setast(current_thread());
        break;

    default:
        error = EINVAL;
    }
out:
    if (!error && oset != USER_ADDR_NULL) {
        copyout(&oldset, oset, sizeof(sigset_t));
    }

    return error;
}

/*
 * Returns:    0           Success
 *             EINVAL
 *      copyin:EFAULT
 *     copyout:EFAULT
 */
int
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return __sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval);
}

int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;
    struct uthread *uth;
    int error = 0;
    sigset_t mask;
    sigset_t siglist;
    sigset_t sigw = 0;
    int signum;

    ut = current_uthread();

    if (uap->set == USER_ADDR_NULL) {
        return EINVAL;
    }

    error = copyin(uap->set, &mask, sizeof(sigset_t));
    if (error) {
        return error;
    }

    siglist = (mask & ~sigcantmask);

    if (siglist == 0) {
        return EINVAL;
    }

    proc_lock(p);

    proc_signalstart(p, 1);
    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        if ((sigw = uth->uu_siglist & siglist)) {
            break;
        }
    }
    proc_signalend(p, 1);

    if (sigw) {
        /* The signal was pending on a thread */
        goto sigwait1;
    }
    /*
     * When returning from sigwait, we want
     * the old mask to be restored after the
     * signal handler has finished.  Thus, we
     * save it here and mark the sigacts structure
     * to indicate this.
     */
    uth = ut; /* wait for it to be delivered to us */
    ut->uu_oldmask = ut->uu_sigmask;
    ut->uu_flag |= UT_SAS_OLDMASK;
    if (siglist == (sigset_t)0) {
        proc_unlock(p);
        return EINVAL;
    }
    /* SIGKILL and SIGSTOP are not maskable either */
    ut->uu_sigmask = ~(siglist | sigcantmask);
    ut->uu_sigwait = siglist;

    /* No Continuations for now */
    error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0);

    if (error == ERESTART) {
        error = 0;
    }

    sigw = (ut->uu_sigwait & siglist);
    ut->uu_sigmask = ut->uu_oldmask;
    ut->uu_oldmask = 0;
    ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
    ut->uu_sigwait = 0;
    if (!error) {
        signum = ffs((unsigned int)sigw);
        if (!signum) {
            panic("sigwait with no signal wakeup");
        }
        /* Clear the pending signal in the thread it was delivered to */
        uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
        DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

        proc_unlock(p);
        if (uap->sig != USER_ADDR_NULL) {
            error = copyout(&signum, uap->sig, sizeof(int));
        }
    } else {
        proc_unlock(p);
    }

    return error;
}
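
/*
 * Typical userspace usage of the wait-for-signal path above (standard
 * POSIX sigwait(3) pattern; the signal must be blocked first so it stays
 * pending instead of being delivered):
 *
 *      sigset_t set;
 *      int sig;
 *      sigemptyset(&set);
 *      sigaddset(&set, SIGUSR1);
 *      pthread_sigmask(SIG_BLOCK, &set, NULL);
 *      sigwait(&set, &sig);    // returns with sig == SIGUSR1
 */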

int
sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
{
    struct kern_sigaltstack ss;
    struct kern_sigaltstack *pstk;
    int error;
    struct uthread *uth;
    int onstack;

    uth = current_uthread();

    pstk = &uth->uu_sigstk;
    if ((uth->uu_flag & UT_ALTSTACK) == 0) {
        uth->uu_sigstk.ss_flags |= SA_DISABLE;
    }
    onstack = pstk->ss_flags & SA_ONSTACK;
    if (uap->oss) {
        if (IS_64BIT_PROCESS(p)) {
            struct user64_sigaltstack ss64 = {};
            sigaltstack_kern_to_user64(pstk, &ss64);
            error = copyout(&ss64, uap->oss, sizeof(ss64));
        } else {
            struct user32_sigaltstack ss32 = {};
            sigaltstack_kern_to_user32(pstk, &ss32);
            error = copyout(&ss32, uap->oss, sizeof(ss32));
        }
        if (error) {
            return error;
        }
    }
    if (uap->nss == USER_ADDR_NULL) {
        return 0;
    }
    if (IS_64BIT_PROCESS(p)) {
        struct user64_sigaltstack ss64;
        error = copyin(uap->nss, &ss64, sizeof(ss64));
        sigaltstack_user64_to_kern(&ss64, &ss);
    } else {
        struct user32_sigaltstack ss32;
        error = copyin(uap->nss, &ss32, sizeof(ss32));
        sigaltstack_user32_to_kern(&ss32, &ss);
    }
    if (error) {
        return error;
    }
    if ((ss.ss_flags & ~SA_DISABLE) != 0) {
        return EINVAL;
    }

    if (ss.ss_flags & SA_DISABLE) {
        /* if we are here we are not in the signal handler; no need to check */
        if (uth->uu_sigstk.ss_flags & SA_ONSTACK) {
            return EINVAL;
        }
        uth->uu_flag &= ~UT_ALTSTACK;
        uth->uu_sigstk.ss_flags = ss.ss_flags;
        return 0;
    }
    if (onstack) {
        return EPERM;
    }
    /* The older stack size was 8K; enforce that minimum so there are no compatibility problems */
#define OLDMINSIGSTKSZ 8*1024
    if (ss.ss_size < OLDMINSIGSTKSZ) {
        return ENOMEM;
    }
    uth->uu_flag |= UT_ALTSTACK;
    uth->uu_sigstk = ss;
    return 0;
}
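
/*
 * Userspace sketch of installing an alternate stack that satisfies the
 * checks above (size must be at least OLDMINSIGSTKSZ; libc's SIGSTKSZ
 * exceeds it):
 *
 *      stack_t ss = {
 *          .ss_sp = malloc(SIGSTKSZ),
 *          .ss_size = SIGSTKSZ,
 *          .ss_flags = 0,
 *      };
 *      sigaltstack(&ss, NULL);
 *      // handlers registered with SA_ONSTACK now run on this stack
 */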

int
kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
{
    proc_t p;
    kauth_cred_t uc = kauth_cred_get();
    int posix = uap->posix;     /* !0 if posix behaviour desired */

    AUDIT_ARG(pid, uap->pid);
    AUDIT_ARG(signum, uap->signum);

    if ((u_int)uap->signum >= NSIG) {
        return EINVAL;
    }
    if (uap->pid > 0) {
        /* kill single process */
        if ((p = proc_find(uap->pid)) == NULL) {
            if (pzfind(uap->pid)) {
                /*
                 * POSIX 1003.1-2001 requires returning success when killing a
                 * zombie; see Rationale for kill(2).
                 */
                return 0;
            }
            return ESRCH;
        }
        AUDIT_ARG(process, p);
        if (!cansignal(cp, uc, p, uap->signum)) {
            proc_rele(p);
            return EPERM;
        }
        if (uap->signum) {
            psignal(p, uap->signum);
        }
        proc_rele(p);
        return 0;
    }
    switch (uap->pid) {
    case -1:                /* broadcast signal */
        return killpg1(cp, uap->signum, 0, 1, posix);
    case 0:                 /* signal own process group */
        return killpg1(cp, uap->signum, 0, 0, posix);
    default:                /* negative explicit process group */
        return killpg1(cp, uap->signum, -(uap->pid), 0, posix);
    }
    /* NOTREACHED */
}
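
/*
 * Recap of the pid encodings handled above (POSIX kill(2) semantics;
 * pids are illustrative):
 *
 *      kill(1234, SIGTERM);    // pid > 0: that process only
 *      kill(0, SIGTERM);       // pid == 0: caller's process group
 *      kill(-1, SIGTERM);      // pid == -1: broadcast
 *      kill(-5678, SIGTERM);   // pid < -1: process group 5678
 */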

os_reason_t
build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
    os_reason_t exit_reason = OS_REASON_NULL;

    int error = 0;
    int num_items_to_copy = 0;
    uint32_t user_data_to_copy = 0;
    char *reason_user_desc = NULL;
    size_t reason_user_desc_len = 0;

    exit_reason = os_reason_create(reason_namespace, reason_code);
    if (exit_reason == OS_REASON_NULL) {
        os_log(OS_LOG_DEFAULT, "build_userspace_exit_reason: failed to allocate exit reason\n");
        return exit_reason;
    }

    exit_reason->osr_flags |= OS_REASON_FLAG_FROM_USERSPACE;

    /*
     * Only apply flags that are allowed to be passed from userspace.
     */
    reason_flags = reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER;
    exit_reason->osr_flags |= reason_flags;

    if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) {
        exit_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
    }

    if (payload != USER_ADDR_NULL) {
        if (payload_size == 0) {
            os_log(OS_LOG_DEFAULT, "build_userspace_exit_reason: exit reason with namespace %u,"
                " nonzero payload but zero length\n", reason_namespace);
            exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS;
            payload = USER_ADDR_NULL;
        } else {
            num_items_to_copy++;

            if (payload_size > EXIT_REASON_PAYLOAD_MAX_LEN) {
                exit_reason->osr_flags |= OS_REASON_FLAG_PAYLOAD_TRUNCATED;
                payload_size = EXIT_REASON_PAYLOAD_MAX_LEN;
            }

            user_data_to_copy += payload_size;
        }
    }

    if (reason_string != USER_ADDR_NULL) {
        reason_user_desc = (char *)kalloc_data(EXIT_REASON_USER_DESC_MAX_LEN, Z_WAITOK);

        if (reason_user_desc != NULL) {
            error = copyinstr(reason_string, (void *) reason_user_desc,
                EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len);

            if (error == 0) {
                num_items_to_copy++;
                user_data_to_copy += reason_user_desc_len;
            } else if (error == ENAMETOOLONG) {
                num_items_to_copy++;
                reason_user_desc[EXIT_REASON_USER_DESC_MAX_LEN - 1] = '\0';
                user_data_to_copy += reason_user_desc_len;
            } else {
                exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
                kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
                reason_user_desc = NULL;
                reason_user_desc_len = 0;
            }
        }
    }

    if (num_items_to_copy != 0) {
        uint32_t reason_buffer_size_estimate = 0;
        mach_vm_address_t data_addr = 0;

        reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(num_items_to_copy, user_data_to_copy);

        error = os_reason_alloc_buffer(exit_reason, reason_buffer_size_estimate);
        if (error != 0) {
            os_log(OS_LOG_DEFAULT, "build_userspace_exit_reason: failed to allocate signal reason buffer\n");
            goto out_failed_copyin;
        }

        if (reason_user_desc != NULL && reason_user_desc_len != 0) {
            if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
                EXIT_REASON_USER_DESC,
                (uint32_t)reason_user_desc_len,
                &data_addr)) {
                kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr,
                    reason_user_desc, (uint32_t)reason_user_desc_len);
            } else {
                os_log(OS_LOG_DEFAULT, "build_userspace_exit_reason: failed to allocate space for reason string\n");
                goto out_failed_copyin;
            }
        }

        if (payload != USER_ADDR_NULL) {
            if (KERN_SUCCESS ==
                kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
                EXIT_REASON_USER_PAYLOAD,
                payload_size,
                &data_addr)) {
                error = copyin(payload, (void *) data_addr, payload_size);
                if (error) {
                    os_log(OS_LOG_DEFAULT, "build_userspace_exit_reason: failed to copy in payload data with error %d\n", error);
                    goto out_failed_copyin;
                }
            } else {
                os_log(OS_LOG_DEFAULT, "build_userspace_exit_reason: failed to allocate space for payload data\n");
                goto out_failed_copyin;
            }
        }
    }

    if (reason_user_desc != NULL) {
        kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
        reason_user_desc = NULL;
        reason_user_desc_len = 0;
    }

    return exit_reason;

out_failed_copyin:

    if (reason_user_desc != NULL) {
        kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
        reason_user_desc = NULL;
        reason_user_desc_len = 0;
    }

    exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
    os_reason_alloc_buffer(exit_reason, 0);
    return exit_reason;
}

static int
terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace,
    uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
    proc_t target_proc = PROC_NULL;
    kauth_cred_t cur_cred = kauth_cred_get();

    os_reason_t signal_reason = OS_REASON_NULL;

    AUDIT_ARG(pid, target_pid);
    if (target_pid <= 0) {
        return EINVAL;
    }

    target_proc = proc_find(target_pid);
    if (target_proc == PROC_NULL) {
        return ESRCH;
    }

    AUDIT_ARG(process, target_proc);

    if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL)) {
        proc_rele(target_proc);
        return EPERM;
    }

    if (target_pid != proc_getpid(cur_proc)) {
        /*
         * FLAG_ABORT should only be set on terminate_with_reason(getpid()) that
         * was a fallback from an unsuccessful abort_with_reason(). In that case
         * the caller's pid matches the target one. Otherwise remove the flag.
         */
        reason_flags &= ~((typeof(reason_flags))OS_REASON_FLAG_ABORT);
    }

    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
        proc_getpid(target_proc), reason_namespace,
        reason_code, 0, 0);

    signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
        reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID));

    if (target_pid == proc_getpid(cur_proc)) {
        /*
         * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
         * return if the thread and/or task are already terminating. Either way, the
         * current thread won't return to userspace.
         */
        psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason);
    } else {
        psignal_with_reason(target_proc, SIGKILL, signal_reason);
    }

    proc_rele(target_proc);

    return 0;
}

int
terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args,
    __unused int32_t *retval)
{
    return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload,
               args->payload_size, args->reason_string, args->reason_flags);
}

static int
killpg1_allfilt(proc_t p, void * arg)
{
    struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;

    /*
     * Don't signal initproc, a system process, or the current process if POSIX
     * isn't specified.
     */
    return proc_getpid(p) > 1 && !(p->p_flag & P_SYSTEM) &&
           (kfargp->posix ? true : p != kfargp->curproc);
}

static int
killpg1_callback(proc_t p, void *arg)
{
    struct killpg1_iterargs *kargp = (struct killpg1_iterargs *)arg;
    int signum = kargp->signum;

    if (proc_list_exited(p)) {
        /*
         * Count zombies as found for the purposes of signalling, since POSIX
         * 1003.1-2001 sees signalling zombies as successful. If killpg(2) or
         * kill(2) with pid -1 only finds zombies that can be signalled, it
         * shouldn't return ESRCH. See the Rationale for kill(2).
         *
         * Don't call into MAC -- it's not expecting signal checks for exited
         * processes.
         */
        if (cansignal_nomac(kargp->curproc, kargp->uc, p, signum)) {
            kargp->nfound++;
        }
    } else if (cansignal(kargp->curproc, kargp->uc, p, signum)) {
        kargp->nfound++;

        if (signum != 0) {
            psignal(p, signum);
        }
    }

    return PROC_RETURNED;
}

/*
 * Common code for kill process group/broadcast kill.
 */
int
killpg1(proc_t curproc, int signum, int pgid, int all, int posix)
{
    kauth_cred_t uc;
    struct pgrp *pgrp;
    int error = 0;

    uc = kauth_cred_proc_ref(curproc);
    struct killpg1_iterargs karg = {
        .curproc = curproc, .uc = uc, .nfound = 0, .signum = signum
    };

    if (all) {
        /*
         * Broadcast to all processes that the user can signal (pid was -1).
         */
        struct killpg1_filtargs kfarg = {
            .posix = posix, .curproc = curproc
        };
        proc_iterate(PROC_ALLPROCLIST | PROC_ZOMBPROCLIST, killpg1_callback,
            &karg, killpg1_allfilt, &kfarg);
    } else {
        if (pgid == 0) {
            /*
             * Send to the current process's process group.
             */
            pgrp = proc_pgrp(curproc, NULL);
        } else {
            pgrp = pgrp_find(pgid);
            if (pgrp == NULL) {
                error = ESRCH;
                goto out;
            }
        }

        pgrp_iterate(pgrp, killpg1_callback, &karg, ^bool (proc_t p) {
            if (p == kernproc || p == initproc) {
                return false;
            }
            /* XXX shouldn't this allow signalling zombies? */
            return !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB;
        });
        pgrp_rele(pgrp);
    }
    error = (karg.nfound > 0 ? 0 : (posix ? EPERM : ESRCH));
out:
    kauth_cred_unref(&uc);
    return error;
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
    struct pgrp *pgrp;

    if (pgid && (pgrp = pgrp_find(pgid))) {
        pgsignal(pgrp, signum, 0);
        pgrp_rele(pgrp);
    }
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */

static int
pgsignal_callback(proc_t p, void * arg)
{
    int signum = *(int*)arg;

    psignal(p, signum);
    return PROC_RETURNED;
}

void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
    if (pgrp == PGRP_NULL) {
        return;
    }

    bool (^filter)(proc_t) = ^bool (proc_t p) {
        return p->p_flag & P_CONTROLT;
    };

    pgrp_iterate(pgrp, pgsignal_callback, &signum, checkctty ? filter : NULL);
}


void
tty_pgsignal_locked(struct tty *tp, int signum, int checkctty)
{
    struct pgrp * pg;

    pg = tty_pgrp_locked(tp);
    if (pg != PGRP_NULL) {
        tty_unlock(tp);
        pgsignal(pg, signum, checkctty);
        pgrp_rele(pg);
        tty_lock(tp);
    }
}
/*
 * Send a signal caused by a trap to a specific thread.
 */
void
threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
{
    struct uthread *uth;
    struct task * sig_task;
    proc_t p;
    int mask;

    if ((u_int)signum >= NSIG || signum == 0) {
        return;
    }

    mask = sigmask(signum);
    if ((mask & threadmask) == 0) {
        return;
    }
    sig_task = get_threadtask(sig_actthread);
    p = (proc_t)(get_bsdtask_info(sig_task));

    uth = get_bsdthread_info(sig_actthread);

    proc_lock(p);
    if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
        proc_unlock(p);
        return;
    }

    uth->uu_siglist |= mask;
    uth->uu_code = code;

    /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
    if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask)
        && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) &&
        !(mask & stopsigmask) && !(mask & contsigmask)) {
        if (uth->uu_exit_reason == OS_REASON_NULL) {
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
                proc_getpid(p), OS_REASON_SIGNAL, signum, 0, 0);

            os_reason_t signal_reason = build_signal_reason(signum, "exc handler");

            set_thread_exit_reason(sig_actthread, signal_reason, TRUE);

            /* We dropped/consumed the reference in set_thread_exit_reason() */
            signal_reason = OS_REASON_NULL;
        }
    }

    proc_unlock(p);

    /* mark on process as well */
    signal_setast(sig_actthread);
}
1835
1836 /* Called with proc locked */
1837 static void
set_thread_extra_flags(task_t task,struct uthread * uth,os_reason_t reason)1838 set_thread_extra_flags(task_t task, struct uthread *uth, os_reason_t reason)
1839 {
1840 extern int vm_shared_region_reslide_restrict;
1841 boolean_t reslide_shared_region = FALSE;
1842 boolean_t driver = task_is_driver(task);
1843 assert(uth != NULL);
1844 /*
1845 * Check whether the userland fault address falls within the shared
1846 * region and notify userland if so. To limit the occurrences of shared
1847 * cache resliding - and its associated memory tax - only investigate the
1848 * fault if it is a consequence of accessing unmapped memory (SIGSEGV) or
1849 * accessing with incorrect permissions (SIGBUS - KERN_PROTECTION_FAILURE).
1850 *
1851 * This allows launchd to apply special policies around this fault type.
1852 */
1853 if (reason->osr_namespace == OS_REASON_SIGNAL &&
1854 (reason->osr_code == SIGSEGV ||
1855 (reason->osr_code == SIGBUS && uth->uu_code == KERN_PROTECTION_FAILURE))) {
1856 mach_vm_address_t fault_address = uth->uu_subcode;
1857
1858 /* Address is in userland, so we hard clear any non-canonical bits to 0 here */
1859 fault_address = VM_USER_STRIP_PTR(fault_address);
1860
1861 if (fault_address >= SHARED_REGION_BASE &&
1862 fault_address <= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1863 /*
1864 * Always report whether the fault happened within the shared cache
1865 * region, but only mark the slide stale if resliding is extended
1866 * to all processes or if the faulting process is a platform one.
1867 */
1868 reason->osr_flags |= OS_REASON_FLAG_SHAREDREGION_FAULT;
1869
1870 #if __has_feature(ptrauth_calls)
1871 if (!vm_shared_region_reslide_restrict ||
1872 (task_get_platform_restrictions_version(current_task()) >= 1)) {
1873 reslide_shared_region = TRUE;
1874 }
1875 #endif /* __has_feature(ptrauth_calls) */
1876 }
1877
1878 if (driver) {
1879 /*
1880 * Always reslide the DriverKit shared region if the driver faulted.
1881 * The memory cost is acceptable because the DriverKit shared cache is small
1882 * and there are relatively few driver processes.
1883 */
1884 reslide_shared_region = TRUE;
1885 }
1886 }
1887
1888 if (reslide_shared_region) {
1889 vm_shared_region_reslide_stale(driver);
1890 }
1891 }
1892
1893 void
1894 set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked)
1895 {
1896 struct uthread *targ_uth = get_bsdthread_info(th);
1897 struct task *targ_task = get_threadtask(th);
1898 proc_t targ_proc = NULL;
1899
1900 os_reason_t exit_reason = (os_reason_t)reason;
1901
1902 if (exit_reason == OS_REASON_NULL) {
1903 return;
1904 }
1905
1906 if (!proc_locked) {
1907 targ_proc = (proc_t)(get_bsdtask_info(targ_task));
1908
1909 proc_lock(targ_proc);
1910 }
1911
1912 set_thread_extra_flags(targ_task, targ_uth, exit_reason);
1913
1914 if (targ_uth->uu_exit_reason == OS_REASON_NULL) {
1915 targ_uth->uu_exit_reason = exit_reason;
1916 } else {
1917 /* The caller expects that we drop a reference on the exit reason */
1918 os_reason_free(exit_reason);
1919 }
1920
1921 if (!proc_locked) {
1922 assert(targ_proc != NULL);
1923 proc_unlock(targ_proc);
1924 }
1925 }
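
/*
 * set_thread_exit_reason() always consumes one reference on 'reason':
 * it either stashes it in uu_exit_reason (first writer wins) or drops
 * it via os_reason_free(). A caller that still needs the reason
 * afterwards must take its own reference first, as psignal_internal()
 * does below.
 */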
1926
1927 /*
1928 * get_signalthread
1929 *
1930 * Picks an appropriate thread from a process to target with a signal.
1931 *
1932 * Called with proc locked.
1933 * Returns thread with BSD ast set.
1934 *
1935 * We attempt to deliver a proc-wide signal to the first thread in the task.
1936 * This allows single-threaded applications which use signals to
1937 * be linked with multithreaded libraries.
1938 */
1939 static kern_return_t
1940 get_signalthread(proc_t p, int signum, thread_t * thr)
1941 {
1942 struct uthread *uth;
1943 sigset_t mask = sigmask(signum);
1944 bool skip_wqthreads = true;
1945
1946 *thr = THREAD_NULL;
1947
1948
1949 again:
1950 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
1951 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
1952 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
1953 thread_t th = get_machthread(uth);
1954 if ((skip_wqthreads && (thread_get_tag(th) & THREAD_TAG_WORKQUEUE)) ||
1955 (thread_get_tag(th) & THREAD_TAG_AIO_WORKQUEUE)) {
1956 /* Workqueue threads may be parked in the kernel unable to
1957 * deliver signals for an extended period of time, so skip them
1958 * in favor of pthreads in a first pass. (rdar://50054475). */
1959 } else if (check_actforsig(proc_task(p), th, 1) == KERN_SUCCESS) {
1960 *thr = th;
1961 return KERN_SUCCESS;
1962 }
1963 }
1964 }
1965 if (skip_wqthreads) {
1966 skip_wqthreads = false;
1967 goto again;
1968 }
1969 if (get_signalact(proc_task(p), thr, 1) == KERN_SUCCESS) {
1970 return KERN_SUCCESS;
1971 }
1972
1973 return KERN_FAILURE;
1974 }
1975
1976 static os_reason_t
1977 build_signal_reason(int signum, const char *procname)
1978 {
1979 os_reason_t signal_reason = OS_REASON_NULL;
1980 proc_t sender_proc = current_proc();
1981 const uint32_t proc_name_length = sizeof(sender_proc->p_name);
1982 uint32_t reason_buffer_size_estimate = 0;
1983 const char *default_sender_procname = "unknown";
1984 mach_vm_address_t data_addr;
1985 int ret;
1986
1987 signal_reason = os_reason_create(OS_REASON_SIGNAL, signum);
1988 if (signal_reason == OS_REASON_NULL) {
1989 printf("build_signal_reason: unable to allocate signal reason structure.\n");
1990 return signal_reason;
1991 }
1992
1993 reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
1994 sizeof(pid_t));
1995
1996 ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate);
1997 if (ret != 0) {
1998 printf("build_signal_reason: unable to allocate signal reason buffer.\n");
1999 return signal_reason;
2000 }
2001
2002 if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID,
2003 sizeof(pid_t), &data_addr)) {
2004 pid_t pid = proc_getpid(sender_proc);
2005 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &pid, sizeof(pid));
2006 } else {
2007 printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
2008 }
2009
2010 if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME,
2011 proc_name_length, &data_addr)) {
2012 if (procname) {
2013 char truncated_procname[proc_name_length];
2014 strncpy((char *) &truncated_procname, procname, proc_name_length);
2015 truncated_procname[proc_name_length - 1] = '\0';
2016
2017 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname,
2018 (uint32_t)strlen((char *) &truncated_procname));
2019 } else if (*sender_proc->p_name) {
2020 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name,
2021 sizeof(sender_proc->p_name));
2022 } else {
2023 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname,
2024 (uint32_t)strlen(default_sender_procname) + 1);
2025 }
2026 } else {
2027 printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
2028 }
2029
2030 return signal_reason;
2031 }
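
/*
 * Lifecycle sketch for the returned reason, mirroring the SIGKILL
 * path in psignal_internal() below:
 *
 *	signal_reason = build_signal_reason(signum, NULL);
 *	os_reason_ref(signal_reason);		/* extra ref for the thread */
 *	set_thread_exit_reason(th, signal_reason, TRUE); /* consumes one ref */
 *	...
 *	os_reason_free(signal_reason);		/* drop the original ref */
 */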
2032
2033 /*
2034 * Send the signal to the process. If the signal has an action, the action
2035 * is usually performed by the target process rather than the caller; we add
2036 * the signal to the set of pending signals for the process.
2037 *
2038 * Always drops a reference on a signal_reason if one is provided, whether via
2039 * passing it to a thread or deallocating directly.
2040 *
2041 * Exceptions:
2042 * o When a stop signal is sent to a sleeping process that takes the
2043 * default action, the process is stopped without awakening it.
2044 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2045 * regardless of the signal action (eg, blocked or ignored).
2046 *
2047 * Other ignored signals are discarded immediately.
2048 */
2049 static void
2050 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason)
2051 {
2052 int prop;
2053 user_addr_t action = USER_ADDR_NULL;
2054 proc_t sig_proc;
2055 thread_t sig_thread;
2056 task_t sig_task;
2057 int mask;
2058 struct uthread *uth;
2059 kern_return_t kret;
2060 uid_t r_uid;
2061 proc_t pp;
2062 kauth_cred_t my_cred;
2063 char *launchd_exit_reason_desc = NULL;
2064 boolean_t update_thread_policy = FALSE;
2065
2066 if ((u_int)signum >= NSIG || signum == 0) {
2067 panic("psignal: bad signal number %d", signum);
2068 }
2069
2070 mask = sigmask(signum);
2071 prop = sigprop[signum];
2072
2073 #if SIGNAL_DEBUG
2074 if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
2075 ram_printf(3);
2076 }
2077 #endif /* SIGNAL_DEBUG */
2078
2079 /* catch unexpected initproc kills early for easier debugging */
2080 if (signum == SIGKILL && p == initproc) {
2081 if (signal_reason == NULL) {
2082 panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
2083 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2084 ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""));
2085 } else {
2086 launchd_exit_reason_desc = exit_reason_get_string_desc(signal_reason);
2087 panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
2088 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2089 ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""),
2090 signal_reason->osr_namespace, signal_reason->osr_code,
2091 launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
2092 }
2093 }
2094
2095 /*
2096 * We will need the task pointer later. Grab it now to
2097 * check for a zombie process. Also don't send signals
2098 * to kernel internal tasks.
2099 */
2100 if (flavor & PSIG_VFORK) {
2101 sig_task = task;
2102 sig_thread = thread;
2103 sig_proc = p;
2104 } else if (flavor & PSIG_THREAD) {
2105 sig_task = get_threadtask(thread);
2106 sig_thread = thread;
2107 sig_proc = (proc_t)get_bsdtask_info(sig_task);
2108 } else if (flavor & PSIG_TRY_THREAD) {
2109 assert((thread == current_thread()) && (p == current_proc()));
2110 sig_task = proc_task(p);
2111 sig_thread = thread;
2112 sig_proc = p;
2113 } else {
2114 sig_task = proc_task(p);
2115 sig_thread = THREAD_NULL;
2116 sig_proc = p;
2117 }
2118
2119 if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) {
2120 os_reason_free(signal_reason);
2121 return;
2122 }
2123
2124 if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
2125 proc_knote(sig_proc, NOTE_SIGNAL | signum);
2126 }
2127
2128 if ((flavor & PSIG_LOCKED) == 0) {
2129 proc_signalstart(sig_proc, 0);
2130 }
2131
2132 /* Don't send signals to a process that has ignored them. */
2133 if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
2134 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2135 goto sigout_unlocked;
2136 }
2137
2138 /*
2139 * The proc_lock prevents the targeted thread from being deallocated
2140 * or handling the signal until we're done signaling it.
2141 *
2142 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
2143 *
2144 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
2145 */
2146 proc_lock(sig_proc);
2147
2148 /*
2149 * Don't send signals to a process which has already exited and thus
2150 * committed to a particular p_xstat exit code.
2151 * Additionally, don't abort the process running 'reboot'.
2152 */
2153 if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
2154 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2155 goto sigout_locked;
2156 }
2157
2158 if (flavor & PSIG_VFORK) {
2159 action = SIG_DFL;
2160 act_set_astbsd(sig_thread);
2161 kret = KERN_SUCCESS;
2162 } else if (flavor & PSIG_TRY_THREAD) {
2163 uth = get_bsdthread_info(sig_thread);
2164 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2165 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
2166 ((kret = check_actforsig(proc_task(sig_proc), sig_thread, 1)) == KERN_SUCCESS)) {
2167 /* deliver to specified thread */
2168 } else {
2169 /* deliver to any willing thread */
2170 kret = get_signalthread(sig_proc, signum, &sig_thread);
2171 }
2172 } else if (flavor & PSIG_THREAD) {
2173 /* If successful return with ast set */
2174 kret = check_actforsig(sig_task, sig_thread, 1);
2175 } else {
2176 /* If successful return with ast set */
2177 kret = get_signalthread(sig_proc, signum, &sig_thread);
2178 }
2179
2180 if (kret != KERN_SUCCESS) {
2181 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2182 proc_unlock(sig_proc);
2183 goto sigout_unlocked;
2184 }
2185
2186 uth = get_bsdthread_info(sig_thread);
2187
2188 /*
2189 * If proc is traced, always give parent a chance.
2190 */
2191
2192 if ((flavor & PSIG_VFORK) == 0) {
2193 if (sig_proc->p_lflag & P_LTRACED) {
2194 action = SIG_DFL;
2195 } else {
2196 /*
2197 * If the signal is being ignored,
2198 * then we forget about it immediately.
2199 * (Note: we don't set SIGCONT in p_sigignore,
2200 * and if it is set to SIG_IGN,
2201 * action will be SIG_DFL here.)
2202 */
2203 if (sig_proc->p_sigignore & mask) {
2204 goto sigout_locked;
2205 }
2206
2207 if (uth->uu_sigwait & mask) {
2208 action = KERN_SIG_WAIT;
2209 } else if (uth->uu_sigmask & mask) {
2210 action = KERN_SIG_HOLD;
2211 } else if (sig_proc->p_sigcatch & mask) {
2212 action = KERN_SIG_CATCH;
2213 } else {
2214 action = SIG_DFL;
2215 }
2216 }
2217 }
2218
2219 /* TODO: p_nice isn't hooked up to the scheduler... */
2220 if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
2221 (sig_proc->p_lflag & P_LTRACED) == 0) {
2222 sig_proc->p_nice = NZERO;
2223 }
2224
2225 if (prop & SA_CONT) {
2226 uth->uu_siglist &= ~stopsigmask;
2227 }
2228
2229 if (prop & SA_STOP) {
2230 struct pgrp *pg;
2231 /*
2232 * If sending a tty stop signal to a member of an orphaned
2233 * process group, discard the signal here if the action
2234 * is default; don't stop the process below if sleeping,
2235 * and don't clear any pending SIGCONT.
2236 */
2237 pg = proc_pgrp(sig_proc, NULL);
2238 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
2239 action == SIG_DFL) {
2240 pgrp_rele(pg);
2241 goto sigout_locked;
2242 }
2243 pgrp_rele(pg);
2244 uth->uu_siglist &= ~contsigmask;
2245 }
2246
2247 uth->uu_siglist |= mask;
2248
2249 /*
2250 * Defer further processing for signals which are held,
2251 * except that stopped processes must be continued by SIGCONT.
2252 */
2253 if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
2254 goto sigout_locked;
2255 }
2256
2257 /*
2258 * SIGKILL priority twiddling moved here from above because
2259 * it needs sig_thread. Could merge it into large switch
2260 * below if we didn't care about priority for tracing
2261 * as SIGKILL's action is always SIG_DFL.
2262 *
2263 * TODO: p_nice isn't hooked up to the scheduler...
2264 */
2265 if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
2266 sig_proc->p_nice = NZERO;
2267 }
2268
2269 /*
2270 * Process is traced - wake it up (if not already
2271 * stopped) so that it can discover the signal in
2272 * issig() and stop for the parent.
2273 */
2274 if (sig_proc->p_lflag & P_LTRACED) {
2275 if (sig_proc->p_stat != SSTOP) {
2276 goto runlocked;
2277 } else {
2278 goto sigout_locked;
2279 }
2280 }
2281
2282 if ((flavor & PSIG_VFORK) != 0) {
2283 goto runlocked;
2284 }
2285
2286 if (action == KERN_SIG_WAIT) {
2287 #if CONFIG_DTRACE
2288 /*
2289 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
2290 */
2291 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2292
2293 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2294
2295 uth->t_dtrace_siginfo.si_signo = signum;
2296 uth->t_dtrace_siginfo.si_pid = proc_getpid(current_proc());
2297 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2298 uth->t_dtrace_siginfo.si_uid = r_uid;
2299 uth->t_dtrace_siginfo.si_code = 0;
2300 #endif
2301 uth->uu_sigwait = mask;
2302 uth->uu_siglist &= ~mask;
2303 wakeup(&uth->uu_sigwait);
2304 /* if it is SIGCONT, resume the whole process */
2305 if (prop & SA_CONT) {
2306 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2307 sig_proc->p_contproc = proc_getpid(current_proc());
2308 (void) task_resume_internal(sig_task);
2309 }
2310 goto sigout_locked;
2311 }
2312
2313 if (action != SIG_DFL) {
2314 /*
2315 * User wants to catch the signal.
2316 * Wake up the thread, but don't un-suspend it
2317 * (except for SIGCONT).
2318 */
2319 if (prop & SA_CONT) {
2320 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2321 (void) task_resume_internal(sig_task);
2322 sig_proc->p_stat = SRUN;
2323 } else if (sig_proc->p_stat == SSTOP) {
2324 goto sigout_locked;
2325 }
2326 /*
2327 * Fill out siginfo structure information to pass to the
2328 * signalled process/thread sigaction handler, when it
2329 * wakes up. si_code is 0 because this is an ordinary
2330 * signal, not a SIGCHLD, and so si_status is the signal
2331 * number itself, instead of the child process exit status.
2332 * We shift this left because it will be shifted right before
2333 * it is passed to user space. kind of ugly to use W_EXITCODE
2334 * this way, but it beats defining a new macro.
2335 *
2336 * Note: Avoid the SIGCHLD recursion case!
2337 */
2338 if (signum != SIGCHLD) {
2339 r_uid = kauth_getruid();
2340
2341 sig_proc->si_pid = proc_getpid(current_proc());
2342 sig_proc->si_status = W_EXITCODE(signum, 0);
2343 sig_proc->si_uid = r_uid;
2344 sig_proc->si_code = 0;
2345 }
2346
2347 goto runlocked;
2348 } else {
2349 /* Default action - varies */
2350 if (mask & stopsigmask) {
2351 assert(signal_reason == NULL);
2352 /*
2353 * These are the signals which by default
2354 * stop a process.
2355 *
2356 * Don't clog system with children of init
2357 * stopped from the keyboard.
2358 */
2359 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2360 uth->uu_siglist &= ~mask;
2361 proc_unlock(sig_proc);
2362 /* siglock still locked, proc_lock not locked */
2363 psignal_locked(sig_proc, SIGKILL);
2364 goto sigout_unlocked;
2365 }
2366
2367 /*
2368 * Stop the task
2369 * if task hasn't already been stopped by
2370 * a signal.
2371 */
2372 uth->uu_siglist &= ~mask;
2373 if (sig_proc->p_stat != SSTOP) {
2374 sig_proc->p_xstat = signum;
2375 sig_proc->p_stat = SSTOP;
2376 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2377 sig_proc->p_lflag &= ~P_LWAITED;
2378 proc_signalend(sig_proc, 1);
2379 proc_unlock(sig_proc);
2380
2381 pp = proc_parentholdref(sig_proc);
2382 proc_signalstart(sig_proc, 0);
2383 stop(sig_proc, pp);
2384 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2385 my_cred = kauth_cred_proc_ref(sig_proc);
2386 r_uid = kauth_cred_getruid(my_cred);
2387 kauth_cred_unref(&my_cred);
2388
2389 proc_lock(sig_proc);
2390 pp->si_pid = proc_getpid(sig_proc);
2391 /*
2392 * POSIX: sigaction for a stopped child
2393 * when sent to the parent must set the
2394 * child's signal number into si_status.
2395 */
2396 if (signum != SIGSTOP) {
2397 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2398 } else {
2399 pp->si_status = W_EXITCODE(signum, signum);
2400 }
2401 pp->si_code = CLD_STOPPED;
2402 pp->si_uid = r_uid;
2403 proc_unlock(sig_proc);
2404
2405 psignal(pp, SIGCHLD);
2406 }
2407 if (pp != PROC_NULL) {
2408 proc_parentdropref(pp, 0);
2409 }
2410
2411 goto sigout_unlocked;
2412 }
2413
2414 goto sigout_locked;
2415 }
2416
2417 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2418
2419 switch (signum) {
2420 /*
2421 * Signals ignored by default have been dealt
2422 * with already, since their bits are on in
2423 * p_sigignore.
2424 */
2425
2426 case SIGKILL:
2427 /*
2428 * Kill signal always sets process running and
2429 * unsuspends it.
2430 */
2431 /*
2432 * Process will be running after 'run'
2433 */
2434 sig_proc->p_stat = SRUN;
2435 /*
2436 * In scenarios where suspend/resume race the signal,
2437 * AST_BSD may already be missing by the time we get
2438 * here, so set it again to avoid the race. This was
2439 * seen with spindump-enabled shutdowns. We should
2440 * cover this properly down the line.
2441 */
2442 act_set_astbsd(sig_thread);
2443 kret = thread_abort(sig_thread);
2444 update_thread_policy = (kret == KERN_SUCCESS);
2445
2446 if (uth->uu_exit_reason == OS_REASON_NULL) {
2447 if (signal_reason == OS_REASON_NULL) {
2448 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2449 proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);
2450
2451 signal_reason = build_signal_reason(signum, NULL);
2452 }
2453
2454 os_reason_ref(signal_reason);
2455 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2456 }
2457
2458 goto sigout_locked;
2459
2460 case SIGCONT:
2461 /*
2462 * Let the process run. If it's sleeping on an
2463 * event, it remains so.
2464 */
2465 assert(signal_reason == NULL);
2466 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2467 sig_proc->p_contproc = proc_getpid(sig_proc);
2468 sig_proc->p_xstat = signum;
2469
2470 (void) task_resume_internal(sig_task);
2471
2472 /*
2473 * When processing a SIGCONT, we need to check
2474 * to see if there are signals pending that
2475 * were not delivered because we had been
2476 * previously stopped. If that's the case,
2477 * we need to thread_abort_safely() to trigger
2478 * interruption of the current system call to
2479 * cause their handlers to fire. If it's only
2480 * the SIGCONT, then don't wake up.
2481 */
2482 if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2483 uth->uu_siglist &= ~mask;
2484 sig_proc->p_stat = SRUN;
2485 goto runlocked;
2486 }
2487
2488 uth->uu_siglist &= ~mask;
2489 sig_proc->p_stat = SRUN;
2490 goto sigout_locked;
2491
2492 default:
2493 {
2494 /*
2495 * A signal which has a default action of killing
2496 * the process, and for which there is no handler,
2497 * needs to act like SIGKILL
2498 *
2499 * The thread_sstop condition is a remnant of a fix
2500 * where PSIG_THREAD exit reasons were not set
2501 * correctly (93593933). We keep the behavior with
2502 * SSTOP the same as before.
2503 */
2504 const bool default_kill = (action == SIG_DFL) && (prop & SA_KILL);
2505 const bool thread_sstop = (flavor & PSIG_THREAD) && (sig_proc->p_stat == SSTOP);
2506
2507 if (default_kill && !thread_sstop) {
2508 sig_proc->p_stat = SRUN;
2509 kret = thread_abort(sig_thread);
2510 update_thread_policy = (kret == KERN_SUCCESS);
2511
2512 if (uth->uu_exit_reason == OS_REASON_NULL) {
2513 if (signal_reason == OS_REASON_NULL) {
2514 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2515 proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);
2516
2517 signal_reason = build_signal_reason(signum, NULL);
2518 }
2519
2520 os_reason_ref(signal_reason);
2521 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2522 }
2523
2524 goto sigout_locked;
2525 }
2526
2527 /*
2528 * All other signals wake up the process, but don't
2529 * resume it.
2530 */
2531 if (sig_proc->p_stat == SSTOP) {
2532 goto sigout_locked;
2533 }
2534 goto runlocked;
2535 }
2536 }
2537 }
2538 /*NOTREACHED*/
2539
2540 runlocked:
2541 /*
2542 * If we're being traced (possibly because someone attached us
2543 * while we were stopped), check for a signal from the debugger.
2544 */
2545 if (sig_proc->p_stat == SSTOP) {
2546 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) {
2547 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2548 }
2549
2550 if ((flavor & PSIG_VFORK) != 0) {
2551 sig_proc->p_stat = SRUN;
2552 }
2553 } else {
2554 /*
2555 * The BSD equivalent of setrunnable(p):
2556 * wake up the thread if it is interruptible.
2557 */
2558 sig_proc->p_stat = SRUN;
2559 if ((flavor & PSIG_VFORK) == 0) {
2560 thread_abort_safely(sig_thread);
2561 }
2562 }
2563
2564 sigout_locked:
2565 if (update_thread_policy) {
2566 /*
2567 * Update the thread policy to heading to terminate, increase priority if
2568 * necessary. This needs to be done before we drop the proc lock because the
2569 * thread can take the fatal signal once it's dropped.
2570 */
2571 proc_set_thread_policy(sig_thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2572 }
2573
2574 proc_unlock(sig_proc);
2575
2576 sigout_unlocked:
2577 os_reason_free(signal_reason);
2578 if ((flavor & PSIG_LOCKED) == 0) {
2579 proc_signalend(sig_proc, 0);
2580 }
2581 }
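
/*
 * The psignal*() entry points below choose a delivery flavor for
 * psignal_internal(): the plain variants target any willing thread in
 * the process; PSIG_THREAD targets exactly the given thread;
 * PSIG_TRY_THREAD prefers the given thread but falls back to any;
 * PSIG_VFORK delivers to a vfork child through the explicitly passed
 * task and thread; and PSIG_LOCKED is for callers that have already
 * done proc_signalstart().
 */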
2582
2583 void
2584 psignal(proc_t p, int signum)
2585 {
2586 psignal_internal(p, NULL, NULL, 0, signum, NULL);
2587 }
2588
2589 void
2590 psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason)
2591 {
2592 psignal_internal(p, NULL, NULL, 0, signum, signal_reason);
2593 }
2594
2595 void
2596 psignal_sigkill_with_reason(struct proc *p, struct os_reason *signal_reason)
2597 {
2598 psignal_internal(p, NULL, NULL, 0, SIGKILL, signal_reason);
2599 }
2600
2601 void
2602 psignal_locked(proc_t p, int signum)
2603 {
2604 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum, NULL);
2605 }
2606
2607 void
2608 psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, int signum, struct os_reason *signal_reason)
2609 {
2610 psignal_internal(p, new_task, thread, PSIG_VFORK, signum, signal_reason);
2611 }
2612
2613 void
2614 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2615 {
2616 psignal_internal(p, new_task, thread, PSIG_VFORK, signum, NULL);
2617 }
2618
2619 void
2620 psignal_uthread(thread_t thread, int signum)
2621 {
2622 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum, NULL);
2623 }
2624
2625 /* same as psignal(), but prefer delivery to 'thread' if possible */
2626 void
2627 psignal_try_thread(proc_t p, thread_t thread, int signum)
2628 {
2629 psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum, NULL);
2630 }
2631
2632 void
2633 psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2634 {
2635 psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
2636 }
2637
2638 void
2639 psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2640 {
2641 psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason);
2642 }
2643
2644 void
2645 psignal_sigkill_try_thread_with_reason(proc_t p, thread_t thread, struct os_reason *signal_reason)
2646 {
2647 psignal_try_thread_with_reason(p, thread, SIGKILL, signal_reason);
2648 }
2649
2650 /*
2651 * If the current process has received a signal (should be caught or cause
2652 * termination, should interrupt current syscall), return the signal number.
2653 * Stop signals with default action are processed immediately, then cleared;
2654 * they aren't returned. This is checked after each entry to the system for
2655 * a syscall or trap (though this can usually be done without calling issignal
2656 * by checking the pending signal masks in the CURSIG macro.) The normal call
2657 * sequence is
2658 *
2659 * while (signum = CURSIG(curproc))
2660 * postsig(signum);
2661 */
2662 int
2663 issignal_locked(proc_t p)
2664 {
2665 int signum, mask, prop, sigbits;
2666 thread_t cur_act;
2667 struct uthread * ut;
2668 proc_t pp;
2669 kauth_cred_t my_cred;
2670 int retval = 0;
2671 uid_t r_uid;
2672
2673 cur_act = current_thread();
2674
2675 #if SIGNAL_DEBUG
2676 if (rdebug_proc && (p == rdebug_proc)) {
2677 ram_printf(3);
2678 }
2679 #endif /* SIGNAL_DEBUG */
2680
2681 /*
2682 * Try to grab the signal lock.
2683 */
2684 if (sig_try_locked(p) <= 0) {
2685 return 0;
2686 }
2687
2688 proc_signalstart(p, 1);
2689
2690 ut = get_bsdthread_info(cur_act);
2691 for (;;) {
2692 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2693
2694 if (p->p_lflag & P_LPPWAIT) {
2695 sigbits &= ~stopsigmask;
2696 }
2697 if (sigbits == 0) { /* no signal to send */
2698 retval = 0;
2699 goto out;
2700 }
2701
2702 signum = ffs((unsigned int)sigbits);
2703 mask = sigmask(signum);
2704 prop = sigprop[signum];
2705
2706 /*
2707 * We should see pending but ignored signals
2708 * only if P_LTRACED was on when they were posted.
2709 */
2710 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2711 ut->uu_siglist &= ~mask;
2712 continue;
2713 }
2714
2715 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2716 /*
2717 * If traced, deliver the signal to the debugger, and wait to be
2718 * released.
2719 */
2720 task_t task;
2721 p->p_xstat = signum;
2722
2723 if (p->p_lflag & P_LSIGEXC) {
2724 p->sigwait = TRUE;
2725 p->sigwait_thread = cur_act;
2726 p->p_stat = SSTOP;
2727 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2728 p->p_lflag &= ~P_LWAITED;
2729 ut->uu_siglist &= ~mask; /* clear the current signal from the pending list */
2730 proc_signalend(p, 1);
2731 proc_unlock(p);
2732 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2733 proc_lock(p);
2734 proc_signalstart(p, 1);
2735 } else {
2736 proc_unlock(p);
2737 my_cred = kauth_cred_proc_ref(p);
2738 r_uid = kauth_cred_getruid(my_cred);
2739 kauth_cred_unref(&my_cred);
2740
2741 /*
2742 * XXX Have to really stop for debuggers;
2743 * XXX stop() doesn't do the right thing.
2744 */
2745 task = proc_task(p);
2746 task_suspend_internal(task);
2747
2748 proc_lock(p);
2749 p->sigwait = TRUE;
2750 p->sigwait_thread = cur_act;
2751 p->p_stat = SSTOP;
2752 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2753 p->p_lflag &= ~P_LWAITED;
2754 ut->uu_siglist &= ~mask;
2755
2756 proc_signalend(p, 1);
2757 proc_unlock(p);
2758
2759 pp = proc_parentholdref(p);
2760 if (pp != PROC_NULL) {
2761 proc_lock(pp);
2762 pp->si_pid = proc_getpid(p);
2763 pp->p_xhighbits = p->p_xhighbits;
2764 p->p_xhighbits = 0;
2765 pp->si_status = p->p_xstat;
2766 pp->si_code = CLD_TRAPPED;
2767 pp->si_uid = r_uid;
2768 proc_unlock(pp);
2769
2770 psignal(pp, SIGCHLD);
2771 proc_list_lock();
2772 wakeup((caddr_t)pp);
2773 proc_parentdropref(pp, 1);
2774 proc_list_unlock();
2775 }
2776
2777 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2778 thread_block(THREAD_CONTINUE_NULL);
2779 proc_lock(p);
2780 proc_signalstart(p, 1);
2781 }
2782
2783 p->sigwait = FALSE;
2784 p->sigwait_thread = NULL;
2785 wakeup((caddr_t)&p->sigwait_thread);
2786
2787 if (signum == SIGKILL || ut->uu_siglist & sigmask(SIGKILL)) {
2788 /*
2789 * Deliver a pending sigkill even if it's not the current signal.
2790 * Necessary for PT_KILL, which should not be delivered to the
2791 * debugger, but we can't differentiate it from any other KILL.
2792 */
2793 signum = SIGKILL;
2794 goto deliver_sig;
2795 }
2796
2797 /* We may have to quit. */
2798 if (thread_should_abort(current_thread())) {
2799 retval = 0;
2800 goto out;
2801 }
2802
2803 /*
2804 * If parent wants us to take the signal,
2805 * then it will leave it in p->p_xstat;
2806 * otherwise we just look for signals again.
2807 */
2808 signum = p->p_xstat;
2809 if (signum == 0) {
2810 continue;
2811 }
2812
2813 /*
2814 * Put the new signal into p_siglist. If the
2815 * signal is being masked, look for other signals.
2816 */
2817 mask = sigmask(signum);
2818 ut->uu_siglist |= mask;
2819 if (ut->uu_sigmask & mask) {
2820 continue;
2821 }
2822 }
2823
2824 /*
2825 * Decide whether the signal should be returned.
2826 * Return the signal's number, or fall through
2827 * to clear it from the pending mask.
2828 */
2829
2830 switch ((long)SIGACTION(p, signum)) {
2831 case (long)SIG_DFL:
2832 /*
2833 * If there is a pending stop signal to process
2834 * with default action, stop here,
2835 * then clear the signal. However,
2836 * if process is member of an orphaned
2837 * process group, ignore tty stop signals.
2838 */
2839 if (prop & SA_STOP) {
2840 struct pgrp * pg;
2841
2842 proc_unlock(p);
2843 pg = proc_pgrp(p, NULL);
2844 if (p->p_lflag & P_LTRACED ||
2845 (pg->pg_jobc == 0 &&
2846 prop & SA_TTYSTOP)) {
2847 proc_lock(p);
2848 pgrp_rele(pg);
2849 break; /* ignore signal */
2850 }
2851 pgrp_rele(pg);
2852 if (p->p_stat != SSTOP) {
2853 proc_lock(p);
2854 p->p_xstat = signum;
2855 p->p_stat = SSTOP;
2856 p->p_lflag &= ~P_LWAITED;
2857 proc_signalend(p, 1);
2858 proc_unlock(p);
2859
2860 pp = proc_parentholdref(p);
2861 proc_signalstart(p, 0);
2862 stop(p, pp);
2863 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2864 my_cred = kauth_cred_proc_ref(p);
2865 r_uid = kauth_cred_getruid(my_cred);
2866 kauth_cred_unref(&my_cred);
2867
2868 proc_lock(pp);
2869 pp->si_pid = proc_getpid(p);
2870 pp->si_status = WEXITSTATUS(p->p_xstat);
2871 pp->si_code = CLD_STOPPED;
2872 pp->si_uid = r_uid;
2873 proc_unlock(pp);
2874
2875 psignal(pp, SIGCHLD);
2876 }
2877 if (pp != PROC_NULL) {
2878 proc_parentdropref(pp, 0);
2879 }
2880 }
2881 proc_lock(p);
2882 break;
2883 } else if (prop & SA_IGNORE) {
2884 /*
2885 * Except for SIGCONT, shouldn't get here.
2886 * Default action is to ignore; drop it.
2887 */
2888 break; /* ignore signal */
2889 } else {
2890 goto deliver_sig;
2891 }
2892
2893 case (long)SIG_IGN:
2894 /*
2895 * Masking above should prevent us ever trying
2896 * to take action on an ignored signal other
2897 * than SIGCONT, unless process is traced.
2898 */
2899 if ((prop & SA_CONT) == 0 &&
2900 (p->p_lflag & P_LTRACED) == 0) {
2901 printf("issignal\n");
2902 }
2903 break; /* ignore signal */
2904
2905 default:
2906 /* This signal has an action - deliver it. */
2907 goto deliver_sig;
2908 }
2909
2910 /* If we dropped through, the signal was ignored - remove it from pending list. */
2911 ut->uu_siglist &= ~mask;
2912 } /* for(;;) */
2913
2914 /* NOTREACHED */
2915
2916 deliver_sig:
2917 ut->uu_siglist &= ~mask;
2918 retval = signum;
2919
2920 out:
2921 proc_signalend(p, 1);
2922 return retval;
2923 }
2924
2925 /* called from _sleep */
2926 int
2927 CURSIG(proc_t p)
2928 {
2929 int signum, mask, prop, sigbits;
2930 thread_t cur_act;
2931 struct uthread * ut;
2932 int retnum = 0;
2933
2934
2935 cur_act = current_thread();
2936
2937 ut = get_bsdthread_info(cur_act);
2938
2939 if (ut->uu_siglist == 0) {
2940 return 0;
2941 }
2942
2943 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) {
2944 return 0;
2945 }
2946
2947 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2948
2949 for (;;) {
2950 if (p->p_lflag & P_LPPWAIT) {
2951 sigbits &= ~stopsigmask;
2952 }
2953 if (sigbits == 0) { /* no signal to send */
2954 return retnum;
2955 }
2956
2957 signum = ffs((unsigned int)sigbits);
2958 mask = sigmask(signum);
2959 prop = sigprop[signum];
2960 sigbits &= ~mask; /* take the signal out */
2961
2962 /*
2963 * We should see pending but ignored signals
2964 * only if P_LTRACED was on when they were posted.
2965 */
2966 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2967 continue;
2968 }
2969
2970 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2971 return signum;
2972 }
2973
2974 /*
2975 * Decide whether the signal should be returned.
2976 * Return the signal's number, or fall through
2977 * to clear it from the pending mask.
2978 */
2979
2980 switch ((long)SIGACTION(p, signum)) {
2981 case (long)SIG_DFL:
2982 /*
2983 * If there is a pending stop signal to process
2984 * with default action, stop here,
2985 * then clear the signal. However,
2986 * if process is member of an orphaned
2987 * process group, ignore tty stop signals.
2988 */
2989 if (prop & SA_STOP) {
2990 struct pgrp *pg;
2991
2992 pg = proc_pgrp(p, NULL);
2993
2994 if (p->p_lflag & P_LTRACED ||
2995 (pg->pg_jobc == 0 &&
2996 prop & SA_TTYSTOP)) {
2997 pgrp_rele(pg);
2998 break; /* == ignore */
2999 }
3000 pgrp_rele(pg);
3001 retnum = signum;
3002 break;
3003 } else if (prop & SA_IGNORE) {
3004 /*
3005 * Except for SIGCONT, shouldn't get here.
3006 * Default action is to ignore; drop it.
3007 */
3008 break; /* == ignore */
3009 } else {
3010 return signum;
3011 }
3012 /*NOTREACHED*/
3013
3014 case (long)SIG_IGN:
3015 /*
3016 * Masking above should prevent us ever trying
3017 * to take action on an ignored signal other
3018 * than SIGCONT, unless process is traced.
3019 */
3020 if ((prop & SA_CONT) == 0 &&
3021 (p->p_lflag & P_LTRACED) == 0) {
3022 printf("issignal\n");
3023 }
3024 break; /* == ignore */
3025
3026 default:
3027 /*
3028 * This signal has an action, let
3029 * postsig() process it.
3030 */
3031 return signum;
3032 }
3033 }
3034 /* NOTREACHED */
3035 }
3036
3037 /*
3038 * Put the argument process into the stopped state and notify the parent
3039 * via wakeup. Signals are handled elsewhere. The process must not be
3040 * on the run queue.
3041 */
3042 static void
3043 stop(proc_t p, proc_t parent)
3044 {
3045 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3046 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
3047 proc_list_lock();
3048 wakeup((caddr_t)parent);
3049 proc_list_unlock();
3050 }
3051 (void) task_suspend_internal(proc_task(p));
3052 }
3053
3054 /*
3055 * Take the action for the specified signal
3056 * from the current set of pending signals.
3057 */
3058 void
3059 postsig_locked(int signum)
3060 {
3061 proc_t p = current_proc();
3062 struct sigacts *ps = &p->p_sigacts;
3063 user_addr_t catcher;
3064 uint32_t code;
3065 int mask, returnmask;
3066 struct uthread * ut;
3067 os_reason_t ut_exit_reason = OS_REASON_NULL;
3068
3069 #if DIAGNOSTIC
3070 if (signum == 0) {
3071 panic("postsig");
3072 }
3073 #endif
3074
3075 /*
3076 * Try to grab the signal lock.
3077 */
3078 if (sig_try_locked(p) <= 0) {
3079 return;
3080 }
3081
3082 proc_signalstart(p, 1);
3083
3084 ut = current_uthread();
3085 mask = sigmask(signum);
3086 ut->uu_siglist &= ~mask;
3087 catcher = SIGACTION(p, signum);
3088 if (catcher == SIG_DFL) {
3089 /*
3090 * Default catcher, where the default is to kill
3091 * the process. (Other cases were ignored above.)
3092 */
3093
3094 /*
3095 * exit_with_reason() below consumes a reference to the thread's exit reason, so take an extra
3096 * reference here so the thread still holds one afterwards. The thread's reference will
3097 * ultimately be destroyed in uthread_cleanup().
3098 */
3099 ut_exit_reason = ut->uu_exit_reason;
3100 os_reason_ref(ut_exit_reason);
3101
3102 p->p_acflag |= AXSIG;
3103 if (sigprop[signum] & SA_CORE) {
3104 p->p_sigacts.ps_sig = signum;
3105 proc_signalend(p, 1);
3106 proc_unlock(p);
3107 #if CONFIG_COREDUMP || CONFIG_UCOREDUMP
3108 /*
3109 * For now, driver dumps are only performed by xnu.
3110 * Regular processes can be configured to use xnu
3111 * (synchronously generating very large core files),
3112 * or xnu can generate a specially tagged corpse which
3113 * (depending on other configuration) will cause
3114 * ReportCrash to dump a core file asynchronously.
3115 *
3116 * The userland dumping path must operate
3117 * asynchronously to avoid deadlocks, yet may have
3118 * unexpected failures => indicate dump *initiation*
3119 * via WCOREFLAG (or CLD_DUMPED).
3120 */
3121 do {
3122 if (task_is_driver(proc_task(p))) {
3123 #if CONFIG_COREDUMP
3124 if (coredump(p, 0, COREDUMP_FULLFSYNC) == 0) {
3125 signum |= WCOREFLAG;
3126 }
3127 #endif /* CONFIG_COREDUMP */
3128 break;
3129 }
3130 #if CONFIG_UCOREDUMP
3131 if (do_ucoredump) {
3132 /*
3133 * A compatibility nod to existing
3134 * coredump behavior: only set
3135 * WCOREFLAG here if the user has
3136 * implicitly asked for a core
3137 * file and it passes security
3138 * checks. (A core file might still
3139 * be dumped because of other policy.)
3140 */
3141 if (proc_limitgetcur(p, RLIMIT_CORE) != 0 &&
3142 is_coredump_eligible(p) == 0) {
3143 signum |= WCOREFLAG;
3144 }
3145 break;
3146 }
3147 #endif /* CONFIG_UCOREDUMP */
3148 #if CONFIG_COREDUMP
3149 if (coredump(p, 0, 0) == 0) {
3150 signum |= WCOREFLAG;
3151 }
3152 #endif /* CONFIG_COREDUMP */
3153 } while (0);
3154 #endif /* CONFIG_COREDUMP || CONFIG_UCOREDUMP */
3155 } else {
3156 proc_signalend(p, 1);
3157 proc_unlock(p);
3158 }
3159
3160 #if CONFIG_DTRACE
3161 bzero(&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
3162
3163 const int signo = signum & ~WCOREFLAG;
3164 ut->t_dtrace_siginfo.si_signo = signo;
3165 ut->t_dtrace_siginfo.si_pid = p->si_pid;
3166 ut->t_dtrace_siginfo.si_uid = p->si_uid;
3167 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
3168
3169 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
3170 switch (signo) {
3171 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
3172 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
3173 break;
3174 default:
3175 break;
3176 }
3177
3178
3179 DTRACE_PROC3(signal__handle, int, signo, siginfo_t *, &(ut->t_dtrace_siginfo),
3180 void (*)(void), SIG_DFL);
3181 #endif
3182
3183 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
3184 proc_getpid(p), W_EXITCODE(0, signum), 3, 0, 0);
3185
3186 exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut_exit_reason);
3187
3188 proc_lock(p);
3189 return;
3190 } else {
3191 /*
3192 * If we get here, the signal must be caught.
3193 */
3194 #if DIAGNOSTIC
3195 if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) {
3196 log(LOG_WARNING,
3197 "postsig: processing masked or ignored signal\n");
3198 }
3199 #endif
3200
3201 /*
3202 * Set the new mask value and also defer further
3203 * occurrences of this signal.
3204 *
3205 * Special case: user has done a sigpause. Here the
3206 * current mask is not of interest, but rather the
3207 * mask from before the sigpause is what we want
3208 * restored after the signal processing is completed.
3209 */
3210 if (ut->uu_flag & UT_SAS_OLDMASK) {
3211 returnmask = ut->uu_oldmask;
3212 ut->uu_flag &= ~UT_SAS_OLDMASK;
3213 ut->uu_oldmask = 0;
3214 } else {
3215 returnmask = ut->uu_sigmask;
3216 }
3217 ut->uu_sigmask |= ps->ps_catchmask[signum];
3218 if ((ps->ps_signodefer & mask) == 0) {
3219 ut->uu_sigmask |= mask;
3220 }
3221 sigset_t siginfo = ps->ps_siginfo;
3222 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
3223 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) {
3224 p->p_sigignore |= mask;
3225 }
3226 if (SIGACTION(p, signum) != SIG_DFL) {
3227 proc_set_sigact(p, signum, SIG_DFL);
3228 }
3229 ps->ps_siginfo &= ~mask;
3230 ps->ps_signodefer &= ~mask;
3231 }
3232
3233 if (ps->ps_sig != signum) {
3234 code = 0;
3235 } else {
3236 code = ps->ps_code;
3237 ps->ps_code = 0;
3238 }
3239 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
3240 sendsig(p, catcher, signum, returnmask, code, siginfo);
3241 }
3242 proc_signalend(p, 1);
3243 }
3244
3245 /*
3246 * Attach a signal knote to the list of knotes for this process.
3247 *
3248 * Signal knotes share the knote list with proc knotes. This
3249 * could be avoided by using a signal-specific knote list, but
3250 * probably isn't worth the trouble.
3251 */
3252
3253 static int
3254 filt_sigattach(struct knote *kn, __unused struct kevent_qos_s *kev)
3255 {
3256 proc_t p = current_proc(); /* can attach only to oneself */
3257
3258 proc_klist_lock();
3259
3260 kn->kn_proc = p;
3261 kn->kn_flags |= EV_CLEAR; /* automatically set */
3262 kn->kn_sdata = 0; /* incoming data is ignored */
3263
3264 KNOTE_ATTACH(&p->p_klist, kn);
3265
3266 proc_klist_unlock();
3267
3268 /* edge-triggered events can't have fired before we attached */
3269 return 0;
3270 }
3271
3272 /*
3273 * remove the knote from the process list, if it hasn't already
3274 * been removed by exit processing.
3275 */
3276
3277 static void
3278 filt_sigdetach(struct knote *kn)
3279 {
3280 proc_t p;
3281
3282 proc_klist_lock();
3283 p = kn->kn_proc;
3284 if (p != NULL) {
3285 kn->kn_proc = NULL;
3286 KNOTE_DETACH(&p->p_klist, kn);
3287 }
3288 proc_klist_unlock();
3289 }
3290
3291 /*
3292 * Post an event to the signal filter. Because we share the same list
3293 * as process knotes, we have to filter out and handle only signal events.
3294 *
3295 * We assume that we process fdt_invalidate() before we post the NOTE_EXIT for
3296 * a process during exit. Therefore, since signal filters can only be
3297 * set up "in-process", we should have already torn down the kqueue
3298 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3299 */
3300 static int
3301 filt_signal(struct knote *kn, long hint)
3302 {
3303 if (hint & NOTE_SIGNAL) {
3304 hint &= ~NOTE_SIGNAL;
3305
3306 if (kn->kn_id == (unsigned int)hint) {
3307 kn->kn_hook32++;
3308 }
3309 } else if (hint & NOTE_EXIT) {
3310 panic("filt_signal: detected NOTE_EXIT event");
3311 }
3312
3313 return kn->kn_hook32 != 0;
3314 }
3315
3316 static int
3317 filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev)
3318 {
3319 #pragma unused(kev)
3320
3321 int res;
3322
3323 proc_klist_lock();
3324
3325 /*
3326 * No data to save - just capture whether it has already fired
3327 */
3328 res = (kn->kn_hook32 > 0);
3329
3330 proc_klist_unlock();
3331
3332 return res;
3333 }
3334
3335 static int
3336 filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev)
3337 {
3338 int res = 0;
3339
3340 /*
3341 * Snapshot the event data.
3342 */
3343
3344 proc_klist_lock();
3345 if (kn->kn_hook32) {
3346 knote_fill_kevent(kn, kev, kn->kn_hook32);
3347 kn->kn_hook32 = 0;
3348 res = 1;
3349 }
3350 proc_klist_unlock();
3351 return res;
3352 }
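
/*
 * Userland view, for reference (standard kqueue API, hypothetical
 * registration):
 *
 *	struct kevent ke;
 *	EV_SET(&ke, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *
 * Each delivery of SIGUSR1 then bumps kn_hook32, and the event
 * returned by kevent() carries the delivery count in its data field
 * (filled by knote_fill_kevent() above). Normal signal disposition is
 * unaffected.
 */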
3353
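/*
 * bsd_ast() is the BSD-side AST handler: it ticks the virtual and
 * profiling interval timers and the CPU rlimit timer (posting
 * SIGVTALRM, SIGPROF and SIGXCPU as they expire), services deferred
 * DTrace signal/stop/resume requests, drains pending signals via
 * issignal_locked()/postsig_locked(), and runs bsdinit_task() exactly
 * once at boot.
 */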
3354 void
3355 bsd_ast(thread_t thread)
3356 {
3357 proc_t p = current_proc();
3358 struct uthread *ut = get_bsdthread_info(thread);
3359 int signum;
3360 static int bsd_init_done = 0;
3361
3362 if (p == NULL) {
3363 return;
3364 }
3365
3366 if (timerisset(&p->p_vtimer_user.it_value)) {
3367 uint32_t microsecs;
3368
3369 task_vtimer_update(proc_task(p), TASK_VTIMER_USER, &microsecs);
3370
3371 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
3372 if (timerisset(&p->p_vtimer_user.it_value)) {
3373 task_vtimer_set(proc_task(p), TASK_VTIMER_USER);
3374 } else {
3375 task_vtimer_clear(proc_task(p), TASK_VTIMER_USER);
3376 }
3377
3378 psignal_try_thread(p, thread, SIGVTALRM);
3379 }
3380 }
3381
3382 if (timerisset(&p->p_vtimer_prof.it_value)) {
3383 uint32_t microsecs;
3384
3385 task_vtimer_update(proc_task(p), TASK_VTIMER_PROF, &microsecs);
3386
3387 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
3388 if (timerisset(&p->p_vtimer_prof.it_value)) {
3389 task_vtimer_set(proc_task(p), TASK_VTIMER_PROF);
3390 } else {
3391 task_vtimer_clear(proc_task(p), TASK_VTIMER_PROF);
3392 }
3393
3394 psignal_try_thread(p, thread, SIGPROF);
3395 }
3396 }
3397
3398 if (timerisset(&p->p_rlim_cpu)) {
3399 struct timeval tv;
3400
3401 task_vtimer_update(proc_task(p), TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
3402
3403 proc_spinlock(p);
3404 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
3405 tv.tv_sec = 0;
3406 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
3407 proc_spinunlock(p);
3408 } else {
3409 timerclear(&p->p_rlim_cpu);
3410 proc_spinunlock(p);
3411
3412 task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
3413
3414 psignal_try_thread(p, thread, SIGXCPU);
3415 }
3416 }
3417
3418 #if CONFIG_DTRACE
3419 if (ut->t_dtrace_sig) {
3420 uint8_t dt_action_sig = ut->t_dtrace_sig;
3421 ut->t_dtrace_sig = 0;
3422 psignal(p, dt_action_sig);
3423 }
3424
3425 if (ut->t_dtrace_stop) {
3426 ut->t_dtrace_stop = 0;
3427 proc_lock(p);
3428 p->p_dtrace_stop = 1;
3429 proc_unlock(p);
3430 (void)task_suspend_internal(proc_task(p));
3431 }
3432
3433 if (ut->t_dtrace_resumepid) {
3434 proc_t resumeproc = proc_find((int)ut->t_dtrace_resumepid);
3435 ut->t_dtrace_resumepid = 0;
3436 if (resumeproc != PROC_NULL) {
3437 proc_lock(resumeproc);
3438 /* We only act on processes stopped by dtrace */
3439 if (resumeproc->p_dtrace_stop) {
3440 resumeproc->p_dtrace_stop = 0;
3441 proc_unlock(resumeproc);
3442 task_resume_internal(proc_task(resumeproc));
3443 } else {
3444 proc_unlock(resumeproc);
3445 }
3446 proc_rele(resumeproc);
3447 }
3448 }
3449
3450 #endif /* CONFIG_DTRACE */
3451
3452 proc_lock(p);
3453 if (CHECK_SIGNALS(p, current_thread(), ut)) {
3454 while ((signum = issignal_locked(p))) {
3455 postsig_locked(signum);
3456 }
3457 }
3458 proc_unlock(p);
3459
3460 if (!bsd_init_done) {
3461 bsd_init_done = 1;
3462 bsdinit_task();
3463 }
3464 }
3465
3466 /* ptrace set runnable */
3467 void
3468 pt_setrunnable(proc_t p)
3469 {
3470 task_t task;
3471
3472 task = proc_task(p);
3473
3474 if (p->p_lflag & P_LTRACED) {
3475 proc_lock(p);
3476 p->p_stat = SRUN;
3477 proc_unlock(p);
3478 if (p->sigwait) {
3479 wakeup((caddr_t)&(p->sigwait));
3480 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
3481 task_release(task);
3482 }
3483 }
3484 }
3485 }
3486
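/*
 * Convenience wrapper: packages (code, sub) into a Mach exception
 * codes array and raises the BSD exception.
 */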
3487 kern_return_t
3488 do_bsdexception(
3489 int exc,
3490 int code,
3491 int sub)
3492 {
3493 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3494
3495 codes[0] = code;
3496 codes[1] = sub;
3497 return bsd_exception(exc, codes, 2);
3498 }
3499
3500 int
3501 proc_pendingsignals(proc_t p, sigset_t mask)
3502 {
3503 struct uthread * uth;
3504 sigset_t bits = 0;
3505
3506 proc_lock(p);
3507 /* If the process is in proc exit, return no signal info */
3508 if (p->p_lflag & P_LPEXIT) {
3509 goto out;
3510 }
3511
3512
3513 bits = 0;
3514 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3515 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3516 }
3517 out:
3518 proc_unlock(p);
3519 return bits;
3520 }
3521
3522 int
3523 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3524 {
3525 struct uthread * uth;
3526 sigset_t bits = 0;
3527
3528 proc_lock(p);
3529 uth = (struct uthread *)get_bsdthread_info(th);
3530 if (uth) {
3531 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3532 }
3533 proc_unlock(p);
3534 return bits;
3535 }
3536
3537 /*
3538 * Allow external reads of the sigprop array.
3539 */
3540 int
3541 hassigprop(int sig, int prop)
3542 {
3543 return sigprop[sig] & prop;
3544 }
3545
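/*
 * Async-I/O signal helper following the F_SETOWN convention: a
 * negative pgid names the process group -pgid, a positive pgid a
 * single process.
 */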
3546 void
3547 pgsigio(pid_t pgid, int sig)
3548 {
3549 proc_t p = PROC_NULL;
3550
3551 if (pgid < 0) {
3552 gsignal(-(pgid), sig);
3553 } else if (pgid > 0 && (p = proc_find(pgid)) != 0) {
3554 psignal(p, sig);
3555 }
3556 if (p != PROC_NULL) {
3557 proc_rele(p);
3558 }
3559 }
3560
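/*
 * proc_signalstart()/proc_signalend() implement a hand-rolled,
 * sleepable signal lock: P_LINSIGNAL marks the lock held,
 * p_signalholder records the owner, and p_sigwaitcnt tracks sleepers
 * so proc_signalend() knows whether a wakeup is needed.
 */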
3561 void
3562 proc_signalstart(proc_t p, int locked)
3563 {
3564 if (!locked) {
3565 proc_lock(p);
3566 }
3567
3568 if (p->p_signalholder == current_thread()) {
3569 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3570 }
3571
3572 p->p_sigwaitcnt++;
3573 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) {
3574 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3575 }
3576 p->p_sigwaitcnt--;
3577
3578 p->p_lflag |= P_LINSIGNAL;
3579 p->p_signalholder = current_thread();
3580 if (!locked) {
3581 proc_unlock(p);
3582 }
3583 }
3584
3585 void
3586 proc_signalend(proc_t p, int locked)
3587 {
3588 if (!locked) {
3589 proc_lock(p);
3590 }
3591 p->p_lflag &= ~P_LINSIGNAL;
3592
3593 if (p->p_sigwaitcnt > 0) {
3594 wakeup(&p->p_sigmask);
3595 }
3596
3597 p->p_signalholder = NULL;
3598 if (!locked) {
3599 proc_unlock(p);
3600 }
3601 }
3602
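/*
 * Called with the proc locked. Claims the process for exit by the
 * current thread, then (with the proc lock dropped) holds the task
 * and waits for its threads to quiesce.
 */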
3603 void
3604 sig_lock_to_exit(proc_t p)
3605 {
3606 thread_t self = current_thread();
3607
3608 p->exit_thread = self;
3609 proc_unlock(p);
3610
3611 task_hold_and_wait(proc_task(p), true);
3612
3613 proc_lock(p);
3614 }
3615
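/*
 * Try to take the signal lock for the current thread. Returns 1 on
 * success, 0 if the process is committed to exit, and -1 if the
 * thread was aborted while sleeping; the proc lock is held on all
 * returns.
 */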
3616 int
3617 sig_try_locked(proc_t p)
3618 {
3619 thread_t self = current_thread();
3620
3621 while (p->sigwait || p->exit_thread) {
3622 if (p->exit_thread) {
3623 return 0;
3624 }
3625 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3626 if (thread_should_abort(self)) {
3627 /*
3628 * Terminate request - clean up.
3629 */
3630 proc_lock(p);
3631 return -1;
3632 }
3633 proc_lock(p);
3634 }
3635 return 1;
3636 }
3637