/*
 * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *    The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_sig.c    8.7 (Berkeley) 4/18/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define SIGPROP         /* include signal properties table */
#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/lock.h>
#include <sys/kdebug.h>
#include <sys/reason.h>

#include <sys/mount.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <kern/cpu_number.h>

#include <sys/vm.h>
#include <sys/user.h>           /* for coredump */
#include <kern/ast.h>           /* for APC support */
#include <kern/kalloc.h>
#include <kern/task.h>          /* extern void *get_bsdtask_info(task_t); */
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/thread_call.h>
#include <kern/policy_internal.h>
#include <kern/sync_sema.h>

#include <mach/exception.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <libkern/OSAtomic.h>

#include <sys/sdt.h>
#include <sys/codesign.h>
#include <sys/random.h>
#include <libkern/section_keywords.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

/*
 * Missing prototypes that Mach should export
 *
 * +++
 */
extern int thread_enable_fpe(thread_t act, int onoff);
extern kern_return_t get_signalact(task_t, thread_t *, int);
extern unsigned int get_useraddr(void);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_exec_copy(task_t task);
extern void vm_shared_region_reslide_stale(void);

/*
 * ---
 */

extern void doexception(int exc, mach_exception_code_t code,
    mach_exception_subcode_t sub);

static void stop(proc_t, proc_t);
static int cansignal_nomac(proc_t, kauth_cred_t, proc_t, int);
int cansignal(proc_t, kauth_cred_t, proc_t, int);
int killpg1(proc_t, int, int, int, int);
kern_return_t do_bsdexception(int, int, int);
void __posix_sem_syscall_return(kern_return_t);
char *proc_name_address(void *p);

static int filt_sigattach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_sigdetach(struct knote *kn);
static int filt_signal(struct knote *kn, long hint);
static int filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev);

SECURITY_READ_ONLY_EARLY(struct filterops) sig_filtops = {
    .f_attach = filt_sigattach,
    .f_detach = filt_sigdetach,
    .f_event = filt_signal,
    .f_touch = filt_signaltouch,
    .f_process = filt_signalprocess,
};

/* structures and fns for killpg1 iteration callback and filters */
struct killpg1_filtargs {
    bool posix;
    proc_t curproc;
};

struct killpg1_iterargs {
    proc_t curproc;
    kauth_cred_t uc;
    int signum;
    int nfound;
};

static int killpg1_allfilt(proc_t p, void * arg);
static int killpg1_callback(proc_t p, void * arg);

static int pgsignal_callback(proc_t p, void * arg);
static kern_return_t get_signalthread(proc_t, int, thread_t *);


/* flags for psignal_internal */
#define PSIG_LOCKED     0x1
#define PSIG_VFORK      0x2
#define PSIG_THREAD     0x4
#define PSIG_TRY_THREAD 0x8

static os_reason_t build_signal_reason(int signum, const char *procname);
static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason);

/*
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
{
    out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
    out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
    out->ss_flags = in->ss_flags;
}

static void
sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
{
    out->ss_sp = in->ss_sp;
    out->ss_size = in->ss_size;
    out->ss_flags = in->ss_flags;
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
    out->ss_flags = in->ss_flags;
    out->ss_size = in->ss_size;
    out->ss_sp = CAST_USER_ADDR_T(in->ss_sp);
}
static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
    out->ss_flags = in->ss_flags;
    out->ss_size = (user_size_t)in->ss_size;
    out->ss_sp = (user_addr_t)in->ss_sp;
}

static void
sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
{
    /* This assumes the 32 bit __sa_handler is of type sig_t */
    out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}
static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
    /* This assumes the 64 bit __sa_handler is of type sig_t */
    out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;
}

static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
    out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
    out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;

    kern_return_t kr;
    kr = machine_thread_function_pointers_convert_from_user(current_thread(),
        &out->sa_tramp, 1);
    assert(kr == KERN_SUCCESS);
}

static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
    out->__sigaction_u.__sa_handler = (user_addr_t)in->__sigaction_u.__sa_handler;
    out->sa_tramp = (user_addr_t)in->sa_tramp;
    out->sa_mask = in->sa_mask;
    out->sa_flags = in->sa_flags;

    kern_return_t kr;
    kr = machine_thread_function_pointers_convert_from_user(current_thread(),
        &out->sa_tramp, 1);
    assert(kr == KERN_SUCCESS);
}

#if SIGNAL_DEBUG
void ram_printf(int);
int ram_debug = 0;
unsigned int rdebug_proc = 0;
void
ram_printf(int x)
{
    printf("x is %d", x);
}
#endif /* SIGNAL_DEBUG */


void
signal_setast(thread_t sig_actthread)
{
    act_set_astbsd(sig_actthread);
}

static int
cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
{
    /* you can signal yourself */
    if (src == dst) {
        return 1;
    }

    /* you can't send the init proc SIGKILL, even if root */
    if (signum == SIGKILL && dst == initproc) {
        return 0;
    }

    /* otherwise, root can always signal */
    if (kauth_cred_issuser(uc_src)) {
        return 1;
    }

    /* processes in the same session can send SIGCONT to each other */
    if (signum == SIGCONT && proc_sessionid(src) == proc_sessionid(dst)) {
        return 1;
    }

    /* the source process must be authorized to signal the target */
    {
        int allowed = 0;
        kauth_cred_t uc_dst = NOCRED, uc_ref = NOCRED;

        uc_dst = uc_ref = kauth_cred_proc_ref(dst);

        /*
         * If the real or effective UID of the sender matches the real or saved
         * UID of the target, allow the signal to be sent.
         */
        if (kauth_cred_getruid(uc_src) == kauth_cred_getruid(uc_dst) ||
            kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) ||
            kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) ||
            kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) {
            allowed = 1;
        }

        if (uc_ref != NOCRED) {
            kauth_cred_unref(&uc_ref);
            uc_ref = NOCRED;
        }

        return allowed;
    }
}
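
/*
 * Illustrative example of the UID rule above (not normative): a shell with
 * ruid/euid 501 may signal a setuid-root process it spawned, because that
 * process's real and saved UIDs remain 501 even though its effective UID
 * is 0 -- the ruid_src == ruid_dst comparison succeeds.
 */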

/*
 * Can process `src`, with ucred `uc_src`, send the signal `signum` to process
 * `dst`? The ucred is referenced by the caller so internal fields can be used
 * safely.
 */
int
cansignal(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
{
#if CONFIG_MACF
    if (mac_proc_check_signal(src, dst, signum)) {
        return 0;
    }
#endif

    return cansignal_nomac(src, uc_src, dst, signum);
}

/*
 * <rdar://problem/21952708> Some signals can be restricted from being handled,
 * forcing the default action for that signal. This behavior applies only to
 * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
 * bootarg:
 *
 * 0 (default): Disallow use of restricted signals. Trying to register a handler
 *     returns ENOTSUP, which userspace may use to take special action (e.g. abort).
 * 1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
 * 2: Usual POSIX semantics.
 */
static TUNABLE(unsigned, sigrestrict_arg, "sigrestrict", 0);
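
/*
 * Illustrative example (assumes a device with writable NVRAM boot-args):
 * the tunable above is a boot argument, so it could be set via e.g.
 *
 *     sudo nvram boot-args="sigrestrict=2"
 *
 * to restore plain POSIX semantics for the restricted signals.
 */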

#if PLATFORM_WatchOS
static int
sigrestrictmask(void)
{
    if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
        return SIGRESTRICTMASK;
    }
    return 0;
}

static int
signal_is_restricted(proc_t p, int signum)
{
    if (sigmask(signum) & sigrestrictmask()) {
        if (sigrestrict_arg == 0 &&
            task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {
            return ENOTSUP;
        } else {
            return EINVAL;
        }
    }
    return 0;
}

#else

static inline int
signal_is_restricted(proc_t p, int signum)
{
    (void)p;
    (void)signum;
    return 0;
}
#endif /* !PLATFORM_WatchOS */

/*
 * Returns:     0               Success
 *              EINVAL
 *      copyout:EFAULT
 *      copyin:EFAULT
 *
 * Notes:       Uses current thread as a parameter to inform PPC to enable
 *              FPU exceptions via setsigvec(); this operation is not proxy
 *              safe!
 */
/* ARGSUSED */
int
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
{
    struct kern_sigaction vec;
    struct __kern_sigaction __vec;

    struct kern_sigaction *sa = &vec;
    struct sigacts *ps = &p->p_sigacts;

    int signum;
    int bit, error = 0;
    uint32_t sigreturn_validation = PS_SIGRETURN_VALIDATION_DEFAULT;

    signum = uap->signum;
    if (signum <= 0 || signum >= NSIG ||
        signum == SIGKILL || signum == SIGSTOP) {
        return EINVAL;
    }

    if (uap->nsa) {
        if (IS_64BIT_PROCESS(p)) {
            struct __user64_sigaction __vec64;
            error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
            __sigaction_user64_to_kern(&__vec64, &__vec);
        } else {
            struct __user32_sigaction __vec32;
            error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
            __sigaction_user32_to_kern(&__vec32, &__vec);
        }
        if (error) {
            return error;
        }

        sigreturn_validation = (__vec.sa_flags & SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP) ?
            PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED;
        __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */

        if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
            if ((error = signal_is_restricted(p, signum))) {
                if (error == ENOTSUP) {
                    printf("%s(%d): denied attempt to register action for signal %d\n",
                        proc_name_address(p), proc_pid(p), signum);
                }
                return error;
            }
        }
    }

    if (uap->osa) {
        sa->sa_handler = SIGACTION(p, signum);
        sa->sa_mask = ps->ps_catchmask[signum];
        bit = sigmask(signum);
        sa->sa_flags = 0;
        if ((ps->ps_sigonstack & bit) != 0) {
            sa->sa_flags |= SA_ONSTACK;
        }
        if ((ps->ps_sigintr & bit) == 0) {
            sa->sa_flags |= SA_RESTART;
        }
        if (ps->ps_siginfo & bit) {
            sa->sa_flags |= SA_SIGINFO;
        }
        if (ps->ps_signodefer & bit) {
            sa->sa_flags |= SA_NODEFER;
        }
        if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) {
            sa->sa_flags |= SA_NOCLDSTOP;
        }
        if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) {
            sa->sa_flags |= SA_NOCLDWAIT;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user64_sigaction vec64 = {};
            sigaction_kern_to_user64(sa, &vec64);
            error = copyout(&vec64, uap->osa, sizeof(vec64));
        } else {
            struct user32_sigaction vec32 = {};
            sigaction_kern_to_user32(sa, &vec32);
            error = copyout(&vec32, uap->osa, sizeof(vec32));
        }
        if (error) {
            return error;
        }
    }

    if (uap->nsa) {
        uint32_t old_sigreturn_validation = atomic_load_explicit(
            &ps->ps_sigreturn_validation, memory_order_relaxed);
        if (old_sigreturn_validation == PS_SIGRETURN_VALIDATION_DEFAULT) {
            atomic_compare_exchange_strong_explicit(&ps->ps_sigreturn_validation,
                &old_sigreturn_validation, sigreturn_validation,
                memory_order_relaxed, memory_order_relaxed);
        }
        error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
    }

    return error;
}
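
/*
 * Userspace sketch (illustrative, not part of this file) of registering a
 * SA_SIGINFO handler that the syscall above services; libc fills in
 * sa_tramp before trapping into the kernel:
 *
 *     struct sigaction act = { 0 };
 *     act.sa_sigaction = handler;   // void handler(int, siginfo_t *, void *)
 *     act.sa_flags = SA_SIGINFO | SA_RESTART;
 *     sigemptyset(&act.sa_mask);
 *     sigaction(SIGUSR1, &act, NULL);
 */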

/* Routines to manipulate bits on all threads */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
    struct uthread * uth;

    proc_lock(p);
    if (!in_signalstart) {
        proc_signalstart(p, 1);
    }


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_siglist &= ~bit;
    }
    p->p_siglist &= ~bit;
    if (!in_signalstart) {
        proc_signalend(p, 1);
    }
    proc_unlock(p);

    return 0;
}


static int
unblock_procsigmask(proc_t p, int bit)
{
    struct uthread * uth;

    proc_lock(p);
    proc_signalstart(p, 1);


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask &= ~bit;
    }
    p->p_sigmask &= ~bit;

    proc_signalend(p, 1);
    proc_unlock(p);
    return 0;
}

static int
block_procsigmask(proc_t p, int bit)
{
    struct uthread * uth;

    proc_lock(p);
    proc_signalstart(p, 1);


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask |= bit;
    }
    p->p_sigmask |= bit;

    proc_signalend(p, 1);
    proc_unlock(p);
    return 0;
}

int
set_procsigmask(proc_t p, int bit)
{
    struct uthread * uth;

    proc_lock(p);
    proc_signalstart(p, 1);


    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        uth->uu_sigmask = bit;
    }
    p->p_sigmask = bit;
    proc_signalend(p, 1);
    proc_unlock(p);

    return 0;
}

/* XXX should be static? */
/*
 * Notes:       The thread parameter is used in the PPC case to select the
 *              thread on which the floating point exception will be enabled
 *              or disabled. We can't simply take current_thread(), since
 *              this is called from posix_spawn() on the not currently running
 *              process/thread pair.
 *
 *              We mark thread as unused to allow compilation without warning
 *              on non-PPC platforms.
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
    struct sigacts *ps = &p->p_sigacts;
    int bit;

    assert(signum < NSIG);

    if ((signum == SIGKILL || signum == SIGSTOP) &&
        sa->sa_handler != SIG_DFL) {
        return EINVAL;
    }
    bit = sigmask(signum);
    /*
     * Change setting atomically.
     */
    proc_set_sigact_trampact(p, signum, sa->sa_handler, sa->sa_tramp);
    ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
    if (sa->sa_flags & SA_SIGINFO) {
        ps->ps_siginfo |= bit;
    } else {
        ps->ps_siginfo &= ~bit;
    }
    if ((sa->sa_flags & SA_RESTART) == 0) {
        ps->ps_sigintr |= bit;
    } else {
        ps->ps_sigintr &= ~bit;
    }
    if (sa->sa_flags & SA_ONSTACK) {
        ps->ps_sigonstack |= bit;
    } else {
        ps->ps_sigonstack &= ~bit;
    }
    if (sa->sa_flags & SA_RESETHAND) {
        ps->ps_sigreset |= bit;
    } else {
        ps->ps_sigreset &= ~bit;
    }
    if (sa->sa_flags & SA_NODEFER) {
        ps->ps_signodefer |= bit;
    } else {
        ps->ps_signodefer &= ~bit;
    }
    if (signum == SIGCHLD) {
        if (sa->sa_flags & SA_NOCLDSTOP) {
            OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
        } else {
            OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
        }
        if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) {
            OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
        } else {
            OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
        }
    }

    /*
     * Set bit in p_sigignore for signals that are set to SIG_IGN,
     * and for signals set to SIG_DFL where the default is to ignore.
     * However, don't put SIGCONT in p_sigignore,
     * as we have to restart the process.
     */
    if (sa->sa_handler == SIG_IGN ||
        (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
        clear_procsiglist(p, bit, in_sigstart);
        if (signum != SIGCONT) {
            p->p_sigignore |= bit; /* easier in psignal */
        }
        p->p_sigcatch &= ~bit;
    } else {
        p->p_sigignore &= ~bit;
        if (sa->sa_handler == SIG_DFL) {
            p->p_sigcatch &= ~bit;
        } else {
            p->p_sigcatch |= bit;
        }
    }
    return 0;
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(proc_t p)
{
    int i;

    for (i = 1; i < NSIG; i++) {
        if (sigprop[i] & SA_IGNORE && i != SIGCONT) {
            p->p_sigignore |= sigmask(i);
        }
    }
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(proc_t p, thread_t thread)
{
    struct sigacts *ps = &p->p_sigacts;
    int nc, mask;
    struct uthread *ut;

    ut = (struct uthread *)get_bsdthread_info(thread);

    /*
     * transfer saved signal states from the process
     * back to the current thread.
     *
     * NOTE: We do this without the process locked,
     * because we are guaranteed to be single-threaded
     * by this point in exec and the p_siglist is
     * only accessed by threads inside the process.
     */
    ut->uu_siglist |= p->p_siglist;
    p->p_siglist = 0;

    /*
     * Reset caught signals. Held signals remain held
     * through p_sigmask (unless they were caught,
     * and are now ignored by default).
     */
    proc_reset_sigact(p, p->p_sigcatch);
    while (p->p_sigcatch) {
        nc = ffs((unsigned int)p->p_sigcatch);
        mask = sigmask(nc);
        p->p_sigcatch &= ~mask;
        if (sigprop[nc] & SA_IGNORE) {
            if (nc != SIGCONT) {
                p->p_sigignore |= mask;
            }
            ut->uu_siglist &= ~mask;
        }
    }

    atomic_store_explicit(&ps->ps_sigreturn_validation,
        PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed);
    /* Generate random token value used to validate sigreturn arguments */
    read_random(&ps->ps_sigreturn_token, sizeof(ps->ps_sigreturn_token));

    /*
     * Reset stack state to the user stack.
     * Clear set of signals caught on the signal stack.
     */
    /* thread */
    ut->uu_sigstk.ss_flags = SA_DISABLE;
    ut->uu_sigstk.ss_size = 0;
    ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
    ut->uu_flag &= ~UT_ALTSTACK;
    /* process */
    ps->ps_sigonstack = 0;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
    int error = 0;
    sigset_t oldmask, nmask;
    user_addr_t omask = uap->omask;
    struct uthread *ut;

    ut = current_uthread();
    oldmask = ut->uu_sigmask;

    if (uap->mask == USER_ADDR_NULL) {
        /* just want old mask */
        goto out;
    }
    error = copyin(uap->mask, &nmask, sizeof(sigset_t));
    if (error) {
        goto out;
    }

    switch (uap->how) {
    case SIG_BLOCK:
        block_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    case SIG_UNBLOCK:
        unblock_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    case SIG_SETMASK:
        set_procsigmask(p, (nmask & ~sigcantmask));
        signal_setast(current_thread());
        break;

    default:
        error = EINVAL;
        break;
    }
out:
    if (!error && omask != USER_ADDR_NULL) {
        copyout(&oldmask, omask, sizeof(sigset_t));
    }
    return error;
}
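
/*
 * Userspace sketch (illustrative): blocking SIGINT around a critical
 * section using the syscall serviced above.
 *
 *     sigset_t set, old;
 *     sigemptyset(&set);
 *     sigaddset(&set, SIGINT);
 *     sigprocmask(SIG_BLOCK, &set, &old);    // SIGINT now held pending
 *     ... critical section ...
 *     sigprocmask(SIG_SETMASK, &old, NULL);  // restore previous mask
 */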

int
sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;
    sigset_t pendlist;

    ut = current_uthread();
    pendlist = ut->uu_siglist;

    if (uap->osv) {
        copyout(&pendlist, uap->osv, sizeof(sigset_t));
    }
    return 0;
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime. Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */

static int
sigcontinue(__unused int error)
{
    //  struct uthread *ut = current_uthread();
    unix_syscall_return(EINTR);
}

int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval);
}

int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;

    ut = current_uthread();

    /*
     * When returning from sigpause, we want
     * the old mask to be restored after the
     * signal handler has finished. Thus, we
     * save it here and mark the sigacts structure
     * to indicate this.
     */
    ut->uu_oldmask = ut->uu_sigmask;
    ut->uu_flag |= UT_SAS_OLDMASK;
    ut->uu_sigmask = (uap->mask & ~sigcantmask);
    (void) tsleep0((caddr_t) p, PPAUSE | PCATCH, "pause", 0, sigcontinue);
    /* always return EINTR rather than ERESTART... */
    return EINTR;
}
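
/*
 * Userspace sketch (illustrative): atomically unblock SIGUSR1 and wait
 * for it, relying on the mask save/restore described above.
 *
 *     sigset_t waitmask;
 *     sigfillset(&waitmask);
 *     sigdelset(&waitmask, SIGUSR1);
 *     sigsuspend(&waitmask);   // returns -1 with errno == EINTR
 */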


int
__disable_threadsignal(__unused proc_t p,
    __unused struct __disable_threadsignal_args *uap,
    __unused int32_t *retval)
{
    struct uthread *uth;

    uth = current_uthread();

    /* No longer valid to have any signal delivered */
    uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);

    return 0;
}

void
__pthread_testcancel(int presyscall)
{
    thread_t self = current_thread();
    struct uthread * uthread;

    uthread = (struct uthread *)get_bsdthread_info(self);


    uthread->uu_flag &= ~UT_NOTCANCELPT;

    if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
        if (presyscall != 0) {
            unix_syscall_return(EINTR);
            /* NOTREACHED */
        } else {
            thread_abort_safely(self);
        }
    }
}



int
__pthread_markcancel(__unused proc_t p,
    struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
    thread_act_t target_act;
    int error = 0;
    struct uthread *uth;

    target_act = (thread_act_t)port_name_to_thread(uap->thread_port,
        PORT_INTRANS_THREAD_IN_CURRENT_TASK);

    if (target_act == THR_ACT_NULL) {
        return ESRCH;
    }

    uth = (struct uthread *)get_bsdthread_info(target_act);

    if ((uth->uu_flag & (UT_CANCEL | UT_CANCELED)) == 0) {
        uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
        if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
            && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) {
            thread_abort_safely(target_act);
        }
    }

    thread_deallocate(target_act);
    return error;
}
/*
 * if action = 0: return the cancellation state;
 *     if marked for cancellation, mark the thread as canceled
 * if action = 1: enable cancel handling
 * if action = 2: disable cancel handling
 */
int
__pthread_canceled(__unused proc_t p,
    struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
    thread_act_t thread;
    struct uthread *uth;
    int action = uap->action;

    thread = current_thread();
    uth = (struct uthread *)get_bsdthread_info(thread);

    switch (action) {
    case 1:
        uth->uu_flag &= ~UT_CANCELDISABLE;
        return 0;
    case 2:
        uth->uu_flag |= UT_CANCELDISABLE;
        return 0;
    case 0:
    default:
        if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
            uth->uu_flag &= ~UT_CANCEL;
            uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
            return 0;
        }
        return EINVAL;
    }
    return EINVAL;
}
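
/*
 * Illustrative mapping (an assumption based on the action codes above, not
 * a statement about libpthread internals):
 *     pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, ...)  -> action 1
 *     pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, ...) -> action 2
 *     pthread_testcancel() acting on a pending cancel     -> action 0
 */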

__attribute__((noreturn))
void
__posix_sem_syscall_return(kern_return_t kern_result)
{
    int error = 0;

    if (kern_result == KERN_SUCCESS) {
        error = 0;
    } else if (kern_result == KERN_ABORTED) {
        error = EINTR;
    } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
        error = ETIMEDOUT;
    } else {
        error = EINVAL;
    }
    unix_syscall_return(error);
    /* does not return */
}

#if OLD_SEMWAIT_SIGNAL
/*
 * Returns:     0               Success
 *              EINTR
 *              ETIMEDOUT
 *              EINVAL
 *              EFAULT if timespec is NULL
 */
int
__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
    int32_t *retval)
{
    __pthread_testcancel(0);
    return __old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval);
}

int
__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
    kern_return_t kern_result;
    int error;
    mach_timespec_t then;
    struct timespec now;
    struct user_timespec ts;
    boolean_t truncated_timeout = FALSE;

    if (uap->timeout) {
        if (IS_64BIT_PROCESS(p)) {
            struct user64_timespec ts64;
            error = copyin(uap->ts, &ts64, sizeof(ts64));
            ts.tv_sec = (user_time_t)ts64.tv_sec;
            ts.tv_nsec = (user_long_t)ts64.tv_nsec;
        } else {
            struct user32_timespec ts32;
            error = copyin(uap->ts, &ts32, sizeof(ts32));
            ts.tv_sec = ts32.tv_sec;
            ts.tv_nsec = ts32.tv_nsec;
        }

        if (error) {
            return error;
        }

        if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
            ts.tv_sec = 0xFFFFFFFF;
            ts.tv_nsec = 0;
            truncated_timeout = TRUE;
        }

        if (uap->relative) {
            then.tv_sec = (unsigned int)ts.tv_sec;
            then.tv_nsec = (clock_res_t)ts.tv_nsec;
        } else {
            nanotime(&now);

            /* if time has elapsed, set time to null timespec to bail out right away */
            if (now.tv_sec == ts.tv_sec ?
                now.tv_nsec > ts.tv_nsec :
                now.tv_sec > ts.tv_sec) {
                then.tv_sec = 0;
                then.tv_nsec = 0;
            } else {
                then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec);
                then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec);
                if (then.tv_nsec < 0) {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
            }
        }

        if (uap->mutex_sem == 0) {
            kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        } else {
            kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        }
    } else {
        if (uap->mutex_sem == 0) {
            kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
        } else {
            kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
        }
    }

    if (kern_result == KERN_SUCCESS && !truncated_timeout) {
        return 0;
    } else if (kern_result == KERN_SUCCESS && truncated_timeout) {
        return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
    } else if (kern_result == KERN_ABORTED) {
        return EINTR;
    } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
        return ETIMEDOUT;
    } else {
        return EINVAL;
    }
}
#endif /* OLD_SEMWAIT_SIGNAL */

/*
 * Returns:     0               Success
 *              EINTR
 *              ETIMEDOUT
 *              EINVAL
 *              EFAULT if timespec is NULL
 */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
    int32_t *retval)
{
    __pthread_testcancel(0);
    return __semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval);
}

int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
    kern_return_t kern_result;
    mach_timespec_t then;
    struct timespec now;
    struct user_timespec ts;
    boolean_t truncated_timeout = FALSE;

    if (uap->timeout) {
        ts.tv_sec = (user_time_t)uap->tv_sec;
        ts.tv_nsec = uap->tv_nsec;

        if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
            ts.tv_sec = 0xFFFFFFFF;
            ts.tv_nsec = 0;
            truncated_timeout = TRUE;
        }

        if (uap->relative) {
            then.tv_sec = (unsigned int)ts.tv_sec;
            then.tv_nsec = (clock_res_t)ts.tv_nsec;
        } else {
            nanotime(&now);

            /* if time has elapsed, set time to null timespec to bail out right away */
            if (now.tv_sec == ts.tv_sec ?
                now.tv_nsec > ts.tv_nsec :
                now.tv_sec > ts.tv_sec) {
                then.tv_sec = 0;
                then.tv_nsec = 0;
            } else {
                then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec);
                then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec);
                if (then.tv_nsec < 0) {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
            }
        }

        if (uap->mutex_sem == 0) {
            kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        } else {
            kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
        }
    } else {
        if (uap->mutex_sem == 0) {
            kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
        } else {
            kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
        }
    }

    if (kern_result == KERN_SUCCESS && !truncated_timeout) {
        return 0;
    } else if (kern_result == KERN_SUCCESS && truncated_timeout) {
        return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
    } else if (kern_result == KERN_ABORTED) {
        return EINTR;
    } else if (kern_result == KERN_OPERATION_TIMED_OUT) {
        return ETIMEDOUT;
    } else {
        return EINVAL;
    }
}
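
/*
 * Worked example of the absolute-to-relative timeout conversion above:
 * with now = {100, 900000000} and ts = {102, 400000000}, the raw
 * difference is {2, -500000000}; the negative-nanosecond borrow step
 * yields then = {1, 500000000}, i.e. a 1.5 second relative wait.
 */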


int
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
    __unused int32_t *retval)
{
    thread_t target_act;
    int error = 0;
    int signum = uap->sig;
    struct uthread *uth;

    target_act = (thread_t)port_name_to_thread(uap->thread_port,
        PORT_INTRANS_OPTIONS_NONE);

    if (target_act == THREAD_NULL) {
        return ESRCH;
    }
    if ((u_int)signum >= NSIG) {
        error = EINVAL;
        goto out;
    }

    uth = (struct uthread *)get_bsdthread_info(target_act);

    if (uth->uu_flag & UT_NO_SIGMASK) {
        error = ESRCH;
        goto out;
    }

    if ((thread_get_tag(target_act) & THREAD_TAG_WORKQUEUE) && !uth->uu_workq_pthread_kill_allowed) {
        error = ENOTSUP;
        goto out;
    }

    if (signum) {
        psignal_uthread(target_act, signum);
    }
out:
    thread_deallocate(target_act);
    return error;
}


int
__pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
    __unused int32_t *retval)
{
    user_addr_t set = uap->set;
    user_addr_t oset = uap->oset;
    sigset_t nset;
    int error = 0;
    struct uthread *ut;
    sigset_t oldset;

    ut = current_uthread();
    oldset = ut->uu_sigmask;

    if (set == USER_ADDR_NULL) {
        /* need only old mask */
        goto out;
    }

    error = copyin(set, &nset, sizeof(sigset_t));
    if (error) {
        goto out;
    }

    switch (uap->how) {
    case SIG_BLOCK:
        ut->uu_sigmask |= (nset & ~sigcantmask);
        break;

    case SIG_UNBLOCK:
        ut->uu_sigmask &= ~(nset);
        signal_setast(current_thread());
        break;

    case SIG_SETMASK:
        ut->uu_sigmask = (nset & ~sigcantmask);
        signal_setast(current_thread());
        break;

    default:
        error = EINVAL;
    }
out:
    if (!error && oset != USER_ADDR_NULL) {
        copyout(&oldset, oset, sizeof(sigset_t));
    }

    return error;
}

/*
 * Returns:     0               Success
 *              EINVAL
 *      copyin:EFAULT
 *      copyout:EFAULT
 */
int
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
{
    __pthread_testcancel(1);
    return __sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval);
}

int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
    struct uthread *ut;
    struct uthread *uth;
    int error = 0;
    sigset_t mask;
    sigset_t siglist;
    sigset_t sigw = 0;
    int signum;

    ut = current_uthread();

    if (uap->set == USER_ADDR_NULL) {
        return EINVAL;
    }

    error = copyin(uap->set, &mask, sizeof(sigset_t));
    if (error) {
        return error;
    }

    siglist = (mask & ~sigcantmask);

    if (siglist == 0) {
        return EINVAL;
    }

    proc_lock(p);

    proc_signalstart(p, 1);
    TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
        if ((sigw = uth->uu_siglist & siglist)) {
            break;
        }
    }
    proc_signalend(p, 1);

    if (sigw) {
        /* The signal was pending on a thread */
        goto sigwait1;
    }
    /*
     * When returning from sigwait, we want
     * the old mask to be restored after the
     * signal handler has finished. Thus, we
     * save it here and mark the sigacts structure
     * to indicate this.
     */
    uth = ut; /* wait for it to be delivered to us */
    ut->uu_oldmask = ut->uu_sigmask;
    ut->uu_flag |= UT_SAS_OLDMASK;
    if (siglist == (sigset_t)0) {
        proc_unlock(p);
        return EINVAL;
    }
    /* SIGKILL and SIGSTOP are not maskable as well */
    ut->uu_sigmask = ~(siglist | sigcantmask);
    ut->uu_sigwait = siglist;

    /* No Continuations for now */
    error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0);

    if (error == ERESTART) {
        error = 0;
    }

    sigw = (ut->uu_sigwait & siglist);
    ut->uu_sigmask = ut->uu_oldmask;
    ut->uu_oldmask = 0;
    ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
    ut->uu_sigwait = 0;
    if (!error) {
        signum = ffs((unsigned int)sigw);
        if (!signum) {
            panic("sigwait with no signal wakeup");
        }
        /* Clear the pending signal in the thread it was delivered */
        uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
        DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

        proc_unlock(p);
        if (uap->sig != USER_ADDR_NULL) {
            error = copyout(&signum, uap->sig, sizeof(int));
        }
    } else {
        proc_unlock(p);
    }

    return error;
}

int
sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
{
    struct kern_sigaltstack ss;
    struct kern_sigaltstack *pstk;
    int error;
    struct uthread *uth;
    int onstack;

    uth = current_uthread();

    pstk = &uth->uu_sigstk;
    if ((uth->uu_flag & UT_ALTSTACK) == 0) {
        uth->uu_sigstk.ss_flags |= SA_DISABLE;
    }
    onstack = pstk->ss_flags & SA_ONSTACK;
    if (uap->oss) {
        if (IS_64BIT_PROCESS(p)) {
            struct user64_sigaltstack ss64 = {};
            sigaltstack_kern_to_user64(pstk, &ss64);
            error = copyout(&ss64, uap->oss, sizeof(ss64));
        } else {
            struct user32_sigaltstack ss32 = {};
            sigaltstack_kern_to_user32(pstk, &ss32);
            error = copyout(&ss32, uap->oss, sizeof(ss32));
        }
        if (error) {
            return error;
        }
    }
    if (uap->nss == USER_ADDR_NULL) {
        return 0;
    }
    if (IS_64BIT_PROCESS(p)) {
        struct user64_sigaltstack ss64;
        error = copyin(uap->nss, &ss64, sizeof(ss64));
        sigaltstack_user64_to_kern(&ss64, &ss);
    } else {
        struct user32_sigaltstack ss32;
        error = copyin(uap->nss, &ss32, sizeof(ss32));
        sigaltstack_user32_to_kern(&ss32, &ss);
    }
    if (error) {
        return error;
    }
    if ((ss.ss_flags & ~SA_DISABLE) != 0) {
        return EINVAL;
    }

    if (ss.ss_flags & SA_DISABLE) {
        /* if we are here we are not in the signal handler, so no need to check */
        if (uth->uu_sigstk.ss_flags & SA_ONSTACK) {
            return EINVAL;
        }
        uth->uu_flag &= ~UT_ALTSTACK;
        uth->uu_sigstk.ss_flags = ss.ss_flags;
        return 0;
    }
    if (onstack) {
        return EPERM;
    }
    /* The older minimum stack size was 8K; enforce it to avoid compatibility problems */
#define OLDMINSIGSTKSZ (8 * 1024)
    if (ss.ss_size < OLDMINSIGSTKSZ) {
        return ENOMEM;
    }
    uth->uu_flag |= UT_ALTSTACK;
    uth->uu_sigstk = ss;
    return 0;
}
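
/*
 * Userspace sketch (illustrative): installing an alternate stack so a
 * SIGSEGV handler can still run after the main stack overflows; the
 * handler must be registered with SA_ONSTACK to use it.
 *
 *     static char stk[SIGSTKSZ];
 *     stack_t ss = { .ss_sp = stk, .ss_size = sizeof(stk), .ss_flags = 0 };
 *     sigaltstack(&ss, NULL);
 *     struct sigaction act = { 0 };
 *     act.sa_handler = segv_handler;
 *     act.sa_flags = SA_ONSTACK;
 *     sigaction(SIGSEGV, &act, NULL);
 */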

int
kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
{
    proc_t p;
    kauth_cred_t uc = kauth_cred_get();
    int posix = uap->posix; /* !0 if posix behaviour desired */

    AUDIT_ARG(pid, uap->pid);
    AUDIT_ARG(signum, uap->signum);

    if ((u_int)uap->signum >= NSIG) {
        return EINVAL;
    }
    if (uap->pid > 0) {
        /* kill single process */
        if ((p = proc_find(uap->pid)) == NULL) {
            if ((p = pzfind(uap->pid)) != NULL) {
                /*
                 * POSIX 1003.1-2001 requires returning success when killing a
                 * zombie; see Rationale for kill(2).
                 */
                return 0;
            }
            return ESRCH;
        }
        AUDIT_ARG(process, p);
        if (!cansignal(cp, uc, p, uap->signum)) {
            proc_rele(p);
            return EPERM;
        }
        if (uap->signum) {
            psignal(p, uap->signum);
        }
        proc_rele(p);
        return 0;
    }
    switch (uap->pid) {
    case -1:                /* broadcast signal */
        return killpg1(cp, uap->signum, 0, 1, posix);
    case 0:                 /* signal own process group */
        return killpg1(cp, uap->signum, 0, 0, posix);
    default:                /* negative explicit process group */
        return killpg1(cp, uap->signum, -(uap->pid), 0, posix);
    }
    /* NOTREACHED */
}
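
/*
 * Illustrative mapping of the pid argument handled above:
 *
 *     kill(1234, SIGTERM);   // signal process 1234
 *     kill(0, SIGTERM);      // signal the caller's process group
 *     kill(-1, SIGTERM);     // broadcast to every process we may signal
 *     kill(-5678, SIGTERM);  // signal process group 5678
 */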

os_reason_t
build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
    os_reason_t exit_reason = OS_REASON_NULL;

    int error = 0;
    int num_items_to_copy = 0;
    uint32_t user_data_to_copy = 0;
    char *reason_user_desc = NULL;
    size_t reason_user_desc_len = 0;

    exit_reason = os_reason_create(reason_namespace, reason_code);
    if (exit_reason == OS_REASON_NULL) {
        printf("build_userspace_exit_reason: failed to allocate exit reason\n");
        return exit_reason;
    }

    exit_reason->osr_flags |= OS_REASON_FLAG_FROM_USERSPACE;

    /*
     * Only apply flags that are allowed to be passed from userspace.
     */
    exit_reason->osr_flags |= (reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER);
    if ((reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER) != reason_flags) {
        printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n",
            reason_flags, reason_namespace, reason_code);
    }

    if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) {
        exit_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
    }

    if (payload != USER_ADDR_NULL) {
        if (payload_size == 0) {
            printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n",
                reason_namespace);
            exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS;
            payload = USER_ADDR_NULL;
        } else {
            num_items_to_copy++;

            if (payload_size > EXIT_REASON_PAYLOAD_MAX_LEN) {
                exit_reason->osr_flags |= OS_REASON_FLAG_PAYLOAD_TRUNCATED;
                payload_size = EXIT_REASON_PAYLOAD_MAX_LEN;
            }

            user_data_to_copy += payload_size;
        }
    }

    if (reason_string != USER_ADDR_NULL) {
        reason_user_desc = (char *)kalloc_data(EXIT_REASON_USER_DESC_MAX_LEN, Z_WAITOK);

        if (reason_user_desc != NULL) {
            error = copyinstr(reason_string, (void *) reason_user_desc,
                EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len);

            if (error == 0) {
                num_items_to_copy++;
                user_data_to_copy += reason_user_desc_len;
            } else if (error == ENAMETOOLONG) {
                num_items_to_copy++;
                reason_user_desc[EXIT_REASON_USER_DESC_MAX_LEN - 1] = '\0';
                user_data_to_copy += reason_user_desc_len;
            } else {
                exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
                kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
                reason_user_desc = NULL;
                reason_user_desc_len = 0;
            }
        }
    }

    if (num_items_to_copy != 0) {
        uint32_t reason_buffer_size_estimate = 0;
        mach_vm_address_t data_addr = 0;

        reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(num_items_to_copy, user_data_to_copy);

        error = os_reason_alloc_buffer(exit_reason, reason_buffer_size_estimate);
        if (error != 0) {
            printf("build_userspace_exit_reason: failed to allocate signal reason buffer\n");
            goto out_failed_copyin;
        }

        if (reason_user_desc != NULL && reason_user_desc_len != 0) {
            if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
                EXIT_REASON_USER_DESC,
                (uint32_t)reason_user_desc_len,
                &data_addr)) {
                kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr,
                    reason_user_desc, (uint32_t)reason_user_desc_len);
            } else {
                printf("build_userspace_exit_reason: failed to allocate space for reason string\n");
                goto out_failed_copyin;
            }
        }

        if (payload != USER_ADDR_NULL) {
            if (KERN_SUCCESS ==
                kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
                EXIT_REASON_USER_PAYLOAD,
                payload_size,
                &data_addr)) {
                error = copyin(payload, (void *) data_addr, payload_size);
                if (error) {
                    printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", error);
                    goto out_failed_copyin;
                }
            } else {
                printf("build_userspace_exit_reason: failed to allocate space for payload data\n");
                goto out_failed_copyin;
            }
        }
    }

    if (reason_user_desc != NULL) {
        kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
        reason_user_desc = NULL;
        reason_user_desc_len = 0;
    }

    return exit_reason;

out_failed_copyin:

    if (reason_user_desc != NULL) {
        kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
        reason_user_desc = NULL;
        reason_user_desc_len = 0;
    }

    exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
    os_reason_alloc_buffer(exit_reason, 0);
    return exit_reason;
}

static int
terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace,
    uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
    proc_t target_proc = PROC_NULL;
    kauth_cred_t cur_cred = kauth_cred_get();

    os_reason_t signal_reason = OS_REASON_NULL;

    AUDIT_ARG(pid, target_pid);
    if ((target_pid <= 0)) {
        return EINVAL;
    }

    target_proc = proc_find(target_pid);
    if (target_proc == PROC_NULL) {
        return ESRCH;
    }

    AUDIT_ARG(process, target_proc);

    if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL)) {
        proc_rele(target_proc);
        return EPERM;
    }

    if (target_pid != proc_getpid(cur_proc)) {
        /*
         * FLAG_ABORT should only be set on terminate_with_reason(getpid()) that
         * was a fallback from an unsuccessful abort_with_reason(). In that case
         * caller's pid matches the target one. Otherwise remove the flag.
         */
        reason_flags &= ~((typeof(reason_flags))OS_REASON_FLAG_ABORT);
    }

    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
        proc_getpid(target_proc), reason_namespace,
        reason_code, 0, 0);

    signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
        reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID));

    if (target_pid == proc_getpid(cur_proc)) {
        /*
         * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
         * return if the thread and/or task are already terminating. Either way, the
         * current thread won't return to userspace.
         */
        psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason);
    } else {
        psignal_with_reason(target_proc, SIGKILL, signal_reason);
    }

    proc_rele(target_proc);

    return 0;
}

int
terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args,
    __unused int32_t *retval)
{
    return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload,
        args->payload_size, args->reason_string, args->reason_flags);
}
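
/*
 * Illustrative call chain (an assumption that mirrors the FLAG_ABORT
 * comment above, not a statement about libsystem internals): a failed
 * abort_with_reason() may fall back to terminate_with_reason(getpid(), ...)
 * with OS_REASON_FLAG_ABORT set; for any other target pid the flag is
 * stripped here before the reason is built.
 */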

static int
killpg1_allfilt(proc_t p, void * arg)
{
    struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;

    /*
     * Don't signal initproc, a system process, or the current process if POSIX
     * isn't specified.
     */
    return proc_getpid(p) > 1 && !(p->p_flag & P_SYSTEM) &&
           (kfargp->posix ? true : p != kfargp->curproc);
}

static int
killpg1_callback(proc_t p, void *arg)
{
    struct killpg1_iterargs *kargp = (struct killpg1_iterargs *)arg;
    int signum = kargp->signum;

    if (proc_list_exited(p)) {
        /*
         * Count zombies as found for the purposes of signalling, since POSIX
         * 1003.1-2001 sees signalling zombies as successful. If killpg(2) or
         * kill(2) with pid -1 only finds zombies that can be signalled, it
         * shouldn't return ESRCH. See the Rationale for kill(2).
         *
         * Don't call into MAC -- it's not expecting signal checks for exited
         * processes.
         */
        if (cansignal_nomac(kargp->curproc, kargp->uc, p, signum)) {
            kargp->nfound++;
        }
    } else if (cansignal(kargp->curproc, kargp->uc, p, signum)) {
        kargp->nfound++;

        if (signum != 0) {
            psignal(p, signum);
        }
    }

    return PROC_RETURNED;
}

/*
 * Common code for kill process group/broadcast kill.
 */
int
killpg1(proc_t curproc, int signum, int pgid, int all, int posix)
{
    kauth_cred_t uc;
    struct pgrp *pgrp;
    int error = 0;

    uc = kauth_cred_proc_ref(curproc);
    struct killpg1_iterargs karg = {
        .curproc = curproc, .uc = uc, .nfound = 0, .signum = signum
    };

    if (all) {
        /*
         * Broadcast to all processes that the user can signal (pid was -1).
         */
        struct killpg1_filtargs kfarg = {
            .posix = posix, .curproc = curproc
        };
        proc_iterate(PROC_ALLPROCLIST | PROC_ZOMBPROCLIST, killpg1_callback,
            &karg, killpg1_allfilt, &kfarg);
    } else {
        if (pgid == 0) {
            /*
             * Send to the current process's process group.
             */
            pgrp = proc_pgrp(curproc, NULL);
        } else {
            pgrp = pgrp_find(pgid);
            if (pgrp == NULL) {
                error = ESRCH;
                goto out;
            }
        }

        pgrp_iterate(pgrp, killpg1_callback, &karg, ^bool (proc_t p) {
            if (p == kernproc || p == initproc) {
                return false;
            }
            /* XXX shouldn't this allow signalling zombies? */
            return !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB;
        });
        pgrp_rele(pgrp);
    }
    error = (karg.nfound > 0 ? 0 : (posix ? EPERM : ESRCH));
out:
    kauth_cred_unref(&uc);
    return error;
}
1805
1806 /*
1807 * Send a signal to a process group.
1808 */
1809 void
gsignal(int pgid,int signum)1810 gsignal(int pgid, int signum)
1811 {
1812 struct pgrp *pgrp;
1813
1814 if (pgid && (pgrp = pgrp_find(pgid))) {
1815 pgsignal(pgrp, signum, 0);
1816 pgrp_rele(pgrp);
1817 }
1818 }
1819
1820 /*
1821 * Send a signal to a process group. If checkctty is 1,
1822 * limit to members which have a controlling terminal.
1823 */
1824
1825 static int
pgsignal_callback(proc_t p,void * arg)1826 pgsignal_callback(proc_t p, void * arg)
1827 {
1828 int signum = *(int*)arg;
1829
1830 psignal(p, signum);
1831 return PROC_RETURNED;
1832 }
1833
1834 void
pgsignal(struct pgrp * pgrp,int signum,int checkctty)1835 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
1836 {
1837 if (pgrp == PGRP_NULL) {
1838 return;
1839 }
1840
1841 bool (^filter)(proc_t) = ^bool (proc_t p) {
1842 return p->p_flag & P_CONTROLT;
1843 };
1844
1845 pgrp_iterate(pgrp, pgsignal_callback, &signum, checkctty ? filter : NULL);
1846 }
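/*
 * Usage sketch (editorial): the checkctty filter is what lets tty-driven
 * signals reach only those group members that still have a controlling
 * terminal, e.g.
 *
 *     pgsignal(pgrp, SIGHUP, 0);     // every member of the group
 *     pgsignal(pgrp, SIGTSTP, 1);    // only members with P_CONTROLT set
 */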
1847
1848
1849 void
1850 tty_pgsignal_locked(struct tty *tp, int signum, int checkctty)
1851 {
1852 struct pgrp * pg;
1853
1854 pg = tty_pgrp_locked(tp);
1855 if (pg != PGRP_NULL) {
1856 tty_unlock(tp);
1857 pgsignal(pg, signum, checkctty);
1858 pgrp_rele(pg);
1859 tty_lock(tp);
1860 }
1861 }
1862 /*
1863 * Send a signal caused by a trap to a specific thread.
1864 */
1865 void
1866 threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
1867 {
1868 struct uthread *uth;
1869 struct task * sig_task;
1870 proc_t p;
1871 int mask;
1872
1873 if ((u_int)signum >= NSIG || signum == 0) {
1874 return;
1875 }
1876
1877 mask = sigmask(signum);
1878 if ((mask & threadmask) == 0) {
1879 return;
1880 }
1881 sig_task = get_threadtask(sig_actthread);
1882 p = (proc_t)(get_bsdtask_info(sig_task));
1883
1884 uth = get_bsdthread_info(sig_actthread);
1885
1886 proc_lock(p);
1887 if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
1888 proc_unlock(p);
1889 return;
1890 }
1891
1892 uth->uu_siglist |= mask;
1893 uth->uu_code = code;
1894
1895 /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
1896 if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask)
1897 && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) &&
1898 !(mask & stopsigmask) && !(mask & contsigmask)) {
1899 if (uth->uu_exit_reason == OS_REASON_NULL) {
1900 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1901 proc_getpid(p), OS_REASON_SIGNAL, signum, 0, 0);
1902
1903 os_reason_t signal_reason = build_signal_reason(signum, "exc handler");
1904
1905 set_thread_exit_reason(sig_actthread, signal_reason, TRUE);
1906
1907 /* We dropped/consumed the reference in set_thread_exit_reason() */
1908 signal_reason = OS_REASON_NULL;
1909 }
1910 }
1911
1912 proc_unlock(p);
1913
1914 /* mark on process as well */
1915 signal_setast(sig_actthread);
1916 }
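/*
 * Illustrative call (editorial sketch): the Mach exception path is expected
 * to use threadsignal() to direct a hardware fault's signal at the faulting
 * thread, e.g. converting an access fault into SIGSEGV. The identifiers
 * faulting_thread and exception_code are hypothetical names:
 *
 *     threadsignal(faulting_thread, SIGSEGV, exception_code, TRUE);
 *
 * Only hardware-fault-style signals (those in threadmask) are accepted here;
 * anything else is silently dropped, so process-directed signals must go
 * through psignal() instead.
 */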
1917
1918 /* Called with proc locked */
1919 static void
1920 set_thread_extra_flags(struct uthread *uth, os_reason_t reason)
1921 {
1922 extern int vm_shared_region_reslide_restrict;
1923 assert(uth != NULL);
1924 /*
1925 * Check whether the userland fault address falls within the shared
1926 * region and notify userland if so. This allows launchd to apply
1927 * special policies around this fault type.
1928 */
1929 if (reason->osr_namespace == OS_REASON_SIGNAL &&
1930 reason->osr_code == SIGSEGV) {
1931 mach_vm_address_t fault_address = uth->uu_subcode;
1932
1933 #if defined(__arm64__)
1934 /* Address is in userland, so we hard clear TBI bits to 0 here */
1935 fault_address = tbi_clear(fault_address);
1936 #endif /* __arm64__ */
1937
1938 if (fault_address >= SHARED_REGION_BASE &&
1939 fault_address <= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1940 /*
1941 * Always report whether the fault happened within the shared cache
1942 * region, but only mark the slide as stale if resliding is extended
1943 * to all processes or if the faulting process is a platform binary.
1944 */
1945 reason->osr_flags |= OS_REASON_FLAG_SHAREDREGION_FAULT;
1946
1947 #if __has_feature(ptrauth_calls)
1948 if (!vm_shared_region_reslide_restrict || csproc_get_platform_binary(current_proc())) {
1949 vm_shared_region_reslide_stale();
1950 }
1951 #endif /* __has_feature(ptrauth_calls) */
1952 }
1953 }
1954 }
1955
1956 void
1957 set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked)
1958 {
1959 struct uthread *targ_uth = get_bsdthread_info(th);
1960 struct task *targ_task = NULL;
1961 proc_t targ_proc = NULL;
1962
1963 os_reason_t exit_reason = (os_reason_t)reason;
1964
1965 if (exit_reason == OS_REASON_NULL) {
1966 return;
1967 }
1968
1969 if (!proc_locked) {
1970 targ_task = get_threadtask(th);
1971 targ_proc = (proc_t)(get_bsdtask_info(targ_task));
1972
1973 proc_lock(targ_proc);
1974 }
1975
1976 set_thread_extra_flags(targ_uth, exit_reason);
1977
1978 if (targ_uth->uu_exit_reason == OS_REASON_NULL) {
1979 targ_uth->uu_exit_reason = exit_reason;
1980 } else {
1981 /* The caller expects that we drop a reference on the exit reason */
1982 os_reason_free(exit_reason);
1983 }
1984
1985 if (!proc_locked) {
1986 assert(targ_proc != NULL);
1987 proc_unlock(targ_proc);
1988 }
1989 }
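/*
 * Reference-counting sketch (editorial): set_thread_exit_reason() always
 * consumes one reference on the reason, whether it is stored or discarded,
 * so a caller that wants to keep using the reason must take an extra one:
 *
 *     os_reason_t reason = os_reason_create(OS_REASON_SIGNAL, SIGKILL);
 *     os_reason_ref(reason);                      // keep one for ourselves
 *     set_thread_exit_reason(th, reason, FALSE);  // consumes one reference
 *     // ... reason is still valid here ...
 *     os_reason_free(reason);                     // drop our own reference
 */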
1990
1991 /*
1992 * get_signalthread
1993 *
1994 * Picks an appropriate thread from a process to target with a signal.
1995 *
1996 * Called with proc locked.
1997 * Returns thread with BSD ast set.
1998 *
1999 * We attempt to deliver a proc-wide signal to the first thread in the task.
2000 * This allows single-threaded applications which use signals to
2001 * be linked with multithreaded libraries.
2002 */
2003 static kern_return_t
2004 get_signalthread(proc_t p, int signum, thread_t * thr)
2005 {
2006 struct uthread *uth;
2007 sigset_t mask = sigmask(signum);
2008 bool skip_wqthreads = true;
2009
2010 *thr = THREAD_NULL;
2011
2012
2013 again:
2014 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2015 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2016 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
2017 thread_t th = get_machthread(uth);
2018 if (skip_wqthreads && (thread_get_tag(th) & THREAD_TAG_WORKQUEUE)) {
2019 /* Workqueue threads may be parked in the kernel unable to
2020 * deliver signals for an extended period of time, so skip them
2021 * in favor of pthreads in a first pass. (rdar://50054475). */
2022 } else if (check_actforsig(p->task, th, 1) == KERN_SUCCESS) {
2023 *thr = th;
2024 return KERN_SUCCESS;
2025 }
2026 }
2027 }
2028 if (skip_wqthreads) {
2029 skip_wqthreads = false;
2030 goto again;
2031 }
2032 if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
2033 return KERN_SUCCESS;
2034 }
2035
2036 return KERN_FAILURE;
2037 }
2038
2039 static os_reason_t
2040 build_signal_reason(int signum, const char *procname)
2041 {
2042 os_reason_t signal_reason = OS_REASON_NULL;
2043 proc_t sender_proc = current_proc();
2044 uint32_t reason_buffer_size_estimate = 0, proc_name_length = 0;
2045 const char *default_sender_procname = "unknown";
2046 mach_vm_address_t data_addr;
2047 int ret;
2048
2049 signal_reason = os_reason_create(OS_REASON_SIGNAL, signum);
2050 if (signal_reason == OS_REASON_NULL) {
2051 printf("build_signal_reason: unable to allocate signal reason structure.\n");
2052 return signal_reason;
2053 }
2054
2055 reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
2056 sizeof(pid_t));
2057
2058 ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate);
2059 if (ret != 0) {
2060 printf("build_signal_reason: unable to allocate signal reason buffer.\n");
2061 return signal_reason;
2062 }
2063
2064 if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID,
2065 sizeof(pid_t), &data_addr)) {
2066 pid_t pid = proc_getpid(sender_proc);
2067 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &pid, sizeof(pid));
2068 } else {
2069 printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
2070 }
2071
2072 proc_name_length = sizeof(sender_proc->p_name);
2073 if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME,
2074 proc_name_length, &data_addr)) {
2075 if (procname) {
2076 char truncated_procname[proc_name_length];
2077 strncpy((char *) &truncated_procname, procname, proc_name_length);
2078 truncated_procname[proc_name_length - 1] = '\0';
2079
2080 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname,
2081 (uint32_t)strlen((char *) &truncated_procname));
2082 } else if (*sender_proc->p_name) {
2083 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name,
2084 sizeof(sender_proc->p_name));
2085 } else {
2086 kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname,
2087 (uint32_t)strlen(default_sender_procname) + 1);
2088 }
2089 } else {
2090 printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
2091 }
2092
2093 return signal_reason;
2094 }
2095
2096 /*
2097 * Send the signal to the process. If the signal has an action, the action
2098 * is usually performed by the target process rather than the caller; we add
2099 * the signal to the set of pending signals for the process.
2100 *
2101 * Always drops a reference on a signal_reason if one is provided, whether via
2102 * passing it to a thread or deallocating directly.
2103 *
2104 * Exceptions:
2105 * o When a stop signal is sent to a sleeping process that takes the
2106 * default action, the process is stopped without awakening it.
2107 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2108 * regardless of the signal action (e.g., blocked or ignored).
2109 *
2110 * Other ignored signals are discarded immediately.
2111 */
2112 static void
2113 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason)
2114 {
2115 int prop;
2116 user_addr_t action = USER_ADDR_NULL;
2117 proc_t sig_proc;
2118 thread_t sig_thread;
2119 task_t sig_task;
2120 int mask;
2121 struct uthread *uth;
2122 kern_return_t kret;
2123 uid_t r_uid;
2124 proc_t pp;
2125 kauth_cred_t my_cred;
2126 char *launchd_exit_reason_desc = NULL;
2127 boolean_t update_thread_policy = FALSE;
2128
2129 if ((u_int)signum >= NSIG || signum == 0) {
2130 panic("psignal: bad signal number %d", signum);
2131 }
2132
2133 mask = sigmask(signum);
2134 prop = sigprop[signum];
2135
2136 #if SIGNAL_DEBUG
2137 if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
2138 ram_printf(3);
2139 }
2140 #endif /* SIGNAL_DEBUG */
2141
2142 /* catch unexpected initproc kills early for easier debugging */
2143 if (signum == SIGKILL && p == initproc) {
2144 if (signal_reason == NULL) {
2145 panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
2146 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2147 ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""));
2148 } else {
2149 launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(signal_reason);
2150 panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
2151 (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2152 ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""),
2153 signal_reason->osr_namespace, signal_reason->osr_code,
2154 launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
2155 }
2156 }
2157
2158 /*
2159 * We will need the task pointer later. Grab it now to
2160 * check for a zombie process. Also don't send signals
2161 * to kernel internal tasks.
2162 */
2163 if (flavor & PSIG_VFORK) {
2164 sig_task = task;
2165 sig_thread = thread;
2166 sig_proc = p;
2167 } else if (flavor & PSIG_THREAD) {
2168 sig_task = get_threadtask(thread);
2169 sig_thread = thread;
2170 sig_proc = (proc_t)get_bsdtask_info(sig_task);
2171 } else if (flavor & PSIG_TRY_THREAD) {
2172 assert((thread == current_thread()) && (p == current_proc()));
2173 sig_task = p->task;
2174 sig_thread = thread;
2175 sig_proc = p;
2176 } else {
2177 sig_task = p->task;
2178 sig_thread = THREAD_NULL;
2179 sig_proc = p;
2180 }
2181
2182 if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) {
2183 os_reason_free(signal_reason);
2184 return;
2185 }
2186
2187 if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
2188 proc_knote(sig_proc, NOTE_SIGNAL | signum);
2189 }
2190
2191 if ((flavor & PSIG_LOCKED) == 0) {
2192 proc_signalstart(sig_proc, 0);
2193 }
2194
2195 /* Don't send signals to a process that has ignored them. */
2196 if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
2197 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2198 goto sigout_unlocked;
2199 }
2200
2201 /*
2202 * The proc_lock prevents the targeted thread from being deallocated
2203 * or handling the signal until we're done signaling it.
2204 *
2205 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
2206 *
2207 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
2208 */
2209 proc_lock(sig_proc);
2210
2211 /*
2212 * Don't send signals to a process which has already exited and thus
2213 * committed to a particular p_xstat exit code.
2214 * Additionally, don't abort the process running 'reboot'.
2215 */
2216 if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
2217 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2218 goto sigout_locked;
2219 }
2220
2221 if (flavor & PSIG_VFORK) {
2222 action = SIG_DFL;
2223 act_set_astbsd(sig_thread);
2224 kret = KERN_SUCCESS;
2225 } else if (flavor & PSIG_TRY_THREAD) {
2226 uth = get_bsdthread_info(sig_thread);
2227 if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2228 (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
2229 ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
2230 /* deliver to specified thread */
2231 } else {
2232 /* deliver to any willing thread */
2233 kret = get_signalthread(sig_proc, signum, &sig_thread);
2234 }
2235 } else if (flavor & PSIG_THREAD) {
2236 /* If successful return with ast set */
2237 kret = check_actforsig(sig_task, sig_thread, 1);
2238 } else {
2239 /* If successful return with ast set */
2240 kret = get_signalthread(sig_proc, signum, &sig_thread);
2241 }
2242
2243 if (kret != KERN_SUCCESS) {
2244 DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2245 proc_unlock(sig_proc);
2246 goto sigout_unlocked;
2247 }
2248
2249 uth = get_bsdthread_info(sig_thread);
2250
2251 /*
2252 * If proc is traced, always give parent a chance.
2253 */
2254
2255 if ((flavor & PSIG_VFORK) == 0) {
2256 if (sig_proc->p_lflag & P_LTRACED) {
2257 action = SIG_DFL;
2258 } else {
2259 /*
2260 * If the signal is being ignored,
2261 * then we forget about it immediately.
2262 * (Note: we don't set SIGCONT in p_sigignore,
2263 * and if it is set to SIG_IGN,
2264 * action will be SIG_DFL here.)
2265 */
2266 if (sig_proc->p_sigignore & mask) {
2267 goto sigout_locked;
2268 }
2269
2270 if (uth->uu_sigwait & mask) {
2271 action = KERN_SIG_WAIT;
2272 } else if (uth->uu_sigmask & mask) {
2273 action = KERN_SIG_HOLD;
2274 } else if (sig_proc->p_sigcatch & mask) {
2275 action = KERN_SIG_CATCH;
2276 } else {
2277 action = SIG_DFL;
2278 }
2279 }
2280 }
2281
2282 /* TODO: p_nice isn't hooked up to the scheduler... */
2283 if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
2284 (sig_proc->p_lflag & P_LTRACED) == 0) {
2285 sig_proc->p_nice = NZERO;
2286 }
2287
2288 if (prop & SA_CONT) {
2289 uth->uu_siglist &= ~stopsigmask;
2290 }
2291
2292 if (prop & SA_STOP) {
2293 struct pgrp *pg;
2294 /*
2295 * If sending a tty stop signal to a member of an orphaned
2296 * process group, discard the signal here if the action
2297 * is default; don't stop the process below if sleeping,
2298 * and don't clear any pending SIGCONT.
2299 */
2300 pg = proc_pgrp(sig_proc, NULL);
2301 if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
2302 action == SIG_DFL) {
2303 pgrp_rele(pg);
2304 goto sigout_locked;
2305 }
2306 pgrp_rele(pg);
2307 uth->uu_siglist &= ~contsigmask;
2308 }
2309
2310 uth->uu_siglist |= mask;
2311
2312 /*
2313 * Defer further processing for signals which are held,
2314 * except that stopped processes must be continued by SIGCONT.
2315 */
2316 if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
2317 goto sigout_locked;
2318 }
2319
2320 /*
2321 * SIGKILL priority twiddling moved here from above because
2322 * it needs sig_thread. Could merge it into large switch
2323 * below if we didn't care about priority for tracing
2324 * as SIGKILL's action is always SIG_DFL.
2325 *
2326 * TODO: p_nice isn't hooked up to the scheduler...
2327 */
2328 if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
2329 sig_proc->p_nice = NZERO;
2330 }
2331
2332 /*
2333 * Process is traced - wake it up (if not already
2334 * stopped) so that it can discover the signal in
2335 * issig() and stop for the parent.
2336 */
2337 if (sig_proc->p_lflag & P_LTRACED) {
2338 if (sig_proc->p_stat != SSTOP) {
2339 goto runlocked;
2340 } else {
2341 goto sigout_locked;
2342 }
2343 }
2344
2345 if ((flavor & PSIG_VFORK) != 0) {
2346 goto runlocked;
2347 }
2348
2349 if (action == KERN_SIG_WAIT) {
2350 #if CONFIG_DTRACE
2351 /*
2352 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
2353 */
2354 r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2355
2356 bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2357
2358 uth->t_dtrace_siginfo.si_signo = signum;
2359 uth->t_dtrace_siginfo.si_pid = proc_getpid(current_proc());
2360 uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2361 uth->t_dtrace_siginfo.si_uid = r_uid;
2362 uth->t_dtrace_siginfo.si_code = 0;
2363 #endif
2364 uth->uu_sigwait = mask;
2365 uth->uu_siglist &= ~mask;
2366 wakeup(&uth->uu_sigwait);
2367 /* if it is SIGCONT resume whole process */
2368 if (prop & SA_CONT) {
2369 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2370 sig_proc->p_contproc = proc_getpid(current_proc());
2371 (void) task_resume_internal(sig_task);
2372 }
2373 goto sigout_locked;
2374 }
2375
2376 if (action != SIG_DFL) {
2377 /*
2378 * User wants to catch the signal.
2379 * Wake up the thread, but don't un-suspend it
2380 * (except for SIGCONT).
2381 */
2382 if (prop & SA_CONT) {
2383 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2384 (void) task_resume_internal(sig_task);
2385 sig_proc->p_stat = SRUN;
2386 } else if (sig_proc->p_stat == SSTOP) {
2387 goto sigout_locked;
2388 }
2389 /*
2390 * Fill out siginfo structure information to pass to the
2391 * signalled process/thread sigaction handler, when it
2392 * wakes up. si_code is 0 because this is an ordinary
2393 * signal, not a SIGCHLD, and so si_status is the signal
2394 * number itself, instead of the child process exit status.
2395 * We shift this left because it will be shifted right before
2396 * it is passed to user space. kind of ugly to use W_EXITCODE
2397 * this way, but it beats defining a new macro.
2398 *
2399 * Note: Avoid the SIGCHLD recursion case!
2400 */
2401 if (signum != SIGCHLD) {
2402 r_uid = kauth_getruid();
2403
2404 sig_proc->si_pid = proc_getpid(current_proc());
2405 sig_proc->si_status = W_EXITCODE(signum, 0);
2406 sig_proc->si_uid = r_uid;
2407 sig_proc->si_code = 0;
2408 }
2409
2410 goto runlocked;
2411 } else {
2412 /* Default action - varies */
2413 if (mask & stopsigmask) {
2414 assert(signal_reason == NULL);
2415 /*
2416 * These are the signals which by default
2417 * stop a process.
2418 *
2419 * Don't clog system with children of init
2420 * stopped from the keyboard.
2421 */
2422 if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2423 uth->uu_siglist &= ~mask;
2424 proc_unlock(sig_proc);
2425 /* siglock still locked, proc_lock not locked */
2426 psignal_locked(sig_proc, SIGKILL);
2427 goto sigout_unlocked;
2428 }
2429
2430 /*
2431 * Stop the task
2432 * if task hasn't already been stopped by
2433 * a signal.
2434 */
2435 uth->uu_siglist &= ~mask;
2436 if (sig_proc->p_stat != SSTOP) {
2437 sig_proc->p_xstat = signum;
2438 sig_proc->p_stat = SSTOP;
2439 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2440 sig_proc->p_lflag &= ~P_LWAITED;
2441 proc_unlock(sig_proc);
2442
2443 pp = proc_parentholdref(sig_proc);
2444 stop(sig_proc, pp);
2445 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2446 my_cred = kauth_cred_proc_ref(sig_proc);
2447 r_uid = kauth_cred_getruid(my_cred);
2448 kauth_cred_unref(&my_cred);
2449
2450 proc_lock(sig_proc);
2451 pp->si_pid = proc_getpid(sig_proc);
2452 /*
2453 * POSIX: sigaction for a stopped child
2454 * when sent to the parent must set the
2455 * child's signal number into si_status.
2456 */
2457 if (signum != SIGSTOP) {
2458 pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2459 } else {
2460 pp->si_status = W_EXITCODE(signum, signum);
2461 }
2462 pp->si_code = CLD_STOPPED;
2463 pp->si_uid = r_uid;
2464 proc_unlock(sig_proc);
2465
2466 psignal(pp, SIGCHLD);
2467 }
2468 if (pp != PROC_NULL) {
2469 proc_parentdropref(pp, 0);
2470 }
2471
2472 goto sigout_unlocked;
2473 }
2474
2475 goto sigout_locked;
2476 }
2477
2478 DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2479
2480 switch (signum) {
2481 /*
2482 * Signals ignored by default have been dealt
2483 * with already, since their bits are on in
2484 * p_sigignore.
2485 */
2486
2487 case SIGKILL:
2488 /*
2489 * Kill signal always sets process running and
2490 * unsuspends it.
2491 */
2492 /*
2493 * Process will be running after 'run'
2494 */
2495 sig_proc->p_stat = SRUN;
2496 /*
2497 * In scenarios where suspend/resume race with the signal,
2498 * AST_BSD may already have been consumed by the time we get
2499 * here, so set it again to avoid the race. This was the
2500 * scenario with spindump-enabled shutdowns; it should be
2501 * covered more thoroughly down the line.
2502 */
2503 act_set_astbsd(sig_thread);
2504 kret = thread_abort(sig_thread);
2505 update_thread_policy = (kret == KERN_SUCCESS);
2506
2507 if (uth->uu_exit_reason == OS_REASON_NULL) {
2508 if (signal_reason == OS_REASON_NULL) {
2509 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2510 proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);
2511
2512 signal_reason = build_signal_reason(signum, NULL);
2513 }
2514
2515 os_reason_ref(signal_reason);
2516 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2517 }
2518
2519 goto sigout_locked;
2520
2521 case SIGCONT:
2522 /*
2523 * Let the process run. If it's sleeping on an
2524 * event, it remains so.
2525 */
2526 assert(signal_reason == NULL);
2527 OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2528 sig_proc->p_contproc = proc_getpid(sig_proc);
2529 sig_proc->p_xstat = signum;
2530
2531 (void) task_resume_internal(sig_task);
2532
2533 /*
2534 * When processing a SIGCONT, we need to check
2535 * to see if there are signals pending that
2536 * were not delivered because we had been
2537 * previously stopped. If that's the case,
2538 * we need to thread_abort_safely() to trigger
2539 * interruption of the current system call to
2540 * cause their handlers to fire. If it's only
2541 * the SIGCONT, then don't wake up.
2542 */
2543 if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2544 uth->uu_siglist &= ~mask;
2545 sig_proc->p_stat = SRUN;
2546 goto runlocked;
2547 }
2548
2549 uth->uu_siglist &= ~mask;
2550 sig_proc->p_stat = SRUN;
2551 goto sigout_locked;
2552
2553 default:
2554 /*
2555 * A signal which has a default action of killing
2556 * the process, and for which there is no handler,
2557 * needs to act like SIGKILL
2558 */
2559 if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2560 sig_proc->p_stat = SRUN;
2561 kret = thread_abort(sig_thread);
2562 update_thread_policy = (kret == KERN_SUCCESS);
2563
2564 if (uth->uu_exit_reason == OS_REASON_NULL) {
2565 if (signal_reason == OS_REASON_NULL) {
2566 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2567 proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);
2568
2569 signal_reason = build_signal_reason(signum, NULL);
2570 }
2571
2572 os_reason_ref(signal_reason);
2573 set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2574 }
2575
2576 goto sigout_locked;
2577 }
2578
2579 /*
2580 * All other signals wake up the process, but don't
2581 * resume it.
2582 */
2583 if (sig_proc->p_stat == SSTOP) {
2584 goto sigout_locked;
2585 }
2586 goto runlocked;
2587 }
2588 }
2589 /*NOTREACHED*/
2590
2591 runlocked:
2592 /*
2593 * If we're being traced (possibly because someone attached us
2594 * while we were stopped), check for a signal from the debugger.
2595 */
2596 if (sig_proc->p_stat == SSTOP) {
2597 if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) {
2598 uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2599 }
2600
2601 if ((flavor & PSIG_VFORK) != 0) {
2602 sig_proc->p_stat = SRUN;
2603 }
2604 } else {
2605 /*
2606 * The BSD equivalent of setrunnable(p):
2607 * wake up the thread if it is interruptible.
2608 */
2609 sig_proc->p_stat = SRUN;
2610 if ((flavor & PSIG_VFORK) == 0) {
2611 thread_abort_safely(sig_thread);
2612 }
2613 }
2614
2615 sigout_locked:
2616 if (update_thread_policy) {
2617 /*
2618 * Update the thread policy to reflect impending termination, raising its priority if
2619 * necessary. This needs to be done before we drop the proc lock because the
2620 * thread can take the fatal signal once it's dropped.
2621 */
2622 proc_set_thread_policy(sig_thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2623 }
2624
2625 proc_unlock(sig_proc);
2626
2627 sigout_unlocked:
2628 os_reason_free(signal_reason);
2629 if ((flavor & PSIG_LOCKED) == 0) {
2630 proc_signalend(sig_proc, 0);
2631 }
2632 }
2633
2634 void
2635 psignal(proc_t p, int signum)
2636 {
2637 psignal_internal(p, NULL, NULL, 0, signum, NULL);
2638 }
2639
2640 void
2641 psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason)
2642 {
2643 psignal_internal(p, NULL, NULL, 0, signum, signal_reason);
2644 }
2645
2646 void
2647 psignal_sigkill_with_reason(struct proc *p, struct os_reason *signal_reason)
2648 {
2649 psignal_internal(p, NULL, NULL, 0, SIGKILL, signal_reason);
2650 }
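/*
 * Ownership sketch (editorial): every *_with_reason() variant hands the
 * reason to psignal_internal(), which always drops one reference on it, so
 * the typical caller creates the reason and lets the signal path consume it.
 * The namespace/code pair below is only an assumed example; any
 * os_reason_create() result behaves the same way:
 *
 *     os_reason_t kill_reason = os_reason_create(OS_REASON_SIGNAL, SIGKILL);
 *     psignal_with_reason(p, SIGKILL, kill_reason);
 *     // kill_reason must not be touched after this point
 */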
2651
2652 void
2653 psignal_locked(proc_t p, int signum)
2654 {
2655 psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum, NULL);
2656 }
2657
2658 void
2659 psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, int signum, struct os_reason *signal_reason)
2660 {
2661 psignal_internal(p, new_task, thread, PSIG_VFORK, signum, signal_reason);
2662 }
2663
2664 void
2665 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2666 {
2667 psignal_internal(p, new_task, thread, PSIG_VFORK, signum, NULL);
2668 }
2669
2670 void
2671 psignal_uthread(thread_t thread, int signum)
2672 {
2673 psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum, NULL);
2674 }
2675
2676 /* same as psignal(), but prefer delivery to 'thread' if possible */
2677 void
2678 psignal_try_thread(proc_t p, thread_t thread, int signum)
2679 {
2680 psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum, NULL);
2681 }
2682
2683 void
2684 psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2685 {
2686 psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
2687 }
2688
2689 void
2690 psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
2691 {
2692 psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason);
2693 }
2694
2695 /*
2696 * If the current process has received a signal (should be caught or cause
2697 * termination, should interrupt current syscall), return the signal number.
2698 * Stop signals with default action are processed immediately, then cleared;
2699 * they aren't returned. This is checked after each entry to the system for
2700 * a syscall or trap (though this can usually be done without calling issignal
2701 * by checking the pending signal masks in the CURSIG macro.) The normal call
2702 * sequence is
2703 *
2704 * while (signum = CURSIG(curproc))
2705 * postsig(signum);
2706 */
2707 int
2708 issignal_locked(proc_t p)
2709 {
2710 int signum, mask, prop, sigbits;
2711 thread_t cur_act;
2712 struct uthread * ut;
2713 proc_t pp;
2714 kauth_cred_t my_cred;
2715 int retval = 0;
2716 uid_t r_uid;
2717
2718 cur_act = current_thread();
2719
2720 #if SIGNAL_DEBUG
2721 if (rdebug_proc && (p == rdebug_proc)) {
2722 ram_printf(3);
2723 }
2724 #endif /* SIGNAL_DEBUG */
2725
2726 /*
2727 * Try to grab the signal lock.
2728 */
2729 if (sig_try_locked(p) <= 0) {
2730 return 0;
2731 }
2732
2733 proc_signalstart(p, 1);
2734
2735 ut = get_bsdthread_info(cur_act);
2736 for (;;) {
2737 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2738
2739 if (p->p_lflag & P_LPPWAIT) {
2740 sigbits &= ~stopsigmask;
2741 }
2742 if (sigbits == 0) { /* no signal to send */
2743 retval = 0;
2744 goto out;
2745 }
2746
2747 signum = ffs((unsigned int)sigbits);
2748 mask = sigmask(signum);
2749 prop = sigprop[signum];
2750
2751 /*
2752 * We should see pending but ignored signals
2753 * only if P_LTRACED was on when they were posted.
2754 */
2755 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2756 ut->uu_siglist &= ~mask;
2757 continue;
2758 }
2759
2760 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2761 /*
2762 * If traced, deliver the signal to the debugger, and wait to be
2763 * released.
2764 */
2765 task_t task;
2766 p->p_xstat = signum;
2767
2768 if (p->p_lflag & P_LSIGEXC) {
2769 p->sigwait = TRUE;
2770 p->sigwait_thread = cur_act;
2771 p->p_stat = SSTOP;
2772 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2773 p->p_lflag &= ~P_LWAITED;
2774 ut->uu_siglist &= ~mask; /* clear the current signal from the pending list */
2775 proc_signalend(p, 1);
2776 proc_unlock(p);
2777 do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2778 proc_lock(p);
2779 proc_signalstart(p, 1);
2780 } else {
2781 proc_unlock(p);
2782 my_cred = kauth_cred_proc_ref(p);
2783 r_uid = kauth_cred_getruid(my_cred);
2784 kauth_cred_unref(&my_cred);
2785
2786 pp = proc_parentholdref(p);
2787 if (pp != PROC_NULL) {
2788 proc_lock(pp);
2789
2790 pp->si_pid = proc_getpid(p);
2791 pp->p_xhighbits = p->p_xhighbits;
2792 p->p_xhighbits = 0;
2793 pp->si_status = p->p_xstat;
2794 pp->si_code = CLD_TRAPPED;
2795 pp->si_uid = r_uid;
2796
2797 proc_unlock(pp);
2798 }
2799
2800 /*
2801 * XXX Have to really stop for debuggers;
2802 * XXX stop() doesn't do the right thing.
2803 */
2804 task = p->task;
2805 task_suspend_internal(task);
2806
2807 proc_lock(p);
2808 p->sigwait = TRUE;
2809 p->sigwait_thread = cur_act;
2810 p->p_stat = SSTOP;
2811 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2812 p->p_lflag &= ~P_LWAITED;
2813 ut->uu_siglist &= ~mask;
2814
2815 proc_signalend(p, 1);
2816 proc_unlock(p);
2817
2818 if (pp != PROC_NULL) {
2819 psignal(pp, SIGCHLD);
2820 proc_list_lock();
2821 wakeup((caddr_t)pp);
2822 proc_parentdropref(pp, 1);
2823 proc_list_unlock();
2824 }
2825
2826 assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2827 thread_block(THREAD_CONTINUE_NULL);
2828 proc_lock(p);
2829 proc_signalstart(p, 1);
2830 }
2831
2832 p->sigwait = FALSE;
2833 p->sigwait_thread = NULL;
2834 wakeup((caddr_t)&p->sigwait_thread);
2835
2836 if (signum == SIGKILL || ut->uu_siglist & sigmask(SIGKILL)) {
2837 /*
2838 * Deliver a pending sigkill even if it's not the current signal.
2839 * Necessary for PT_KILL, which should not be delivered to the
2840 * debugger, but we can't differentiate it from any other KILL.
2841 */
2842 signum = SIGKILL;
2843 goto deliver_sig;
2844 }
2845
2846 /* We may have to quit. */
2847 if (thread_should_abort(current_thread())) {
2848 retval = 0;
2849 goto out;
2850 }
2851
2852 /*
2853 * If parent wants us to take the signal,
2854 * then it will leave it in p->p_xstat;
2855 * otherwise we just look for signals again.
2856 */
2857 signum = p->p_xstat;
2858 if (signum == 0) {
2859 continue;
2860 }
2861
2862 /*
2863 * Put the new signal into p_siglist. If the
2864 * signal is being masked, look for other signals.
2865 */
2866 mask = sigmask(signum);
2867 ut->uu_siglist |= mask;
2868 if (ut->uu_sigmask & mask) {
2869 continue;
2870 }
2871 }
2872
2873 /*
2874 * Decide whether the signal should be returned.
2875 * Return the signal's number, or fall through
2876 * to clear it from the pending mask.
2877 */
2878
2879 switch ((long)SIGACTION(p, signum)) {
2880 case (long)SIG_DFL:
2881 /*
2882 * If there is a pending stop signal to process
2883 * with default action, stop here,
2884 * then clear the signal. However,
2885 * if process is member of an orphaned
2886 * process group, ignore tty stop signals.
2887 */
2888 if (prop & SA_STOP) {
2889 struct pgrp * pg;
2890
2891 proc_unlock(p);
2892 pg = proc_pgrp(p, NULL);
2893 if (p->p_lflag & P_LTRACED ||
2894 (pg->pg_jobc == 0 &&
2895 prop & SA_TTYSTOP)) {
2896 proc_lock(p);
2897 pgrp_rele(pg);
2898 break; /* ignore signal */
2899 }
2900 pgrp_rele(pg);
2901 if (p->p_stat != SSTOP) {
2902 proc_lock(p);
2903 p->p_xstat = signum;
2904 p->p_stat = SSTOP;
2905 p->p_lflag &= ~P_LWAITED;
2906 proc_unlock(p);
2907
2908 pp = proc_parentholdref(p);
2909 stop(p, pp);
2910 if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2911 my_cred = kauth_cred_proc_ref(p);
2912 r_uid = kauth_cred_getruid(my_cred);
2913 kauth_cred_unref(&my_cred);
2914
2915 proc_lock(pp);
2916 pp->si_pid = proc_getpid(p);
2917 pp->si_status = WEXITSTATUS(p->p_xstat);
2918 pp->si_code = CLD_STOPPED;
2919 pp->si_uid = r_uid;
2920 proc_unlock(pp);
2921
2922 psignal(pp, SIGCHLD);
2923 }
2924 if (pp != PROC_NULL) {
2925 proc_parentdropref(pp, 0);
2926 }
2927 }
2928 proc_lock(p);
2929 break;
2930 } else if (prop & SA_IGNORE) {
2931 /*
2932 * Except for SIGCONT, shouldn't get here.
2933 * Default action is to ignore; drop it.
2934 */
2935 break; /* ignore signal */
2936 } else {
2937 goto deliver_sig;
2938 }
2939
2940 case (long)SIG_IGN:
2941 /*
2942 * Masking above should prevent us ever trying
2943 * to take action on an ignored signal other
2944 * than SIGCONT, unless process is traced.
2945 */
2946 if ((prop & SA_CONT) == 0 &&
2947 (p->p_lflag & P_LTRACED) == 0) {
2948 printf("issignal\n");
2949 }
2950 break; /* ignore signal */
2951
2952 default:
2953 /* This signal has an action - deliver it. */
2954 goto deliver_sig;
2955 }
2956
2957 /* If we dropped through, the signal was ignored - remove it from pending list. */
2958 ut->uu_siglist &= ~mask;
2959 } /* for(;;) */
2960
2961 /* NOTREACHED */
2962
2963 deliver_sig:
2964 ut->uu_siglist &= ~mask;
2965 retval = signum;
2966
2967 out:
2968 proc_signalend(p, 1);
2969 return retval;
2970 }
2971
2972 /* called from _sleep */
2973 int
2974 CURSIG(proc_t p)
2975 {
2976 int signum, mask, prop, sigbits;
2977 thread_t cur_act;
2978 struct uthread * ut;
2979 int retnum = 0;
2980
2981
2982 cur_act = current_thread();
2983
2984 ut = get_bsdthread_info(cur_act);
2985
2986 if (ut->uu_siglist == 0) {
2987 return 0;
2988 }
2989
2990 if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) {
2991 return 0;
2992 }
2993
2994 sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2995
2996 for (;;) {
2997 if (p->p_lflag & P_LPPWAIT) {
2998 sigbits &= ~stopsigmask;
2999 }
3000 if (sigbits == 0) { /* no signal to send */
3001 return retnum;
3002 }
3003
3004 signum = ffs((unsigned int)sigbits);
3005 mask = sigmask(signum);
3006 prop = sigprop[signum];
3007 sigbits &= ~mask; /* take the signal out */
3008
3009 /*
3010 * We should see pending but ignored signals
3011 * only if P_LTRACED was on when they were posted.
3012 */
3013 if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
3014 continue;
3015 }
3016
3017 if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
3018 return signum;
3019 }
3020
3021 /*
3022 * Decide whether the signal should be returned.
3023 * Return the signal's number, or fall through
3024 * to clear it from the pending mask.
3025 */
3026
3027 switch ((long)SIGACTION(p, signum)) {
3028 case (long)SIG_DFL:
3029 /*
3030 * If there is a pending stop signal to process
3031 * with default action, stop here,
3032 * then clear the signal. However,
3033 * if process is member of an orphaned
3034 * process group, ignore tty stop signals.
3035 */
3036 if (prop & SA_STOP) {
3037 struct pgrp *pg;
3038
3039 pg = proc_pgrp(p, NULL);
3040
3041 if (p->p_lflag & P_LTRACED ||
3042 (pg->pg_jobc == 0 &&
3043 prop & SA_TTYSTOP)) {
3044 pgrp_rele(pg);
3045 break; /* == ignore */
3046 }
3047 pgrp_rele(pg);
3048 retnum = signum;
3049 break;
3050 } else if (prop & SA_IGNORE) {
3051 /*
3052 * Except for SIGCONT, shouldn't get here.
3053 * Default action is to ignore; drop it.
3054 */
3055 break; /* == ignore */
3056 } else {
3057 return signum;
3058 }
3059 /*NOTREACHED*/
3060
3061 case (long)SIG_IGN:
3062 /*
3063 * Masking above should prevent us ever trying
3064 * to take action on an ignored signal other
3065 * than SIGCONT, unless process is traced.
3066 */
3067 if ((prop & SA_CONT) == 0 &&
3068 (p->p_lflag & P_LTRACED) == 0) {
3069 printf("issignal\n");
3070 }
3071 break; /* == ignore */
3072
3073 default:
3074 /*
3075 * This signal has an action, let
3076 * postsig() process it.
3077 */
3078 return signum;
3079 }
3080 }
3081 /* NOTREACHED */
3082 }
3083
3084 /*
3085 * Put the argument process into the stopped state and notify the parent
3086 * via wakeup. Signals are handled elsewhere. The process must not be
3087 * on the run queue.
3088 */
3089 static void
3090 stop(proc_t p, proc_t parent)
3091 {
3092 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3093 if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
3094 proc_list_lock();
3095 wakeup((caddr_t)parent);
3096 proc_list_unlock();
3097 }
3098 (void) task_suspend_internal(p->task);
3099 }
3100
3101 /*
3102 * Take the action for the specified signal
3103 * from the current set of pending signals.
3104 */
3105 void
3106 postsig_locked(int signum)
3107 {
3108 proc_t p = current_proc();
3109 struct sigacts *ps = &p->p_sigacts;
3110 user_addr_t catcher;
3111 uint32_t code;
3112 int mask, returnmask;
3113 struct uthread * ut;
3114 os_reason_t ut_exit_reason = OS_REASON_NULL;
3115
3116 #if DIAGNOSTIC
3117 if (signum == 0) {
3118 panic("postsig");
3119 }
3120 /*
3121 * This must be called on master cpu
3122 */
3123 if (cpu_number() != master_cpu) {
3124 panic("psig not on master");
3125 }
3126 #endif
3127
3128 /*
3129 * Try to grab the signal lock.
3130 */
3131 if (sig_try_locked(p) <= 0) {
3132 return;
3133 }
3134
3135 proc_signalstart(p, 1);
3136
3137 ut = current_uthread();
3138 mask = sigmask(signum);
3139 ut->uu_siglist &= ~mask;
3140 catcher = SIGACTION(p, signum);
3141 if (catcher == SIG_DFL) {
3142 /*
3143 * Default catcher, where the default is to kill
3144 * the process. (Other cases were ignored above.)
3145 */
3146
3147 /*
3148 * exit_with_reason() below will consume a reference to the thread's exit reason, so we take another
3149 * reference so the thread still has one even after we call exit_with_reason(). The thread's reference will
3150 * ultimately be destroyed in uthread_cleanup().
3151 */
3152 ut_exit_reason = ut->uu_exit_reason;
3153 os_reason_ref(ut_exit_reason);
3154
3155 p->p_acflag |= AXSIG;
3156 if (sigprop[signum] & SA_CORE) {
3157 p->p_sigacts.ps_sig = signum;
3158 proc_signalend(p, 1);
3159 proc_unlock(p);
3160 #if CONFIG_COREDUMP
3161 if (coredump(p, 0, 0) == 0) {
3162 signum |= WCOREFLAG;
3163 }
3164 #endif
3165 } else {
3166 proc_signalend(p, 1);
3167 proc_unlock(p);
3168 }
3169
3170 #if CONFIG_DTRACE
3171 bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
3172
3173 ut->t_dtrace_siginfo.si_signo = signum;
3174 ut->t_dtrace_siginfo.si_pid = p->si_pid;
3175 ut->t_dtrace_siginfo.si_uid = p->si_uid;
3176 ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
3177
3178 /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
3179 switch (signum) {
3180 case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
3181 DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
3182 break;
3183 default:
3184 break;
3185 }
3186
3187
3188 DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
3189 void (*)(void), SIG_DFL);
3190 #endif
3191
3192 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
3193 proc_getpid(p), W_EXITCODE(0, signum), 3, 0, 0);
3194
3195 exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut_exit_reason);
3196
3197 proc_lock(p);
3198 return;
3199 } else {
3200 /*
3201 * If we get here, the signal must be caught.
3202 */
3203 #if DIAGNOSTIC
3204 if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) {
3205 log(LOG_WARNING,
3206 "postsig: processing masked or ignored signal\n");
3207 }
3208 #endif
3209
3210 /*
3211 * Set the new mask value and also defer further
3212 * occurrences of this signal.
3213 *
3214 * Special case: user has done a sigpause. Here the
3215 * current mask is not of interest, but rather the
3216 * mask from before the sigpause is what we want
3217 * restored after the signal processing is completed.
3218 */
3219 if (ut->uu_flag & UT_SAS_OLDMASK) {
3220 returnmask = ut->uu_oldmask;
3221 ut->uu_flag &= ~UT_SAS_OLDMASK;
3222 ut->uu_oldmask = 0;
3223 } else {
3224 returnmask = ut->uu_sigmask;
3225 }
3226 ut->uu_sigmask |= ps->ps_catchmask[signum];
3227 if ((ps->ps_signodefer & mask) == 0) {
3228 ut->uu_sigmask |= mask;
3229 }
3230 sigset_t siginfo = ps->ps_siginfo;
3231 if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
3232 if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) {
3233 p->p_sigignore |= mask;
3234 }
3235 if (SIGACTION(p, signum) != SIG_DFL) {
3236 proc_set_sigact(p, signum, SIG_DFL);
3237 }
3238 ps->ps_siginfo &= ~mask;
3239 ps->ps_signodefer &= ~mask;
3240 }
3241
3242 if (ps->ps_sig != signum) {
3243 code = 0;
3244 } else {
3245 code = ps->ps_code;
3246 ps->ps_code = 0;
3247 }
3248 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
3249 sendsig(p, catcher, signum, returnmask, code, siginfo);
3250 }
3251 proc_signalend(p, 1);
3252 }
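/*
 * Userland view (editorial sketch) of the UT_SAS_OLDMASK handling above: a
 * sigsuspend(2) caller temporarily swaps in a wait mask, and the pre-suspend
 * mask is what gets restored once the caught signal has been processed:
 *
 *     sigset_t wait_mask;
 *     sigemptyset(&wait_mask);    // take any signal while suspended
 *     sigsuspend(&wait_mask);     // handler runs, then the previously
 *                                 // blocked mask is reinstated
 */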
3253
3254 /*
3255 * Attach a signal knote to the list of knotes for this process.
3256 *
3257 * Signal knotes share the knote list with proc knotes. This
3258 * could be avoided by using a signal-specific knote list, but
3259 * probably isn't worth the trouble.
3260 */
3261
3262 static int
3263 filt_sigattach(struct knote *kn, __unused struct kevent_qos_s *kev)
3264 {
3265 proc_t p = current_proc(); /* can attach only to oneself */
3266
3267 proc_klist_lock();
3268
3269 kn->kn_proc = p;
3270 kn->kn_flags |= EV_CLEAR; /* automatically set */
3271 kn->kn_sdata = 0; /* incoming data is ignored */
3272
3273 KNOTE_ATTACH(&p->p_klist, kn);
3274
3275 proc_klist_unlock();
3276
3277 /* edge-triggered events can't have fired before we attached */
3278 return 0;
3279 }
3280
3281 /*
3282 * remove the knote from the process list, if it hasn't already
3283 * been removed by exit processing.
3284 */
3285
3286 static void
3287 filt_sigdetach(struct knote *kn)
3288 {
3289 proc_t p = kn->kn_proc;
3290
3291 proc_klist_lock();
3292 kn->kn_proc = NULL;
3293 KNOTE_DETACH(&p->p_klist, kn);
3294 proc_klist_unlock();
3295 }
3296
3297 /*
3298 * Post an event to the signal filter. Because we share the same list
3299 * as process knotes, we have to filter out and handle only signal events.
3300 *
3301 * We assume that we process fdt_invalidate() before we post the NOTE_EXIT for
3302 * a process during exit. Therefore, since signal filters can only be
3303 * set up "in-process", we should have already torn down the kqueue
3304 * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3305 */
3306 static int
3307 filt_signal(struct knote *kn, long hint)
3308 {
3309 if (hint & NOTE_SIGNAL) {
3310 hint &= ~NOTE_SIGNAL;
3311
3312 if (kn->kn_id == (unsigned int)hint) {
3313 kn->kn_hook32++;
3314 }
3315 } else if (hint & NOTE_EXIT) {
3316 panic("filt_signal: detected NOTE_EXIT event");
3317 }
3318
3319 return kn->kn_hook32 != 0;
3320 }
3321
3322 static int
3323 filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev)
3324 {
3325 #pragma unused(kev)
3326
3327 int res;
3328
3329 proc_klist_lock();
3330
3331 /*
3332 * No data to save - just capture if it is already fired
3333 */
3334 res = (kn->kn_hook32 > 0);
3335
3336 proc_klist_unlock();
3337
3338 return res;
3339 }
3340
3341 static int
3342 filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev)
3343 {
3344 int res = 0;
3345
3346 /*
3347 * Snapshot the event data.
3348 */
3349
3350 proc_klist_lock();
3351 if (kn->kn_hook32) {
3352 knote_fill_kevent(kn, kev, kn->kn_hook32);
3353 kn->kn_hook32 = 0;
3354 res = 1;
3355 }
3356 proc_klist_unlock();
3357 return res;
3358 }
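/*
 * Userland sketch (editorial) of the EVFILT_SIGNAL plumbing above: the
 * kn_hook32 counter surfaces as the kevent's data field, i.e. the number of
 * times the signal fired since the event was last read:
 *
 *     #include <sys/event.h>
 *     #include <signal.h>
 *
 *     int kq = kqueue();
 *     struct kevent kev;
 *     EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *     signal(SIGUSR1, SIG_IGN);               // the filter still counts it
 *     kevent(kq, &kev, 1, NULL, 0, NULL);     // register the knote
 *     kevent(kq, NULL, 0, &kev, 1, NULL);     // block; kev.data = count
 */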
3359
3360 void
3361 bsd_ast(thread_t thread)
3362 {
3363 proc_t p = current_proc();
3364 struct uthread *ut = get_bsdthread_info(thread);
3365 int signum;
3366 static int bsd_init_done = 0;
3367
3368 if (p == NULL) {
3369 return;
3370 }
3371
3372 /* don't run bsd ast on exec copy or exec'ed tasks */
3373 if (task_did_exec(current_task()) || task_is_exec_copy(current_task())) {
3374 return;
3375 }
3376
3377 if (timerisset(&p->p_vtimer_user.it_value)) {
3378 uint32_t microsecs;
3379
3380 task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);
3381
3382 if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
3383 if (timerisset(&p->p_vtimer_user.it_value)) {
3384 task_vtimer_set(p->task, TASK_VTIMER_USER);
3385 } else {
3386 task_vtimer_clear(p->task, TASK_VTIMER_USER);
3387 }
3388
3389 psignal_try_thread(p, thread, SIGVTALRM);
3390 }
3391 }
3392
3393 if (timerisset(&p->p_vtimer_prof.it_value)) {
3394 uint32_t microsecs;
3395
3396 task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);
3397
3398 if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
3399 if (timerisset(&p->p_vtimer_prof.it_value)) {
3400 task_vtimer_set(p->task, TASK_VTIMER_PROF);
3401 } else {
3402 task_vtimer_clear(p->task, TASK_VTIMER_PROF);
3403 }
3404
3405 psignal_try_thread(p, thread, SIGPROF);
3406 }
3407 }
3408
3409 if (timerisset(&p->p_rlim_cpu)) {
3410 struct timeval tv;
3411
3412 task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);
3413
3414 proc_spinlock(p);
3415 if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
3416 tv.tv_sec = 0;
3417 timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
3418 proc_spinunlock(p);
3419 } else {
3420 timerclear(&p->p_rlim_cpu);
3421 proc_spinunlock(p);
3422
3423 task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
3424
3425 psignal_try_thread(p, thread, SIGXCPU);
3426 }
3427 }
3428
3429 #if CONFIG_DTRACE
3430 if (ut->t_dtrace_sig) {
3431 uint8_t dt_action_sig = ut->t_dtrace_sig;
3432 ut->t_dtrace_sig = 0;
3433 psignal(p, dt_action_sig);
3434 }
3435
3436 if (ut->t_dtrace_stop) {
3437 ut->t_dtrace_stop = 0;
3438 proc_lock(p);
3439 p->p_dtrace_stop = 1;
3440 proc_unlock(p);
3441 (void)task_suspend_internal(p->task);
3442 }
3443
3444 if (ut->t_dtrace_resumepid) {
3445 proc_t resumeproc = proc_find((int)ut->t_dtrace_resumepid);
3446 ut->t_dtrace_resumepid = 0;
3447 if (resumeproc != PROC_NULL) {
3448 proc_lock(resumeproc);
3449 /* We only act on processes stopped by dtrace */
3450 if (resumeproc->p_dtrace_stop) {
3451 resumeproc->p_dtrace_stop = 0;
3452 proc_unlock(resumeproc);
3453 task_resume_internal(resumeproc->task);
3454 } else {
3455 proc_unlock(resumeproc);
3456 }
3457 proc_rele(resumeproc);
3458 }
3459 }
3460
3461 #endif /* CONFIG_DTRACE */
3462
3463 proc_lock(p);
3464 if (CHECK_SIGNALS(p, current_thread(), ut)) {
3465 while ((signum = issignal_locked(p))) {
3466 postsig_locked(signum);
3467 }
3468 }
3469 proc_unlock(p);
3470
3471 #ifdef CONFIG_32BIT_TELEMETRY
3472 if (task_consume_32bit_log_flag(p->task)) {
3473 proc_log_32bit_telemetry(p);
3474 }
3475 #endif /* CONFIG_32BIT_TELEMETRY */
3476
3477 if (!bsd_init_done) {
3478 bsd_init_done = 1;
3479 bsdinit_task();
3480 }
3481 }
3482
3483 /* ptrace set runnable */
3484 void
3485 pt_setrunnable(proc_t p)
3486 {
3487 task_t task;
3488
3489 task = p->task;
3490
3491 if (p->p_lflag & P_LTRACED) {
3492 proc_lock(p);
3493 p->p_stat = SRUN;
3494 proc_unlock(p);
3495 if (p->sigwait) {
3496 wakeup((caddr_t)&(p->sigwait));
3497 if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479
3498 task_release(task);
3499 }
3500 }
3501 }
3502 }
3503
3504 kern_return_t
3505 do_bsdexception(
3506 int exc,
3507 int code,
3508 int sub)
3509 {
3510 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
3511
3512 codes[0] = code;
3513 codes[1] = sub;
3514 return bsd_exception(exc, codes, 2);
3515 }
3516
3517 int
3518 proc_pendingsignals(proc_t p, sigset_t mask)
3519 {
3520 struct uthread * uth;
3521 sigset_t bits = 0;
3522
3523 proc_lock(p);
3524 /* If the process is in proc exit return no signal info */
3525 if (p->p_lflag & P_LPEXIT) {
3526 goto out;
3527 }
3528
3529
3530 bits = 0;
3531 TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3532 bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3533 }
3534 out:
3535 proc_unlock(p);
3536 return bits;
3537 }
3538
3539 int
3540 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3541 {
3542 struct uthread * uth;
3543 sigset_t bits = 0;
3544
3545 proc_lock(p);
3546 uth = (struct uthread *)get_bsdthread_info(th);
3547 if (uth) {
3548 bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3549 }
3550 proc_unlock(p);
3551 return bits;
3552 }
3553
3554 /*
3555 * Allow external reads of the sigprop array.
3556 */
3557 int
3558 hassigprop(int sig, int prop)
3559 {
3560 return sigprop[sig] & prop;
3561 }
3562
3563 void
3564 pgsigio(pid_t pgid, int sig)
3565 {
3566 proc_t p = PROC_NULL;
3567
3568 if (pgid < 0) {
3569 gsignal(-(pgid), sig);
3570 } else if (pgid > 0 && (p = proc_find(pgid)) != 0) {
3571 psignal(p, sig);
3572 }
3573 if (p != PROC_NULL) {
3574 proc_rele(p);
3575 }
3576 }
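/*
 * Editorial sketch: the sign convention above (negative pgid means a process
 * group, positive means a single pid) matches the classic BSD F_SETOWN
 * convention for async I/O notification, where a consumer commonly arranges
 * SIGIO delivery along these lines:
 *
 *     fcntl(fd, F_SETOWN, getpid());  // deliver SIGIO to this process
 *     fcntl(fd, F_SETFL, O_ASYNC);    // request async I/O notification
 *
 * after which the driver side ends up calling pgsigio() with the stored
 * owner value.
 */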
3577
3578 void
3579 proc_signalstart(proc_t p, int locked)
3580 {
3581 if (!locked) {
3582 proc_lock(p);
3583 }
3584
3585 if (p->p_signalholder == current_thread()) {
3586 panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3587 }
3588
3589 p->p_sigwaitcnt++;
3590 while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) {
3591 msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3592 }
3593 p->p_sigwaitcnt--;
3594
3595 p->p_lflag |= P_LINSIGNAL;
3596 p->p_signalholder = current_thread();
3597 if (!locked) {
3598 proc_unlock(p);
3599 }
3600 }
3601
3602 void
3603 proc_signalend(proc_t p, int locked)
3604 {
3605 if (!locked) {
3606 proc_lock(p);
3607 }
3608 p->p_lflag &= ~P_LINSIGNAL;
3609
3610 if (p->p_sigwaitcnt > 0) {
3611 wakeup(&p->p_sigmask);
3612 }
3613
3614 p->p_signalholder = NULL;
3615 if (!locked) {
3616 proc_unlock(p);
3617 }
3618 }
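/*
 * Serialization sketch (editorial): proc_signalstart()/proc_signalend()
 * bracket a per-process signal-delivery critical section. A hypothetical
 * caller that already holds the proc lock would use the locked variants:
 *
 *     proc_lock(p);
 *     proc_signalstart(p, 1);    // may sleep until P_LINSIGNAL clears
 *     // ... examine or update signal state ...
 *     proc_signalend(p, 1);      // wakes any waiters on p_sigmask
 *     proc_unlock(p);
 */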
3619
3620 void
3621 sig_lock_to_exit(proc_t p)
3622 {
3623 thread_t self = current_thread();
3624
3625 p->exit_thread = self;
3626 proc_unlock(p);
3627
3628 task_hold(p->task);
3629 task_wait(p->task, FALSE);
3630
3631 proc_lock(p);
3632 }
3633
3634 int
3635 sig_try_locked(proc_t p)
3636 {
3637 thread_t self = current_thread();
3638
3639 while (p->sigwait || p->exit_thread) {
3640 if (p->exit_thread) {
3641 return 0;
3642 }
3643 msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3644 if (thread_should_abort(self)) {
3645 /*
3646 * Terminate request - clean up.
3647 */
3648 proc_lock(p);
3649 return -1;
3650 }
3651 proc_lock(p);
3652 }
3653 return 1;
3654 }
3655