xref: /xnu-8020.121.3/bsd/kern/kern_sig.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 1995-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1989, 1991, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  * (c) UNIX System Laboratories, Inc.
32  * All or some portions of this file are derived from material licensed
33  * to the University of California by American Telephone and Telegraph
34  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35  * the permission of UNIX System Laboratories, Inc.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
66  */
67 /*
68  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69  * support for mandatory and extensible security protections.  This notice
70  * is included in support of clause 2.2 (b) of the Apple Public License,
71  * Version 2.0.
72  */
73 
74 #define SIGPROP         /* include signal properties table */
75 #include <sys/param.h>
76 #include <sys/resourcevar.h>
77 #include <sys/proc_internal.h>
78 #include <sys/kauth.h>
79 #include <sys/systm.h>
80 #include <sys/timeb.h>
81 #include <sys/times.h>
82 #include <sys/acct.h>
83 #include <sys/file_internal.h>
84 #include <sys/kernel.h>
85 #include <sys/wait.h>
86 #include <sys/signalvar.h>
87 #include <sys/syslog.h>
88 #include <sys/stat.h>
89 #include <sys/lock.h>
90 #include <sys/kdebug.h>
91 #include <sys/reason.h>
92 
93 #include <sys/mount.h>
94 #include <sys/sysproto.h>
95 
96 #include <security/audit/audit.h>
97 
98 #include <kern/cpu_number.h>
99 
100 #include <sys/vm.h>
101 #include <sys/user.h>           /* for coredump */
102 #include <kern/ast.h>           /* for APC support */
103 #include <kern/kalloc.h>
104 #include <kern/task.h>          /* extern void   *get_bsdtask_info(task_t); */
105 #include <kern/thread.h>
106 #include <kern/sched_prim.h>
107 #include <kern/thread_call.h>
108 #include <kern/policy_internal.h>
109 #include <kern/sync_sema.h>
110 
111 #include <mach/exception.h>
112 #include <mach/task.h>
113 #include <mach/thread_act.h>
114 #include <libkern/OSAtomic.h>
115 
116 #include <sys/sdt.h>
117 #include <sys/codesign.h>
118 #include <sys/random.h>
119 #include <libkern/section_keywords.h>
120 
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124 
125 /*
126  * Missing prototypes that Mach should export
127  *
128  * +++
129  */
130 extern int thread_enable_fpe(thread_t act, int onoff);
131 extern kern_return_t get_signalact(task_t, thread_t *, int);
132 extern unsigned int get_useraddr(void);
133 extern boolean_t task_did_exec(task_t task);
134 extern boolean_t task_is_exec_copy(task_t task);
135 extern void vm_shared_region_reslide_stale(void);
136 
137 /*
138  * ---
139  */
140 
141 extern void doexception(int exc, mach_exception_code_t code,
142     mach_exception_subcode_t sub);
143 
144 static void stop(proc_t, proc_t);
145 static int cansignal_nomac(proc_t, kauth_cred_t, proc_t, int);
146 int cansignal(proc_t, kauth_cred_t, proc_t, int);
147 int killpg1(proc_t, int, int, int, int);
148 kern_return_t do_bsdexception(int, int, int);
149 void __posix_sem_syscall_return(kern_return_t);
150 char *proc_name_address(void *p);
151 
152 static int      filt_sigattach(struct knote *kn, struct kevent_qos_s *kev);
153 static void     filt_sigdetach(struct knote *kn);
154 static int      filt_signal(struct knote *kn, long hint);
155 static int      filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev);
156 static int      filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev);
157 
158 SECURITY_READ_ONLY_EARLY(struct filterops) sig_filtops = {
159 	.f_attach = filt_sigattach,
160 	.f_detach = filt_sigdetach,
161 	.f_event = filt_signal,
162 	.f_touch = filt_signaltouch,
163 	.f_process = filt_signalprocess,
164 };
165 
/* structures and functions for the killpg1 iteration callback and filters */
167 struct killpg1_filtargs {
168 	bool posix;
169 	proc_t curproc;
170 };
171 
172 struct killpg1_iterargs {
173 	proc_t curproc;
174 	kauth_cred_t uc;
175 	int signum;
176 	int nfound;
177 };
178 
179 static int killpg1_allfilt(proc_t p, void * arg);
180 static int killpg1_callback(proc_t p, void * arg);
181 
182 static int pgsignal_callback(proc_t p, void * arg);
183 static kern_return_t get_signalthread(proc_t, int, thread_t *);
184 
185 
186 /* flags for psignal_internal */
187 #define PSIG_LOCKED     0x1
188 #define PSIG_VFORK      0x2
189 #define PSIG_THREAD     0x4
190 #define PSIG_TRY_THREAD 0x8
191 
192 static os_reason_t build_signal_reason(int signum, const char *procname);
193 static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason);
194 
195 /*
196  * NOTE: Source and target may *NOT* overlap! (target is smaller)
197  */
198 static void
sigaltstack_kern_to_user32(struct kern_sigaltstack * in,struct user32_sigaltstack * out)199 sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out)
200 {
201 	out->ss_sp          = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp);
202 	out->ss_size    = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size);
203 	out->ss_flags   = in->ss_flags;
204 }
205 
206 static void
sigaltstack_kern_to_user64(struct kern_sigaltstack * in,struct user64_sigaltstack * out)207 sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out)
208 {
209 	out->ss_sp          = in->ss_sp;
210 	out->ss_size    = in->ss_size;
211 	out->ss_flags   = in->ss_flags;
212 }
213 
214 /*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
216  * this works because we copy fields in order from the end of the struct to
217  * the beginning.
218  */
static void
sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out)
{
	/*
	 * Fields are copied strictly from the end of the struct toward the
	 * beginning; this is what makes an overlapping (in-place widening)
	 * conversion safe.  Do NOT reorder these stores.
	 */
	out->ss_flags   = in->ss_flags;
	out->ss_size    = in->ss_size;
	out->ss_sp              = CAST_USER_ADDR_T(in->ss_sp);
}
static void
sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out)
{
	/*
	 * Same end-to-beginning copy discipline as the 32-bit variant so an
	 * overlapping source and target remain safe.  Do NOT reorder.
	 */
	out->ss_flags   = in->ss_flags;
	out->ss_size    = (user_size_t)in->ss_size;
	out->ss_sp      = (user_addr_t)in->ss_sp;
}
233 
234 static void
sigaction_kern_to_user32(struct kern_sigaction * in,struct user32_sigaction * out)235 sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out)
236 {
237 	/* This assumes 32 bit __sa_handler is of type sig_t */
238 	out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler);
239 	out->sa_mask = in->sa_mask;
240 	out->sa_flags = in->sa_flags;
241 }
static void
sigaction_kern_to_user64(struct kern_sigaction *in, struct user64_sigaction *out)
{
	/* 64-bit handler addresses pass through unmodified (no narrowing) */
	out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;
}
250 
/*
 * Widen a 32-bit user __sigaction into kernel form, then convert the
 * signal trampoline pointer into the kernel's internal function-pointer
 * representation for the current thread.
 */
static void
__sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigaction *out)
{
	out->__sigaction_u.__sa_handler = CAST_USER_ADDR_T(in->__sigaction_u.__sa_handler);
	out->sa_tramp = CAST_USER_ADDR_T(in->sa_tramp);
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;

	/* conversion of a single pointer (sa_tramp) is expected to succeed */
	kern_return_t kr;
	kr = machine_thread_function_pointers_convert_from_user(current_thread(),
	    &out->sa_tramp, 1);
	assert(kr == KERN_SUCCESS);
}
264 
/*
 * Copy a 64-bit user __sigaction into kernel form, then convert the
 * signal trampoline pointer into the kernel's internal function-pointer
 * representation for the current thread.
 */
static void
__sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out)
{
	out->__sigaction_u.__sa_handler = (user_addr_t)in->__sigaction_u.__sa_handler;
	out->sa_tramp = (user_addr_t)in->sa_tramp;
	out->sa_mask = in->sa_mask;
	out->sa_flags = in->sa_flags;

	/* conversion of a single pointer (sa_tramp) is expected to succeed */
	kern_return_t kr;
	kr = machine_thread_function_pointers_convert_from_user(current_thread(),
	    &out->sa_tramp, 1);
	assert(kr == KERN_SUCCESS);
}
278 
#if SIGNAL_DEBUG
/* Debug-only helper and knobs; not referenced in this portion of the file */
void ram_printf(int);
int ram_debug = 0;              /* debug verbosity toggle */
unsigned int rdebug_proc = 0;   /* presumably a pid filter for debug output — TODO confirm */
void
ram_printf(int x)
{
	printf("x is %d", x);
}
#endif /* SIGNAL_DEBUG */
289 
290 
/*
 * Request BSD AST processing on the given thread so pending signal
 * work is noticed on its way back toward user mode.
 */
void
signal_setast(thread_t sig_actthread)
{
	act_set_astbsd(sig_actthread);
}
296 
/*
 * Base (non-MAC) permission check: may `src`, holding credential
 * `uc_src`, deliver `signum` to `dst`?  Returns 1 if allowed, 0 if
 * denied.  The checks below are ordered by priority; do not reorder.
 */
static int
cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
{
	/* you can signal yourself */
	if (src == dst) {
		return 1;
	}

	/* you can't send the init proc SIGKILL, even if root */
	if (signum == SIGKILL && dst == initproc) {
		return 0;
	}

	/* otherwise, root can always signal */
	if (kauth_cred_issuser(uc_src)) {
		return 1;
	}

	/* processes in the same session can send SIGCONT to each other */
	if (signum == SIGCONT && proc_sessionid(src) == proc_sessionid(dst)) {
		return 1;
	}

	/* the source process must be authorized to signal the target */
	{
		int allowed = 0;
		kauth_cred_t uc_dst = NOCRED, uc_ref = NOCRED;

		/* take a reference on the target's credential while we inspect it */
		uc_dst = uc_ref = kauth_cred_proc_ref(dst);

		/*
		 * If the real or effective UID of the sender matches the real or saved
		 * UID of the target, allow the signal to be sent.
		 */
		if (kauth_cred_getruid(uc_src) == kauth_cred_getruid(uc_dst) ||
		    kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) ||
		    kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) ||
		    kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) {
			allowed = 1;
		}

		/* drop the credential reference taken above */
		if (uc_ref != NOCRED) {
			kauth_cred_unref(&uc_ref);
			uc_ref = NOCRED;
		}

		return allowed;
	}
}
346 
/*
 * Can process `src`, with ucred `uc_src`, send the signal `signum` to process
 * `dst`?  The ucred is referenced by the caller so internal fields can be used
 * safely.
 */
int
cansignal(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum)
{
#if CONFIG_MACF
	/* a MAC policy veto denies the signal regardless of UID checks */
	if (mac_proc_check_signal(src, dst, signum)) {
		return 0;
	}
#endif

	/* fall through to the base UID/session permission check */
	return cansignal_nomac(src, uc_src, dst, signum);
}
363 
364 /*
365  * <rdar://problem/21952708> Some signals can be restricted from being handled,
366  * forcing the default action for that signal. This behavior applies only to
367  * non-root (EUID != 0) processes, and is configured with the "sigrestrict=x"
368  * bootarg:
369  *
370  *   0 (default): Disallow use of restricted signals. Trying to register a handler
371  *		returns ENOTSUP, which userspace may use to take special action (e.g. abort).
372  *   1: As above, but return EINVAL. Restricted signals behave similarly to SIGKILL.
373  *   2: Usual POSIX semantics.
374  */
/* boot-arg controlled policy; see the sigrestrict block comment above */
static TUNABLE(unsigned, sigrestrict_arg, "sigrestrict", 0);

#if PLATFORM_WatchOS
/*
 * Returns the mask of signals whose handling is restricted for this
 * caller: non-root processes get SIGRESTRICTMASK unless the boot-arg
 * requested full POSIX semantics (sigrestrict=2).
 */
static int
sigrestrictmask(void)
{
	if (kauth_getuid() != 0 && sigrestrict_arg != 2) {
		return SIGRESTRICTMASK;
	}
	return 0;
}

/*
 * Decide whether registering a handler for `signum` in process `p` is
 * denied by the sigrestrict policy.  Returns ENOTSUP (default app with
 * sigrestrict=0), EINVAL (otherwise restricted), or 0 if permitted.
 */
static int
signal_is_restricted(proc_t p, int signum)
{
	if (sigmask(signum) & sigrestrictmask()) {
		if (sigrestrict_arg == 0 &&
		    task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) {
			return ENOTSUP;
		} else {
			return EINVAL;
		}
	}
	return 0;
}

#else

/* Non-watchOS platforms impose no extra signal-handling restrictions. */
static inline int
signal_is_restricted(proc_t p, int signum)
{
	(void)p;
	(void)signum;
	return 0;
}
#endif /* !PLATFORM_WatchOS */
411 
412 /*
413  * Returns:	0			Success
414  *		EINVAL
415  *	copyout:EFAULT
416  *	copyin:EFAULT
417  *
418  * Notes:	Uses current thread as a parameter to inform PPC to enable
419  *		FPU exceptions via setsigvec(); this operation is not proxy
420  *		safe!
421  */
422 /* ARGSUSED */
/* ARGSUSED */
int
sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval)
{
	struct kern_sigaction vec;
	struct __kern_sigaction __vec;

	struct kern_sigaction *sa = &vec;
	struct sigacts *ps = &p->p_sigacts;

	int signum;
	int bit, error = 0;
	uint32_t sigreturn_validation = PS_SIGRETURN_VALIDATION_DEFAULT;

	/* reject out-of-range signals; SIGKILL/SIGSTOP dispositions are immutable */
	signum = uap->signum;
	if (signum <= 0 || signum >= NSIG ||
	    signum == SIGKILL || signum == SIGSTOP) {
		return EINVAL;
	}

	if (uap->nsa) {
		/* copy in the new action using the caller's ABI width */
		if (IS_64BIT_PROCESS(p)) {
			struct __user64_sigaction       __vec64;
			error = copyin(uap->nsa, &__vec64, sizeof(__vec64));
			__sigaction_user64_to_kern(&__vec64, &__vec);
		} else {
			struct __user32_sigaction       __vec32;
			error = copyin(uap->nsa, &__vec32, sizeof(__vec32));
			__sigaction_user32_to_kern(&__vec32, &__vec);
		}
		if (error) {
			return error;
		}

		/* record the caller's explicit opt-in/out of sigreturn validation */
		sigreturn_validation = (__vec.sa_flags & SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP) ?
		    PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED;
		__vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */

		/* installing a non-default handler may be denied by sigrestrict policy */
		if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) {
			if ((error = signal_is_restricted(p, signum))) {
				if (error == ENOTSUP) {
					printf("%s(%d): denied attempt to register action for signal %d\n",
					    proc_name_address(p), proc_pid(p), signum);
				}
				return error;
			}
		}
	}

	if (uap->osa) {
		/* reconstruct the previous action from per-process signal state */
		sa->sa_handler = SIGACTION(p, signum);
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0) {
			sa->sa_flags |= SA_ONSTACK;
		}
		/* ps_sigintr records "interruptible", i.e. the *absence* of SA_RESTART */
		if ((ps->ps_sigintr & bit) == 0) {
			sa->sa_flags |= SA_RESTART;
		}
		if (ps->ps_siginfo & bit) {
			sa->sa_flags |= SA_SIGINFO;
		}
		if (ps->ps_signodefer & bit) {
			sa->sa_flags |= SA_NODEFER;
		}
		/* SIGCHLD-specific flags live in p_flag, not in sigacts */
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) {
			sa->sa_flags |= SA_NOCLDSTOP;
		}
		if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) {
			sa->sa_flags |= SA_NOCLDWAIT;
		}

		/* copy the old action out in the caller's ABI width */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_sigaction vec64 = {};
			sigaction_kern_to_user64(sa, &vec64);
			error = copyout(&vec64, uap->osa, sizeof(vec64));
		} else {
			struct user32_sigaction vec32 = {};
			sigaction_kern_to_user32(sa, &vec32);
			error = copyout(&vec32, uap->osa, sizeof(vec32));
		}
		if (error) {
			return error;
		}
	}

	if (uap->nsa) {
		/*
		 * The first sigaction() call in a process latches the validation
		 * mode; later calls can no longer flip it (CAS from DEFAULT only).
		 */
		uint32_t old_sigreturn_validation = atomic_load_explicit(
			&ps->ps_sigreturn_validation, memory_order_relaxed);
		if (old_sigreturn_validation == PS_SIGRETURN_VALIDATION_DEFAULT) {
			atomic_compare_exchange_strong_explicit(&ps->ps_sigreturn_validation,
			    &old_sigreturn_validation, sigreturn_validation,
			    memory_order_relaxed, memory_order_relaxed);
		}
		error = setsigvec(p, current_thread(), signum, &__vec, FALSE);
	}

	return error;
}
522 
523 /* Routines to manipulate bits on all threads */
/* Routines to manipulate bits on all threads */
/*
 * Clear the signal bits in `bit` from every thread's pending set and
 * from the process-wide pending set.  `in_signalstart` indicates the
 * caller already bracketed this with proc_signalstart/proc_signalend.
 */
int
clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart)
{
	struct uthread * uth;

	proc_lock(p);
	if (!in_signalstart) {
		proc_signalstart(p, 1);
	}


	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_siglist &= ~bit;
	}
	p->p_siglist &= ~bit;
	if (!in_signalstart) {
		proc_signalend(p, 1);
	}
	proc_unlock(p);

	return 0;
}
546 
547 
/*
 * Remove the signals in `bit` from every thread's blocked mask and
 * from the process-wide mask, under the proc lock and signal bracket.
 */
static int
unblock_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;

	proc_lock(p);
	proc_signalstart(p, 1);


	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask &= ~bit;
	}
	p->p_sigmask &= ~bit;

	proc_signalend(p, 1);
	proc_unlock(p);
	return 0;
}
566 
/*
 * Add the signals in `bit` to every thread's blocked mask and to the
 * process-wide mask, under the proc lock and signal bracket.
 */
static int
block_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;

	proc_lock(p);
	proc_signalstart(p, 1);


	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask |= bit;
	}
	p->p_sigmask |=  bit;

	proc_signalend(p, 1);
	proc_unlock(p);
	return 0;
}
585 
/*
 * Replace every thread's blocked mask, and the process-wide mask,
 * with exactly `bit`, under the proc lock and signal bracket.
 */
int
set_procsigmask(proc_t p, int bit)
{
	struct uthread * uth;

	proc_lock(p);
	proc_signalstart(p, 1);


	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		uth->uu_sigmask = bit;
	}
	p->p_sigmask =  bit;
	proc_signalend(p, 1);
	proc_unlock(p);

	return 0;
}
604 
605 /* XXX should be static? */
606 /*
607  * Notes:	The thread parameter is used in the PPC case to select the
608  *		thread on which the floating point exception will be enabled
609  *		or disabled.  We can't simply take current_thread(), since
610  *		this is called from posix_spawn() on the not currently running
611  *		process/thread pair.
612  *
 *		We mark thread as unused to allow compilation without warning
614  *		on non-PPC platforms.
615  */
/*
 * Install the action `sa` for `signum` in process `p`, updating the
 * per-signal flag bitmasks in sigacts and the derived p_sigignore /
 * p_sigcatch sets.  Returns 0 or EINVAL (attempt to catch/ignore
 * SIGKILL or SIGSTOP).  `in_sigstart` is forwarded to
 * clear_procsiglist() to avoid re-entering the signal bracket.
 */
int
setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigaction *sa, boolean_t in_sigstart)
{
	struct sigacts *ps = &p->p_sigacts;
	int bit;

	assert(signum < NSIG);

	/* SIGKILL and SIGSTOP can never be caught or ignored */
	if ((signum == SIGKILL || signum == SIGSTOP) &&
	    sa->sa_handler != SIG_DFL) {
		return EINVAL;
	}
	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	proc_set_sigact_trampact(p, signum, sa->sa_handler, sa->sa_tramp);
	ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask;
	/* translate each sa_flags bit into its per-signal bitmask */
	if (sa->sa_flags & SA_SIGINFO) {
		ps->ps_siginfo |= bit;
	} else {
		ps->ps_siginfo &= ~bit;
	}
	/* ps_sigintr is the inverse sense of SA_RESTART */
	if ((sa->sa_flags & SA_RESTART) == 0) {
		ps->ps_sigintr |= bit;
	} else {
		ps->ps_sigintr &= ~bit;
	}
	if (sa->sa_flags & SA_ONSTACK) {
		ps->ps_sigonstack |= bit;
	} else {
		ps->ps_sigonstack &= ~bit;
	}
	if (sa->sa_flags & SA_RESETHAND) {
		ps->ps_sigreset |= bit;
	} else {
		ps->ps_sigreset &= ~bit;
	}
	if (sa->sa_flags & SA_NODEFER) {
		ps->ps_signodefer |= bit;
	} else {
		ps->ps_signodefer &= ~bit;
	}
	/* SIGCHLD child-handling options live in p_flag */
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP) {
			OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag);
		} else {
			OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag);
		}
		/* ignoring SIGCHLD implies NOCLDWAIT (no zombie children) */
		if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) {
			OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag);
		} else {
			OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag);
		}
	}

	/*
	 * Set bit in p_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in p_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		clear_procsiglist(p, bit, in_sigstart);
		if (signum != SIGCONT) {
			p->p_sigignore |= bit;  /* easier in psignal */
		}
		p->p_sigcatch &= ~bit;
	} else {
		p->p_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL) {
			p->p_sigcatch &= ~bit;
		} else {
			p->p_sigcatch |= bit;
		}
	}
	return 0;
}
695 
696 /*
697  * Initialize signal state for process 0;
698  * set to ignore signals that are ignored by default.
699  */
700 void
siginit(proc_t p)701 siginit(proc_t p)
702 {
703 	int i;
704 
705 	for (i = 1; i < NSIG; i++) {
706 		if (sigprop[i] & SA_IGNORE && i != SIGCONT) {
707 			p->p_sigignore |= sigmask(i);
708 		}
709 	}
710 }
711 
712 /*
713  * Reset signals for an exec of the specified process.
714  */
/*
 * Reset signal state across exec: fold process-pending signals into
 * the surviving thread, revert caught signals to their defaults, reset
 * sigreturn-validation latching, and tear down the alternate stack.
 */
void
execsigs(proc_t p, thread_t thread)
{
	struct sigacts *ps = &p->p_sigacts;
	int nc, mask;
	struct uthread *ut;

	ut = (struct uthread *)get_bsdthread_info(thread);

	/*
	 * transfer saved signal states from the process
	 * back to the current thread.
	 *
	 * NOTE: We do this without the process locked,
	 * because we are guaranteed to be single-threaded
	 * by this point in exec and the p_siglist is
	 * only accessed by threads inside the process.
	 */
	ut->uu_siglist |= p->p_siglist;
	p->p_siglist = 0;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	proc_reset_sigact(p, p->p_sigcatch);
	while (p->p_sigcatch) {
		/* pull the lowest-numbered caught signal out of the set */
		nc = ffs((unsigned int)p->p_sigcatch);
		mask = sigmask(nc);
		p->p_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT) {
				p->p_sigignore |= mask;
			}
			/* drop any pending instance now ignored by default */
			ut->uu_siglist &= ~mask;
		}
	}

	/* the new image starts with validation mode unlatched */
	atomic_store_explicit(&ps->ps_sigreturn_validation,
	    PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed);

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	/* thread */
	ut->uu_sigstk.ss_flags = SA_DISABLE;
	ut->uu_sigstk.ss_size = 0;
	ut->uu_sigstk.ss_sp = USER_ADDR_NULL;
	ut->uu_flag &= ~UT_ALTSTACK;
	/* process */
	ps->ps_sigonstack = 0;
}
769 
770 /*
771  * Manipulate signal mask.
772  * Note that we receive new mask, not pointer,
773  * and return old mask as return value;
774  * the library stub does the rest.
775  */
int
sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval)
{
	int error = 0;
	sigset_t oldmask, nmask;
	user_addr_t omask = uap->omask;
	struct uthread *ut;

	/* snapshot the old mask before any modification */
	ut = current_uthread();
	oldmask  = ut->uu_sigmask;

	if (uap->mask == USER_ADDR_NULL) {
		/* just want old mask */
		goto out;
	}
	error = copyin(uap->mask, &nmask, sizeof(sigset_t));
	if (error) {
		goto out;
	}

	/* sigcantmask keeps SIGKILL/SIGSTOP unblockable in every case */
	switch (uap->how) {
	case SIG_BLOCK:
		block_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_UNBLOCK:
		unblock_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	case SIG_SETMASK:
		set_procsigmask(p, (nmask & ~sigcantmask));
		signal_setast(current_thread());
		break;

	default:
		error = EINVAL;
		break;
	}
out:
	/* NOTE(review): copyout failure is ignored here — presumably historical behavior */
	if (!error && omask != USER_ADDR_NULL) {
		copyout(&oldmask, omask, sizeof(sigset_t));
	}
	return error;
}
822 
823 int
sigpending(__unused proc_t p,struct sigpending_args * uap,__unused int32_t * retval)824 sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *retval)
825 {
826 	struct uthread *ut;
827 	sigset_t pendlist;
828 
829 	ut = current_uthread();
830 	pendlist = ut->uu_siglist;
831 
832 	if (uap->osv) {
833 		copyout(&pendlist, uap->osv, sizeof(sigset_t));
834 	}
835 	return 0;
836 }
837 
838 /*
839  * Suspend process until signal, providing mask to be set
840  * in the meantime.  Note nonstandard calling convention:
841  * libc stub passes mask, not pointer, to save a copyin.
842  */
843 
/*
 * Continuation for sigsuspend's tsleep0: when the sleep is interrupted
 * by a signal, execution resumes here and the syscall completes with
 * EINTR.  Does not return.
 */
static int
sigcontinue(__unused int error)
{
	unix_syscall_return(EINTR);
}
850 
/*
 * Cancellation-point wrapper: honor any pending pthread cancel, then
 * forward to the nocancel variant (identical argument layout).
 */
int
sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval);
}
857 
int
sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;

	ut = current_uthread();

	/*
	 * When returning from sigpause, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* temporarily adopt the caller's mask (SIGKILL/SIGSTOP stay unblockable) */
	ut->uu_sigmask = (uap->mask & ~sigcantmask);
	/* sleep until a signal arrives; sigcontinue finishes the syscall */
	(void) tsleep0((caddr_t) p, PPAUSE | PCATCH, "pause", 0, sigcontinue);
	/* always return EINTR rather than ERESTART... */
	return EINTR;
}
879 
880 
/*
 * Mark the calling thread as ineligible for signal delivery and
 * disable pthread cancellation on it.  Always succeeds.
 */
int
__disable_threadsignal(__unused proc_t p,
    __unused struct __disable_threadsignal_args *uap,
    __unused int32_t *retval)
{
	struct uthread *uth;

	uth = current_uthread();

	/* No longer valid to have any signal delivered */
	uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE);

	return 0;
}
895 
/*
 * Act as a pthread cancellation point for the current thread.  If a
 * cancel is pending (UT_CANCEL set, and neither disabled nor already
 * acted upon): before a syscall (presyscall != 0) return EINTR to user
 * space immediately; otherwise abort the thread safely.
 */
void
__pthread_testcancel(int presyscall)
{
	thread_t self = current_thread();
	struct uthread * uthread;

	uthread = (struct uthread *)get_bsdthread_info(self);


	/* passing through a cancellation point clears the not-cancelable marker */
	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
		if (presyscall != 0) {
			unix_syscall_return(EINTR);
			/* NOTREACHED */
		} else {
			thread_abort_safely(self);
		}
	}
}
916 
917 
918 
/*
 * Mark the thread named by uap->thread_port (must belong to the current
 * task) as cancel-pending; if that thread is at a cancellation point
 * and has cancellation enabled, abort it safely right away.
 * Returns ESRCH if the port does not translate to a thread.
 */
int
__pthread_markcancel(__unused proc_t p,
    struct __pthread_markcancel_args *uap, __unused int32_t *retval)
{
	thread_act_t target_act;
	int error = 0;
	struct uthread *uth;

	target_act = (thread_act_t)port_name_to_thread(uap->thread_port,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);

	if (target_act == THR_ACT_NULL) {
		return ESRCH;
	}

	uth = (struct uthread *)get_bsdthread_info(target_act);

	/* only the first cancel request has any effect */
	if ((uth->uu_flag & (UT_CANCEL | UT_CANCELED)) == 0) {
		uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK);
		if (((uth->uu_flag & UT_NOTCANCELPT) == 0)
		    && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) {
			thread_abort_safely(target_act);
		}
	}

	/* drop the reference taken by port_name_to_thread */
	thread_deallocate(target_act);
	return error;
}
947 
/* if action == 0: return the cancellation state;
 *      if marked for cancellation, make the thread canceled
 * if action == 1: enable the cancel handling
 * if action == 2: disable the cancel handling
 */
int
__pthread_canceled(__unused proc_t p,
    struct __pthread_canceled_args *uap, __unused int32_t *retval)
{
	thread_act_t thread;
	struct uthread *uth;
	int action = uap->action;

	thread = current_thread();
	uth = (struct uthread *)get_bsdthread_info(thread);

	switch (action) {
	case 1:
		/* enable cancel handling */
		uth->uu_flag &= ~UT_CANCELDISABLE;
		return 0;
	case 2:
		/* disable cancel handling */
		uth->uu_flag |= UT_CANCELDISABLE;
		return 0;
	case 0:
	default:
		/* query: if a cancel is pending and actionable, consume it */
		if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			uth->uu_flag &= ~UT_CANCEL;
			uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK);
			return 0;
		}
		return EINVAL;
	}
	/* NOTREACHED: every switch arm returns above */
	return EINVAL;
}
982 
983 __attribute__((noreturn))
984 void
__posix_sem_syscall_return(kern_return_t kern_result)985 __posix_sem_syscall_return(kern_return_t kern_result)
986 {
987 	int error = 0;
988 
989 	if (kern_result == KERN_SUCCESS) {
990 		error = 0;
991 	} else if (kern_result == KERN_ABORTED) {
992 		error = EINTR;
993 	} else if (kern_result == KERN_OPERATION_TIMED_OUT) {
994 		error = ETIMEDOUT;
995 	} else {
996 		error = EINVAL;
997 	}
998 	unix_syscall_return(error);
999 	/* does not return */
1000 }
1001 
1002 #if OLD_SEMWAIT_SIGNAL
1003 /*
1004  * Returns:	0			Success
1005  *		EINTR
1006  *		ETIMEDOUT
1007  *		EINVAL
1008  *      EFAULT if timespec is NULL
1009  */
/*
 * Cancellation-point wrapper: check for a pending pthread cancel, then
 * forward to the _nocancel variant.  The two uap structures are assumed
 * layout-compatible, so the cast simply reuses the trapped arguments.
 */
int
__old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap,
    int32_t *retval)
{
	__pthread_testcancel(0);
	return __old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval);
}
1017 
/*
 * Wait on a POSIX condition semaphore, optionally with a timeout and an
 * atomic post of mutex_sem.  Legacy variant: the timeout is copied in
 * from user memory (uap->ts) rather than passed in registers.
 */
int
__old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
	kern_return_t kern_result;
	int error;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if (uap->timeout) {
		/* Copy in the timespec; its width depends on the process ABI. */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timespec ts64;
			error = copyin(uap->ts, &ts64, sizeof(ts64));
			ts.tv_sec = (user_time_t)ts64.tv_sec;
			ts.tv_nsec = (user_long_t)ts64.tv_nsec;
		} else {
			struct user32_timespec ts32;
			error = copyin(uap->ts, &ts32, sizeof(ts32));
			ts.tv_sec = ts32.tv_sec;
			ts.tv_nsec = ts32.tv_nsec;
		}

		if (error) {
			return error;
		}

		/*
		 * mach_timespec_t carries only 32 bits of seconds; clamp larger
		 * values and remember we did, so success can be converted into a
		 * simulated EINTR below instead of a spurious 0.
		 */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = (unsigned int)ts.tv_sec;
			then.tv_nsec = (clock_res_t)ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has already elapsed, use a zero timespec to bail out right away */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				/* Convert the absolute deadline into a relative wait. */
				then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec);
				then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec);
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		/* mutex_sem == 0 means a plain wait; otherwise wait+signal atomically. */
		if (uap->mutex_sem == 0) {
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		} else {
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		}
	} else {
		if (uap->mutex_sem == 0) {
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		} else {
			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
		}
	}

	if (kern_result == KERN_SUCCESS && !truncated_timeout) {
		return 0;
	} else if (kern_result == KERN_SUCCESS && truncated_timeout) {
		return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	} else if (kern_result == KERN_ABORTED) {
		return EINTR;
	} else if (kern_result == KERN_OPERATION_TIMED_OUT) {
		return ETIMEDOUT;
	} else {
		return EINVAL;
	}
}
1099 #endif /* OLD_SEMWAIT_SIGNAL*/
1100 
1101 /*
1102  * Returns:	0			Success
1103  *		EINTR
1104  *		ETIMEDOUT
1105  *		EINVAL
1106  *      EFAULT if timespec is NULL
1107  */
/*
 * Cancellation-point wrapper: check for a pending pthread cancel, then
 * forward to the _nocancel variant.  The two uap structures are assumed
 * layout-compatible, so the cast simply reuses the trapped arguments.
 */
int
__semwait_signal(proc_t p, struct __semwait_signal_args *uap,
    int32_t *retval)
{
	__pthread_testcancel(0);
	return __semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval);
}
1115 
/*
 * Wait on a POSIX condition semaphore, optionally with a timeout and an
 * atomic post of mutex_sem.  Unlike the __old_ variant the timeout comes
 * directly in the trap arguments (uap->tv_sec/tv_nsec), so no copyin.
 */
int
__semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap,
    __unused int32_t *retval)
{
	kern_return_t kern_result;
	mach_timespec_t then;
	struct timespec now;
	struct user_timespec ts;
	boolean_t truncated_timeout = FALSE;

	if (uap->timeout) {
		ts.tv_sec = (user_time_t)uap->tv_sec;
		ts.tv_nsec = uap->tv_nsec;

		/*
		 * mach_timespec_t carries only 32 bits of seconds; clamp larger
		 * values and remember we did, so success can be converted into a
		 * simulated EINTR below instead of a spurious 0.
		 */
		if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) {
			ts.tv_sec = 0xFFFFFFFF;
			ts.tv_nsec = 0;
			truncated_timeout = TRUE;
		}

		if (uap->relative) {
			then.tv_sec = (unsigned int)ts.tv_sec;
			then.tv_nsec = (clock_res_t)ts.tv_nsec;
		} else {
			nanotime(&now);

			/* if time has already elapsed, use a zero timespec to bail out right away */
			if (now.tv_sec == ts.tv_sec ?
			    now.tv_nsec > ts.tv_nsec :
			    now.tv_sec > ts.tv_sec) {
				then.tv_sec = 0;
				then.tv_nsec = 0;
			} else {
				/* Convert the absolute deadline into a relative wait. */
				then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec);
				then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec);
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		}

		/* mutex_sem == 0 means a plain wait; otherwise wait+signal atomically. */
		if (uap->mutex_sem == 0) {
			kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		} else {
			kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return);
		}
	} else {
		if (uap->mutex_sem == 0) {
			kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return);
		} else {
			kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return);
		}
	}

	if (kern_result == KERN_SUCCESS && !truncated_timeout) {
		return 0;
	} else if (kern_result == KERN_SUCCESS && truncated_timeout) {
		return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */
	} else if (kern_result == KERN_ABORTED) {
		return EINTR;
	} else if (kern_result == KERN_OPERATION_TIMED_OUT) {
		return ETIMEDOUT;
	} else {
		return EINVAL;
	}
}
1183 
1184 
/*
 * pthread_kill(): deliver signal uap->sig to the thread named by
 * uap->thread_port.  A signum of 0 only probes for existence/permission.
 * Returns ESRCH for an invalid port or a thread that doesn't take
 * signals, EINVAL for a bad signal number, ENOTSUP for workqueue
 * threads that have not opted in.
 */
int
__pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap,
    __unused int32_t *retval)
{
	thread_t target_act;
	int error = 0;
	int signum = uap->sig;
	struct uthread *uth;

	target_act = (thread_t)port_name_to_thread(uap->thread_port,
	    PORT_INTRANS_OPTIONS_NONE);

	if (target_act == THREAD_NULL) {
		return ESRCH;
	}
	if ((u_int)signum >= NSIG) {
		error = EINVAL;
		goto out;
	}

	uth = (struct uthread *)get_bsdthread_info(target_act);

	/* Threads marked UT_NO_SIGMASK do not participate in signal delivery. */
	if (uth->uu_flag & UT_NO_SIGMASK) {
		error = ESRCH;
		goto out;
	}

	/* Workqueue threads accept pthread_kill() only when explicitly allowed. */
	if ((thread_get_tag(target_act) & THREAD_TAG_WORKQUEUE) && !uth->uu_workq_pthread_kill_allowed) {
		error = ENOTSUP;
		goto out;
	}

	if (signum) {
		psignal_uthread(target_act, signum);
	}
out:
	/* Drop the reference taken by port_name_to_thread(). */
	thread_deallocate(target_act);
	return error;
}
1224 
1225 
1226 int
__pthread_sigmask(__unused proc_t p,struct __pthread_sigmask_args * uap,__unused int32_t * retval)1227 __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap,
1228     __unused int32_t *retval)
1229 {
1230 	user_addr_t set = uap->set;
1231 	user_addr_t oset = uap->oset;
1232 	sigset_t nset;
1233 	int error = 0;
1234 	struct uthread *ut;
1235 	sigset_t  oldset;
1236 
1237 	ut = current_uthread();
1238 	oldset = ut->uu_sigmask;
1239 
1240 	if (set == USER_ADDR_NULL) {
1241 		/* need only old mask */
1242 		goto out;
1243 	}
1244 
1245 	error = copyin(set, &nset, sizeof(sigset_t));
1246 	if (error) {
1247 		goto out;
1248 	}
1249 
1250 	switch (uap->how) {
1251 	case SIG_BLOCK:
1252 		ut->uu_sigmask |= (nset & ~sigcantmask);
1253 		break;
1254 
1255 	case SIG_UNBLOCK:
1256 		ut->uu_sigmask &= ~(nset);
1257 		signal_setast(current_thread());
1258 		break;
1259 
1260 	case SIG_SETMASK:
1261 		ut->uu_sigmask = (nset & ~sigcantmask);
1262 		signal_setast(current_thread());
1263 		break;
1264 
1265 	default:
1266 		error = EINVAL;
1267 	}
1268 out:
1269 	if (!error && oset != USER_ADDR_NULL) {
1270 		copyout(&oldset, oset, sizeof(sigset_t));
1271 	}
1272 
1273 	return error;
1274 }
1275 
1276 /*
1277  * Returns:	0			Success
1278  *		EINVAL
1279  *	copyin:EFAULT
1280  *	copyout:EFAULT
1281  */
/*
 * Cancellation-point wrapper for sigwait(): check for a pending pthread
 * cancel (conditioned on cancellation being enabled), then forward to
 * the _nocancel variant; the uap structures are assumed layout-compatible.
 */
int
__sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return __sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval);
}
1288 
/*
 * sigwait(): block until one of the signals in the user-supplied set is
 * pending on some thread of the process, then report its number via
 * uap->sig.  SIGKILL and SIGSTOP cannot be waited for (sigcantmask).
 */
int
__sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval)
{
	struct uthread *ut;
	struct uthread *uth;
	int error = 0;
	sigset_t mask;
	sigset_t siglist;
	sigset_t sigw = 0;
	int signum;

	ut = current_uthread();

	if (uap->set == USER_ADDR_NULL) {
		return EINVAL;
	}

	error = copyin(uap->set, &mask, sizeof(sigset_t));
	if (error) {
		return error;
	}

	/* Strip the unmaskable signals from the requested wait set. */
	siglist = (mask & ~sigcantmask);

	if (siglist == 0) {
		return EINVAL;
	}

	proc_lock(p);

	/* First check whether a wanted signal is already pending on any thread. */
	proc_signalstart(p, 1);
	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
		if ((sigw = uth->uu_siglist & siglist)) {
			break;
		}
	}
	proc_signalend(p, 1);

	if (sigw) {
		/* The signal was pending on a thread */
		goto sigwait1;
	}
	/*
	 * When returning from sigwait, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	uth = ut;               /* wait for it to be delivered to us */
	ut->uu_oldmask = ut->uu_sigmask;
	ut->uu_flag |= UT_SAS_OLDMASK;
	/* NOTE(review): siglist was already verified non-zero above, so this
	 * re-check appears unreachable. */
	if (siglist == (sigset_t)0) {
		proc_unlock(p);
		return EINVAL;
	}
	/* SIGKILL and SIGSTOP are not maskable as well */
	ut->uu_sigmask = ~(siglist | sigcantmask);
	ut->uu_sigwait = siglist;

	/* No Continuations for now */
	error =  msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0);

	if (error == ERESTART) {
		error = 0;
	}

	sigw = (ut->uu_sigwait & siglist);
	/* Restore the caller's signal mask now that the wait is over. */
	ut->uu_sigmask = ut->uu_oldmask;
	ut->uu_oldmask = 0;
	ut->uu_flag &= ~UT_SAS_OLDMASK;
sigwait1:
	ut->uu_sigwait = 0;
	if (!error) {
		signum = ffs((unsigned int)sigw);
		if (!signum) {
			panic("sigwait with no signal wakeup");
		}
		/* Clear the pending signal in the thread it was delivered */
		uth->uu_siglist &= ~(sigmask(signum));

#if CONFIG_DTRACE
		DTRACE_PROC2(signal__clear, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo));
#endif

		proc_unlock(p);
		if (uap->sig != USER_ADDR_NULL) {
			error = copyout(&signum, uap->sig, sizeof(int));
		}
	} else {
		proc_unlock(p);
	}

	return error;
}
1384 
/*
 * sigaltstack(): query and/or install the calling thread's alternate
 * signal stack.  The old configuration (uap->oss, if non-NULL) is copied
 * out before the new one (uap->nss, if non-NULL) is validated/installed.
 */
int
sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *retval)
{
	struct kern_sigaltstack ss;
	struct kern_sigaltstack *pstk;
	int error;
	struct uthread *uth;
	int onstack;

	uth = current_uthread();

	pstk = &uth->uu_sigstk;
	/* When no alternate stack is configured, report it as disabled. */
	if ((uth->uu_flag & UT_ALTSTACK) == 0) {
		uth->uu_sigstk.ss_flags |= SA_DISABLE;
	}
	onstack = pstk->ss_flags & SA_ONSTACK;
	if (uap->oss) {
		/* Copy out the current settings in the caller's ABI width. */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_sigaltstack ss64 = {};
			sigaltstack_kern_to_user64(pstk, &ss64);
			error = copyout(&ss64, uap->oss, sizeof(ss64));
		} else {
			struct user32_sigaltstack ss32 = {};
			sigaltstack_kern_to_user32(pstk, &ss32);
			error = copyout(&ss32, uap->oss, sizeof(ss32));
		}
		if (error) {
			return error;
		}
	}
	if (uap->nss == USER_ADDR_NULL) {
		return 0;
	}
	if (IS_64BIT_PROCESS(p)) {
		struct user64_sigaltstack ss64;
		error = copyin(uap->nss, &ss64, sizeof(ss64));
		sigaltstack_user64_to_kern(&ss64, &ss);
	} else {
		struct user32_sigaltstack ss32;
		error = copyin(uap->nss, &ss32, sizeof(ss32));
		sigaltstack_user32_to_kern(&ss32, &ss);
	}
	if (error) {
		return error;
	}
	/* SA_DISABLE is the only flag the caller is allowed to set. */
	if ((ss.ss_flags & ~SA_DISABLE) != 0) {
		return EINVAL;
	}

	if (ss.ss_flags & SA_DISABLE) {
		/* if we are here we are not in the signal handler; so no need to check */
		if (uth->uu_sigstk.ss_flags & SA_ONSTACK) {
			return EINVAL;
		}
		uth->uu_flag &= ~UT_ALTSTACK;
		uth->uu_sigstk.ss_flags = ss.ss_flags;
		return 0;
	}
	/* Cannot replace the stack we are currently executing on. */
	if (onstack) {
		return EPERM;
	}
/* The older stacksize was 8K, enforce that one so no compat problems */
#define OLDMINSIGSTKSZ 8*1024
	if (ss.ss_size < OLDMINSIGSTKSZ) {
		return ENOMEM;
	}
	uth->uu_flag |= UT_ALTSTACK;
	uth->uu_sigstk = ss;
	return 0;
}
1455 
1456 int
kill(proc_t cp,struct kill_args * uap,__unused int32_t * retval)1457 kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval)
1458 {
1459 	proc_t p;
1460 	kauth_cred_t uc = kauth_cred_get();
1461 	int posix = uap->posix;         /* !0 if posix behaviour desired */
1462 
1463 	AUDIT_ARG(pid, uap->pid);
1464 	AUDIT_ARG(signum, uap->signum);
1465 
1466 	if ((u_int)uap->signum >= NSIG) {
1467 		return EINVAL;
1468 	}
1469 	if (uap->pid > 0) {
1470 		/* kill single process */
1471 		if ((p = proc_find(uap->pid)) == NULL) {
1472 			if ((p = pzfind(uap->pid)) != NULL) {
1473 				/*
1474 				 * POSIX 1003.1-2001 requires returning success when killing a
1475 				 * zombie; see Rationale for kill(2).
1476 				 */
1477 				return 0;
1478 			}
1479 			return ESRCH;
1480 		}
1481 		AUDIT_ARG(process, p);
1482 		if (!cansignal(cp, uc, p, uap->signum)) {
1483 			proc_rele(p);
1484 			return EPERM;
1485 		}
1486 		if (uap->signum) {
1487 			psignal(p, uap->signum);
1488 		}
1489 		proc_rele(p);
1490 		return 0;
1491 	}
1492 	switch (uap->pid) {
1493 	case -1: /* broadcast signal */
1494 		return killpg1(cp, uap->signum, 0, 1, posix);
1495 	case 0: /* signal own process group */
1496 		return killpg1(cp, uap->signum, 0, 0, posix);
1497 	default: /* negative explicit process group */
1498 		return killpg1(cp, uap->signum, -(uap->pid), 0, posix);
1499 	}
1500 	/* NOTREACHED */
1501 }
1502 
/*
 * Build an os_reason_t from a userspace-supplied namespace/code, an
 * optional payload buffer and an optional reason string.  Once
 * os_reason_create() succeeds this never fails outright: copyin or
 * buffer-allocation problems are recorded in osr_flags
 * (OS_REASON_FLAG_FAILED_DATA_COPYIN / OS_REASON_FLAG_BAD_PARAMS)
 * and the (possibly empty) reason is still returned.
 */
os_reason_t
build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
	os_reason_t exit_reason = OS_REASON_NULL;

	int error = 0;
	int num_items_to_copy = 0;
	uint32_t user_data_to_copy = 0;
	char *reason_user_desc = NULL;
	size_t reason_user_desc_len = 0;

	exit_reason = os_reason_create(reason_namespace, reason_code);
	if (exit_reason == OS_REASON_NULL) {
		printf("build_userspace_exit_reason: failed to allocate exit reason\n");
		return exit_reason;
	}

	exit_reason->osr_flags |= OS_REASON_FLAG_FROM_USERSPACE;

	/*
	 * Only apply flags that are allowed to be passed from userspace.
	 */
	exit_reason->osr_flags |= (reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER);
	if ((reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER) != reason_flags) {
		printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n",
		    reason_flags, reason_namespace, reason_code);
	}

	if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) {
		exit_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
	}

	/* First pass: validate inputs and size the kcdata buffer. */
	if (payload != USER_ADDR_NULL) {
		if (payload_size == 0) {
			printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n",
			    reason_namespace);
			exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS;
			payload = USER_ADDR_NULL;
		} else {
			num_items_to_copy++;

			/* Oversized payloads are truncated, not rejected. */
			if (payload_size > EXIT_REASON_PAYLOAD_MAX_LEN) {
				exit_reason->osr_flags |= OS_REASON_FLAG_PAYLOAD_TRUNCATED;
				payload_size = EXIT_REASON_PAYLOAD_MAX_LEN;
			}

			user_data_to_copy += payload_size;
		}
	}

	if (reason_string != USER_ADDR_NULL) {
		reason_user_desc = (char *)kalloc_data(EXIT_REASON_USER_DESC_MAX_LEN, Z_WAITOK);

		if (reason_user_desc != NULL) {
			error = copyinstr(reason_string, (void *) reason_user_desc,
			    EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len);

			if (error == 0) {
				num_items_to_copy++;
				user_data_to_copy += reason_user_desc_len;
			} else if (error == ENAMETOOLONG) {
				/* Keep the truncated string, forcing NUL termination. */
				num_items_to_copy++;
				reason_user_desc[EXIT_REASON_USER_DESC_MAX_LEN - 1] = '\0';
				user_data_to_copy += reason_user_desc_len;
			} else {
				exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
				kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
				reason_user_desc = NULL;
				reason_user_desc_len = 0;
			}
		}
	}

	/* Second pass: allocate the kcdata buffer and copy the items in. */
	if (num_items_to_copy != 0) {
		uint32_t reason_buffer_size_estimate = 0;
		mach_vm_address_t data_addr = 0;

		reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(num_items_to_copy, user_data_to_copy);

		error = os_reason_alloc_buffer(exit_reason, reason_buffer_size_estimate);
		if (error != 0) {
			printf("build_userspace_exit_reason: failed to allocate signal reason buffer\n");
			goto out_failed_copyin;
		}

		if (reason_user_desc != NULL && reason_user_desc_len != 0) {
			if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
			    EXIT_REASON_USER_DESC,
			    (uint32_t)reason_user_desc_len,
			    &data_addr)) {
				kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr,
				    reason_user_desc, (uint32_t)reason_user_desc_len);
			} else {
				printf("build_userspace_exit_reason: failed to allocate space for reason string\n");
				goto out_failed_copyin;
			}
		}

		if (payload != USER_ADDR_NULL) {
			if (KERN_SUCCESS ==
			    kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor,
			    EXIT_REASON_USER_PAYLOAD,
			    payload_size,
			    &data_addr)) {
				error = copyin(payload, (void *) data_addr, payload_size);
				if (error) {
					printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", error);
					goto out_failed_copyin;
				}
			} else {
				printf("build_userspace_exit_reason: failed to allocate space for payload data\n");
				goto out_failed_copyin;
			}
		}
	}

	if (reason_user_desc != NULL) {
		kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
		reason_user_desc = NULL;
		reason_user_desc_len = 0;
	}

	return exit_reason;

out_failed_copyin:

	if (reason_user_desc != NULL) {
		kfree_data(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN);
		reason_user_desc = NULL;
		reason_user_desc_len = 0;
	}

	/* Return the reason with an empty buffer, flagged as incomplete. */
	exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN;
	os_reason_alloc_buffer(exit_reason, 0);
	return exit_reason;
}
1640 
/*
 * Deliver SIGKILL to target_pid together with a userspace-built exit
 * reason.  The caller must have permission to signal the target; when
 * the target is the caller itself the kill is pended on the current
 * thread, which therefore never returns to userspace.
 */
static int
terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace,
    uint64_t reason_code, user_addr_t payload, uint32_t payload_size,
    user_addr_t reason_string, uint64_t reason_flags)
{
	proc_t target_proc = PROC_NULL;
	kauth_cred_t cur_cred = kauth_cred_get();

	os_reason_t signal_reason = OS_REASON_NULL;

	AUDIT_ARG(pid, target_pid);
	if ((target_pid <= 0)) {
		return EINVAL;
	}

	target_proc = proc_find(target_pid);
	if (target_proc == PROC_NULL) {
		return ESRCH;
	}

	AUDIT_ARG(process, target_proc);

	if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL)) {
		proc_rele(target_proc);
		return EPERM;
	}

	if (target_pid != proc_getpid(cur_proc)) {
		/*
		 * FLAG_ABORT should only be set on terminate_with_reason(getpid()) that
		 * was a fallback from an unsuccessful abort_with_reason(). In that case
		 * caller's pid matches the target one. Otherwise remove the flag.
		 */
		reason_flags &= ~((typeof(reason_flags))OS_REASON_FLAG_ABORT);
	}

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
	    proc_getpid(target_proc), reason_namespace,
	    reason_code, 0, 0);

	signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
	    reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID));

	if (target_pid == proc_getpid(cur_proc)) {
		/*
		 * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
		 * return if the thread and/or task are already terminating. Either way, the
		 * current thread won't return to userspace.
		 */
		psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason);
	} else {
		psignal_with_reason(target_proc, SIGKILL, signal_reason);
	}

	proc_rele(target_proc);

	return 0;
}
1699 
/*
 * terminate_with_payload system call: SIGKILL the target pid with a
 * userspace-supplied exit reason.  Thin shim over the _internal helper.
 */
int
terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args,
    __unused int32_t *retval)
{
	return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload,
	           args->payload_size, args->reason_string, args->reason_flags);
}
1707 
1708 static int
killpg1_allfilt(proc_t p,void * arg)1709 killpg1_allfilt(proc_t p, void * arg)
1710 {
1711 	struct killpg1_filtargs * kfargp = (struct killpg1_filtargs *)arg;
1712 
1713 	/*
1714 	 * Don't signal initproc, a system process, or the current process if POSIX
1715 	 * isn't specified.
1716 	 */
1717 	return proc_getpid(p) > 1 && !(p->p_flag & P_SYSTEM) &&
1718 	       (kfargp->posix ? true : p != kfargp->curproc);
1719 }
1720 
1721 static int
killpg1_callback(proc_t p,void * arg)1722 killpg1_callback(proc_t p, void *arg)
1723 {
1724 	struct killpg1_iterargs *kargp = (struct killpg1_iterargs *)arg;
1725 	int signum = kargp->signum;
1726 
1727 	if (proc_list_exited(p)) {
1728 		/*
1729 		 * Count zombies as found for the purposes of signalling, since POSIX
1730 		 * 1003.1-2001 sees signalling zombies as successful.  If killpg(2) or
1731 		 * kill(2) with pid -1 only finds zombies that can be signalled, it
1732 		 * shouldn't return ESRCH.  See the Rationale for kill(2).
1733 		 *
1734 		 * Don't call into MAC -- it's not expecting signal checks for exited
1735 		 * processes.
1736 		 */
1737 		if (cansignal_nomac(kargp->curproc, kargp->uc, p, signum)) {
1738 			kargp->nfound++;
1739 		}
1740 	} else if (cansignal(kargp->curproc, kargp->uc, p, signum)) {
1741 		kargp->nfound++;
1742 
1743 		if (signum != 0) {
1744 			psignal(p, signum);
1745 		}
1746 	}
1747 
1748 	return PROC_RETURNED;
1749 }
1750 
1751 /*
1752  * Common code for kill process group/broadcast kill.
1753  */
int
killpg1(proc_t curproc, int signum, int pgid, int all, int posix)
{
	kauth_cred_t uc;
	struct pgrp *pgrp;
	int error = 0;

	/* Hold a credential reference for the duration of the iteration. */
	uc = kauth_cred_proc_ref(curproc);
	struct killpg1_iterargs karg = {
		.curproc = curproc, .uc = uc, .nfound = 0, .signum = signum
	};

	if (all) {
		/*
		 * Broadcast to all processes that the user can signal (pid was -1).
		 */
		struct killpg1_filtargs kfarg = {
			.posix = posix, .curproc = curproc
		};
		proc_iterate(PROC_ALLPROCLIST | PROC_ZOMBPROCLIST, killpg1_callback,
		    &karg, killpg1_allfilt, &kfarg);
	} else {
		if (pgid == 0) {
			/*
			 * Send to the current process' process group.
			 */
			pgrp = proc_pgrp(curproc, NULL);
		} else {
			pgrp = pgrp_find(pgid);
			if (pgrp == NULL) {
				error = ESRCH;
				goto out;
			}
		}

		/* Skip the kernel, launchd, system processes and zombies. */
		pgrp_iterate(pgrp, killpg1_callback, &karg, ^bool (proc_t p) {
			if (p == kernproc || p == initproc) {
			        return false;
			}
			/* XXX shouldn't this allow signalling zombies? */
			return !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB;
		});
		pgrp_rele(pgrp);
	}
	/* POSIX reports EPERM when targets existed but none were signalable. */
	error = (karg.nfound > 0 ? 0 : (posix ? EPERM : ESRCH));
out:
	kauth_cred_unref(&uc);
	return error;
}
1803 
1804 /*
1805  * Send a signal to a process group.
1806  */
1807 void
gsignal(int pgid,int signum)1808 gsignal(int pgid, int signum)
1809 {
1810 	struct pgrp *pgrp;
1811 
1812 	if (pgid && (pgrp = pgrp_find(pgid))) {
1813 		pgsignal(pgrp, signum, 0);
1814 		pgrp_rele(pgrp);
1815 	}
1816 }
1817 
1818 /*
1819  * Send a signal to a process group.  If checkctty is 1,
1820  * limit to members which have a controlling terminal.
1821  */
1822 
1823 static int
pgsignal_callback(proc_t p,void * arg)1824 pgsignal_callback(proc_t p, void * arg)
1825 {
1826 	int  signum = *(int*)arg;
1827 
1828 	psignal(p, signum);
1829 	return PROC_RETURNED;
1830 }
1831 
1832 void
pgsignal(struct pgrp * pgrp,int signum,int checkctty)1833 pgsignal(struct pgrp *pgrp, int signum, int checkctty)
1834 {
1835 	if (pgrp == PGRP_NULL) {
1836 		return;
1837 	}
1838 
1839 	bool (^filter)(proc_t) = ^bool (proc_t p) {
1840 		return p->p_flag & P_CONTROLT;
1841 	};
1842 
1843 	pgrp_iterate(pgrp, pgsignal_callback, &signum, checkctty ? filter : NULL);
1844 }
1845 
1846 
1847 void
tty_pgsignal_locked(struct tty * tp,int signum,int checkctty)1848 tty_pgsignal_locked(struct tty *tp, int signum, int checkctty)
1849 {
1850 	struct pgrp * pg;
1851 
1852 	pg = tty_pgrp_locked(tp);
1853 	if (pg != PGRP_NULL) {
1854 		tty_unlock(tp);
1855 		pgsignal(pg, signum, checkctty);
1856 		pgrp_rele(pg);
1857 		tty_lock(tp);
1858 	}
1859 }
1860 /*
1861  * Send a signal caused by a trap to a specific thread.
1862  */
void
threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
{
	struct uthread *uth;
	struct task * sig_task;
	proc_t p;
	int mask;

	if ((u_int)signum >= NSIG || signum == 0) {
		return;
	}

	/* Only the signals in threadmask may target a specific thread. */
	mask = sigmask(signum);
	if ((mask & threadmask) == 0) {
		return;
	}
	sig_task = get_threadtask(sig_actthread);
	p = (proc_t)(get_bsdtask_info(sig_task));

	uth = get_bsdthread_info(sig_actthread);

	proc_lock(p);
	/* Ignored signals are dropped unless the process is being traced. */
	if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) {
		proc_unlock(p);
		return;
	}

	/* Record the pending signal and exception code on the thread. */
	uth->uu_siglist |= mask;
	uth->uu_code = code;

	/* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */
	if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask)
	    && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) &&
	    !(mask & stopsigmask) && !(mask & contsigmask)) {
		if (uth->uu_exit_reason == OS_REASON_NULL) {
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
			    proc_getpid(p), OS_REASON_SIGNAL, signum, 0, 0);

			os_reason_t signal_reason = build_signal_reason(signum, "exc handler");

			set_thread_exit_reason(sig_actthread, signal_reason, TRUE);

			/* We dropped/consumed the reference in set_thread_exit_reason() */
			signal_reason = OS_REASON_NULL;
		}
	}

	proc_unlock(p);

	/* mark on process as well */
	signal_setast(sig_actthread);
}
1915 
1916 /* Called with proc locked */
static void
set_thread_extra_flags(struct uthread *uth, os_reason_t reason)
{
	extern int vm_shared_region_reslide_restrict;
	assert(uth != NULL);
	/*
	 * Check whether the userland fault address falls within the shared
	 * region and notify userland if so. This allows launchd to apply
	 * special policies around this fault type.
	 */
	if (reason->osr_namespace == OS_REASON_SIGNAL &&
	    reason->osr_code == SIGSEGV) {
		/* For SIGSEGV, uu_subcode carries the faulting address. */
		mach_vm_address_t fault_address = uth->uu_subcode;

#if defined(__arm64__)
		/* Address is in userland, so we hard clear TBI bits to 0 here */
		fault_address = tbi_clear(fault_address);
#endif /* __arm64__ */

		if (fault_address >= SHARED_REGION_BASE &&
		    fault_address <= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
			/*
			 * Always report whether the fault happened within the shared cache
			 * region, but only stale the slide if the resliding is extended
			 * to all processes or if the process faulting is a platform one.
			 */
			reason->osr_flags |= OS_REASON_FLAG_SHAREDREGION_FAULT;

#if __has_feature(ptrauth_calls)
			if (!vm_shared_region_reslide_restrict || csproc_get_platform_binary(current_proc())) {
				vm_shared_region_reslide_stale();
			}
#endif /* __has_feature(ptrauth_calls) */
		}
	}
}
1953 
/*
 * Attach `reason` to the uthread backing thread `th`, consuming the
 * caller's reference: if the thread already has an exit reason, the new
 * one is freed instead.  Takes the proc lock itself unless proc_locked
 * indicates the caller already holds it.
 */
void
set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked)
{
	struct uthread *targ_uth = get_bsdthread_info(th);
	struct task *targ_task = NULL;
	proc_t targ_proc = NULL;

	os_reason_t exit_reason = (os_reason_t)reason;

	if (exit_reason == OS_REASON_NULL) {
		return;
	}

	if (!proc_locked) {
		targ_task = get_threadtask(th);
		targ_proc = (proc_t)(get_bsdtask_info(targ_task));

		proc_lock(targ_proc);
	}

	set_thread_extra_flags(targ_uth, exit_reason);

	/* First reason wins; later ones are dropped. */
	if (targ_uth->uu_exit_reason == OS_REASON_NULL) {
		targ_uth->uu_exit_reason = exit_reason;
	} else {
		/* The caller expects that we drop a reference on the exit reason */
		os_reason_free(exit_reason);
	}

	if (!proc_locked) {
		assert(targ_proc != NULL);
		proc_unlock(targ_proc);
	}
}
1988 
1989 /*
1990  * get_signalthread
1991  *
1992  * Picks an appropriate thread from a process to target with a signal.
1993  *
1994  * Called with proc locked.
1995  * Returns thread with BSD ast set.
1996  *
1997  * We attempt to deliver a proc-wide signal to the first thread in the task.
1998  * This allows single threaded applications which use signals to
1999  * be able to be linked with multithreaded libraries.
2000  */
2001 static kern_return_t
get_signalthread(proc_t p,int signum,thread_t * thr)2002 get_signalthread(proc_t p, int signum, thread_t * thr)
2003 {
2004 	struct uthread *uth;
2005 	sigset_t mask = sigmask(signum);
2006 	bool skip_wqthreads = true;
2007 
2008 	*thr = THREAD_NULL;
2009 
2010 
2011 again:
2012 	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
2013 		if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2014 		    (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) {
2015 			thread_t th = get_machthread(uth);
2016 			if (skip_wqthreads && (thread_get_tag(th) & THREAD_TAG_WORKQUEUE)) {
2017 				/* Workqueue threads may be parked in the kernel unable to
2018 				 * deliver signals for an extended period of time, so skip them
2019 				 * in favor of pthreads in a first pass. (rdar://50054475). */
2020 			} else if (check_actforsig(p->task, th, 1) == KERN_SUCCESS) {
2021 				*thr = th;
2022 				return KERN_SUCCESS;
2023 			}
2024 		}
2025 	}
2026 	if (skip_wqthreads) {
2027 		skip_wqthreads = false;
2028 		goto again;
2029 	}
2030 	if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) {
2031 		return KERN_SUCCESS;
2032 	}
2033 
2034 	return KERN_FAILURE;
2035 }
2036 
/*
 * build_signal_reason
 *
 * Allocate an os_reason_t in the OS_REASON_SIGNAL namespace with code set
 * to signum, and record the sending process's pid and name in its kcdata
 * buffer for later inspection.
 *
 * Parameters:	signum		signal number being delivered
 *		procname	optional sender-name override; when NULL the
 *				current proc's p_name (or "unknown" if that is
 *				empty) is recorded instead
 *
 * Returns:	a referenced os_reason_t, or OS_REASON_NULL if the reason
 *		structure itself could not be allocated.  If only the kcdata
 *		buffer allocation fails, the reason is still returned, just
 *		without the pid/procname payload.
 */
static os_reason_t
build_signal_reason(int signum, const char *procname)
{
	os_reason_t signal_reason = OS_REASON_NULL;
	proc_t sender_proc = current_proc();
	uint32_t reason_buffer_size_estimate = 0, proc_name_length = 0;
	const char *default_sender_procname = "unknown";
	mach_vm_address_t data_addr;
	int ret;

	signal_reason = os_reason_create(OS_REASON_SIGNAL, signum);
	if (signal_reason == OS_REASON_NULL) {
		printf("build_signal_reason: unable to allocate signal reason structure.\n");
		return signal_reason;
	}

	/* Two kcdata entries: the sender's name and its pid. */
	reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
	    sizeof(pid_t));

	ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate);
	if (ret != 0) {
		printf("build_signal_reason: unable to allocate signal reason buffer.\n");
		return signal_reason;
	}

	if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID,
	    sizeof(pid_t), &data_addr)) {
		pid_t pid = proc_getpid(sender_proc);
		kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &pid, sizeof(pid));
	} else {
		printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n");
	}

	proc_name_length = sizeof(sender_proc->p_name);
	if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME,
	    proc_name_length, &data_addr)) {
		if (procname) {
			/* Copy the override name, forcing NUL termination (strncpy
			 * alone does not guarantee it). */
			char truncated_procname[proc_name_length];
			strncpy((char *) &truncated_procname, procname, proc_name_length);
			truncated_procname[proc_name_length - 1] = '\0';

			/*
			 * NOTE(review): this path copies strlen() bytes (no trailing NUL)
			 * while the default-name path below copies strlen()+1 — confirm
			 * consumers tolerate both before normalizing.
			 */
			kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname,
			    (uint32_t)strlen((char *) &truncated_procname));
		} else if (*sender_proc->p_name) {
			kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name,
			    sizeof(sender_proc->p_name));
		} else {
			kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname,
			    (uint32_t)strlen(default_sender_procname) + 1);
		}
	} else {
		printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n");
	}

	return signal_reason;
}
2093 
2094 /*
2095  * Send the signal to the process.  If the signal has an action, the action
2096  * is usually performed by the target process rather than the caller; we add
2097  * the signal to the set of pending signals for the process.
2098  *
2099  * Always drops a reference on a signal_reason if one is provided, whether via
2100  * passing it to a thread or deallocating directly.
2101  *
2102  * Exceptions:
2103  *   o When a stop signal is sent to a sleeping process that takes the
2104  *     default action, the process is stopped without awakening it.
2105  *   o SIGCONT restarts stopped processes (or puts them back to sleep)
2106  *     regardless of the signal action (eg, blocked or ignored).
2107  *
2108  * Other ignored signals are discarded immediately.
2109  */
2110 static void
psignal_internal(proc_t p,task_t task,thread_t thread,int flavor,int signum,os_reason_t signal_reason)2111 psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, os_reason_t signal_reason)
2112 {
2113 	int prop;
2114 	user_addr_t action = USER_ADDR_NULL;
2115 	proc_t                  sig_proc;
2116 	thread_t                sig_thread;
2117 	task_t                  sig_task;
2118 	int                     mask;
2119 	struct uthread          *uth;
2120 	kern_return_t           kret;
2121 	uid_t                   r_uid;
2122 	proc_t                  pp;
2123 	kauth_cred_t            my_cred;
2124 	char                    *launchd_exit_reason_desc = NULL;
2125 	boolean_t               update_thread_policy = FALSE;
2126 
2127 	if ((u_int)signum >= NSIG || signum == 0) {
2128 		panic("psignal: bad signal number %d", signum);
2129 	}
2130 
2131 	mask = sigmask(signum);
2132 	prop = sigprop[signum];
2133 
2134 #if SIGNAL_DEBUG
2135 	if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) {
2136 		ram_printf(3);
2137 	}
2138 #endif /* SIGNAL_DEBUG */
2139 
2140 	/* catch unexpected initproc kills early for easier debuggging */
2141 	if (signum == SIGKILL && p == initproc) {
2142 		if (signal_reason == NULL) {
2143 			panic_plain("unexpected SIGKILL of %s %s (no reason provided)",
2144 			    (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2145 			    ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""));
2146 		} else {
2147 			launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(signal_reason);
2148 			panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
2149 			    (p->p_name[0] != '\0' ? p->p_name : "initproc"),
2150 			    ((proc_getcsflags(p) & CS_KILLED) ? "(CS_KILLED)" : ""),
2151 			    signal_reason->osr_namespace, signal_reason->osr_code,
2152 			    launchd_exit_reason_desc ? launchd_exit_reason_desc : "none");
2153 		}
2154 	}
2155 
2156 	/*
2157 	 *	We will need the task pointer later.  Grab it now to
2158 	 *	check for a zombie process.  Also don't send signals
2159 	 *	to kernel internal tasks.
2160 	 */
2161 	if (flavor & PSIG_VFORK) {
2162 		sig_task = task;
2163 		sig_thread = thread;
2164 		sig_proc = p;
2165 	} else if (flavor & PSIG_THREAD) {
2166 		sig_task = get_threadtask(thread);
2167 		sig_thread = thread;
2168 		sig_proc = (proc_t)get_bsdtask_info(sig_task);
2169 	} else if (flavor & PSIG_TRY_THREAD) {
2170 		assert((thread == current_thread()) && (p == current_proc()));
2171 		sig_task = p->task;
2172 		sig_thread = thread;
2173 		sig_proc = p;
2174 	} else {
2175 		sig_task = p->task;
2176 		sig_thread = THREAD_NULL;
2177 		sig_proc = p;
2178 	}
2179 
2180 	if ((sig_task == TASK_NULL) || is_kerneltask(sig_task)) {
2181 		os_reason_free(signal_reason);
2182 		return;
2183 	}
2184 
2185 	if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) {
2186 		proc_knote(sig_proc, NOTE_SIGNAL | signum);
2187 	}
2188 
2189 	if ((flavor & PSIG_LOCKED) == 0) {
2190 		proc_signalstart(sig_proc, 0);
2191 	}
2192 
2193 	/* Don't send signals to a process that has ignored them. */
2194 	if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) {
2195 		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2196 		goto sigout_unlocked;
2197 	}
2198 
2199 	/*
2200 	 * The proc_lock prevents the targeted thread from being deallocated
2201 	 * or handling the signal until we're done signaling it.
2202 	 *
2203 	 * Once the proc_lock is dropped, we have no guarantee the thread or uthread exists anymore.
2204 	 *
2205 	 * XXX: What if the thread goes inactive after the thread passes bsd ast point?
2206 	 */
2207 	proc_lock(sig_proc);
2208 
2209 	/*
2210 	 * Don't send signals to a process which has already exited and thus
2211 	 * committed to a particular p_xstat exit code.
2212 	 * Additionally, don't abort the process running 'reboot'.
2213 	 */
2214 	if (ISSET(sig_proc->p_flag, P_REBOOT) || ISSET(sig_proc->p_lflag, P_LEXIT)) {
2215 		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2216 		goto sigout_locked;
2217 	}
2218 
2219 	if (flavor & PSIG_VFORK) {
2220 		action = SIG_DFL;
2221 		act_set_astbsd(sig_thread);
2222 		kret = KERN_SUCCESS;
2223 	} else if (flavor & PSIG_TRY_THREAD) {
2224 		uth = get_bsdthread_info(sig_thread);
2225 		if (((uth->uu_flag & UT_NO_SIGMASK) == 0) &&
2226 		    (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) &&
2227 		    ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) {
2228 			/* deliver to specified thread */
2229 		} else {
2230 			/* deliver to any willing thread */
2231 			kret = get_signalthread(sig_proc, signum, &sig_thread);
2232 		}
2233 	} else if (flavor & PSIG_THREAD) {
2234 		/* If successful return with ast set */
2235 		kret = check_actforsig(sig_task, sig_thread, 1);
2236 	} else {
2237 		/* If successful return with ast set */
2238 		kret = get_signalthread(sig_proc, signum, &sig_thread);
2239 	}
2240 
2241 	if (kret != KERN_SUCCESS) {
2242 		DTRACE_PROC3(signal__discard, thread_t, sig_thread, proc_t, sig_proc, int, signum);
2243 		proc_unlock(sig_proc);
2244 		goto sigout_unlocked;
2245 	}
2246 
2247 	uth = get_bsdthread_info(sig_thread);
2248 
2249 	/*
2250 	 * If proc is traced, always give parent a chance.
2251 	 */
2252 
2253 	if ((flavor & PSIG_VFORK) == 0) {
2254 		if (sig_proc->p_lflag & P_LTRACED) {
2255 			action = SIG_DFL;
2256 		} else {
2257 			/*
2258 			 * If the signal is being ignored,
2259 			 * then we forget about it immediately.
2260 			 * (Note: we don't set SIGCONT in p_sigignore,
2261 			 * and if it is set to SIG_IGN,
2262 			 * action will be SIG_DFL here.)
2263 			 */
2264 			if (sig_proc->p_sigignore & mask) {
2265 				goto sigout_locked;
2266 			}
2267 
2268 			if (uth->uu_sigwait & mask) {
2269 				action = KERN_SIG_WAIT;
2270 			} else if (uth->uu_sigmask & mask) {
2271 				action = KERN_SIG_HOLD;
2272 			} else if (sig_proc->p_sigcatch & mask) {
2273 				action = KERN_SIG_CATCH;
2274 			} else {
2275 				action = SIG_DFL;
2276 			}
2277 		}
2278 	}
2279 
2280 	/* TODO: p_nice isn't hooked up to the scheduler... */
2281 	if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
2282 	    (sig_proc->p_lflag & P_LTRACED) == 0) {
2283 		sig_proc->p_nice = NZERO;
2284 	}
2285 
2286 	if (prop & SA_CONT) {
2287 		uth->uu_siglist &= ~stopsigmask;
2288 	}
2289 
2290 	if (prop & SA_STOP) {
2291 		struct pgrp *pg;
2292 		/*
2293 		 * If sending a tty stop signal to a member of an orphaned
2294 		 * process group, discard the signal here if the action
2295 		 * is default; don't stop the process below if sleeping,
2296 		 * and don't clear any pending SIGCONT.
2297 		 */
2298 		pg = proc_pgrp(sig_proc, NULL);
2299 		if (prop & SA_TTYSTOP && pg->pg_jobc == 0 &&
2300 		    action == SIG_DFL) {
2301 			pgrp_rele(pg);
2302 			goto sigout_locked;
2303 		}
2304 		pgrp_rele(pg);
2305 		uth->uu_siglist &= ~contsigmask;
2306 	}
2307 
2308 	uth->uu_siglist |= mask;
2309 
2310 	/*
2311 	 * Defer further processing for signals which are held,
2312 	 * except that stopped processes must be continued by SIGCONT.
2313 	 */
2314 	if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) {
2315 		goto sigout_locked;
2316 	}
2317 
2318 	/*
2319 	 *	SIGKILL priority twiddling moved here from above because
2320 	 *	it needs sig_thread.  Could merge it into large switch
2321 	 *	below if we didn't care about priority for tracing
2322 	 *	as SIGKILL's action is always SIG_DFL.
2323 	 *
2324 	 *	TODO: p_nice isn't hooked up to the scheduler...
2325 	 */
2326 	if ((signum == SIGKILL) && (sig_proc->p_nice > NZERO)) {
2327 		sig_proc->p_nice = NZERO;
2328 	}
2329 
2330 	/*
2331 	 *	Process is traced - wake it up (if not already
2332 	 *	stopped) so that it can discover the signal in
2333 	 *	issig() and stop for the parent.
2334 	 */
2335 	if (sig_proc->p_lflag & P_LTRACED) {
2336 		if (sig_proc->p_stat != SSTOP) {
2337 			goto runlocked;
2338 		} else {
2339 			goto sigout_locked;
2340 		}
2341 	}
2342 
2343 	if ((flavor & PSIG_VFORK) != 0) {
2344 		goto runlocked;
2345 	}
2346 
2347 	if (action == KERN_SIG_WAIT) {
2348 #if CONFIG_DTRACE
2349 		/*
2350 		 * DTrace proc signal-clear returns a siginfo_t. Collect the needed info.
2351 		 */
2352 		r_uid = kauth_getruid(); /* per thread credential; protected by our thread context */
2353 
2354 		bzero((caddr_t)&(uth->t_dtrace_siginfo), sizeof(uth->t_dtrace_siginfo));
2355 
2356 		uth->t_dtrace_siginfo.si_signo = signum;
2357 		uth->t_dtrace_siginfo.si_pid = proc_getpid(current_proc());
2358 		uth->t_dtrace_siginfo.si_status = W_EXITCODE(signum, 0);
2359 		uth->t_dtrace_siginfo.si_uid = r_uid;
2360 		uth->t_dtrace_siginfo.si_code = 0;
2361 #endif
2362 		uth->uu_sigwait = mask;
2363 		uth->uu_siglist &= ~mask;
2364 		wakeup(&uth->uu_sigwait);
2365 		/* if it is SIGCONT resume whole process */
2366 		if (prop & SA_CONT) {
2367 			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2368 			sig_proc->p_contproc = proc_getpid(current_proc());
2369 			(void) task_resume_internal(sig_task);
2370 		}
2371 		goto sigout_locked;
2372 	}
2373 
2374 	if (action != SIG_DFL) {
2375 		/*
2376 		 *	User wants to catch the signal.
2377 		 *	Wake up the thread, but don't un-suspend it
2378 		 *	(except for SIGCONT).
2379 		 */
2380 		if (prop & SA_CONT) {
2381 			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2382 			(void) task_resume_internal(sig_task);
2383 			sig_proc->p_stat = SRUN;
2384 		} else if (sig_proc->p_stat == SSTOP) {
2385 			goto sigout_locked;
2386 		}
2387 		/*
2388 		 * Fill out siginfo structure information to pass to the
2389 		 * signalled process/thread sigaction handler, when it
2390 		 * wakes up.  si_code is 0 because this is an ordinary
2391 		 * signal, not a SIGCHLD, and so si_status is the signal
2392 		 * number itself, instead of the child process exit status.
2393 		 * We shift this left because it will be shifted right before
2394 		 * it is passed to user space.  kind of ugly to use W_EXITCODE
2395 		 * this way, but it beats defining a new macro.
2396 		 *
2397 		 * Note:	Avoid the SIGCHLD recursion case!
2398 		 */
2399 		if (signum != SIGCHLD) {
2400 			r_uid = kauth_getruid();
2401 
2402 			sig_proc->si_pid = proc_getpid(current_proc());
2403 			sig_proc->si_status = W_EXITCODE(signum, 0);
2404 			sig_proc->si_uid = r_uid;
2405 			sig_proc->si_code = 0;
2406 		}
2407 
2408 		goto runlocked;
2409 	} else {
2410 		/*	Default action - varies */
2411 		if (mask & stopsigmask) {
2412 			assert(signal_reason == NULL);
2413 			/*
2414 			 * These are the signals which by default
2415 			 * stop a process.
2416 			 *
2417 			 * Don't clog system with children of init
2418 			 * stopped from the keyboard.
2419 			 */
2420 			if (!(prop & SA_STOP) && sig_proc->p_pptr == initproc) {
2421 				uth->uu_siglist &= ~mask;
2422 				proc_unlock(sig_proc);
2423 				/* siglock still locked, proc_lock not locked */
2424 				psignal_locked(sig_proc, SIGKILL);
2425 				goto sigout_unlocked;
2426 			}
2427 
2428 			/*
2429 			 *	Stop the task
2430 			 *	if task hasn't already been stopped by
2431 			 *	a signal.
2432 			 */
2433 			uth->uu_siglist &= ~mask;
2434 			if (sig_proc->p_stat != SSTOP) {
2435 				sig_proc->p_xstat = signum;
2436 				sig_proc->p_stat = SSTOP;
2437 				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &sig_proc->p_flag);
2438 				sig_proc->p_lflag &= ~P_LWAITED;
2439 				proc_unlock(sig_proc);
2440 
2441 				pp = proc_parentholdref(sig_proc);
2442 				stop(sig_proc, pp);
2443 				if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2444 					my_cred = kauth_cred_proc_ref(sig_proc);
2445 					r_uid = kauth_cred_getruid(my_cred);
2446 					kauth_cred_unref(&my_cred);
2447 
2448 					proc_lock(sig_proc);
2449 					pp->si_pid = proc_getpid(sig_proc);
2450 					/*
2451 					 * POSIX: sigaction for a stopped child
2452 					 * when sent to the parent must set the
2453 					 * child's signal number into si_status.
2454 					 */
2455 					if (signum != SIGSTOP) {
2456 						pp->si_status = WEXITSTATUS(sig_proc->p_xstat);
2457 					} else {
2458 						pp->si_status = W_EXITCODE(signum, signum);
2459 					}
2460 					pp->si_code = CLD_STOPPED;
2461 					pp->si_uid = r_uid;
2462 					proc_unlock(sig_proc);
2463 
2464 					psignal(pp, SIGCHLD);
2465 				}
2466 				if (pp != PROC_NULL) {
2467 					proc_parentdropref(pp, 0);
2468 				}
2469 
2470 				goto sigout_unlocked;
2471 			}
2472 
2473 			goto sigout_locked;
2474 		}
2475 
2476 		DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum);
2477 
2478 		switch (signum) {
2479 		/*
2480 		 * Signals ignored by default have been dealt
2481 		 * with already, since their bits are on in
2482 		 * p_sigignore.
2483 		 */
2484 
2485 		case SIGKILL:
2486 			/*
2487 			 * Kill signal always sets process running and
2488 			 * unsuspends it.
2489 			 */
2490 			/*
2491 			 *	Process will be running after 'run'
2492 			 */
2493 			sig_proc->p_stat = SRUN;
2494 			/*
2495 			 * In scenarios where suspend/resume are racing
2496 			 * the signal we are missing AST_BSD by the time
2497 			 * we get here, set again to avoid races. This
2498 			 * was the scenario with spindump enabled shutdowns.
2499 			 * We would need to cover this approp down the line.
2500 			 */
2501 			act_set_astbsd(sig_thread);
2502 			kret = thread_abort(sig_thread);
2503 			update_thread_policy = (kret == KERN_SUCCESS);
2504 
2505 			if (uth->uu_exit_reason == OS_REASON_NULL) {
2506 				if (signal_reason == OS_REASON_NULL) {
2507 					KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2508 					    proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);
2509 
2510 					signal_reason = build_signal_reason(signum, NULL);
2511 				}
2512 
2513 				os_reason_ref(signal_reason);
2514 				set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2515 			}
2516 
2517 			goto sigout_locked;
2518 
2519 		case SIGCONT:
2520 			/*
2521 			 * Let the process run.  If it's sleeping on an
2522 			 * event, it remains so.
2523 			 */
2524 			assert(signal_reason == NULL);
2525 			OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag);
2526 			sig_proc->p_contproc = proc_getpid(sig_proc);
2527 			sig_proc->p_xstat = signum;
2528 
2529 			(void) task_resume_internal(sig_task);
2530 
2531 			/*
2532 			 * When processing a SIGCONT, we need to check
2533 			 * to see if there are signals pending that
2534 			 * were not delivered because we had been
2535 			 * previously stopped.  If that's the case,
2536 			 * we need to thread_abort_safely() to trigger
2537 			 * interruption of the current system call to
2538 			 * cause their handlers to fire.  If it's only
2539 			 * the SIGCONT, then don't wake up.
2540 			 */
2541 			if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) {
2542 				uth->uu_siglist &= ~mask;
2543 				sig_proc->p_stat = SRUN;
2544 				goto runlocked;
2545 			}
2546 
2547 			uth->uu_siglist &= ~mask;
2548 			sig_proc->p_stat = SRUN;
2549 			goto sigout_locked;
2550 
2551 		default:
2552 			/*
2553 			 * A signal which has a default action of killing
2554 			 * the process, and for which there is no handler,
2555 			 * needs to act like SIGKILL
2556 			 */
2557 			if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) {
2558 				sig_proc->p_stat = SRUN;
2559 				kret = thread_abort(sig_thread);
2560 				update_thread_policy = (kret == KERN_SUCCESS);
2561 
2562 				if (uth->uu_exit_reason == OS_REASON_NULL) {
2563 					if (signal_reason == OS_REASON_NULL) {
2564 						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2565 						    proc_getpid(sig_proc), OS_REASON_SIGNAL, signum, 0, 0);
2566 
2567 						signal_reason = build_signal_reason(signum, NULL);
2568 					}
2569 
2570 					os_reason_ref(signal_reason);
2571 					set_thread_exit_reason(sig_thread, signal_reason, TRUE);
2572 				}
2573 
2574 				goto sigout_locked;
2575 			}
2576 
2577 			/*
2578 			 * All other signals wake up the process, but don't
2579 			 * resume it.
2580 			 */
2581 			if (sig_proc->p_stat == SSTOP) {
2582 				goto sigout_locked;
2583 			}
2584 			goto runlocked;
2585 		}
2586 	}
2587 	/*NOTREACHED*/
2588 
2589 runlocked:
2590 	/*
2591 	 * If we're being traced (possibly because someone attached us
2592 	 * while we were stopped), check for a signal from the debugger.
2593 	 */
2594 	if (sig_proc->p_stat == SSTOP) {
2595 		if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) {
2596 			uth->uu_siglist |= sigmask(sig_proc->p_xstat);
2597 		}
2598 
2599 		if ((flavor & PSIG_VFORK) != 0) {
2600 			sig_proc->p_stat = SRUN;
2601 		}
2602 	} else {
2603 		/*
2604 		 * setrunnable(p) in BSD and
2605 		 * Wake up the thread if it is interruptible.
2606 		 */
2607 		sig_proc->p_stat = SRUN;
2608 		if ((flavor & PSIG_VFORK) == 0) {
2609 			thread_abort_safely(sig_thread);
2610 		}
2611 	}
2612 
2613 sigout_locked:
2614 	if (update_thread_policy) {
2615 		/*
2616 		 * Update the thread policy to heading to terminate, increase priority if
2617 		 * necessary. This needs to be done before we drop the proc lock because the
2618 		 * thread can take the fatal signal once it's dropped.
2619 		 */
2620 		proc_set_thread_policy(sig_thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2621 	}
2622 
2623 	proc_unlock(sig_proc);
2624 
2625 sigout_unlocked:
2626 	os_reason_free(signal_reason);
2627 	if ((flavor & PSIG_LOCKED) == 0) {
2628 		proc_signalend(sig_proc, 0);
2629 	}
2630 }
2631 
2632 void
psignal(proc_t p,int signum)2633 psignal(proc_t p, int signum)
2634 {
2635 	psignal_internal(p, NULL, NULL, 0, signum, NULL);
2636 }
2637 
2638 void
psignal_with_reason(proc_t p,int signum,struct os_reason * signal_reason)2639 psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason)
2640 {
2641 	psignal_internal(p, NULL, NULL, 0, signum, signal_reason);
2642 }
2643 
2644 void
psignal_sigkill_with_reason(struct proc * p,struct os_reason * signal_reason)2645 psignal_sigkill_with_reason(struct proc *p, struct os_reason *signal_reason)
2646 {
2647 	psignal_internal(p, NULL, NULL, 0, SIGKILL, signal_reason);
2648 }
2649 
2650 void
psignal_locked(proc_t p,int signum)2651 psignal_locked(proc_t p, int signum)
2652 {
2653 	psignal_internal(p, NULL, NULL, PSIG_LOCKED, signum, NULL);
2654 }
2655 
/*
 * Post a signal to a vfork child (explicit task/thread, PSIG_VFORK path)
 * with an attached exit reason.  Consumes the caller's reference on
 * signal_reason.
 */
void
psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, new_task, thread, PSIG_VFORK, signum, signal_reason);
}
2661 
2662 void
psignal_vfork(proc_t p,task_t new_task,thread_t thread,int signum)2663 psignal_vfork(proc_t p, task_t new_task, thread_t thread, int signum)
2664 {
2665 	psignal_internal(p, new_task, thread, PSIG_VFORK, signum, NULL);
2666 }
2667 
2668 void
psignal_uthread(thread_t thread,int signum)2669 psignal_uthread(thread_t thread, int signum)
2670 {
2671 	psignal_internal(PROC_NULL, TASK_NULL, thread, PSIG_THREAD, signum, NULL);
2672 }
2673 
2674 /* same as psignal(), but prefer delivery to 'thread' if possible */
2675 void
psignal_try_thread(proc_t p,thread_t thread,int signum)2676 psignal_try_thread(proc_t p, thread_t thread, int signum)
2677 {
2678 	psignal_internal(p, NULL, thread, PSIG_TRY_THREAD, signum, NULL);
2679 }
2680 
/*
 * Same as psignal_try_thread(), with an attached exit reason.
 * Consumes the caller's reference on signal_reason.
 */
void
psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
}
2686 
/*
 * Post a signal directly to a specific thread (PSIG_THREAD path) with an
 * attached exit reason.  Consumes the caller's reference on signal_reason.
 */
void
psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
{
	psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason);
}
2692 
2693 /*
2694  * If the current process has received a signal (should be caught or cause
2695  * termination, should interrupt current syscall), return the signal number.
2696  * Stop signals with default action are processed immediately, then cleared;
2697  * they aren't returned.  This is checked after each entry to the system for
2698  * a syscall or trap (though this can usually be done without calling issignal
2699  * by checking the pending signal masks in the CURSIG macro.) The normal call
2700  * sequence is
2701  *
2702  *	while (signum = CURSIG(curproc))
2703  *		postsig(signum);
2704  */
2705 int
issignal_locked(proc_t p)2706 issignal_locked(proc_t p)
2707 {
2708 	int signum, mask, prop, sigbits;
2709 	thread_t cur_act;
2710 	struct uthread * ut;
2711 	proc_t pp;
2712 	kauth_cred_t my_cred;
2713 	int retval = 0;
2714 	uid_t r_uid;
2715 
2716 	cur_act = current_thread();
2717 
2718 #if SIGNAL_DEBUG
2719 	if (rdebug_proc && (p == rdebug_proc)) {
2720 		ram_printf(3);
2721 	}
2722 #endif /* SIGNAL_DEBUG */
2723 
2724 	/*
2725 	 * Try to grab the signal lock.
2726 	 */
2727 	if (sig_try_locked(p) <= 0) {
2728 		return 0;
2729 	}
2730 
2731 	proc_signalstart(p, 1);
2732 
2733 	ut = get_bsdthread_info(cur_act);
2734 	for (;;) {
2735 		sigbits = ut->uu_siglist & ~ut->uu_sigmask;
2736 
2737 		if (p->p_lflag & P_LPPWAIT) {
2738 			sigbits &= ~stopsigmask;
2739 		}
2740 		if (sigbits == 0) {             /* no signal to send */
2741 			retval = 0;
2742 			goto out;
2743 		}
2744 
2745 		signum = ffs((unsigned int)sigbits);
2746 		mask = sigmask(signum);
2747 		prop = sigprop[signum];
2748 
2749 		/*
2750 		 * We should see pending but ignored signals
2751 		 * only if P_LTRACED was on when they were posted.
2752 		 */
2753 		if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
2754 			ut->uu_siglist &= ~mask;
2755 			continue;
2756 		}
2757 
2758 		if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
2759 			/*
2760 			 * If traced, deliver the signal to the debugger, and wait to be
2761 			 * released.
2762 			 */
2763 			task_t  task;
2764 			p->p_xstat = signum;
2765 
2766 			if (p->p_lflag & P_LSIGEXC) {
2767 				p->sigwait = TRUE;
2768 				p->sigwait_thread = cur_act;
2769 				p->p_stat = SSTOP;
2770 				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2771 				p->p_lflag &= ~P_LWAITED;
2772 				ut->uu_siglist &= ~mask; /* clear the current signal from the pending list */
2773 				proc_signalend(p, 1);
2774 				proc_unlock(p);
2775 				do_bsdexception(EXC_SOFTWARE, EXC_SOFT_SIGNAL, signum);
2776 				proc_lock(p);
2777 				proc_signalstart(p, 1);
2778 			} else {
2779 				proc_unlock(p);
2780 				my_cred = kauth_cred_proc_ref(p);
2781 				r_uid = kauth_cred_getruid(my_cred);
2782 				kauth_cred_unref(&my_cred);
2783 
2784 				pp = proc_parentholdref(p);
2785 				if (pp != PROC_NULL) {
2786 					proc_lock(pp);
2787 
2788 					pp->si_pid = proc_getpid(p);
2789 					pp->p_xhighbits = p->p_xhighbits;
2790 					p->p_xhighbits = 0;
2791 					pp->si_status = p->p_xstat;
2792 					pp->si_code = CLD_TRAPPED;
2793 					pp->si_uid = r_uid;
2794 
2795 					proc_unlock(pp);
2796 				}
2797 
2798 				/*
2799 				 *	XXX Have to really stop for debuggers;
2800 				 *	XXX stop() doesn't do the right thing.
2801 				 */
2802 				task = p->task;
2803 				task_suspend_internal(task);
2804 
2805 				proc_lock(p);
2806 				p->sigwait = TRUE;
2807 				p->sigwait_thread = cur_act;
2808 				p->p_stat = SSTOP;
2809 				OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2810 				p->p_lflag &= ~P_LWAITED;
2811 				ut->uu_siglist &= ~mask;
2812 
2813 				proc_signalend(p, 1);
2814 				proc_unlock(p);
2815 
2816 				if (pp != PROC_NULL) {
2817 					psignal(pp, SIGCHLD);
2818 					proc_list_lock();
2819 					wakeup((caddr_t)pp);
2820 					proc_parentdropref(pp, 1);
2821 					proc_list_unlock();
2822 				}
2823 
2824 				assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE));
2825 				thread_block(THREAD_CONTINUE_NULL);
2826 				proc_lock(p);
2827 				proc_signalstart(p, 1);
2828 			}
2829 
2830 			p->sigwait = FALSE;
2831 			p->sigwait_thread = NULL;
2832 			wakeup((caddr_t)&p->sigwait_thread);
2833 
2834 			if (signum == SIGKILL || ut->uu_siglist & sigmask(SIGKILL)) {
2835 				/*
2836 				 * Deliver a pending sigkill even if it's not the current signal.
2837 				 * Necessary for PT_KILL, which should not be delivered to the
2838 				 * debugger, but we can't differentiate it from any other KILL.
2839 				 */
2840 				signum = SIGKILL;
2841 				goto deliver_sig;
2842 			}
2843 
2844 			/* We may have to quit. */
2845 			if (thread_should_abort(current_thread())) {
2846 				retval = 0;
2847 				goto out;
2848 			}
2849 
2850 			/*
2851 			 * If parent wants us to take the signal,
2852 			 * then it will leave it in p->p_xstat;
2853 			 * otherwise we just look for signals again.
2854 			 */
2855 			signum = p->p_xstat;
2856 			if (signum == 0) {
2857 				continue;
2858 			}
2859 
2860 			/*
2861 			 * Put the new signal into p_siglist.  If the
2862 			 * signal is being masked, look for other signals.
2863 			 */
2864 			mask = sigmask(signum);
2865 			ut->uu_siglist |= mask;
2866 			if (ut->uu_sigmask & mask) {
2867 				continue;
2868 			}
2869 		}
2870 
2871 		/*
2872 		 * Decide whether the signal should be returned.
2873 		 * Return the signal's number, or fall through
2874 		 * to clear it from the pending mask.
2875 		 */
2876 
2877 		switch ((long)SIGACTION(p, signum)) {
2878 		case (long)SIG_DFL:
2879 			/*
2880 			 * If there is a pending stop signal to process
2881 			 * with default action, stop here,
2882 			 * then clear the signal.  However,
2883 			 * if process is member of an orphaned
2884 			 * process group, ignore tty stop signals.
2885 			 */
2886 			if (prop & SA_STOP) {
2887 				struct pgrp * pg;
2888 
2889 				proc_unlock(p);
2890 				pg = proc_pgrp(p, NULL);
2891 				if (p->p_lflag & P_LTRACED ||
2892 				    (pg->pg_jobc == 0 &&
2893 				    prop & SA_TTYSTOP)) {
2894 					proc_lock(p);
2895 					pgrp_rele(pg);
2896 					break; /* ignore signal */
2897 				}
2898 				pgrp_rele(pg);
2899 				if (p->p_stat != SSTOP) {
2900 					proc_lock(p);
2901 					p->p_xstat = signum;
2902 					p->p_stat = SSTOP;
2903 					p->p_lflag &= ~P_LWAITED;
2904 					proc_unlock(p);
2905 
2906 					pp = proc_parentholdref(p);
2907 					stop(p, pp);
2908 					if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) {
2909 						my_cred = kauth_cred_proc_ref(p);
2910 						r_uid = kauth_cred_getruid(my_cred);
2911 						kauth_cred_unref(&my_cred);
2912 
2913 						proc_lock(pp);
2914 						pp->si_pid = proc_getpid(p);
2915 						pp->si_status = WEXITSTATUS(p->p_xstat);
2916 						pp->si_code = CLD_STOPPED;
2917 						pp->si_uid = r_uid;
2918 						proc_unlock(pp);
2919 
2920 						psignal(pp, SIGCHLD);
2921 					}
2922 					if (pp != PROC_NULL) {
2923 						proc_parentdropref(pp, 0);
2924 					}
2925 				}
2926 				proc_lock(p);
2927 				break;
2928 			} else if (prop & SA_IGNORE) {
2929 				/*
2930 				 * Except for SIGCONT, shouldn't get here.
2931 				 * Default action is to ignore; drop it.
2932 				 */
2933 				break; /* ignore signal */
2934 			} else {
2935 				goto deliver_sig;
2936 			}
2937 
2938 		case (long)SIG_IGN:
2939 			/*
2940 			 * Masking above should prevent us ever trying
2941 			 * to take action on an ignored signal other
2942 			 * than SIGCONT, unless process is traced.
2943 			 */
2944 			if ((prop & SA_CONT) == 0 &&
2945 			    (p->p_lflag & P_LTRACED) == 0) {
2946 				printf("issignal\n");
2947 			}
2948 			break; /* ignore signal */
2949 
2950 		default:
2951 			/* This signal has an action - deliver it. */
2952 			goto deliver_sig;
2953 		}
2954 
2955 		/* If we dropped through, the signal was ignored - remove it from pending list. */
2956 		ut->uu_siglist &= ~mask;
2957 	} /* for(;;) */
2958 
2959 	/* NOTREACHED */
2960 
2961 deliver_sig:
2962 	ut->uu_siglist &= ~mask;
2963 	retval = signum;
2964 
2965 out:
2966 	proc_signalend(p, 1);
2967 	return retval;
2968 }
2969 
/* called from _sleep */
/*
 * CURSIG: compute which signal, if any, would next be taken by the
 * current thread of process p -- without consuming it or changing any
 * signal state.  Returns the signal number, or 0 if nothing is
 * deliverable.  Sleep code uses this to decide whether a PCATCH sleep
 * should be interrupted.
 */
int
CURSIG(proc_t p)
{
	int signum, mask, prop, sigbits;
	thread_t cur_act;
	struct uthread * ut;
	int retnum = 0;


	cur_act = current_thread();

	ut = get_bsdthread_info(cur_act);

	/* Fast path: nothing pending on this thread at all. */
	if (ut->uu_siglist == 0) {
		return 0;
	}

	/*
	 * Nothing unmasked is pending and the process is not traced;
	 * tracing is special-cased below because it lets otherwise
	 * ignored signals through.
	 */
	if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) {
		return 0;
	}

	/* Candidate set: pending and not blocked by the thread's mask. */
	sigbits = ut->uu_siglist & ~ut->uu_sigmask;

	for (;;) {
		/* Stop signals are held off while P_LPPWAIT is set. */
		if (p->p_lflag & P_LPPWAIT) {
			sigbits &= ~stopsigmask;
		}
		if (sigbits == 0) {             /* no signal to send */
			return retnum;
		}

		/* Examine the lowest-numbered candidate signal. */
		signum = ffs((unsigned int)sigbits);
		mask = sigmask(signum);
		prop = sigprop[signum];
		sigbits &= ~mask;               /* take the signal out */

		/*
		 * We should see pending but ignored signals
		 * only if P_LTRACED was on when they were posted.
		 */
		if (mask & p->p_sigignore && (p->p_lflag & P_LTRACED) == 0) {
			continue;
		}

		/* A traced process would report this signal to the debugger. */
		if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) {
			return signum;
		}

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */

		switch ((long)SIGACTION(p, signum)) {
		case (long)SIG_DFL:
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				struct pgrp *pg;

				pg = proc_pgrp(p, NULL);

				if (p->p_lflag & P_LTRACED ||
				    (pg->pg_jobc == 0 &&
				    prop & SA_TTYSTOP)) {
					pgrp_rele(pg);
					break;  /* == ignore */
				}
				pgrp_rele(pg);
				/* Remember the stop signal but keep scanning. */
				retnum = signum;
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;          /* == ignore */
			} else {
				return signum;
			}
		/*NOTREACHED*/

		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_lflag & P_LTRACED) == 0) {
				printf("issignal\n");
			}
			break;          /* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			return signum;
		}
	}
	/* NOTREACHED */
}
3081 
3082 /*
3083  * Put the argument process into the stopped state and notify the parent
3084  * via wakeup.  Signals are handled elsewhere.  The process must not be
3085  * on the run queue.
3086  */
3087 static void
stop(proc_t p,proc_t parent)3088 stop(proc_t p, proc_t parent)
3089 {
3090 	OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3091 	if ((parent != PROC_NULL) && (parent->p_stat != SSTOP)) {
3092 		proc_list_lock();
3093 		wakeup((caddr_t)parent);
3094 		proc_list_unlock();
3095 	}
3096 	(void) task_suspend_internal(p->task);
3097 }
3098 
3099 /*
3100  * Take the action for the specified signal
3101  * from the current set of pending signals.
3102  */
3103 void
postsig_locked(int signum)3104 postsig_locked(int signum)
3105 {
3106 	proc_t p = current_proc();
3107 	struct sigacts *ps = &p->p_sigacts;
3108 	user_addr_t catcher;
3109 	uint32_t code;
3110 	int mask, returnmask;
3111 	struct uthread * ut;
3112 	os_reason_t ut_exit_reason = OS_REASON_NULL;
3113 
3114 #if DIAGNOSTIC
3115 	if (signum == 0) {
3116 		panic("postsig");
3117 	}
3118 	/*
3119 	 *	This must be called on master cpu
3120 	 */
3121 	if (cpu_number() != master_cpu) {
3122 		panic("psig not on master");
3123 	}
3124 #endif
3125 
3126 	/*
3127 	 * Try to grab the signal lock.
3128 	 */
3129 	if (sig_try_locked(p) <= 0) {
3130 		return;
3131 	}
3132 
3133 	proc_signalstart(p, 1);
3134 
3135 	ut = current_uthread();
3136 	mask = sigmask(signum);
3137 	ut->uu_siglist &= ~mask;
3138 	catcher = SIGACTION(p, signum);
3139 	if (catcher == SIG_DFL) {
3140 		/*
3141 		 * Default catcher, where the default is to kill
3142 		 * the process.  (Other cases were ignored above.)
3143 		 */
3144 
3145 		/*
3146 		 * exit_with_reason() below will consume a reference to the thread's exit reason, so we take another
3147 		 * reference so the thread still has one even after we call exit_with_reason(). The thread's reference will
3148 		 * ultimately be destroyed in uthread_cleanup().
3149 		 */
3150 		ut_exit_reason = ut->uu_exit_reason;
3151 		os_reason_ref(ut_exit_reason);
3152 
3153 		p->p_acflag |= AXSIG;
3154 		if (sigprop[signum] & SA_CORE) {
3155 			p->p_sigacts.ps_sig = signum;
3156 			proc_signalend(p, 1);
3157 			proc_unlock(p);
3158 #if CONFIG_COREDUMP
3159 			if (coredump(p, 0, 0) == 0) {
3160 				signum |= WCOREFLAG;
3161 			}
3162 #endif
3163 		} else {
3164 			proc_signalend(p, 1);
3165 			proc_unlock(p);
3166 		}
3167 
3168 #if CONFIG_DTRACE
3169 		bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));
3170 
3171 		ut->t_dtrace_siginfo.si_signo = signum;
3172 		ut->t_dtrace_siginfo.si_pid = p->si_pid;
3173 		ut->t_dtrace_siginfo.si_uid = p->si_uid;
3174 		ut->t_dtrace_siginfo.si_status = WEXITSTATUS(p->si_status);
3175 
3176 		/* Fire DTrace proc:::fault probe when signal is generated by hardware. */
3177 		switch (signum) {
3178 		case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
3179 			DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
3180 			break;
3181 		default:
3182 			break;
3183 		}
3184 
3185 
3186 		DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo),
3187 		    void (*)(void), SIG_DFL);
3188 #endif
3189 
3190 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
3191 		    proc_getpid(p), W_EXITCODE(0, signum), 3, 0, 0);
3192 
3193 		exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut_exit_reason);
3194 
3195 		proc_lock(p);
3196 		return;
3197 	} else {
3198 		/*
3199 		 * If we get here, the signal must be caught.
3200 		 */
3201 #if DIAGNOSTIC
3202 		if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) {
3203 			log(LOG_WARNING,
3204 			    "postsig: processing masked or ignored signal\n");
3205 		}
3206 #endif
3207 
3208 		/*
3209 		 * Set the new mask value and also defer further
3210 		 * occurences of this signal.
3211 		 *
3212 		 * Special case: user has done a sigpause.  Here the
3213 		 * current mask is not of interest, but rather the
3214 		 * mask from before the sigpause is what we want
3215 		 * restored after the signal processing is completed.
3216 		 */
3217 		if (ut->uu_flag & UT_SAS_OLDMASK) {
3218 			returnmask = ut->uu_oldmask;
3219 			ut->uu_flag &= ~UT_SAS_OLDMASK;
3220 			ut->uu_oldmask = 0;
3221 		} else {
3222 			returnmask = ut->uu_sigmask;
3223 		}
3224 		ut->uu_sigmask |= ps->ps_catchmask[signum];
3225 		if ((ps->ps_signodefer & mask) == 0) {
3226 			ut->uu_sigmask |= mask;
3227 		}
3228 		sigset_t siginfo = ps->ps_siginfo;
3229 		if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) {
3230 			if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) {
3231 				p->p_sigignore |= mask;
3232 			}
3233 			if (SIGACTION(p, signum) != SIG_DFL) {
3234 				proc_set_sigact(p, signum, SIG_DFL);
3235 			}
3236 			ps->ps_siginfo &= ~mask;
3237 			ps->ps_signodefer &= ~mask;
3238 		}
3239 
3240 		if (ps->ps_sig != signum) {
3241 			code = 0;
3242 		} else {
3243 			code = ps->ps_code;
3244 			ps->ps_code = 0;
3245 		}
3246 		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nsignals);
3247 		sendsig(p, catcher, signum, returnmask, code, siginfo);
3248 	}
3249 	proc_signalend(p, 1);
3250 }
3251 
3252 /*
3253  * Attach a signal knote to the list of knotes for this process.
3254  *
3255  * Signal knotes share the knote list with proc knotes.  This
3256  * could be avoided by using a signal-specific knote list, but
3257  * probably isn't worth the trouble.
3258  */
3259 
3260 static int
filt_sigattach(struct knote * kn,__unused struct kevent_qos_s * kev)3261 filt_sigattach(struct knote *kn, __unused struct kevent_qos_s *kev)
3262 {
3263 	proc_t p = current_proc();  /* can attach only to oneself */
3264 
3265 	proc_klist_lock();
3266 
3267 	kn->kn_proc = p;
3268 	kn->kn_flags |= EV_CLEAR; /* automatically set */
3269 	kn->kn_sdata = 0;         /* incoming data is ignored */
3270 
3271 	KNOTE_ATTACH(&p->p_klist, kn);
3272 
3273 	proc_klist_unlock();
3274 
3275 	/* edge-triggered events can't have fired before we attached */
3276 	return 0;
3277 }
3278 
3279 /*
3280  * remove the knote from the process list, if it hasn't already
3281  * been removed by exit processing.
3282  */
3283 
3284 static void
filt_sigdetach(struct knote * kn)3285 filt_sigdetach(struct knote *kn)
3286 {
3287 	proc_t p = kn->kn_proc;
3288 
3289 	proc_klist_lock();
3290 	kn->kn_proc = NULL;
3291 	KNOTE_DETACH(&p->p_klist, kn);
3292 	proc_klist_unlock();
3293 }
3294 
3295 /*
3296  * Post an event to the signal filter.  Because we share the same list
3297  * as process knotes, we have to filter out and handle only signal events.
3298  *
3299  * We assume that we process fdt_invalidate() before we post the NOTE_EXIT for
3300  * a process during exit.  Therefore, since signal filters can only be
3301  * set up "in-process", we should have already torn down the kqueue
3302  * hosting the EVFILT_SIGNAL knote and should never see NOTE_EXIT.
3303  */
3304 static int
filt_signal(struct knote * kn,long hint)3305 filt_signal(struct knote *kn, long hint)
3306 {
3307 	if (hint & NOTE_SIGNAL) {
3308 		hint &= ~NOTE_SIGNAL;
3309 
3310 		if (kn->kn_id == (unsigned int)hint) {
3311 			kn->kn_hook32++;
3312 		}
3313 	} else if (hint & NOTE_EXIT) {
3314 		panic("filt_signal: detected NOTE_EXIT event");
3315 	}
3316 
3317 	return kn->kn_hook32 != 0;
3318 }
3319 
3320 static int
filt_signaltouch(struct knote * kn,struct kevent_qos_s * kev)3321 filt_signaltouch(struct knote *kn, struct kevent_qos_s *kev)
3322 {
3323 #pragma unused(kev)
3324 
3325 	int res;
3326 
3327 	proc_klist_lock();
3328 
3329 	/*
3330 	 * No data to save - just capture if it is already fired
3331 	 */
3332 	res = (kn->kn_hook32 > 0);
3333 
3334 	proc_klist_unlock();
3335 
3336 	return res;
3337 }
3338 
3339 static int
filt_signalprocess(struct knote * kn,struct kevent_qos_s * kev)3340 filt_signalprocess(struct knote *kn, struct kevent_qos_s *kev)
3341 {
3342 	int res = 0;
3343 
3344 	/*
3345 	 * Snapshot the event data.
3346 	 */
3347 
3348 	proc_klist_lock();
3349 	if (kn->kn_hook32) {
3350 		knote_fill_kevent(kn, kev, kn->kn_hook32);
3351 		kn->kn_hook32 = 0;
3352 		res = 1;
3353 	}
3354 	proc_klist_unlock();
3355 	return res;
3356 }
3357 
/*
 * bsd_ast: BSD-side AST (asynchronous system trap) handler for a
 * thread returning to user space.  Fires expired interval timers and
 * the CPU rlimit timer, runs deferred DTrace signal/stop actions, then
 * delivers any pending signals.  Also triggers one-time BSD init on
 * the first pass through.
 */
void
bsd_ast(thread_t thread)
{
	proc_t p = current_proc();
	struct uthread *ut = get_bsdthread_info(thread);
	int     signum;
	static int bsd_init_done = 0;

	if (p == NULL) {
		return;
	}

	/* don't run bsd ast on exec copy or exec'ed tasks */
	if (task_did_exec(current_task()) || task_is_exec_copy(current_task())) {
		return;
	}

	/* Virtual (user-time) interval timer: send SIGVTALRM on expiry. */
	if (timerisset(&p->p_vtimer_user.it_value)) {
		uint32_t        microsecs;

		task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs);

		if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) {
			/* Re-arm or disarm depending on whether it reloads. */
			if (timerisset(&p->p_vtimer_user.it_value)) {
				task_vtimer_set(p->task, TASK_VTIMER_USER);
			} else {
				task_vtimer_clear(p->task, TASK_VTIMER_USER);
			}

			psignal_try_thread(p, thread, SIGVTALRM);
		}
	}

	/* Profiling interval timer: send SIGPROF on expiry. */
	if (timerisset(&p->p_vtimer_prof.it_value)) {
		uint32_t        microsecs;

		task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs);

		if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) {
			if (timerisset(&p->p_vtimer_prof.it_value)) {
				task_vtimer_set(p->task, TASK_VTIMER_PROF);
			} else {
				task_vtimer_clear(p->task, TASK_VTIMER_PROF);
			}

			psignal_try_thread(p, thread, SIGPROF);
		}
	}

	/* CPU-time resource limit: send SIGXCPU when the budget runs out. */
	if (timerisset(&p->p_rlim_cpu)) {
		struct timeval          tv;

		task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec);

		proc_spinlock(p);
		if (p->p_rlim_cpu.tv_sec > 0 || p->p_rlim_cpu.tv_usec > tv.tv_usec) {
			/* Still within the limit: charge the elapsed time. */
			tv.tv_sec = 0;
			timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu);
			proc_spinunlock(p);
		} else {
			/* Limit exhausted: disarm and notify. */
			timerclear(&p->p_rlim_cpu);
			proc_spinunlock(p);

			task_vtimer_clear(p->task, TASK_VTIMER_RLIM);

			psignal_try_thread(p, thread, SIGXCPU);
		}
	}

#if CONFIG_DTRACE
	/* Deferred DTrace raise() action: post the recorded signal. */
	if (ut->t_dtrace_sig) {
		uint8_t dt_action_sig = ut->t_dtrace_sig;
		ut->t_dtrace_sig = 0;
		psignal(p, dt_action_sig);
	}

	/* Deferred DTrace stop() action: suspend the task. */
	if (ut->t_dtrace_stop) {
		ut->t_dtrace_stop = 0;
		proc_lock(p);
		p->p_dtrace_stop = 1;
		proc_unlock(p);
		(void)task_suspend_internal(p->task);
	}

	/* Deferred DTrace resume of a pid it previously stopped. */
	if (ut->t_dtrace_resumepid) {
		proc_t resumeproc = proc_find((int)ut->t_dtrace_resumepid);
		ut->t_dtrace_resumepid = 0;
		if (resumeproc != PROC_NULL) {
			proc_lock(resumeproc);
			/* We only act on processes stopped by dtrace */
			if (resumeproc->p_dtrace_stop) {
				resumeproc->p_dtrace_stop = 0;
				proc_unlock(resumeproc);
				task_resume_internal(resumeproc->task);
			} else {
				proc_unlock(resumeproc);
			}
			proc_rele(resumeproc);
		}
	}

#endif /* CONFIG_DTRACE */

	/* Deliver pending signals until none remain. */
	proc_lock(p);
	if (CHECK_SIGNALS(p, current_thread(), ut)) {
		while ((signum = issignal_locked(p))) {
			postsig_locked(signum);
		}
	}
	proc_unlock(p);

#ifdef CONFIG_32BIT_TELEMETRY
	if (task_consume_32bit_log_flag(p->task)) {
		proc_log_32bit_telemetry(p);
	}
#endif /* CONFIG_32BIT_TELEMETRY */

	/* One-time BSD initialization on the very first AST. */
	if (!bsd_init_done) {
		bsd_init_done = 1;
		bsdinit_task();
	}
}
3480 
3481 /* ptrace set runnable */
3482 void
pt_setrunnable(proc_t p)3483 pt_setrunnable(proc_t p)
3484 {
3485 	task_t task;
3486 
3487 	task = p->task;
3488 
3489 	if (p->p_lflag & P_LTRACED) {
3490 		proc_lock(p);
3491 		p->p_stat = SRUN;
3492 		proc_unlock(p);
3493 		if (p->sigwait) {
3494 			wakeup((caddr_t)&(p->sigwait));
3495 			if ((p->p_lflag & P_LSIGEXC) == 0) {    // 5878479
3496 				task_release(task);
3497 			}
3498 		}
3499 	}
3500 }
3501 
3502 kern_return_t
do_bsdexception(int exc,int code,int sub)3503 do_bsdexception(
3504 	int exc,
3505 	int code,
3506 	int sub)
3507 {
3508 	mach_exception_data_type_t   codes[EXCEPTION_CODE_MAX];
3509 
3510 	codes[0] = code;
3511 	codes[1] = sub;
3512 	return bsd_exception(exc, codes, 2);
3513 }
3514 
3515 int
proc_pendingsignals(proc_t p,sigset_t mask)3516 proc_pendingsignals(proc_t p, sigset_t mask)
3517 {
3518 	struct uthread * uth;
3519 	sigset_t bits = 0;
3520 
3521 	proc_lock(p);
3522 	/* If the process is in proc exit return no signal info */
3523 	if (p->p_lflag & P_LPEXIT) {
3524 		goto out;
3525 	}
3526 
3527 
3528 	bits = 0;
3529 	TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) {
3530 		bits |= (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3531 	}
3532 out:
3533 	proc_unlock(p);
3534 	return bits;
3535 }
3536 
3537 int
thread_issignal(proc_t p,thread_t th,sigset_t mask)3538 thread_issignal(proc_t p, thread_t th, sigset_t mask)
3539 {
3540 	struct uthread * uth;
3541 	sigset_t  bits = 0;
3542 
3543 	proc_lock(p);
3544 	uth = (struct uthread *)get_bsdthread_info(th);
3545 	if (uth) {
3546 		bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask);
3547 	}
3548 	proc_unlock(p);
3549 	return bits;
3550 }
3551 
3552 /*
3553  * Allow external reads of the sigprop array.
3554  */
3555 int
hassigprop(int sig,int prop)3556 hassigprop(int sig, int prop)
3557 {
3558 	return sigprop[sig] & prop;
3559 }
3560 
3561 void
pgsigio(pid_t pgid,int sig)3562 pgsigio(pid_t pgid, int sig)
3563 {
3564 	proc_t p = PROC_NULL;
3565 
3566 	if (pgid < 0) {
3567 		gsignal(-(pgid), sig);
3568 	} else if (pgid > 0 && (p = proc_find(pgid)) != 0) {
3569 		psignal(p, sig);
3570 	}
3571 	if (p != PROC_NULL) {
3572 		proc_rele(p);
3573 	}
3574 }
3575 
3576 void
proc_signalstart(proc_t p,int locked)3577 proc_signalstart(proc_t p, int locked)
3578 {
3579 	if (!locked) {
3580 		proc_lock(p);
3581 	}
3582 
3583 	if (p->p_signalholder == current_thread()) {
3584 		panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock");
3585 	}
3586 
3587 	p->p_sigwaitcnt++;
3588 	while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) {
3589 		msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL);
3590 	}
3591 	p->p_sigwaitcnt--;
3592 
3593 	p->p_lflag |= P_LINSIGNAL;
3594 	p->p_signalholder = current_thread();
3595 	if (!locked) {
3596 		proc_unlock(p);
3597 	}
3598 }
3599 
3600 void
proc_signalend(proc_t p,int locked)3601 proc_signalend(proc_t p, int locked)
3602 {
3603 	if (!locked) {
3604 		proc_lock(p);
3605 	}
3606 	p->p_lflag &= ~P_LINSIGNAL;
3607 
3608 	if (p->p_sigwaitcnt > 0) {
3609 		wakeup(&p->p_sigmask);
3610 	}
3611 
3612 	p->p_signalholder = NULL;
3613 	if (!locked) {
3614 		proc_unlock(p);
3615 	}
3616 }
3617 
3618 void
sig_lock_to_exit(proc_t p)3619 sig_lock_to_exit(proc_t p)
3620 {
3621 	thread_t        self = current_thread();
3622 
3623 	p->exit_thread = self;
3624 	proc_unlock(p);
3625 
3626 	task_hold(p->task);
3627 	task_wait(p->task, FALSE);
3628 
3629 	proc_lock(p);
3630 }
3631 
3632 int
sig_try_locked(proc_t p)3633 sig_try_locked(proc_t p)
3634 {
3635 	thread_t        self = current_thread();
3636 
3637 	while (p->sigwait || p->exit_thread) {
3638 		if (p->exit_thread) {
3639 			return 0;
3640 		}
3641 		msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0);
3642 		if (thread_should_abort(self)) {
3643 			/*
3644 			 * Terminate request - clean up.
3645 			 */
3646 			proc_lock(p);
3647 			return -1;
3648 		}
3649 		proc_lock(p);
3650 	}
3651 	return 1;
3652 }
3653