xref: /xnu-11417.140.69/bsd/kern/kern_exit.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113 
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116 
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120 
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134 #include <vm/vm_map_xnu.h>
135 
136 #include <vm/vm_protos.h>
137 #include <os/log.h>
138 #include <os/system_event_log.h>
139 
140 #include <pexpert/pexpert.h>
141 
142 #include <kdp/kdp_dyld.h>
143 
144 #if SYSV_SHM
145 #include <sys/shm_internal.h>   /* shmexit */
146 #endif /* SYSV_SHM */
147 #if CONFIG_PERSONAS
148 #include <sys/persona.h>
149 #endif /* CONFIG_PERSONAS */
150 #if CONFIG_MEMORYSTATUS
151 #include <sys/kern_memorystatus.h>
152 #endif /* CONFIG_MEMORYSTATUS */
153 #if CONFIG_DTRACE
154 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
155 void dtrace_proc_exit(proc_t p);
156 #include <sys/dtrace_ptss.h>
157 #endif /* CONFIG_DTRACE */
158 #if CONFIG_MACF
159 #include <security/mac_framework.h>
160 #include <security/mac_mach_internal.h>
161 #include <sys/syscall.h>
162 #endif /* CONFIG_MACF */
163 
164 #ifdef CONFIG_EXCLAVES
165 void
166 task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
167 #endif /* CONFIG_EXCLAVES */
168 
169 #if CONFIG_MEMORYSTATUS
170 static void proc_memorystatus_remove(proc_t p);
171 #endif /* CONFIG_MEMORYSTATUS */
172 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
173 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
174     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
175     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
176 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
177 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
178 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
179 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
180 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
181     struct rusage_superset *rup, mach_exception_data_type_t code,
182     mach_exception_data_type_t subcode, uint64_t *udata_buffer,
183     int num_udata, os_reason_t reason, exception_type_t etype);
184 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
185 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
186 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
187 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
188 extern uint64_t get_task_phys_footprint_limit(task_t);
189 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
190 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
191 
192 extern unsigned int exception_log_max_pid;
193 
194 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
195 
196 /*
197  * Flags for `reap_child_locked`.
198  */
199 __options_decl(reap_flags_t, uint32_t, {
200 	/*
201 	 * Parent is exiting, so the kernel is responsible for reaping children.
202 	 */
203 	REAP_DEAD_PARENT = 0x01,
204 	/*
205 	 * Child process was re-parented to initproc.
206 	 */
207 	REAP_REPARENTED_TO_INIT = 0x02,
208 	/*
209 	 * `proc_list_lock` is held on entry.
210 	 */
211 	REAP_LOCKED = 0x04,
212 	/*
213 	 * Drop the `proc_list_lock` on return.  Note that the `proc_list_lock` will
214 	 * be dropped internally by the function regardless.
215 	 */
216 	REAP_DROP_LOCK = 0x08,
217 });
218 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
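/*
 * Illustrative caller sketch (hypothetical, not part of this file): an
 * exiting parent reaping a child that was already re-parented to initproc
 * would combine the flags above, e.g.:
 */
#if 0   /* example only, not compiled */
static void
reap_example(proc_t parent, proc_t child)
{
	/* proc_list_lock held on entry; drop it once the child is reaped */
	reap_child_locked(parent, child,
	    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
}
#endif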
219 
220 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
221 
222 /*
223  * Things which should have prototypes in headers, but don't
224  */
225 void    proc_exit(proc_t p);
226 int     wait1continue(int result);
227 int     waitidcontinue(int result);
228 kern_return_t sys_perf_notify(thread_t thread, int pid);
229 kern_return_t task_exception_notify(exception_type_t exception,
230     mach_exception_data_type_t code, mach_exception_data_type_t subcode, bool fatal);
231 void    delay(int);
232 
233 #if DEVELOPMENT || DEBUG
234 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
235 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
236 static pid_t proc_exit_lpexit_spin_pid = -1;            /* wakeup point */
237 static int proc_exit_lpexit_spin_pos = -1;              /* point to block */
238 static int proc_exit_lpexit_spinning = 0;
239 enum {
240 	PELS_POS_START = 0,             /* beginning of proc_exit */
241 	PELS_POS_PRE_TASK_DETACH,       /* before task/proc detach */
242 	PELS_POS_POST_TASK_DETACH,      /* after task/proc detach */
243 	PELS_POS_END,                   /* end of proc_exit */
244 	PELS_NPOS                       /* # valid values */
245 };
246 
247 /* Panic if matching processes (delimited by ',') exit on error. */
248 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
249 
250 static int
251 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
252 {
253 #pragma unused(oidp, arg1, arg2)
254 	pid_t new_value;
255 	int changed;
256 	int error;
257 
258 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
259 		return ENOENT;
260 	}
261 
262 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
263 	    sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
264 	if (error == 0 && changed != 0) {
265 		if (new_value < -1) {
266 			return EINVAL;
267 		}
268 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
269 		proc_exit_lpexit_spin_pid = new_value;
270 		wakeup(&proc_exit_lpexit_spin_pid);
271 		proc_exit_lpexit_spinning = 0;
272 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
273 	}
274 	return error;
275 }
276 
277 static int
278 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
279 {
280 #pragma unused(oidp, arg1, arg2)
281 	int new_value;
282 	int changed;
283 	int error;
284 
285 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
286 		return ENOENT;
287 	}
288 
289 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
290 	    sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
291 	if (error == 0 && changed != 0) {
292 		if (new_value < -1 || new_value >= PELS_NPOS) {
293 			return EINVAL;
294 		}
295 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
296 		proc_exit_lpexit_spin_pos = new_value;
297 		wakeup(&proc_exit_lpexit_spin_pid);
298 		proc_exit_lpexit_spinning = 0;
299 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
300 	}
301 	return error;
302 }
303 
304 static int
305 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
306 {
307 #pragma unused(oidp, arg1, arg2)
308 	int new_value;
309 	int changed;
310 	int error;
311 
312 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
313 		return ENOENT;
314 	}
315 
316 	error = sysctl_io_number(req, proc_exit_lpexit_spinning,
317 	    sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
318 	if (error == 0 && changed != 0) {
319 		return EINVAL;
320 	}
321 	return error;
322 }
323 
324 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
325     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
326     NULL, sizeof(pid_t),
327     proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
328 
329 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
330     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
331     NULL, sizeof(int),
332     proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
333 
334 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
335     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
336     NULL, sizeof(int),
337     proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
338 
339 static inline void
340 proc_exit_lpexit_check(pid_t pid, int pos)
341 {
342 	if (proc_exit_lpexit_spin_pid == pid) {
343 		bool slept = false;
344 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
345 		while (proc_exit_lpexit_spin_pid == pid &&
346 		    proc_exit_lpexit_spin_pos == pos) {
347 			if (!slept) {
348 				os_log(OS_LOG_DEFAULT,
349 				    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
350 				slept = true;
351 			}
352 			proc_exit_lpexit_spinning = 1;
353 			msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
354 			    PWAIT, "proc_exit_lpexit_check", NULL);
355 			proc_exit_lpexit_spinning = 0;
356 		}
357 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
358 		if (slept) {
359 			os_log(OS_LOG_DEFAULT,
360 			    "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
361 		}
362 	}
363 }
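/*
 * Usage sketch (user space, hypothetical; requires the
 * "enable_proc_exit_lpexit_spin" boot-arg): park a pid at a chosen point in
 * proc_exit, poll the read-only "spinning" sysctl for arrival, then release
 * the thread by writing -1 back to the pid sysctl.
 */
#if 0   /* example only, not compiled */
#include <sys/sysctl.h>

static void
hold_pid_in_proc_exit(pid_t pid)
{
	int pos = 1;            /* PELS_POS_PRE_TASK_DETACH */
	int spinning = 0;
	size_t len = sizeof(spinning);

	sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &pid, sizeof(pid));
	sysctlbyname("debug.proc_exit_lpexit_spin_pos", NULL, NULL, &pos, sizeof(pos));
	do {    /* wait for the exiting thread to reach the requested point */
		sysctlbyname("debug.proc_exit_lpexit_spinning", &spinning, &len, NULL, 0);
	} while (spinning == 0);
	/* ...inspect kernel state, then write -1 to the pid sysctl to release... */
}
#endif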
364 #endif /* DEVELOPMENT || DEBUG */
365 
366 /*
367  * NOTE: Source and target may *NOT* overlap!
368  * XXX Should share code with bsd/dev/ppc/unix_signal.c
369  */
370 void
371 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
372 {
373 	out->si_signo   = in->si_signo;
374 	out->si_errno   = in->si_errno;
375 	out->si_code    = in->si_code;
376 	out->si_pid     = in->si_pid;
377 	out->si_uid     = in->si_uid;
378 	out->si_status  = in->si_status;
379 	out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
380 	/* following cast works for sival_int because of padding */
381 	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
382 	out->si_band    = (user32_long_t)in->si_band;                  /* range reduction */
383 }
384 
385 void
386 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
387 {
388 	out->si_signo   = in->si_signo;
389 	out->si_errno   = in->si_errno;
390 	out->si_code    = in->si_code;
391 	out->si_pid     = in->si_pid;
392 	out->si_uid     = in->si_uid;
393 	out->si_status  = in->si_status;
394 	out->si_addr    = in->si_addr;
395 	/* following cast works for sival_int because of padding */
396 	out->si_value.sival_ptr = in->si_value.sival_ptr;
397 	out->si_band    = in->si_band;                  /* no range reduction needed */
398 }
399 
400 static int
401 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
402 {
403 	if (is64) {
404 		user64_siginfo_t sinfo64;
405 
406 		bzero(&sinfo64, sizeof(sinfo64));
407 		siginfo_user_to_user64(native, &sinfo64);
408 		return copyout(&sinfo64, uaddr, sizeof(sinfo64));
409 	} else {
410 		user32_siginfo_t sinfo32;
411 
412 		bzero(&sinfo32, sizeof(sinfo32));
413 		siginfo_user_to_user32(native, &sinfo32);
414 		return copyout(&sinfo32, uaddr, sizeof(sinfo32));
415 	}
416 }
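/*
 * The "cast works for sival_int" comments above rely on sigval being a union
 * whose members share storage, so copying the (possibly truncated) pointer
 * representation also carries the integer value on little-endian targets.
 * Rough layout sketch (not the actual ABI header):
 */
#if 0   /* example only, not compiled */
union sigval_sketch {
	int   sival_int;        /* overlaps the low bytes of sival_ptr */
	void *sival_ptr;        /* copying this preserves sival_int */
};
#endif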
417 
418 void
419 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
420     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
421     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
422 {
423 	struct rusage_superset rup;
424 
425 	gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
426 	rup.ri.ri_phys_footprint = 0;
427 	populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
428 	    udata_buffer, num_udata, reason, etype);
429 }
430 
431 static void
432 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
433 {
434 	mach_exception_data_type_t code_update = *code;
435 	mach_exception_data_type_t subcode_update = *subcode;
436 	if (p->p_exit_reason == OS_REASON_NULL) {
437 		return;
438 	}
439 
440 	switch (p->p_exit_reason->osr_namespace) {
441 	case OS_REASON_JETSAM:
442 		if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
443 			/* Update the code with EXC_RESOURCE code for high memory watermark */
444 			EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
445 			EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
446 			EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
447 			subcode_update = 0;
448 			break;
449 		}
450 
451 		break;
452 	default:
453 		break;
454 	}
455 
456 	*code = code_update;
457 	*subcode = subcode_update;
458 	return;
459 }
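/*
 * Worked example for the jetsam case above: with a 2048 MiB per-process
 * footprint limit, the rewritten code round-trips through the EXC_RESOURCE
 * encode/decode macros (assuming the DECODE_* counterparts from
 * <kern/exc_resource.h>):
 */
#if 0   /* example only, not compiled */
mach_exception_data_type_t ex_code = 0;
EXC_RESOURCE_ENCODE_TYPE(ex_code, RESOURCE_TYPE_MEMORY);
EXC_RESOURCE_ENCODE_FLAVOR(ex_code, FLAVOR_HIGH_WATERMARK);
EXC_RESOURCE_HWM_ENCODE_LIMIT(ex_code, 2048);
assert(EXC_RESOURCE_DECODE_RESOURCE_TYPE(ex_code) == RESOURCE_TYPE_MEMORY);
assert(EXC_RESOURCE_DECODE_FLAVOR(ex_code) == FLAVOR_HIGH_WATERMARK);
#endif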
460 
461 mach_exception_data_type_t
462 proc_encode_exit_exception_code(proc_t p)
463 {
464 	uint64_t subcode = 0;
465 
466 	if (p->p_exit_reason == OS_REASON_NULL) {
467 		return 0;
468 	}
469 
470 	/* Embed first 32 bits of osr_namespace and osr_code in exception code */
471 	ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
472 	ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
473 	return (mach_exception_data_type_t)subcode;
474 }
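/*
 * Sketch of what the subcode packs, assuming the namespace occupies the
 * upper 32 bits and the code the lower 32 (the ENCODE_* macros in
 * <sys/reason.h> are authoritative):
 */
#if 0   /* example only, not compiled */
/* given some proc_t p with a non-NULL exit reason */
mach_exception_data_type_t sub = proc_encode_exit_exception_code(p);
uint32_t ns = (uint32_t)((uint64_t)sub >> 32);        /* osr_namespace, truncated */
uint32_t cd = (uint32_t)((uint64_t)sub & 0xffffffff); /* osr_code, truncated */
#endif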
475 
476 static void
477 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
478     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
479     uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
480 {
481 	mach_vm_address_t uaddr = 0;
482 	mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
483 	exc_codes[0] = code;
484 	exc_codes[1] = subcode;
485 	cpu_type_t cputype;
486 	struct proc_uniqidentifierinfo p_uniqidinfo;
487 	struct proc_workqueueinfo pwqinfo;
488 	int retval = 0;
489 	uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
490 	boolean_t is_corpse_fork;
491 	uint32_t csflags;
492 	unsigned int pflags = 0;
493 	uint64_t max_footprint_mb;
494 	uint64_t max_footprint;
495 
496 	uint64_t ledger_internal;
497 	uint64_t ledger_internal_compressed;
498 	uint64_t ledger_iokit_mapped;
499 	uint64_t ledger_alternate_accounting;
500 	uint64_t ledger_alternate_accounting_compressed;
501 	uint64_t ledger_purgeable_nonvolatile;
502 	uint64_t ledger_purgeable_nonvolatile_compressed;
503 	uint64_t ledger_page_table;
504 	uint64_t ledger_phys_footprint;
505 	uint64_t ledger_phys_footprint_lifetime_max;
506 	uint64_t ledger_network_nonvolatile;
507 	uint64_t ledger_network_nonvolatile_compressed;
508 	uint64_t ledger_wired_mem;
509 	uint64_t ledger_tagged_footprint;
510 	uint64_t ledger_tagged_footprint_compressed;
511 	uint64_t ledger_media_footprint;
512 	uint64_t ledger_media_footprint_compressed;
513 	uint64_t ledger_graphics_footprint;
514 	uint64_t ledger_graphics_footprint_compressed;
515 	uint64_t ledger_neural_footprint;
516 	uint64_t ledger_neural_footprint_compressed;
517 
518 	void *crash_info_ptr = task_get_corpseinfo(corpse_task);
519 
520 #if CONFIG_MEMORYSTATUS
521 	int memstat_dirty_flags = 0;
522 #endif
523 
524 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
525 		kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
526 	}
527 
528 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
529 		pid_t pid = proc_getpid(p);
530 		kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
531 	}
532 
533 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
534 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
535 	}
536 
537 	/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
538 	if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
539 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
540 			kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
541 		}
542 	}
543 
544 	static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
545 	if (KERN_SUCCESS ==
546 	    kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
547 		proc_piduniqidentifierinfo(p, &p_uniqidinfo);
548 		kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
549 	}
550 
551 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
552 		kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
553 	}
554 
555 	csflags = (uint32_t)proc_getcsflags(p);
556 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
557 		kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
558 	}
559 
560 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
561 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
562 	}
563 
564 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
565 		struct timeval64 t64;
566 		t64.tv_sec = (int64_t)p->p_start.tv_sec;
567 		t64.tv_usec = (int64_t)p->p_start.tv_usec;
568 		kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
569 	}
570 
571 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
572 		kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
573 	}
574 
575 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
576 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
577 	}
578 
579 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
580 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
581 	}
582 
583 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
584 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
585 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
586 		kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
587 		zfree(ZV_NAMEI, buf);
588 	}
589 
590 	pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
591 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
592 		kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
593 	}
594 
595 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
596 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
597 	}
598 
599 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
600 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
601 	}
602 
603 	cputype = cpu_type() & ~CPU_ARCH_MASK;
604 	if (IS_64BIT_PROCESS(p)) {
605 		cputype |= CPU_ARCH_ABI64;
606 	} else if (proc_is64bit_data(p)) {
607 		cputype |= CPU_ARCH_ABI64_32;
608 	}
609 
610 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
611 		kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
612 	}
613 
614 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
615 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
616 	}
617 
618 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
619 		max_footprint = get_task_phys_footprint_limit(proc_task(p));
620 		max_footprint_mb = max_footprint >> 20;
621 		kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
622 	}
623 
624 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
625 		ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
626 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
627 	}
628 
629 	// In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
630 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
631 		ledger_internal = get_task_internal(corpse_task);
632 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
633 	}
634 
635 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
636 		ledger_internal_compressed = get_task_internal_compressed(corpse_task);
637 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
638 	}
639 
640 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
641 		ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
642 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
643 	}
644 
645 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
646 		ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
647 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
648 	}
649 
650 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
651 		ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
652 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
653 	}
654 
655 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
656 		ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
657 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
658 	}
659 
660 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
661 		ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
662 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
663 	}
664 
665 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
666 		ledger_page_table = get_task_page_table(corpse_task);
667 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
668 	}
669 
670 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
671 		ledger_phys_footprint = get_task_phys_footprint(corpse_task);
672 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
673 	}
674 
675 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
676 		ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
677 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
678 	}
679 
680 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
681 		ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
682 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
683 	}
684 
685 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
686 		ledger_wired_mem = get_task_wired_mem(corpse_task);
687 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
688 	}
689 
690 	bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
691 	retval = fill_procworkqueue(p, &pwqinfo);
692 	if (retval == 0) {
693 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
694 			kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
695 		}
696 	}
697 
698 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
699 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
700 	}
701 
702 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
703 		uid_t persona_id = proc_persona_id(p);
704 		kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
705 	}
706 
707 #if CONFIG_COALITIONS
708 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
709 		uint64_t coalition_ids[COALITION_NUM_TYPES];
710 		task_coalition_ids(proc_task(p), coalition_ids);
711 		kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
712 	}
713 #endif /* CONFIG_COALITIONS */
714 
715 #if CONFIG_MEMORYSTATUS
716 	memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
717 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
718 		kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
719 	}
720 #endif
721 
722 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
723 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
724 	}
725 
726 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
727 		ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
728 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
729 	}
730 
731 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
732 		ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
733 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
734 	}
735 
736 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
737 		ledger_media_footprint = get_task_media_footprint(corpse_task);
738 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
739 	}
740 
741 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
742 		ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
743 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
744 	}
745 
746 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
747 		ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
748 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
749 	}
750 
751 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
752 		ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
753 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
754 	}
755 
756 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
757 		ledger_neural_footprint = get_task_neural_footprint(corpse_task);
758 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
759 	}
760 
761 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
762 		ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
763 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
764 	}
765 
766 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
767 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
768 	}
769 
770 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
771 		char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
772 		ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
773 		kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
774 	}
775 
776 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
777 		is_corpse_fork = is_corpsefork(corpse_task);
778 		kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
779 	}
780 
781 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
782 		kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
783 	}
784 
785 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
786 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
787 	}
788 
789 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
790 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
791 	}
792 
793 	char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
794 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
795 		const char * id = cs_identity_get(p);
796 		if (id) {
797 			strlcpy(signing_id, id, sizeof(signing_id));
798 		}
799 		kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
800 	}
801 	char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
802 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
803 		const char * id = csproc_get_teamid(p);
804 		if (id) {
805 			strlcpy(team_id, id, sizeof(team_id));
806 		}
807 		kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
808 	}
809 
810 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
811 		uint32_t category = 0;
812 		if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
813 			category = CS_VALIDATION_CATEGORY_INVALID;
814 		}
815 		kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
816 	}
817 
818 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
819 		uint32_t trust = 0;
820 		kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
821 		if (ret != KERN_SUCCESS) {
822 			trust = KCDATA_INVALID_CS_TRUST_LEVEL;
823 		}
824 		kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
825 	}
826 
827 	uint64_t jit_start_addr = 0;
828 	uint64_t jit_end_addr = 0;
829 	kern_return_t ret = get_jit_address_range_kdp(get_task_pmap(corpse_task), (uintptr_t*)&jit_start_addr, (uintptr_t*)&jit_end_addr);
830 	if (KERN_SUCCESS == ret) {
831 		struct crashinfo_jit_address_range range = {};
832 		range.start_address = jit_start_addr;
833 		range.end_address = jit_end_addr;
834 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_JIT_ADDRESS_RANGE, sizeof(struct crashinfo_jit_address_range), &uaddr)) {
835 			kcdata_memcpy(crash_info_ptr, uaddr, &range, sizeof(range));
836 		}
837 	}
838 
839 	uint64_t cs_auxiliary_info = task_get_cs_auxiliary_info_kdp(corpse_task);
840 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_AUXILIARY_INFO, sizeof(cs_auxiliary_info), &uaddr)) {
841 		kcdata_memcpy(crash_info_ptr, uaddr, &cs_auxiliary_info, sizeof(cs_auxiliary_info));
842 	}
843 
844 	if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
845 		reason = p->p_exit_reason;
846 	}
847 
848 
849 	if (reason != OS_REASON_NULL) {
850 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
851 			struct exit_reason_snapshot ers = {
852 				.ers_namespace = reason->osr_namespace,
853 				.ers_code = reason->osr_code,
854 				.ers_flags = reason->osr_flags
855 			};
856 
857 			kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
858 		}
859 
860 		if (reason->osr_kcd_buf != 0) {
861 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
862 			assert(reason_buf_size != 0);
863 
864 			if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
865 				kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
866 			}
867 		}
868 	}
869 
870 	if (num_udata > 0) {
871 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
872 		    sizeof(uint64_t), num_udata, &uaddr)) {
873 			kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
874 		}
875 	}
876 
877 #if CONFIG_EXCLAVES
878 	task_add_conclave_crash_info(corpse_task, crash_info_ptr);
879 #endif /* CONFIG_EXCLAVES */
880 }
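/*
 * The function above repeats a single kcdata idiom: reserve a typed slot in
 * the corpse buffer, then copy the value in; when the fixed-size buffer is
 * exhausted the reservation fails and the field is simply skipped, leaving
 * the blob parseable. A generic sketch of the pattern (the helper name is
 * hypothetical):
 */
#if 0   /* example only, not compiled */
static void
crashinfo_put(void *crash_info_ptr, uint32_t type, const void *val, uint32_t size)
{
	mach_vm_address_t addr = 0;

	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, type, size, &addr)) {
		kcdata_memcpy(crash_info_ptr, addr, val, size);
	}
}
#endif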
881 
882 exception_type_t
883 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
884 {
885 	kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
886 	    corpse_info->kcd_length);
887 	__assert_only uint32_t type = kcdata_iter_type(iter);
888 	assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
889 
890 	iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
891 	exception_type_t *etype = kcdata_iter_payload(iter);
892 	return *etype;
893 }
894 
895 /*
896  * Collect the information required to generate a lightweight corpse for the
897  * current task, which may already be terminating.
898  */
899 kern_return_t
900 current_thread_collect_backtrace_info(
901 	kcdata_descriptor_t *new_desc,
902 	exception_type_t etype,
903 	mach_exception_data_t code,
904 	mach_msg_type_number_t codeCnt,
905 	void *reasonp)
906 {
907 	kcdata_descriptor_t kcdata;
908 	kern_return_t kr;
909 	int frame_count = 0, max_frames = 100;
910 	mach_vm_address_t uuid_info_addr = 0;
911 	uint32_t uuid_info_count         = 0;
912 	uint32_t btinfo_flag             = 0;
913 	mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
914 	natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
915 	mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
916 	thread_identifier_info_data_t th_info;
917 	char threadname[MAXTHREADNAMESIZE];
918 	void *btdata_kernel = NULL;
919 	typedef uintptr_t user_btframe_t __kernel_data_semantics;
920 	user_btframe_t *btframes = NULL;
921 	os_reason_t reason = (os_reason_t)reasonp;
922 	struct backtrace_user_info info = BTUINFO_INIT;
923 	struct rusage_superset rup;
924 	uint32_t platform;
925 
926 	task_t task = current_task();
927 	proc_t p = current_proc();
928 
929 	bool has_64bit_addr = task_get_64bit_addr(current_task());
930 	bool has_64bit_data = task_get_64bit_data(current_task());
931 
932 	if (new_desc == NULL) {
933 		return KERN_INVALID_ARGUMENT;
934 	}
935 
936 	/* First, collect backtrace frames */
937 	btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
938 	if (!btframes) {
939 		return KERN_RESOURCE_SHORTAGE;
940 	}
941 
942 	frame_count = backtrace_user(btframes, max_frames, NULL, &info);
943 	if (info.btui_error || frame_count == 0) {
944 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
945 		return KERN_FAILURE;
946 	}
947 
948 	if ((info.btui_info & BTI_TRUNCATED) != 0) {
949 		btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
950 	}
951 
952 	/* Captured in kcdata descriptor below */
953 	btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
954 	if (!btdata_kernel) {
955 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
956 		return KERN_RESOURCE_SHORTAGE;
957 	}
958 
959 	kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
960 	if (!kcdata) {
961 		kfree_data(btdata_kernel, alloc_size);
962 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
963 		return KERN_RESOURCE_SHORTAGE;
964 	}
965 
966 	/* First reserve space in kcdata blob for the btinfo flag fields */
967 	if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
968 	    sizeof(uint32_t), &btinfo_flag_addr)) {
969 		kfree_data(btdata_kernel, alloc_size);
970 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
971 		kcdata_memory_destroy(kcdata);
972 		return KERN_RESOURCE_SHORTAGE;
973 	}
974 
975 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
976 	    (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
977 	    sizeof(uintptr_t), frame_count, &kaddr)) {
978 		kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
979 	}
980 
981 #if __LP64__
982 	/* We only support async stacks on 64-bit kernels */
983 	frame_count = 0;
984 
985 	if (info.btui_async_frame_addr != 0) {
986 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
987 		    sizeof(uint32_t), &kaddr)) {
988 			uint32_t idx = info.btui_async_start_index;
989 			kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
990 		}
991 		struct backtrace_control ctl = {
992 			.btc_frame_addr = info.btui_async_frame_addr,
993 			.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
994 		};
995 
996 		info = BTUINFO_INIT;
997 		frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
998 		if (info.btui_error == 0 && frame_count > 0) {
999 			if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1000 			    TASK_BTINFO_ASYNC_BACKTRACE64,
1001 			    sizeof(uintptr_t), frame_count, &kaddr)) {
1002 				kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
1003 			}
1004 		}
1005 
1006 		if ((info.btui_info & BTI_TRUNCATED) != 0) {
1007 			btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
1008 		}
1009 	}
1010 #endif
1011 
1012 	/* Backtrace collection done, free the frames buffer */
1013 	kfree_data(btframes, max_frames * sizeof(btframes[0]));
1014 	btframes = NULL;
1015 
1016 	thread_set_exec_promotion(current_thread());
1017 	/* Next, suspend the task briefly and collect image load infos */
1018 	task_suspend_internal(task);
1019 
1020 	/* all_image_info struct is ABI, in agreement with address width */
1021 	if (has_64bit_addr) {
1022 		struct user64_dyld_all_image_infos task_image_infos = {};
1023 		struct btinfo_sc_load_info64 sc_info;
1024 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1025 		    sizeof(struct user64_dyld_all_image_infos));
1026 		uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1027 		uuid_info_addr = task_image_infos.uuidArray;
1028 
1029 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1030 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1031 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1032 		    sizeof(task_image_infos.sharedCacheUUID));
1033 
1034 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1035 		    TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1036 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1037 		}
1038 	} else {
1039 		struct user32_dyld_all_image_infos task_image_infos = {};
1040 		struct btinfo_sc_load_info sc_info;
1041 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1042 		    sizeof(struct user32_dyld_all_image_infos));
1043 		uuid_info_count = task_image_infos.uuidArrayCount;
1044 		uuid_info_addr = task_image_infos.uuidArray;
1045 
1046 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1047 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1048 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1049 		    sizeof(task_image_infos.sharedCacheUUID));
1050 
1051 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1052 		    TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1053 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1054 		}
1055 	}
1056 
1057 	if (!uuid_info_addr) {
1058 		/*
1059 		 * Can happen when we catch dyld in the middle of updating
1060 		 * this data structure, or copyin of all_image_info struct failed.
1061 		 */
1062 		task_resume_internal(task);
1063 		thread_clear_exec_promotion(current_thread());
1064 		kfree_data(btdata_kernel, alloc_size);
1065 		kcdata_memory_destroy(kcdata);
1066 		return KERN_MEMORY_ERROR;
1067 	}
1068 
1069 	if (uuid_info_count > 0) {
1070 		uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1071 		    sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1072 
1073 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1074 		    (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1075 		    uuid_info_size, uuid_info_count, &kaddr)) {
1076 			if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1077 				task_resume_internal(task);
1078 				thread_clear_exec_promotion(current_thread());
1079 				kfree_data(btdata_kernel, alloc_size);
1080 				kcdata_memory_destroy(kcdata);
1081 				return KERN_MEMORY_ERROR;
1082 			}
1083 		}
1084 	}
1085 
1086 	task_resume_internal(task);
1087 	thread_clear_exec_promotion(current_thread());
1088 
1089 	/* Next, collect all other information */
1090 	thread_flavor_t tsflavor;
1091 	mach_msg_type_number_t tscount;
1092 
1093 #if defined(__x86_64__) || defined(__i386__)
1094 	tsflavor = x86_THREAD_STATE;      /* unified */
1095 	tscount  = x86_THREAD_STATE_COUNT;
1096 #else
1097 	tsflavor = ARM_THREAD_STATE;      /* unified */
1098 	tscount  = ARM_UNIFIED_THREAD_STATE_COUNT;
1099 #endif
1100 
1101 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1102 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1103 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1104 		bt_thread_state->flavor = tsflavor;
1105 		bt_thread_state->count = tscount;
1106 		/* variable-sized tstate array follows */
1107 
1108 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1109 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1110 		if (kr != KERN_SUCCESS) {
1111 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1112 			if (kr == KERN_TERMINATED) {
1113 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1114 			}
1115 		}
1116 	}
1117 
1118 #if defined(__x86_64__) || defined(__i386__)
1119 	tsflavor = x86_EXCEPTION_STATE;       /* unified */
1120 	tscount  = x86_EXCEPTION_STATE_COUNT;
1121 #else
1122 #if defined(__arm64__)
1123 	if (has_64bit_data) {
1124 		tsflavor = ARM_EXCEPTION_STATE64;
1125 		tscount  = ARM_EXCEPTION_STATE64_COUNT;
1126 	} else
1127 #endif /* defined(__arm64__) */
1128 	{
1129 		tsflavor = ARM_EXCEPTION_STATE;
1130 		tscount  = ARM_EXCEPTION_STATE_COUNT;
1131 	}
1132 #endif /* defined(__x86_64__) || defined(__i386__) */
1133 
1134 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1135 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1136 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1137 		bt_thread_state->flavor = tsflavor;
1138 		bt_thread_state->count = tscount;
1139 		/* variable-sized tstate array follows */
1140 
1141 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1142 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1143 		if (kr != KERN_SUCCESS) {
1144 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1145 			if (kr == KERN_TERMINATED) {
1146 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1147 			}
1148 		}
1149 	}
1150 
1151 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1152 		pid_t pid = proc_getpid(p);
1153 		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1154 	}
1155 
1156 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1157 		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1158 	}
1159 
1160 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1161 		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1162 	}
1163 
1164 #if CONFIG_COALITIONS
1165 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1166 		uint64_t coalition_ids[COALITION_NUM_TYPES];
1167 		task_coalition_ids(proc_task(p), coalition_ids);
1168 		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1169 	}
1170 #endif /* CONFIG_COALITIONS */
1171 
1172 	/* V0 is sufficient for ReportCrash */
1173 	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1174 	rup.ri.ri_phys_footprint = 0;
1175 	/* Soft crash, proc did not exit */
1176 	rup.ri.ri_proc_exit_abstime = 0;
1177 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1178 		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1179 	}
1180 
1181 	platform = proc_platform(current_proc());
1182 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1183 		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1184 	}
1185 
1186 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1187 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1188 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1189 		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1190 		zfree(ZV_NAMEI, buf);
1191 	}
1192 
1193 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1194 		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1195 	}
1196 
1197 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1198 		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1199 	}
1200 
1201 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1202 		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1203 		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1204 	}
1205 
1206 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1207 		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1208 		if (has_64bit_addr) {
1209 			cputype |= CPU_ARCH_ABI64;
1210 		} else if (has_64bit_data) {
1211 			cputype |= CPU_ARCH_ABI64_32;
1212 		}
1213 		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1214 	}
1215 
1216 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1217 		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1218 	}
1219 
1220 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1221 		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1222 	}
1223 
1224 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1225 		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1226 	}
1227 
1228 	assert(codeCnt <= EXCEPTION_CODE_MAX);
1229 
1230 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1231 	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1232 		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1233 	}
1234 
1235 	if (reason != OS_REASON_NULL) {
1236 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1237 			struct exit_reason_snapshot ers = {
1238 				.ers_namespace = reason->osr_namespace,
1239 				.ers_code = reason->osr_code,
1240 				.ers_flags = reason->osr_flags
1241 			};
1242 
1243 			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1244 		}
1245 
1246 		if (reason->osr_kcd_buf != 0) {
1247 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1248 			assert(reason_buf_size != 0);
1249 
1250 			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1251 				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1252 			}
1253 		}
1254 	}
1255 
1256 	threadname[0] = '\0';
1257 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1258 	    sizeof(threadname), &kaddr)) {
1259 		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1260 		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1261 	}
1262 
1263 	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1264 	if (kr == KERN_TERMINATED) {
1265 		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1266 	}
1267 
1268 
1269 	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1270 	    sizeof(uint64_t), &kaddr);
1271 
1272 	/*
1273 	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to the
1274 	 * exception handler (ReportCrash) that lightweight corpse collection ran
1275 	 * out of space and the result is incomplete.
1276 	 */
1277 	if (last_kr != KERN_SUCCESS) {
1278 		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1279 	}
1280 
1281 	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1282 		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1283 	}
1284 
1285 	/* Lastly, copy the flags to the address we reserved at the beginning. */
1286 	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1287 
1288 	*new_desc = kcdata;
1289 
1290 	return KERN_SUCCESS;
1291 }
1292 
1293 /*
1294  * We only parse exit reason kcdata blobs for critical processes before they
1295  * die when we're about to panic, or for opt-in, limited diagnostic tools.
1296  *
1297  * Meant to be called immediately before panicking, or in limited diagnostic
1298  * scenarios.
1299  */
1300 char *
1301 exit_reason_get_string_desc(os_reason_t exit_reason)
1302 {
1303 	kcdata_iter_t iter;
1304 
1305 	if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1306 	    exit_reason->osr_bufsize == 0) {
1307 		return NULL;
1308 	}
1309 
1310 	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1311 	if (!kcdata_iter_valid(iter)) {
1312 #if DEBUG || DEVELOPMENT
1313 		printf("exit reason has invalid exit reason buffer\n");
1314 #endif
1315 		return NULL;
1316 	}
1317 
1318 	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1319 #if DEBUG || DEVELOPMENT
1320 		printf("exit reason buffer type mismatch, expected %d got %d\n",
1321 		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1322 #endif
1323 		return NULL;
1324 	}
1325 
1326 	iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1327 	if (!kcdata_iter_valid(iter)) {
1328 		return NULL;
1329 	}
1330 
1331 	return (char *)kcdata_iter_payload(iter);
1332 }
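
/*
 * Illustrative usage sketch (hypothetical caller; it mirrors how
 * proc_handle_critical_exit() below consumes this helper):
 *
 *	char *desc = exit_reason_get_string_desc(p->p_exit_reason);
 *	printf("exit reason description: %s\n", desc ? desc : "none");
 *
 * The returned pointer aliases the kcdata payload inside the os_reason, so
 * it must not be used after the reason is freed.
 */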
1333 
1334 static int initproc_spawned = 0;
1335 
1336 static int
1337 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1338 {
1339 	if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1340 		// Can only ever be set by launchd, and only once at boot
1341 		return EPERM;
1342 	}
1343 	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1344 }
1345 
1346 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1347     CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1348     sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
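
/*
 * Illustrative userspace sketch (standard sysctlbyname(3) interface; the
 * exact launchd call site is an assumption): the flag can be written exactly
 * once, from pid 1, after which the handler above returns EPERM:
 *
 *	int one = 1;
 *	sysctlbyname("kern.initproc_spawned", NULL, NULL, &one, sizeof(one));
 */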
1349 
1350 #if DEVELOPMENT || DEBUG
1351 
1352 /* disable user faults */
1353 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1354 #endif /* DEVELOPMENT || DEBUG */
1355 
1356 #define OS_REASON_IFLAG_USER_FAULT 0x1
1357 
1358 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC  5
1359 
1360 static int
1361 abort_with_payload_internal(proc_t p,
1362     uint32_t reason_namespace, uint64_t reason_code,
1363     user_addr_t payload, uint32_t payload_size,
1364     user_addr_t reason_string, uint64_t reason_flags,
1365     uint32_t internal_flags)
1366 {
1367 	os_reason_t exit_reason = OS_REASON_NULL;
1368 	kern_return_t kr = KERN_SUCCESS;
1369 
1370 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1371 		uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1372 		    memory_order_relaxed);
1373 
1374 #if DEVELOPMENT || DEBUG
1375 		if (bootarg_disable_user_faults) {
1376 			return EQFULL;
1377 		}
1378 #endif /* DEVELOPMENT || DEBUG */
1379 
1380 		for (;;) {
1381 			if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1382 				return EQFULL;
1383 			}
1384 			// this reloads the value in old_value
1385 			if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1386 			    &old_value, old_value + 1, memory_order_relaxed,
1387 			    memory_order_relaxed)) {
1388 				break;
1389 			}
1390 		}
1391 	}
1392 
1393 	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1394 	    proc_getpid(p), reason_namespace,
1395 	    reason_code, 0, 0);
1396 
1397 	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1398 	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1399 
1400 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1401 		mach_exception_code_t code = 0;
1402 
1403 		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1404 		EXC_GUARD_ENCODE_FLAVOR(code, 0);
1405 		EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1406 
1407 		if (exit_reason == OS_REASON_NULL) {
1408 			kr = KERN_RESOURCE_SHORTAGE;
1409 		} else {
1410 			kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1411 		}
1412 		os_reason_free(exit_reason);
1413 	} else {
1414 		/*
1415 		 * We use SIGABRT (rather than calling exit directly from here) so that
1416 		 * the debugger can catch abort_with_{reason,payload} calls.
1417 		 */
1418 		psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1419 	}
1420 
1421 	switch (kr) {
1422 	case KERN_SUCCESS:
1423 		return 0;
1424 	case KERN_NOT_SUPPORTED:
1425 		return ENOTSUP;
1426 	case KERN_INVALID_ARGUMENT:
1427 		return EINVAL;
1428 	case KERN_RESOURCE_SHORTAGE:
1429 	default:
1430 		return EBUSY;
1431 	}
1432 }
1433 
1434 int
1435 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1436     __unused void *retval)
1437 {
1438 	abort_with_payload_internal(cur_proc, args->reason_namespace,
1439 	    args->reason_code, args->payload, args->payload_size,
1440 	    args->reason_string, args->reason_flags, 0);
1441 
1442 	return 0;
1443 }
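
/*
 * Illustrative sketch (assumes the private <sys/reason.h> userspace wrapper
 * of the same name): a process can abort itself with a custom reason, which
 * lands in abort_with_payload_internal() above and is delivered as SIGABRT
 * so a debugger can intercept it:
 *
 *	abort_with_payload(OS_REASON_TEST, 42, NULL, 0, "example failure", 0);
 */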
1444 
1445 int
1446 os_fault_with_payload(struct proc *cur_proc,
1447     struct os_fault_with_payload_args *args, __unused int *retval)
1448 {
1449 	return abort_with_payload_internal(cur_proc, args->reason_namespace,
1450 	           args->reason_code, args->payload, args->payload_size,
1451 	           args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1452 }
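
/*
 * Illustrative sketch (assumes the private os_fault_with_payload() wrapper;
 * fault_code is a placeholder): unlike abort_with_payload(), this path raises
 * a simulated, non-fatal EXC_GUARD and is rate-limited by
 * OS_REASON_TOTAL_USER_FAULTS_PER_PROC, so the caller keeps running:
 *
 *	os_fault_with_payload(OS_REASON_LIBSYSTEM, fault_code, NULL, 0,
 *	    "diagnostic fault", 0);
 */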
1453 
1454 
1455 /*
1456  * exit --
1457  *	Death of process.
1458  */
1459 __attribute__((noreturn))
1460 void
1461 exit(proc_t p, struct exit_args *uap, int *retval)
1462 {
1463 	p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1464 	exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1465 
1466 	thread_exception_return();
1467 	/* NOTREACHED */
1468 	while (TRUE) {
1469 		thread_block(THREAD_CONTINUE_NULL);
1470 	}
1471 	/* NOTREACHED */
1472 }
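
/*
 * Worked example of the encoding above (values illustrative): for
 * exit(0x12345678), p_xhighbits stashes bits 24-31 (0x12), while
 * W_EXITCODE() keeps only the low byte in the classic wait(2) status, so
 * WEXITSTATUS() later yields 0x78; waitid(2) can surface a wider status by
 * recombining si_status with the stashed high bits.
 */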
1473 
1474 /*
1475  * Exit: deallocate address space and other resources, change proc state
1476  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
1477  * status and rusage for wait().  Check for child processes and orphan them.
1478  */
1479 int
1480 exit1(proc_t p, int rv, int *retval)
1481 {
1482 	return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1483 }
1484 
1485 int
1486 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1487     int jetsam_flags)
1488 {
1489 	return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1490 }
1491 
1492 /*
1493  * NOTE: exit_with_reason drops a reference on the passed exit_reason
1494  */
1495 int
1496 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1497     int jetsam_flags, struct os_reason *exit_reason)
1498 {
1499 	thread_t self = current_thread();
1500 	struct task *task = proc_task(p);
1501 	struct uthread *ut;
1502 	int error = 0;
1503 	bool proc_exiting = false;
1504 
1505 #if DEVELOPMENT || DEBUG
1506 	/*
1507 	 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1508 	 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1509 	 */
1510 	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1511 		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1512 		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1513 	}
1514 #endif
1515 
1516 	/*
1517 	 * If a thread in this task has already
1518 	 * called exit(), then halt any others
1519 	 * right here.
1520 	 */
1521 
1522 	ut = get_bsdthread_info(self);
1523 	(void)retval;
1524 
1525 	/*
1526 	 * The parameter list of audit_syscall_exit() was augmented to
1527 	 * take the Darwin syscall number as the first parameter,
1528 	 * which is currently required by mac_audit_postselect().
1529 	 */
1530 
1531 	/*
1532 	 * The BSM token contains two components: an exit status as passed
1533 	 * to exit(), and a return value to indicate what sort of exit it
1534 	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
1535 	 * what the return value is.
1536 	 */
1537 	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1538 	/*
1539 	 * TODO: what to audit here when jetsam calls exit and the uthread
1540 	 * 'ut' does not belong to the proc 'p'.
1541 	 */
1542 	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1543 
1544 	DTRACE_PROC1(exit, int, CLD_EXITED);
1545 
1546 	/* mark process is going to exit and pull out of DBG/disk throttle */
1547 	/* TODO: This should be done after becoming exit thread */
1548 	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1549 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1550 
1551 	proc_lock(p);
1552 	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1553 	if (error == EDEADLK) {
1554 		/*
1555 		 * If proc_transstart() returns EDEADLK, then another thread
1556 		 * is either exec'ing or exiting. Return an error and allow
1557 		 * the other thread to continue.
1558 		 */
1559 		proc_unlock(p);
1560 		os_reason_free(exit_reason);
1561 		if (current_proc() == p) {
1562 			if (p->exit_thread == self) {
1563 				panic("exit_thread failed to exit");
1564 			}
1565 
1566 			if (thread_can_terminate) {
1567 				thread_exception_return();
1568 			}
1569 		}
1570 
1571 		return error;
1572 	}
1573 
1574 	proc_exiting = !!(p->p_lflag & P_LEXIT);
1575 
1576 	while (proc_exiting || p->exit_thread != self) {
1577 		if (proc_exiting || sig_try_locked(p) <= 0) {
1578 			proc_transend(p, 1);
1579 			os_reason_free(exit_reason);
1580 
1581 			if (get_threadtask(self) != task) {
1582 				proc_unlock(p);
1583 				return 0;
1584 			}
1585 			proc_unlock(p);
1586 
1587 			thread_terminate(self);
1588 			if (!thread_can_terminate) {
1589 				return 0;
1590 			}
1591 
1592 			thread_exception_return();
1593 			/* NOTREACHED */
1594 		}
1595 		sig_lock_to_exit(p);
1596 	}
1597 
1598 	if (exit_reason != OS_REASON_NULL) {
1599 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1600 		    proc_getpid(p), exit_reason->osr_namespace,
1601 		    exit_reason->osr_code, 0, 0);
1602 	}
1603 
1604 	assert(p->p_exit_reason == OS_REASON_NULL);
1605 	p->p_exit_reason = exit_reason;
1606 
1607 	p->p_lflag |= P_LEXIT;
1608 	p->p_xstat = rv;
1609 	p->p_lflag |= jetsam_flags;
1610 
1611 	proc_transend(p, 1);
1612 	proc_unlock(p);
1613 
1614 	proc_prepareexit(p, rv, perf_notify);
1615 
1616 	/* Last thread to terminate will call proc_exit() */
1617 	task_terminate_internal(task);
1618 
1619 	return 0;
1620 }
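
/*
 * Illustrative kernel-side sketch (modeled on the jetsam kill path in
 * kern_memorystatus; the exact call site is an assumption): a subsystem
 * forcing an exit builds a reason and hands off its reference here:
 *
 *	os_reason_t r = os_reason_create(OS_REASON_JETSAM, kill_cause);
 *	exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, FALSE,
 *	    FALSE, jetsam_flags, r);
 *
 * The reference on r is consumed by the callee, per the NOTE above.
 */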
1621 
1622 #if CONFIG_MEMORYSTATUS
1623 /*
1624  * Remove this process from jetsam bands for freezing or exiting. Note this will block if the process
1625  * is currently being frozen.
1626  * The proc_list_lock is held by the caller.
1627  * NB: If the process should be ineligible for future freezing or jetsaming, the caller should first set
1628  * the P_REF_DEAD bit in p_refcount.
1629  */
1630 static void
1631 proc_memorystatus_remove(proc_t p)
1632 {
1633 	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1634 	while (memorystatus_remove(p) == EAGAIN) {
1635 		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1636 		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1637 	}
1638 }
1639 #endif
1640 
1641 #if DEVELOPMENT
1642 boolean_t crash_behavior_test_mode = FALSE;
1643 boolean_t crash_behavior_test_would_panic = FALSE;
1644 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1645 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1646 #endif /* DEVELOPMENT */
1647 
1648 static bool
1649 _proc_is_crashing_signal(int sig)
1650 {
1651 	bool result = false;
1652 	switch (sig) {
1653 	case SIGILL:
1654 	case SIGABRT:
1655 	case SIGFPE:
1656 	case SIGBUS:
1657 	case SIGSEGV:
1658 	case SIGSYS:
1659 	/*
1660 	 * If SIGTRAP is the terminating signal, then we can safely assume the
1661 	 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1662 	 * a process calls __builtin_trap(), which will abort.)
1663 	 */
1664 	case SIGTRAP:
1665 		result = true;
1666 	}
1667 
1668 	return result;
1669 }
1670 
1671 static bool
1672 _proc_is_fatal_reason(os_reason_t reason)
1673 {
1674 	if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1675 		/* Abort is always fatal even if there is no crash report generated */
1676 		return true;
1677 	}
1678 	if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1679 		/*
1680 		 * No crash report means this reason shouldn't be considered fatal
1681 		 * unless we are in test mode
1682 		 */
1683 #if DEVELOPMENT
1684 		if (crash_behavior_test_mode) {
1685 			return true;
1686 		}
1687 #endif /* DEVELOPMENT */
1688 		return false;
1689 	}
1690 	// By default all OS_REASON are fatal
1691 	return true;
1692 }
1693 
1694 static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1695 
1696 static bool
1697 proc_should_trigger_panic(proc_t p, int rv)
1698 {
1699 	if (p == initproc) {
1700 		/* Always panic for launchd */
1701 		return true;
1702 	}
1703 
1704 	if (panic_on_crash_disabled) {
1705 		printf("panic-on-crash disabled via boot-arg\n");
1706 		return false;
1707 	}
1708 
1709 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1710 		return true;
1711 	}
1712 
1713 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1714 		return true;
1715 	}
1716 
1717 	if (p->p_posix_spawn_failed) {
1718 		/* posix_spawn failures normally don't qualify for panics */
1719 		return false;
1720 	}
1721 
1722 	bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1723 	if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1724 		return false;
1725 	}
1726 
1727 	if (WIFEXITED(rv)) {
1728 		int code = WEXITSTATUS(rv);
1729 
1730 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1731 			if (code == 0) {
1732 				/* No panic if we exit 0 */
1733 				return false;
1734 			} else {
1735 				/* Panic on non-zero exit */
1736 				return true;
1737 			}
1738 		} else {
1739 			/* No panic on normal exit if the process doesn't have the non-zero flag set */
1740 			return false;
1741 		}
1742 	} else if (WIFSIGNALED(rv)) {
1743 		int signal = WTERMSIG(rv);
1744 		/* This is a crash (non-normal exit) */
1745 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1746 			os_reason_t reason = p->p_exit_reason;
1747 			if (reason != OS_REASON_NULL) {
1748 				if (!_proc_is_fatal_reason(reason)) {
1749 					// Skip non-fatal terminate_with_reason
1750 					return false;
1751 				}
1752 				if (reason->osr_namespace == OS_REASON_SIGNAL) {
1753 					/*
1754 					 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
1755 					 * in osr_code, so we should check that signal here
1756 					 */
1757 					return _proc_is_crashing_signal((int)reason->osr_code);
1758 				} else {
1759 					/*
1760 					 * This branch covers the case of terminate_with_reason(), which
1761 					 * delivers a SIGTERM that is still considered a crash even
1762 					 * though the signal itself is not a crashing signal.
1763 					 */
1764 					return true;
1765 				}
1766 			}
1767 			return _proc_is_crashing_signal(signal);
1768 		} else {
1769 			return false;
1770 		}
1771 	} else {
1772 		/*
1773 		 * This branch implies that we didn't exit normally nor did we receive
1774 		 * a signal. This should be unreachable.
1775 		 */
1776 		return true;
1777 	}
1778 }
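
/*
 * Illustrative spawn-side sketch (the crash-behavior attribute setter is a
 * private SPI and its exact name here is an assumption): a critical daemon
 * can be spawned so that the checks above turn its crash into a panic:
 *
 *	posix_spawnattr_t attr;
 *	posix_spawnattr_init(&attr);
 *	posix_spawnattr_set_crash_behavior_np(&attr, POSIX_SPAWN_PANIC_ON_CRASH);
 */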
1779 
1780 static void
1781 proc_crash_coredump(proc_t p)
1782 {
1783 	(void)p;
1784 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1785 	/*
1786 	 * For debugging purposes, generate a core file of initproc before
1787 	 * panicking. Leave at least 300 MB free on the root volume, and ignore
1788 	 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1789 	 * before the panic hits.
1790 	 */
1791 
1792 	int             err;
1793 	uint64_t        coredump_start = mach_absolute_time();
1794 	uint64_t        coredump_end;
1795 	clock_sec_t     tv_sec;
1796 	clock_usec_t    tv_usec;
1797 	uint32_t        tv_msec;
1798 
1799 
1800 	err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1801 
1802 	coredump_end = mach_absolute_time();
1803 
1804 	absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1805 
1806 	tv_msec = tv_usec / 1000;
1807 
1808 	if (err != 0) {
1809 		printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1810 		    proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1811 	} else {
1812 		printf("Generated core file for pid: %d in %d.%03d seconds\n",
1813 		    proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1814 	}
1815 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1816 }
1817 
1818 static void
1819 proc_handle_critical_exit(proc_t p, int rv)
1820 {
1821 	if (!proc_should_trigger_panic(p, rv)) {
1822 		// No panic, bail out
1823 		return;
1824 	}
1825 
1826 #if DEVELOPMENT
1827 	if (crash_behavior_test_mode) {
1828 		crash_behavior_test_would_panic = TRUE;
1829 		// Force test mode off after hitting a panic
1830 		crash_behavior_test_mode = FALSE;
1831 		return;
1832 	}
1833 #endif /* DEVELOPMENT */
1834 
1835 	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1836 
1837 	if (p->p_exit_reason == OS_REASON_NULL) {
1838 		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1839 		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1840 	} else {
1841 		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1842 		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1843 		    exit_reason_desc : "none");
1844 	}
1845 
1846 	const char *prefix_str;
1847 	char prefix_str_buf[128];
1848 
1849 	if (p == initproc) {
1850 		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1851 			prefix_str = "LTE preinit process exited";
1852 		} else if (initproc_spawned) {
1853 			prefix_str = "initproc exited";
1854 		} else {
1855 			prefix_str = "initproc failed to start";
1856 		}
1857 	} else {
1858 		/* For processes that aren't launchd, just use the process name and pid */
1859 		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1860 		prefix_str = prefix_str_buf;
1861 	}
1862 
1863 	proc_crash_coredump(p);
1864 
1865 	sync(p, (void *)NULL, (int *)NULL);
1866 	const uint64_t panic_options_mask = DEBUGGER_OPTION_INITPROC_PANIC | DEBUGGER_OPTION_USERSPACE_INITIATED_PANIC;
1867 
1868 	if (p->p_exit_reason == OS_REASON_NULL) {
1869 		panic_with_options(0, NULL, panic_options_mask, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1870 		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1871 	} else {
1872 		panic_with_options(0, NULL, panic_options_mask, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1873 		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1874 		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1875 		    exit_reason_desc ? exit_reason_desc : "none");
1876 	}
1877 }
1878 
1879 void
1880 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1881 {
1882 	mach_exception_data_type_t code = 0, subcode = 0;
1883 	exception_type_t etype;
1884 
1885 	struct uthread *ut;
1886 	thread_t self = current_thread();
1887 	ut = get_bsdthread_info(self);
1888 	struct rusage_superset *rup;
1889 	int kr = 0;
1890 	int create_corpse = FALSE;
1891 	bool corpse_source = false;
1892 	task_t task = proc_task(p);
1893 
1894 
1895 	if (p->p_crash_behavior != 0 || p == initproc) {
1896 		proc_handle_critical_exit(p, rv);
1897 	}
1898 
1899 	if (task) {
1900 		corpse_source = vm_map_is_corpse_source(get_task_map(task));
1901 	}
1902 
1903 	/*
1904 	 * Generate a corefile/crashlog if:
1905 	 *      The process doesn't have an exit reason that indicates no crash report should be created
1906 	 *      AND any of the following are true:
1907 	 *	- The process was terminated due to a fatal signal that generates a core
1908 	 *	- The process was killed due to a code signing violation
1909 	 *	- The process has an exit reason that indicates we should generate a crash report
1910 	 *
1911 	 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1912 	 * (which normally triggers a core) but may indicate that no crash report should be created.
1913 	 */
1914 	if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1915 	    (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1916 	    (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1917 	    OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1918 		/*
1919 		 * Workaround for processes checking up on PT_DENY_ATTACH:
1920 		 * should be backed out post-Leopard (details in 5431025).
1921 		 */
1922 		if ((SIGSEGV == WTERMSIG(rv)) &&
1923 		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
1924 			goto skipcheck;
1925 		}
1926 
1927 		/*
1928 		 * Crash Reporter looks for the signal value, original exception
1929 		 * type, and low 20 bits of the original code in code[0]
1930 		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1931 		 */
1932 		code = ((WTERMSIG(rv) & 0xff) << 24) |
1933 		    ((ut->uu_exception & 0x0f) << 20) |
1934 		    ((int)ut->uu_code & 0xfffff);
1935 		subcode = ut->uu_subcode;
1936 		etype = ut->uu_exception;
1937 
1938 		/* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1939 		if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1940 			etype = EXC_CRASH;
1941 		}
1942 
1943 #if (DEVELOPMENT || DEBUG)
1944 		if (p->p_pid <= exception_log_max_pid) {
1945 			const char *proc_name = proc_best_name(p);
1946 			if (PROC_HAS_EXITREASON(p)) {
1947 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1948 				    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1949 				    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1950 				    exit_reason_get_string_desc(p->p_exit_reason));
1951 			} else {
1952 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1953 				    "pid: %d -- process name: %s -- exit status %d",
1954 				    proc_getpid(p), proc_name, WEXITSTATUS(rv));
1955 			}
1956 		}
1957 #endif
1958 		const bool fatal = false;
1959 		kr = task_exception_notify(EXC_CRASH, code, subcode, fatal);
1960 		/* Nobody handled EXC_CRASH?? remember to make corpse */
1961 		if ((kr != 0 || corpse_source) && p == current_proc()) {
1962 			/*
1963 			 * Do not create corpse when exit is called from jetsam thread.
1964 			 * Corpse creation code requires that proc_prepareexit is
1965 			 * called by the exiting proc and not the kernel_proc.
1966 			 */
1967 			create_corpse = TRUE;
1968 		}
1969 
1970 		/*
1971 		 * Revalidate the code signing of the text pages around current PC.
1972 		 * This is an attempt to detect and repair faults due to memory
1973 		 * corruption of text pages.
1974 		 *
1975 		 * The goal here is to fixup infrequent memory corruptions due to
1976 		 * things like aging RAM bit flips. So the approach is to only expect
1977 		 * to have to fixup one thing per crash. This also limits the amount
1978 		 * of extra work we cause in case this is a development kernel with an
1979 		 * active memory stomp happening.
1980 		 */
1981 		uintptr_t bt[2];
1982 		struct backtrace_user_info btinfo = BTUINFO_INIT;
1983 		unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
1984 		int bt_err = btinfo.btui_error;
1985 		if (bt_err == 0 && frame_count >= 1) {
1986 			/*
1987 			 * First check at the page containing the current PC.
1988 			 * This passes if the page code signs -or- if we can't figure out
1989 			 * what is at that address. The latter action is so we continue checking
1990 			 * previous pages which may be corrupt and caused a wild branch.
1991 			 */
1992 			kr = revalidate_text_page(task, bt[0]);
1993 
1994 			/* No corruption found, check the previous sequential page */
1995 			if (kr == KERN_SUCCESS) {
1996 				kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
1997 			}
1998 
1999 			/* Still no corruption found, check the current function's caller */
2000 			if (kr == KERN_SUCCESS) {
2001 				if (frame_count > 1 &&
2002 				    atop(bt[0]) != atop(bt[1]) &&           /* don't recheck PC page */
2003 				    atop(bt[0]) - 1 != atop(bt[1])) {       /* don't recheck page before */
2004 					kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
2005 				}
2006 			}
2007 
2008 			/*
2009 			 * Log that we found a corruption.
2010 			 */
2011 			if (kr != KERN_SUCCESS) {
2012 				os_log(OS_LOG_DEFAULT,
2013 				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
2014 			}
2015 		}
2016 	}
2017 
2018 skipcheck:
2019 	if (task_is_driver(task) && PROC_HAS_EXITREASON(p)) {
2020 		IOUserServerRecordExitReason(task, p->p_exit_reason);
2021 	}
2022 
2023 	/* Notify the perf server? */
2024 	if (perf_notify) {
2025 		(void)sys_perf_notify(self, proc_getpid(p));
2026 	}
2027 
2028 
2029 	/* stash the usage into corpse data if create_corpse == TRUE */
2030 	if (create_corpse == TRUE) {
2031 		kr = task_mark_corpse(task);
2032 		if (kr != KERN_SUCCESS) {
2033 			if (kr == KERN_NO_SPACE) {
2034 				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
2035 			} else if (kr == KERN_NOT_SUPPORTED) {
2036 				printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n", proc_getpid(p));
2037 			} else if (kr == KERN_TERMINATED) {
2038 				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
2039 			} else {
2040 				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
2041 			}
2042 			create_corpse = FALSE;
2043 		}
2044 	}
2045 
2046 	if (corpse_source && !create_corpse) {
2047 		/* vm_map was marked for corpse, but we decided to not create one, unmark the vmmap */
2048 		vm_map_unset_corpse_source(get_task_map(task));
2049 	}
2050 
2051 	if (!proc_is_shadow(p)) {
2052 		/*
2053 		 * Before this process becomes a zombie, stash resource usage
2054 		 * stats in the proc for external observers to query
2055 		 * via proc_pid_rusage().
2056 		 *
2057 		 * If the zombie allocation fails, just punt the stats.
2058 		 */
2059 		rup = zalloc(zombie_zone);
2060 		gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2061 		rup->ri.ri_phys_footprint = 0;
2062 		rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2063 		/*
2064 		 * Make the rusage_info visible to external observers
2065 		 * only after it has been completely filled in.
2066 		 */
2067 		p->p_ru = rup;
2068 	}
2069 
2070 	if (create_corpse) {
2071 		int est_knotes = 0, num_knotes = 0;
2072 		uint64_t *buffer = NULL;
2073 		uint32_t buf_size = 0;
2074 
2075 		/* Get all the udata pointers from kqueue */
2076 		est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2077 		if (est_knotes > 0) {
2078 			buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2079 			buffer = kalloc_data(buf_size, Z_WAITOK);
2080 			if (buffer) {
2081 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2082 				if (num_knotes > est_knotes + 32) {
2083 					num_knotes = est_knotes + 32;
2084 				}
2085 			}
2086 		}
2087 
2088 		/* Update the code, subcode based on exit reason */
2089 		proc_update_corpse_exception_codes(p, &code, &subcode);
2090 		populate_corpse_crashinfo(p, task, rup,
2091 		    code, subcode, buffer, num_knotes, NULL, etype);
2092 		kfree_data(buffer, buf_size);
2093 	}
2094 	/*
2095 	 * Remove proc from allproc queue and from pidhash chain.
2096 	 * Need to do this before we do anything that can block.
2097 	 * Not doing so causes things like mount() to find this proc on allproc
2098 	 * in a partially cleaned state.
2099 	 */
2100 
2101 	proc_list_lock();
2102 
2103 #if CONFIG_MEMORYSTATUS
2104 	proc_memorystatus_remove(p);
2105 #endif
2106 
2107 	LIST_REMOVE(p, p_list);
2108 	LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2109 	/* will not be visible via proc_find */
2110 	os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2111 
2112 	proc_list_unlock();
2113 
2114 	/*
2115 	 * If parent is waiting for us to exit or exec,
2116 	 * P_LPPWAIT is set; we will wakeup the parent below.
2117 	 */
2118 	proc_lock(p);
2119 	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2120 	p->p_sigignore = ~(sigcantmask);
2121 
2122 	/*
2123 	 * If a thread is already waiting for us in proc_exit,
2124 	 * P_LTERM is set; wake up that thread.
2125 	 */
2126 	if (p->p_lflag & P_LTERM) {
2127 		wakeup(&p->exit_thread);
2128 	} else {
2129 		p->p_lflag |= P_LTERM;
2130 	}
2131 
2132 	/* If current proc is exiting, ignore signals on the exit thread */
2133 	if (p == current_proc()) {
2134 		ut->uu_siglist = 0;
2135 	}
2136 	proc_unlock(p);
2137 }
2138 
2139 void
2140 proc_exit(proc_t p)
2141 {
2142 	proc_t q;
2143 	proc_t pp;
2144 	struct task *task = proc_task(p);
2145 	vnode_t tvp = NULLVP;
2146 	struct pgrp * pg;
2147 	struct session *sessp;
2148 	struct uthread * uth;
2149 	pid_t pid;
2150 	int exitval;
2151 	int knote_hint;
2152 
2153 	uth = current_uthread();
2154 
2155 	proc_lock(p);
2156 	proc_transstart(p, 1, 0);
2157 	if (!(p->p_lflag & P_LEXIT)) {
2158 		/*
2159 		 * This can happen if a thread_terminate() occurs
2160 		 * in a single-threaded process.
2161 		 */
2162 		p->p_lflag |= P_LEXIT;
2163 		proc_transend(p, 1);
2164 		proc_unlock(p);
2165 		proc_prepareexit(p, 0, TRUE);
2166 		(void) task_terminate_internal(task);
2167 		proc_lock(p);
2168 	} else if (!(p->p_lflag & P_LTERM)) {
2169 		proc_transend(p, 1);
2170 		/* Jetsam is in the middle of calling proc_prepareexit, wait for it */
2171 		p->p_lflag |= P_LTERM;
2172 		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2173 	} else {
2174 		proc_transend(p, 1);
2175 	}
2176 
2177 	p->p_lflag |= P_LPEXIT;
2178 
2179 	/*
2180 	 * Other kernel threads may be in the middle of signalling this process.
2181 	 * Wait for those threads to wrap it up before making the process
2182 	 * disappear on them.
2183 	 */
2184 	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2185 		p->p_sigwaitcnt++;
2186 		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2187 			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2188 		}
2189 		p->p_sigwaitcnt--;
2190 	}
2191 
2192 	proc_unlock(p);
2193 	pid = proc_getpid(p);
2194 	exitval = p->p_xstat;
2195 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2196 	    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2197 	    pid, exitval, 0, 0, 0);
2198 
2199 #if DEVELOPMENT || DEBUG
2200 	proc_exit_lpexit_check(pid, PELS_POS_START);
2201 #endif
2202 
2203 #if CONFIG_DTRACE
2204 	dtrace_proc_exit(p);
2205 #endif
2206 
2207 	proc_refdrain(p);
2208 	/* We now have unique ref to the proc */
2209 
2210 	/* if any pending cpu limits action, clear it */
2211 	task_clear_cpuusage(proc_task(p), TRUE);
2212 
2213 	workq_mark_exiting(p);
2214 
2215 	/*
2216 	 * need to cancel async IO requests that can be cancelled and wait for those
2217 	 * already active.  MAY BLOCK!
2218 	 */
2219 	_aio_exit( p );
2220 
2221 	/*
2222 	 * Close open files and release open-file table.
2223 	 * This may block!
2224 	 */
2225 	fdt_invalidate(p);
2226 
2227 	/*
2228 	 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2229 	 * workqueue.
2230 	 */
2231 	workq_exit(p);
2232 
2233 	if (uth->uu_lowpri_window) {
2234 		/*
2235 		 * task is marked as a low priority I/O type
2236 		 * and the I/O we issued while flushing files on close
2237 		 * collided with normal I/O operations...
2238 		 * no need to throttle this thread since it's going away,
2239 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2240 		 */
2241 		throttle_lowpri_io(0);
2242 	}
2243 
2244 	if (p->p_lflag & P_LNSPACE_RESOLVER) {
2245 		/*
2246 		 * The namespace resolver is exiting; there may be
2247 		 * outstanding materialization requests to clean up.
2248 		 */
2249 		nspace_resolver_exited(p);
2250 	}
2251 
2252 #if SYSV_SHM
2253 	/* Close ref SYSV Shared memory */
2254 	if (p->vm_shm) {
2255 		shmexit(p);
2256 	}
2257 #endif
2258 #if SYSV_SEM
2259 	/* Release SYSV semaphores */
2260 	semexit(p);
2261 #endif
2262 
2263 #if PSYNCH
2264 	pth_proc_hashdelete(p);
2265 #endif /* PSYNCH */
2266 
2267 	pg = proc_pgrp(p, &sessp);
2268 	if (SESS_LEADER(p, sessp)) {
2269 		if (sessp->s_ttyvp != NULLVP) {
2270 			struct vnode *ttyvp;
2271 			int ttyvid;
2272 			int cttyflag = 0;
2273 			struct vfs_context context;
2274 			struct tty *tp;
2275 			struct pgrp *tpgrp = PGRP_NULL;
2276 
2277 			/*
2278 			 * Controlling process.
2279 			 * Signal foreground pgrp,
2280 			 * drain controlling terminal
2281 			 * and revoke access to controlling terminal.
2282 			 */
2283 
2284 			proc_list_lock(); /* prevent any t_pgrp from changing */
2285 			session_lock(sessp);
2286 			if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2287 				tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2288 			}
2289 			proc_list_unlock();
2290 
2291 			if (tpgrp != PGRP_NULL) {
2292 				session_unlock(sessp);
2293 				pgsignal(tpgrp, SIGHUP, 1);
2294 				pgrp_rele(tpgrp);
2295 				session_lock(sessp);
2296 			}
2297 
2298 			cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2299 			    S_CTTYREF, relaxed) & S_CTTYREF);
2300 			ttyvp = sessp->s_ttyvp;
2301 			ttyvid = sessp->s_ttyvid;
2302 			tp = session_clear_tty_locked(sessp);
2303 			if (ttyvp) {
2304 				vnode_hold(ttyvp);
2305 			}
2306 			session_unlock(sessp);
2307 
2308 			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2309 				if (tp != TTY_NULL) {
2310 					tty_lock(tp);
2311 					(void) ttywait(tp);
2312 					tty_unlock(tp);
2313 				}
2314 
2315 				context.vc_thread = NULL;
2316 				context.vc_ucred = kauth_cred_proc_ref(p);
2317 				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2318 				if (cttyflag) {
2319 					/*
2320 					 * Release the extra usecount taken in cttyopen.
2321 					 * usecount should be released after VNOP_REVOKE is called.
2322 					 * This usecount was taken to ensure that
2323 					 * the VNOP_REVOKE results in a close to
2324 					 * the tty since cttyclose is a no-op.
2325 					 */
2326 					vnode_rele(ttyvp);
2327 				}
2328 				vnode_put(ttyvp);
2329 				kauth_cred_unref(&context.vc_ucred);
2330 				vnode_drop(ttyvp);
2331 				ttyvp = NULLVP;
2332 			}
2333 			if (ttyvp) {
2334 				vnode_drop(ttyvp);
2335 			}
2336 			if (tp) {
2337 				ttyfree(tp);
2338 			}
2339 		}
2340 		session_lock(sessp);
2341 		sessp->s_leader = NULL;
2342 		session_unlock(sessp);
2343 	}
2344 
2345 	if (!proc_is_shadow(p)) {
2346 		fixjobc(p, pg, 0);
2347 	}
2348 	pgrp_rele(pg);
2349 
2350 	/*
2351 	 * Change RLIMIT_FSIZE for accounting/debugging.
2352 	 */
2353 	proc_limitsetcur_fsize(p, RLIM_INFINITY);
2354 
2355 	(void)acct_process(p);
2356 
2357 	proc_list_lock();
2358 
2359 	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2360 		p->p_listflag &= ~P_LIST_EXITCOUNT;
2361 		proc_shutdown_exitcount--;
2362 		if (proc_shutdown_exitcount == 0) {
2363 			wakeup(&proc_shutdown_exitcount);
2364 		}
2365 	}
2366 
2367 	/* wait till parentrefs are dropped and grant no more */
2368 	proc_childdrainstart(p);
2369 	while ((q = p->p_children.lh_first) != NULL) {
2370 		if (q->p_stat == SZOMB) {
2371 			if (p != q->p_pptr) {
2372 				panic("parent child linkage broken");
2373 			}
2374 			/* check for sysctl zomb lookup */
2375 			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2376 				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2377 			}
2378 			q->p_listflag |= P_LIST_WAITING;
2379 			/*
2380 			 * This is a named reference and it is not granted
2381 			 * if the reap is already in progress. So we get
2382 			 * the reference here exclusively and there can be
2383 			 * no waiters, so there is no need for a wakeup
2384 			 * after we are done. Also, the reap frees the structure,
2385 			 * so the proc struct cannot be used for wakeups either.
2386 			 * It is safe to use q here as this is a system reap.
2387 			 */
2388 			reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2389 			    REAP_REPARENTED_TO_INIT : 0;
2390 			reap_child_locked(p, q,
2391 			    REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2392 		} else {
2393 			/*
2394 			 * Traced processes are killed
2395 			 * since their existence means someone is messing up.
2396 			 */
2397 			if (q->p_lflag & P_LTRACED) {
2398 				struct proc *opp;
2399 
2400 				/*
2401 				 * Take a reference on the child process to
2402 				 * ensure it doesn't exit and disappear between
2403 				 * the time we drop the list_lock and attempt
2404 				 * to acquire its proc_lock.
2405 				 */
2406 				if (proc_ref(q, true) != q) {
2407 					continue;
2408 				}
2409 
2410 				proc_list_unlock();
2411 
2412 				opp = proc_find(q->p_oppid);
2413 				if (opp != PROC_NULL) {
2414 					proc_list_lock();
2415 					q->p_oppid = 0;
2416 					proc_list_unlock();
2417 					proc_reparentlocked(q, opp, 0, 0);
2418 					proc_rele(opp);
2419 				} else {
2420 					/* original parent exited while traced */
2421 					proc_list_lock();
2422 					q->p_listflag |= P_LIST_DEADPARENT;
2423 					q->p_oppid = 0;
2424 					proc_list_unlock();
2425 					proc_reparentlocked(q, initproc, 0, 0);
2426 				}
2427 
2428 				proc_lock(q);
2429 				q->p_lflag &= ~P_LTRACED;
2430 
2431 				if (q->sigwait_thread) {
2432 					thread_t thread = q->sigwait_thread;
2433 
2434 					proc_unlock(q);
2435 					/*
2436 					 * The sigwait_thread could be stopped at a
2437 					 * breakpoint. Wake it up to kill.
2438 					 * Need to do this as it could be a thread which is not
2439 					 * the first thread in the task. So any attempts to kill
2440 					 * the process would result into a deadlock on q->sigwait.
2441 					 */
2442 					thread_resume(thread);
2443 					clear_wait(thread, THREAD_INTERRUPTED);
2444 					threadsignal(thread, SIGKILL, 0, TRUE);
2445 				} else {
2446 					proc_unlock(q);
2447 				}
2448 
2449 				psignal(q, SIGKILL);
2450 				proc_list_lock();
2451 				proc_rele(q);
2452 			} else {
2453 				q->p_listflag |= P_LIST_DEADPARENT;
2454 				proc_reparentlocked(q, initproc, 0, 1);
2455 			}
2456 		}
2457 	}
2458 
2459 	proc_childdrainend(p);
2460 	proc_list_unlock();
2461 
2462 #if CONFIG_MACF
2463 	if (!proc_is_shadow(p)) {
2464 		/*
2465 		 * Notify MAC policies that proc is dead.
2466 		 * This should be replaced with proper label management
2467 		 * (rdar://problem/32126399).
2468 		 */
2469 		mac_proc_notify_exit(p);
2470 	}
2471 #endif
2472 
2473 	/*
2474 	 * Release reference to text vnode
2475 	 */
2476 	tvp = p->p_textvp;
2477 	p->p_textvp = NULL;
2478 	if (tvp != NULLVP) {
2479 		vnode_rele(tvp);
2480 	}
2481 
2482 	/*
2483 	 * Save exit status and final rusage info, adding in child rusage
2484 	 * info and self times.  If we were unable to allocate a zombie
2485 	 * structure, this information is lost.
2486 	 */
2487 	if (p->p_ru != NULL) {
2488 		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2489 		p->p_ru->ru = p->p_stats->p_ru;
2490 
2491 		ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2492 	}
2493 
2494 	/*
2495 	 * Free up profiling buffers.
2496 	 */
2497 	{
2498 		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2499 
2500 		p1 = p0->pr_next;
2501 		p0->pr_next = NULL;
2502 		p0->pr_scale = 0;
2503 
2504 		for (; p1 != NULL; p1 = pn) {
2505 			pn = p1->pr_next;
2506 			kfree_type(struct uprof, p1);
2507 		}
2508 	}
2509 
2510 	proc_free_realitimer(p);
2511 
2512 	/*
2513 	 * Other substructures are freed from wait().
2514 	 */
2515 	zfree(proc_stats_zone, p->p_stats);
2516 	p->p_stats = NULL;
2517 
2518 	if (p->p_subsystem_root_path) {
2519 		zfree(ZV_NAMEI, p->p_subsystem_root_path);
2520 		p->p_subsystem_root_path = NULL;
2521 	}
2522 
2523 	proc_limitdrop(p);
2524 
2525 #if DEVELOPMENT || DEBUG
2526 	proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2527 #endif
2528 
2529 	/*
2530 	 * Finish up by terminating the task
2531 	 * and halt this thread (only if a
2532 	 * member of the task exiting).
2533 	 */
2534 	proc_set_task(p, TASK_NULL);
2535 	set_bsdtask_info(task, NULL);
2536 	clear_thread_ro_proc(get_machthread(uth));
2537 
2538 #if DEVELOPMENT || DEBUG
2539 	proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2540 #endif
2541 
2542 	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2543 	proc_knote(p, knote_hint);
2544 
2545 	/* Mark the thread as the one that is doing proc_exit;
2546 	 * no need to hold the proc lock in uthread_free.
2547 	 */
2548 	uth->uu_flag |= UT_PROCEXIT;
2549 	/*
2550 	 * Notify parent that we're gone.
2551 	 */
2552 	pp = proc_parent(p);
2553 	if (proc_is_shadow(p)) {
2554 		/* kernel can reap this one, no need to move it to launchd */
2555 		proc_list_lock();
2556 		p->p_listflag |= P_LIST_DEADPARENT;
2557 		proc_list_unlock();
2558 	} else if (pp->p_flag & P_NOCLDWAIT) {
2559 		if (p->p_ru != NULL) {
2560 			proc_lock(pp);
2561 #if 3839178
2562 			/*
2563 			 * If the parent is ignoring SIGCHLD, then POSIX requires
2564 			 * us to not add the resource usage to the parent process -
2565 			 * we are only going to hand it off to init to get reaped.
2566 			 * We should contest the standard in this case on the basis
2567 			 * of RLIMIT_CPU.
2568 			 */
2569 #else   /* !3839178 */
2570 			/*
2571 			 * Add child resource usage to parent before giving
2572 			 * zombie to init.  If we were unable to allocate a
2573 			 * zombie structure, this information is lost.
2574 			 */
2575 			ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2576 #endif  /* !3839178 */
2577 			update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2578 			proc_unlock(pp);
2579 		}
2580 
2581 		/* kernel can reap this one, no need to move it to launchd */
2582 		proc_list_lock();
2583 		p->p_listflag |= P_LIST_DEADPARENT;
2584 		proc_list_unlock();
2585 	}
2586 	if (!proc_is_shadow(p) &&
2587 	    ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2588 		if (pp != initproc) {
2589 			proc_lock(pp);
2590 			pp->si_pid = proc_getpid(p);
2591 			pp->p_xhighbits = p->p_xhighbits;
2592 			p->p_xhighbits = 0;
2593 			pp->si_status = p->p_xstat;
2594 			pp->si_code = CLD_EXITED;
2595 			/*
2596 			 * p_ucred usage is safe as it is an exiting process
2597 			 * and reference is dropped in reap
2598 			 */
2599 			pp->si_uid = kauth_cred_getruid(proc_ucred_unsafe(p));
2600 			proc_unlock(pp);
2601 		}
2602 		/* mark as a zombie */
2603 		/* No need to take the proc lock as all refs are drained and
2604 		 * no one except the parent (reaping) can look at this.
2605 		 * The write is to an int and is coherent. Also, the parent is
2606 		 * keyed off of the list lock for reaping.
2607 		 */
2608 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2609 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2610 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2611 		    pid, exitval, 0, 0, 0);
2612 		p->p_stat = SZOMB;
2613 		/*
2614 		 * The current process can be reaped, so no one
2615 		 * can depend on this.
2616 		 */
2617 
2618 		psignal(pp, SIGCHLD);
2619 
2620 		/* and now wakeup the parent */
2621 		proc_list_lock();
2622 		wakeup((caddr_t)pp);
2623 		proc_list_unlock();
2624 	} else {
2625 		/* should be fine as parent proc would be initproc */
2626 		/* mark as a zombie */
2627 		/* No need to take the proc lock as all refs are drained and
2628 		 * no one except the parent (reaping) can look at this.
2629 		 * The write is to an int and is coherent. Also, the parent is
2630 		 * keyed off of the list lock for reaping.
2631 		 */
2632 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2633 		proc_list_lock();
2634 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2635 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2636 		    pid, exitval, 0, 0, 0);
2637 		/* check for sysctl zomb lookup */
2638 		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2639 			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2640 		}
2641 		/* safe to use p as this is a system reap */
2642 		p->p_stat = SZOMB;
2643 		p->p_listflag |= P_LIST_WAITING;
2644 
2645 		/*
2646 		 * This is a named reference and it is not granted
2647 		 * if the reap is already in progress. So we get
2648 		 * the reference here exclusively and there can be
2649 		 * no waiters, so there is no need for a wakeup
2650 		 * after we are done. Also, the reap frees the structure,
2651 		 * so the proc struct cannot be used for wakeups either.
2652 		 * It is safe to use p here as this is a system reap.
2653 		 */
2654 		reap_child_locked(pp, p,
2655 		    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2656 	}
2657 	if (uth->uu_lowpri_window) {
2658 		/*
2659 		 * task is marked as a low priority I/O type and we've
2660 		 * somehow picked up another throttle during exit processing...
2661 		 * no need to throttle this thread since it's going away,
2662 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2663 		 */
2664 		throttle_lowpri_io(0);
2665 	}
2666 
2667 	proc_rele(pp);
2668 #if DEVELOPMENT || DEBUG
2669 	proc_exit_lpexit_check(pid, PELS_POS_END);
2670 #endif
2671 }
2672 
2673 
2674 /*
2675  * reap_child_locked
2676  *
2677  * Finalize a child exit once its status has been saved.
2678  *
2679  * If ptrace has attached, detach it and return it to its real parent.  Free any
2680  * remaining resources.
2681  *
2682  * Parameters:
2683  * - proc_t parent      Parent of process being reaped
2684  * - proc_t child       Process to reap
2685  * - reap_flags_t flags Control locking and re-parenting behavior
2686  */
2687 static void
2688 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2689 {
2690 	struct pgrp *pg;
2691 	boolean_t shadow_proc = proc_is_shadow(child);
2692 
2693 	if (flags & REAP_LOCKED) {
2694 		proc_list_unlock();
2695 	}
2696 
2697 	/*
2698 	 * Under ptrace, the child should now be re-parented back to its original
2699 	 * parent, unless that parent is initproc and the child only came to
2700 	 * initproc through re-parenting.
2701 	 */
2702 	bool child_ptraced = child->p_oppid != 0;
2703 	if (!shadow_proc && child_ptraced) {
2704 		int knote_hint;
2705 		pid_t orig_ppid = 0;
2706 		proc_t orig_parent = PROC_NULL;
2707 
2708 		proc_lock(child);
2709 		orig_ppid = child->p_oppid;
2710 		child->p_oppid = 0;
2711 		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2712 		proc_unlock(child);
2713 
2714 		orig_parent = proc_find(orig_ppid);
2715 		if (orig_parent) {
2716 			/*
2717 			 * Re-parent the process unless its original parent is initproc
2718 			 * and it only came to initproc through re-parenting.
2719 			 */
2720 			bool reparenting = orig_parent != initproc ||
2721 			    (flags & REAP_REPARENTED_TO_INIT) == 0;
2722 			if (reparenting) {
2723 				if (orig_parent != initproc) {
2724 					/*
2725 					 * Internal fields should be safe to access here because the
2726 					 * child is exited and not reaped or re-parented yet.
2727 					 */
2728 					proc_lock(orig_parent);
2729 					orig_parent->si_pid = proc_getpid(child);
2730 					orig_parent->si_status = child->p_xstat;
2731 					orig_parent->si_code = CLD_CONTINUED;
2732 					orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2733 					proc_unlock(orig_parent);
2734 				}
2735 				proc_reparentlocked(child, orig_parent, 1, 0);
2736 
2737 				/*
2738 				 * After re-parenting, re-send the child's NOTE_EXIT to the
2739 				 * original parent.
2740 				 */
2741 				proc_knote(child, knote_hint);
2742 				psignal(orig_parent, SIGCHLD);
2743 
2744 				proc_list_lock();
2745 				wakeup((caddr_t)orig_parent);
2746 				child->p_listflag &= ~P_LIST_WAITING;
2747 				wakeup(&child->p_stat);
2748 				proc_list_unlock();
2749 
2750 				proc_rele(orig_parent);
2751 				if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2752 					proc_list_lock();
2753 				}
2754 				return;
2755 			} else {
2756 				/*
2757 				 * Satisfy the knote lifecycle because ptraced processes don't
2758 				 * broadcast NOTE_EXIT during initial child termination.
2759 				 */
2760 				proc_knote(child, knote_hint);
2761 				proc_rele(orig_parent);
2762 			}
2763 		}
2764 	}
2765 
2766 #pragma clang diagnostic push
2767 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2768 	proc_knote(child, NOTE_REAP);
2769 #pragma clang diagnostic pop
2770 
2771 	proc_knote_drain(child);
2772 
2773 	child->p_xstat = 0;
2774 	if (!shadow_proc && child->p_ru) {
2775 		/*
2776 		 * Roll up the rusage statistics to the parent, unless the parent is
2777 		 * ignoring SIGCHLD.  POSIX requires the children's resources of such a
2778 		 * parent to not be included in the parent's usage (seems odd given
2779 		 * RLIMIT_CPU, though).
2780 		 */
2781 		proc_lock(parent);
2782 		bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2783 		if (rollup_child) {
2784 			ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2785 		}
2786 		update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2787 		proc_unlock(parent);
2788 		zfree(zombie_zone, child->p_ru);
2789 		child->p_ru = NULL;
2790 	} else if (!shadow_proc) {
2791 		printf("Warning : lost p_ru for %s\n", child->p_comm);
2792 	} else {
2793 		assert(child->p_ru == NULL);
2794 	}
2795 
2796 	AUDIT_SESSION_PROCEXIT(child);
2797 
2798 #if CONFIG_PERSONAS
2799 	persona_proc_drop(child);
2800 #endif /* CONFIG_PERSONAS */
2801 	/* proc_ucred_unsafe is safe, because child is not running */
2802 	(void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2803 
2804 	os_reason_free(child->p_exit_reason);
2805 
2806 	proc_list_lock();
2807 
2808 	pg = pgrp_leave_locked(child);
2809 	LIST_REMOVE(child, p_list);
2810 	parent->p_childrencnt--;
2811 	LIST_REMOVE(child, p_sibling);
2812 	bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2813 	    LIST_EMPTY(&parent->p_children);
2814 	if (no_more_children) {
2815 		wakeup((caddr_t)parent);
2816 	}
2817 	child->p_listflag &= ~P_LIST_WAITING;
2818 	wakeup(&child->p_stat);
2819 
2820 	/* Take it out of process hash */
2821 	if (!shadow_proc) {
2822 		phash_remove_locked(child);
2823 	}
2824 	proc_checkdeadrefs(child);
2825 	nprocs--;
2826 	if (flags & REAP_DEAD_PARENT) {
2827 		child->p_listflag |= P_LIST_DEADPARENT;
2828 	}
2829 
2830 	proc_list_unlock();
2831 
2832 	pgrp_rele(pg);
2833 	fdt_destroy(child);
2834 	lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2835 	lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2836 #if CONFIG_AUDIT
2837 	lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
2838 #endif /* CONFIG_AUDIT */
2839 #if CONFIG_DTRACE
2840 	lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2841 #endif
2842 	lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2843 	proc_wait_release(child);
2844 
2845 	if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2846 		proc_list_lock();
2847 	}
2848 }
2849 
2850 int
2851 wait1continue(int result)
2852 {
2853 	proc_t p;
2854 	thread_t thread;
2855 	uthread_t uth;
2856 	struct _wait4_data *wait4_data;
2857 	struct wait4_nocancel_args *uap;
2858 	int *retval;
2859 
2860 	if (result) {
2861 		return result;
2862 	}
2863 
2864 	p = current_proc();
2865 	thread = current_thread();
2866 	uth = (struct uthread *)get_bsdthread_info(thread);
2867 
2868 	wait4_data = &uth->uu_save.uus_wait4_data;
2869 	uap = wait4_data->args;
2870 	retval = wait4_data->retval;
2871 	return wait4_nocancel(p, uap, retval);
2872 }
2873 
2874 int
2875 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2876 {
2877 	__pthread_testcancel(1);
2878 	return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2879 }
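
/*
 * Illustrative userspace sketch (standard <sys/wait.h> interface) of how a
 * parent decodes the status assembled in proc_exit() above:
 *
 *	int status;
 *	pid_t pid = wait4(-1, &status, 0, NULL);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("%d exited with %d\n", pid, WEXITSTATUS(status));
 *	else if (pid > 0 && WIFSIGNALED(status))
 *		printf("%d killed by signal %d\n", pid, WTERMSIG(status));
 */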
2880 
2881 int
2882 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2883 {
2884 	int nfound;
2885 	int sibling_count;
2886 	proc_t p;
2887 	int status, error;
2888 	uthread_t uth;
2889 	struct _wait4_data *wait4_data;
2890 
2891 	AUDIT_ARG(pid, uap->pid);
2892 
2893 	if (uap->pid == 0) {
2894 		uap->pid = -q->p_pgrpid;
2895 	}
2896 
2897 	if (uap->pid == INT_MIN) {
2898 		return EINVAL;
2899 	}
2900 
2901 loop:
2902 	proc_list_lock();
2903 loop1:
2904 	nfound = 0;
2905 	sibling_count = 0;
2906 
2907 	PCHILDREN_FOREACH(q, p) {
2908 		if (p->p_sibling.le_next != 0) {
2909 			sibling_count++;
2910 		}
2911 		if (uap->pid != WAIT_ANY &&
2912 		    proc_getpid(p) != uap->pid &&
2913 		    p->p_pgrpid != -(uap->pid)) {
2914 			continue;
2915 		}
2916 
2917 		if (proc_is_shadow(p)) {
2918 			continue;
2919 		}
2920 
2921 		nfound++;
2922 
2923 		/* XXX This is racy because we don't get the lock!!!! */
2924 
2925 		if (p->p_listflag & P_LIST_WAITING) {
2926 			/* we're not using a continuation here but we still need to stash
2927 			 * the args for stackshot. */
2928 			uth = current_uthread();
2929 			wait4_data = &uth->uu_save.uus_wait4_data;
2930 			wait4_data->args = uap;
2931 			thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2932 
2933 			(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2934 			goto loop1;
2935 		}
2936 		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */
2937 
2938 
2939 		if (p->p_stat == SZOMB) {
2940 			reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2941 			    REAP_REPARENTED_TO_INIT : 0;
2942 
2943 			proc_list_unlock();
2944 #if CONFIG_MACF
2945 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2946 				goto out;
2947 			}
2948 #endif
2949 			retval[0] = proc_getpid(p);
2950 			if (uap->status) {
2951 				/* Truncate to the 16-bit wait status; legacy apps expect only 8 bits of exit code */
2952 				status = 0xffff & p->p_xstat;   /* convert to int */
2953 				error = copyout((caddr_t)&status,
2954 				    uap->status,
2955 				    sizeof(status));
2956 				if (error) {
2957 					goto out;
2958 				}
2959 			}
2960 			if (uap->rusage) {
2961 				if (p->p_ru == NULL) {
2962 					error = ENOMEM;
2963 				} else {
2964 					if (IS_64BIT_PROCESS(q)) {
2965 						struct user64_rusage    my_rusage = {};
2966 						munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2967 						error = copyout((caddr_t)&my_rusage,
2968 						    uap->rusage,
2969 						    sizeof(my_rusage));
2970 					} else {
2971 						struct user32_rusage    my_rusage = {};
2972 						munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2973 						error = copyout((caddr_t)&my_rusage,
2974 						    uap->rusage,
2975 						    sizeof(my_rusage));
2976 					}
2977 				}
2978 				/* information unavailable? */
2979 				if (error) {
2980 					goto out;
2981 				}
2982 			}
2983 
2984 			/* Conformance change for 6577252.
2985 			 * When SIGCHLD is blocked and wait() returns because the status
2986 			 * of a child process is available and there are no other
2987 			 * child processes, then any pending SIGCHLD signal is cleared.
2988 			 */
2989 			if (sibling_count == 0) {
2990 				int mask = sigmask(SIGCHLD);
2991 				uth = current_uthread();
2992 
2993 				if ((uth->uu_sigmask & mask) != 0) {
2994 					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
2995 					 * This locking looks funny but it is protecting access to the
2996 					 * thread via p_uthlist.
2997 					 */
2998 					proc_lock(q);
2999 					uth->uu_siglist &= ~mask;       /* clear pending signal */
3000 					proc_unlock(q);
3001 				}
3002 			}
3003 
3004 			/* Clean up */
3005 			(void)reap_child_locked(q, p, reap_flags);
3006 
3007 			return 0;
3008 		}
3009 		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
3010 		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
3011 			proc_list_unlock();
3012 #if CONFIG_MACF
3013 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3014 				goto out;
3015 			}
3016 #endif
3017 			proc_lock(p);
3018 			p->p_lflag |= P_LWAITED;
3019 			proc_unlock(p);
3020 			retval[0] = proc_getpid(p);
3021 			if (uap->status) {
3022 				status = W_STOPCODE(p->p_xstat);
3023 				error = copyout((caddr_t)&status,
3024 				    uap->status,
3025 				    sizeof(status));
3026 			} else {
3027 				error = 0;
3028 			}
3029 			goto out;
3030 		}
3031 		/*
3032 		 * If we are waiting for continued processes, and this
3033 		 * process was continued.
3034 		 */
3035 		if ((uap->options & WCONTINUED) &&
3036 		    (p->p_flag & P_CONTINUED)) {
3037 			proc_list_unlock();
3038 #if CONFIG_MACF
3039 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3040 				goto out;
3041 			}
3042 #endif
3043 
3044 			/* Prevent other processes from waiting for this event */
3045 			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3046 			retval[0] = proc_getpid(p);
3047 			if (uap->status) {
3048 				status = W_STOPCODE(SIGCONT);
3049 				error = copyout((caddr_t)&status,
3050 				    uap->status,
3051 				    sizeof(status));
3052 			} else {
3053 				error = 0;
3054 			}
3055 			goto out;
3056 		}
3057 		p->p_listflag &= ~P_LIST_WAITING;
3058 		wakeup(&p->p_stat);
3059 	}
3060 	/* list lock is held when we get here any which way */
3061 	if (nfound == 0) {
3062 		proc_list_unlock();
3063 		return ECHILD;
3064 	}
3065 
3066 	if (uap->options & WNOHANG) {
3067 		retval[0] = 0;
3068 		proc_list_unlock();
3069 		return 0;
3070 	}
3071 
3072 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3073 	uth = current_uthread();
3074 	wait4_data = &uth->uu_save.uus_wait4_data;
3075 	wait4_data->args = uap;
3076 	wait4_data->retval = retval;
3077 
3078 	thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
3079 	if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3080 		return error;
3081 	}
3082 
3083 	goto loop;
3084 out:
3085 	proc_list_lock();
3086 	p->p_listflag &= ~P_LIST_WAITING;
3087 	wakeup(&p->p_stat);
3088 	proc_list_unlock();
3089 	return error;
3090 }
3091 
3092 #if DEBUG
3093 #define ASSERT_LCK_MTX_OWNED(lock)      \
3094 	                        lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3095 #else
3096 #define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */
3097 #endif
3098 
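/*
 * waitidcontinue
 *
 * Continuation routine for waitid(); the waitid analogue of
 * wait1continue() above.  It reloads the arguments saved in the uthread
 * before the thread blocked and restarts waitid_nocancel().
 */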
3099 int
3100 waitidcontinue(int result)
3101 {
3102 	proc_t p;
3103 	thread_t thread;
3104 	uthread_t uth;
3105 	struct _waitid_data *waitid_data;
3106 	struct waitid_nocancel_args *uap;
3107 	int *retval;
3108 
3109 	if (result) {
3110 		return result;
3111 	}
3112 
3113 	p = current_proc();
3114 	thread = current_thread();
3115 	uth = (struct uthread *)get_bsdthread_info(thread);
3116 
3117 	waitid_data = &uth->uu_save.uus_waitid_data;
3118 	uap = waitid_data->args;
3119 	retval = waitid_data->retval;
3120 	return waitid_nocancel(p, uap, retval);
3121 }
3122 
3123 /*
3124  * Description:	Suspend the calling thread until one child of the process
3125  *		containing the calling thread changes state.
3126  *
3127  * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
3128  *		uap->id			pid_t or gid_t or ignored
3129  *		uap->infop		Address of siginfo_t struct in
3130  *					user space into which to return status
3131  *		uap->options		flag values
3132  *
3133  * Returns:	0			Success
3134  *		!0			Error returning status to user space
3135  */
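/*
 * Illustrative userspace sketch (not part of this file): a caller that
 * polls for any exited child, using the si_pid convention described at
 * the WNOHANG return below, might do
 *
 *	siginfo_t info;
 *	info.si_pid = 0;
 *	if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0 &&
 *	    info.si_pid != 0)
 *		handle_exit(&info);
 *
 * where handle_exit is a hypothetical consumer of the siginfo.
 */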
3136 int
3137 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3138 {
3139 	__pthread_testcancel(1);
3140 	return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3141 }
3142 
3143 int
3144 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3145     __unused int32_t *retval)
3146 {
3147 	user_siginfo_t  siginfo;        /* siginfo data to return to caller */
3148 	boolean_t caller64 = IS_64BIT_PROCESS(q);
3149 	int nfound;
3150 	proc_t p;
3151 	int error;
3152 	uthread_t uth;
3153 	struct _waitid_data *waitid_data;
3154 
3155 	if (uap->options == 0 ||
3156 	    (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3157 		return EINVAL;        /* bits set that aren't recognized */
3158 	}
3159 	switch (uap->idtype) {
3160 	case P_PID:     /* child with process ID equal to... */
3161 	case P_PGID:    /* child with process group ID equal to... */
3162 		if (((int)uap->id) < 0) {
3163 			return EINVAL;
3164 		}
3165 		break;
3166 	case P_ALL:     /* any child */
3167 		break;
3168 	}
3169 
3170 loop:
3171 	proc_list_lock();
3172 loop1:
3173 	nfound = 0;
3174 
3175 	PCHILDREN_FOREACH(q, p) {
3176 		switch (uap->idtype) {
3177 		case P_PID:     /* child with process ID equal to... */
3178 			if (proc_getpid(p) != (pid_t)uap->id) {
3179 				continue;
3180 			}
3181 			break;
3182 		case P_PGID:    /* child with process group ID equal to... */
3183 			if (p->p_pgrpid != (pid_t)uap->id) {
3184 				continue;
3185 			}
3186 			break;
3187 		case P_ALL:     /* any child */
3188 			break;
3189 		}
3190 
3191 		if (proc_is_shadow(p)) {
3192 			continue;
3193 		}
3194 		/* XXX This is racy because we don't get the lock!!!! */
3195 
3196 		/*
3197 		 * Wait collision; go to sleep and restart.  This maintains the
3198 		 * guarantee that a waited process is returned only once.
3199 		 */
3200 		if (p->p_listflag & P_LIST_WAITING) {
3201 			(void) msleep(&p->p_stat, &proc_list_mlock,
3202 			    PWAIT, "waitidcoll", 0);
3203 			goto loop1;
3204 		}
3205 		p->p_listflag |= P_LIST_WAITING;                /* mark busy */
3206 
3207 		nfound++;
3208 
3209 		bzero(&siginfo, sizeof(siginfo));
3210 
3211 		switch (p->p_stat) {
3212 		case SZOMB:             /* Exited */
3213 			if (!(uap->options & WEXITED)) {
3214 				break;
3215 			}
3216 			proc_list_unlock();
3217 #if CONFIG_MACF
3218 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3219 				goto out;
3220 			}
3221 #endif
3222 			siginfo.si_signo = SIGCHLD;
3223 			siginfo.si_pid = proc_getpid(p);
3224 
3225 			/* If the child terminated abnormally due to a signal, the signum
3226 			 * needs to be preserved in the exit status.
3227 			 */
3228 			if (WIFSIGNALED(p->p_xstat)) {
3229 				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
3230 				    CLD_DUMPED : CLD_KILLED;
3231 				siginfo.si_status = WTERMSIG(p->p_xstat);
3232 			} else {
3233 				siginfo.si_code = CLD_EXITED;
3234 				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
3235 			}
3236 			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
3237 			p->p_xhighbits = 0;
3238 
3239 			if ((error = copyoutsiginfo(&siginfo,
3240 			    caller64, uap->infop)) != 0) {
3241 				goto out;
3242 			}
3243 
3244 			/* Prevent other processes from waiting for this event? */
3245 			if (!(uap->options & WNOWAIT)) {
3246 				reap_child_locked(q, p, 0);
3247 				return 0;
3248 			}
3249 			goto out;
3250 
3251 		case SSTOP:             /* Stopped */
3252 			/*
3253 			 * If we are not interested in stopped processes, then
3254 			 * ignore this one.
3255 			 */
3256 			if (!(uap->options & WSTOPPED)) {
3257 				break;
3258 			}
3259 
3260 			/*
3261 			 * If someone has already waited it, we lost a race
3262 			 * to be the one to return status.
3263 			 */
3264 			if ((p->p_lflag & P_LWAITED) != 0) {
3265 				break;
3266 			}
3267 			proc_list_unlock();
3268 #if CONFIG_MACF
3269 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3270 				goto out;
3271 			}
3272 #endif
3273 			siginfo.si_signo = SIGCHLD;
3274 			siginfo.si_pid = proc_getpid(p);
3275 			siginfo.si_status = p->p_xstat; /* signal number */
3276 			siginfo.si_code = CLD_STOPPED;
3277 
3278 			if ((error = copyoutsiginfo(&siginfo,
3279 			    caller64, uap->infop)) != 0) {
3280 				goto out;
3281 			}
3282 
3283 			/* Prevent other processes from waiting for this event? */
3284 			if (!(uap->options & WNOWAIT)) {
3285 				proc_lock(p);
3286 				p->p_lflag |= P_LWAITED;
3287 				proc_unlock(p);
3288 			}
3289 			goto out;
3290 
3291 		default:                /* All other states => Continued */
3292 			if (!(uap->options & WCONTINUED)) {
3293 				break;
3294 			}
3295 
3296 			/*
3297 			 * If the flag isn't set, then this process has not
3298 			 * been stopped and continued, or the status has
3299 			 * already been reaped by another caller of waitid().
3300 			 */
3301 			if ((p->p_flag & P_CONTINUED) == 0) {
3302 				break;
3303 			}
3304 			proc_list_unlock();
3305 #if CONFIG_MACF
3306 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3307 				goto out;
3308 			}
3309 #endif
3310 			siginfo.si_signo = SIGCHLD;
3311 			siginfo.si_code = CLD_CONTINUED;
3312 			proc_lock(p);
3313 			siginfo.si_pid = p->p_contproc;
3314 			siginfo.si_status = p->p_xstat;
3315 			proc_unlock(p);
3316 
3317 			if ((error = copyoutsiginfo(&siginfo,
3318 			    caller64, uap->infop)) != 0) {
3319 				goto out;
3320 			}
3321 
3322 			/* Prevent other processes from waiting for this event? */
3323 			if (!(uap->options & WNOWAIT)) {
3324 				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
3325 				    &p->p_flag);
3326 			}
3327 			goto out;
3328 		}
3329 		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3330 
3331 		/* Not a process we are interested in; go on to next child */
3332 
3333 		p->p_listflag &= ~P_LIST_WAITING;
3334 		wakeup(&p->p_stat);
3335 	}
3336 	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3337 
3338 	/* No child processes that could possibly satisfy the request? */
3339 
3340 	if (nfound == 0) {
3341 		proc_list_unlock();
3342 		return ECHILD;
3343 	}
3344 
3345 	if (uap->options & WNOHANG) {
3346 		proc_list_unlock();
3347 #if CONFIG_MACF
3348 		if ((error = mac_proc_check_wait(q, p)) != 0) {
3349 			return error;
3350 		}
3351 #endif
3352 		/*
3353 		 * The state of the siginfo structure in this case
3354 		 * is undefined.  Some implementations bzero it, some
3355 		 * (like here) leave it untouched for efficiency.
3356 		 *
3357 		 * Thus the most portable check for "no matching pid with
3358 		 * WNOHANG" is to store a zero into si_pid before
3359 		 * invocation, then check for a non-zero value afterwards.
3360 		 */
3361 		return 0;
3362 	}
3363 
3364 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3365 	uth = current_uthread();
3366 	waitid_data = &uth->uu_save.uus_waitid_data;
3367 	waitid_data->args = uap;
3368 	waitid_data->retval = retval;
3369 
3370 	if ((error = msleep0(q, &proc_list_mlock,
3371 	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
3372 		return error;
3373 	}
3374 
3375 	goto loop;
3376 out:
3377 	proc_list_lock();
3378 	p->p_listflag &= ~P_LIST_WAITING;
3379 	wakeup(&p->p_stat);
3380 	proc_list_unlock();
3381 	return error;
3382 }
3383 
3384 /*
3385  * Make process 'parent' the new parent of process 'child'.  If 'locked'
 * is nonzero the caller already holds the proc list lock; this routine
 * drops it around the SIGCHLD delivery and re-takes it before returning.
 * If 'signallable' is nonzero and a zombie child is handed to initproc,
 * SIGCHLD is posted to initproc so the zombie will be reaped.
3386  */
3387 void
3388 proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
3389 {
3390 	proc_t oldparent = PROC_NULL;
3391 
3392 	if (child->p_pptr == parent) {
3393 		return;
3394 	}
3395 
3396 	if (locked == 0) {
3397 		proc_list_lock();
3398 	}
3399 
3400 	oldparent = child->p_pptr;
3401 #if __PROC_INTERNAL_DEBUG
3402 	if (oldparent == PROC_NULL) {
3403 		panic("proc_reparent: process %p does not have a parent", child);
3404 	}
3405 #endif
3406 
3407 	LIST_REMOVE(child, p_sibling);
3408 #if __PROC_INTERNAL_DEBUG
3409 	if (oldparent->p_childrencnt == 0) {
3410 		panic("process children count already 0");
3411 	}
3412 #endif
3413 	oldparent->p_childrencnt--;
3414 #if __PROC_INTERNAL_DEBUG
3415 	if (oldparent->p_childrencnt < 0) {
3416 		panic("process children count -ve");
3417 	}
3418 #endif
3419 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
3420 	parent->p_childrencnt++;
3421 	child->p_pptr = parent;
3422 	child->p_ppid = proc_getpid(parent);
3423 
3424 	proc_list_unlock();
3425 
3426 	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
3427 		psignal(initproc, SIGCHLD);
3428 	}
3429 	if (locked == 1) {
3430 		proc_list_lock();
3431 	}
3432 }
3433 
3434 /*
3435  * Exit: deallocate address space and other resources, change proc state
3436  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
3437  * status and rusage for wait().  Check for child processes and orphan them.
3438  */
3439 
3440 
3441 /*
3442  * munge_rusage
3443  *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
3444  *	process.  We munge the kernel version of rusage into the
3445  *	64 bit version.
3446  */
3447 __private_extern__  void
3448 munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
3449 {
3450 	/* Zero-out struct so that padding is cleared */
3451 	bzero(a_user_rusage_p, sizeof(struct user64_rusage));
3452 
3453 	/* timeval changes size, so utime and stime need special handling */
3454 	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
3455 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3456 	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
3457 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3458 	/*
3459 	 * everything else can be a direct assign, since there is no loss
3460 	 * of precision implied going 32->64.
3461 	 */
3462 	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
3463 	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
3464 	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
3465 	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
3466 	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
3467 	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
3468 	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
3469 	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
3470 	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
3471 	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
3472 	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
3473 	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
3474 	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
3475 	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
3476 }
3477 
3478 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */
3479 __private_extern__  void
3480 munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
3481 {
3482 	bzero(a_user_rusage_p, sizeof(struct user32_rusage));
3483 
3484 	/* timeval changes size, so utime and stime need special handling */
3485 	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
3486 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3487 	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
3488 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3489 	/*
3490 	 * everything else can be a direct assign. We currently ignore
3491 	 * the loss of precision
3492 	 */
3493 	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
3494 	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
3495 	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
3496 	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
3497 	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
3498 	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
3499 	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
3500 	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
3501 	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
3502 	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
3503 	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
3504 	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
3505 	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
3506 	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
3507 }
3508 
3509 void
3510 kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
3511 {
3512 	assert(thread != NULL);
3513 	assert(waitinfo != NULL);
3514 
3515 	struct uthread *ut = get_bsdthread_info(thread);
3516 	waitinfo->context = 0;
3517 	// ensure wmesg is consistent with a thread waiting in wait4
3518 	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
3519 	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
3520 	// May not actually contain a pid; this is just the argument to wait4.
3521 	// See man wait4 for other valid wait4 arguments.
3522 	waitinfo->owner = args->pid;
3523 }
3524 
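/*
 * Common backend for exit_with_mach_exception() and
 * exit_with_exclave_exception() below.  Unless PX_DEBUG_NO_HONOR is set,
 * a debugged address space suppresses the exit entirely.  PX_KTRIAGE
 * leaves a ktriage record; PX_PSIGNAL delivers a signal (defaulting to
 * SIGKILL) instead of a forced exit; otherwise an os_reason is created,
 * the exception is stashed in the uthread (unless PX_NO_EXCEPTION_UTHREAD)
 * for the crash report, and the process is force-exited with SIGKILL.
 */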
3525 static int
3526 exit_with_exception_internal(
3527 	struct proc *p,
3528 	exception_info_t exception,
3529 	uint32_t flags)
3530 {
3531 	os_reason_t reason = OS_REASON_NULL;
3532 	struct uthread *ut = NULL;
3533 
3534 	if (p == PROC_NULL) {
3535 		panic("exception type %d without a valid proc",
3536 		    exception.os_reason);
3537 	}
3538 
3539 	if (!(flags & PX_DEBUG_NO_HONOR)
3540 	    && is_address_space_debugged(p)) {
3541 		return 0;
3542 	}
3543 
3544 	if ((flags & PX_KTRIAGE)) {
3545 		/* Leave a ktriage record */
3546 		ktriage_record(
3547 			thread_tid(current_thread()),
3548 			KDBG_TRIAGE_EVENTID(
3549 				exception.kt_info.kt_subsys,
3550 				KDBG_TRIAGE_RESERVED,
3551 				exception.kt_info.kt_error),
3552 			0);
3553 	}
3554 
3555 	if ((flags & PX_PSIGNAL)) {
3556 		int signal = (exception.signal > 0) ? exception.signal : SIGKILL;
3557 
3558 		printf("[%s%s] sending signal %d to process\n", proc_best_name(p),
3559 		    (signal == SIGKILL) ? ": killed" : "", signal);
3560 		psignal(p, signal);
3561 		return 0;
3562 	} else {
3563 		assert(exception.exception_type > 0);
3564 
3565 		reason = os_reason_create(
3566 			exception.os_reason,
3567 			(uint64_t)exception.mx_code);
3568 		assert(reason != OS_REASON_NULL);
3569 		reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
3570 
3571 		if (!(flags & PX_NO_EXCEPTION_UTHREAD)) {
3572 			ut = get_bsdthread_info(current_thread());
3573 			ut->uu_exception = exception.exception_type;
3574 			ut->uu_code = exception.mx_code;
3575 			ut->uu_subcode = exception.mx_subcode;
3576 		}
3577 
3578 		printf("[%s: killed] sending signal %d and force exiting process\n",
3579 		    proc_best_name(p), SIGKILL);
3580 		return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
3581 		           FALSE, FALSE, 0, reason);
3582 	}
3583 }
3584 
3585 /*
3586  * Use separate function calls for mach and exclave exceptions so that the
3587  * exception's origin shows up clearly in backtraces on development kernels.
3588  */
3589 
3590 int
3591 exit_with_mach_exception(
3592 	struct proc *p,
3593 	exception_info_t exception,
3594 	uint32_t flags)
3595 {
3596 	return exit_with_exception_internal(p, exception, flags);
3597 }
3598 
3599 
3600 #if CONFIG_EXCLAVES
3601 int
3602 exit_with_exclave_exception(
3603 	struct proc *p,
3604 	exception_info_t exception,
3605 	uint32_t flags)
3606 {
3607 	return exit_with_exception_internal(p, exception, flags);
3608 }
3609 #endif /* CONFIG_EXCLAVES */
3610 
3611 /**
3612  * Causes the current process to exit with a Mach exception.
3613  *
3614  * Compared to exit_with_mach_exception(), exit_with_mach_exception_using_ast()
3615  * can be called in a preemption-disabled context.  This function defers
3616  * updating the process state until an AST.
3617  *
3618  * @note Currently only the PX_KTRIAGE flag is implemented.
3619  *
3620  * @param exception information about the exception
3621  * @param flags a bitmask of PX_* flags describing how to deliver the exception
3622  */
3623 void
3624 exit_with_mach_exception_using_ast(
3625 	exception_info_t exception,
3626 	uint32_t flags)
3627 {
3628 	const uint32_t __assert_only supported_flags = PX_KTRIAGE;
3629 	assert((flags & ~supported_flags) == 0);
3630 
3631 	bool ktriage = flags & PX_KTRIAGE;
3632 	thread_ast_mach_exception(current_thread(), exception.os_reason, exception.exception_type,
3633 	    exception.mx_code, exception.mx_subcode, false, ktriage);
3634 }
3635