xref: /xnu-12377.81.4/bsd/kern/kern_exit.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113 
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116 
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120 
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134 #include <vm/vm_map_xnu.h>
135 
136 #include <vm/vm_protos.h>
137 #include <os/log.h>
138 #include <os/system_event_log.h>
139 
140 #include <pexpert/pexpert.h>
141 
142 #include <kdp/kdp_dyld.h>
143 
144 #if SYSV_SHM
145 #include <sys/shm_internal.h>   /* shmexit */
146 #endif /* SYSV_SHM */
147 #if CONFIG_PERSONAS
148 #include <sys/persona.h>
149 #endif /* CONFIG_PERSONAS */
150 #if CONFIG_MEMORYSTATUS
151 #include <sys/kern_memorystatus.h>
152 #endif /* CONFIG_MEMORYSTATUS */
153 #if CONFIG_DTRACE
154 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
155 void dtrace_proc_exit(proc_t p);
156 #include <sys/dtrace_ptss.h>
157 #endif /* CONFIG_DTRACE */
158 #if CONFIG_MACF
159 #include <security/mac_framework.h>
160 #include <security/mac_mach_internal.h>
161 #include <sys/syscall.h>
162 #endif /* CONFIG_MACF */
163 
164 #ifdef CONFIG_EXCLAVES
165 void
166 task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
167 #endif /* CONFIG_EXCLAVES */
168 
169 #if CONFIG_MEMORYSTATUS
170 static void proc_memorystatus_remove(proc_t p);
171 #endif /* CONFIG_MEMORYSTATUS */
172 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
173 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
174     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
175     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
176 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
177 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
178 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
179 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
180 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
181     struct rusage_superset *rup, mach_exception_data_type_t code,
182     mach_exception_data_type_t subcode, uint64_t *udata_buffer,
183     int num_udata, os_reason_t reason, exception_type_t etype, mach_exception_data_type_t saved_code);
184 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
185 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
186 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
187 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
188 extern uint64_t get_task_phys_footprint_limit(task_t);
189 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
190 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
191 
192 extern unsigned int exception_log_max_pid;
193 
194 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
195 
196 /*
197  * Flags for `reap_child_locked`.
198  */
199 __options_decl(reap_flags_t, uint32_t, {
200 	/*
201 	 * Parent is exiting, so the kernel is responsible for reaping children.
202 	 */
203 	REAP_DEAD_PARENT = 0x01,
204 	/*
205 	 * Child process was re-parented to initproc.
206 	 */
207 	REAP_REPARENTED_TO_INIT = 0x02,
208 	/*
209 	 * `proc_list_lock` is held on entry.
210 	 */
211 	REAP_LOCKED = 0x04,
212 	/*
213 	 * Drop the `proc_list_lock` on return.  Note that the `proc_list_lock` will
214 	 * be dropped internally by the function regardless.
215 	 */
216 	REAP_DROP_LOCK = 0x08,
217 });
218 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
219 
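/*
 * Zone for the struct rusage_superset retained with a zombie process
 * until it is reaped.
 */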
220 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
221 
222 /*
223  * Things which should have prototypes in headers, but don't
224  */
225 void    proc_exit(proc_t p);
226 int     wait1continue(int result);
227 int     waitidcontinue(int result);
228 kern_return_t sys_perf_notify(thread_t thread, int pid);
229 kern_return_t task_exception_notify(exception_type_t exception,
230     mach_exception_data_type_t code, mach_exception_data_type_t subcode, bool fatal);
231 void    delay(int);
232 
233 #if DEVELOPMENT || DEBUG
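/*
 * Debug-only facility: hold a process with a matching pid at a chosen
 * position in proc_exit() until released via the sysctls below.  Gated by
 * the "enable_proc_exit_lpexit_spin" boot-arg.
 */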
234 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
235 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
236 static pid_t proc_exit_lpexit_spin_pid = -1;            /* wakeup point */
237 static int proc_exit_lpexit_spin_pos = -1;              /* point to block */
238 static int proc_exit_lpexit_spinning = 0;
239 enum {
240 	PELS_POS_START = 0,             /* beginning of proc_exit */
241 	PELS_POS_PRE_TASK_DETACH,       /* before task/proc detach */
242 	PELS_POS_POST_TASK_DETACH,      /* after task/proc detach */
243 	PELS_POS_END,                   /* end of proc_exit */
244 	PELS_NPOS                       /* # valid values */
245 };
246 
247 /* Panic if matching processes (delimited by ',') exit on error. */
248 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
249 
250 static int
251 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
252 {
253 #pragma unused(oidp, arg1, arg2)
254 	pid_t new_value;
255 	int changed;
256 	int error;
257 
258 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
259 		return ENOENT;
260 	}
261 
262 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
263 	    sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
264 	if (error == 0 && changed != 0) {
265 		if (new_value < -1) {
266 			return EINVAL;
267 		}
268 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
269 		proc_exit_lpexit_spin_pid = new_value;
270 		wakeup(&proc_exit_lpexit_spin_pid);
271 		proc_exit_lpexit_spinning = 0;
272 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
273 	}
274 	return error;
275 }
276 
277 static int
278 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
279 {
280 #pragma unused(oidp, arg1, arg2)
281 	int new_value;
282 	int changed;
283 	int error;
284 
285 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
286 		return ENOENT;
287 	}
288 
289 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
290 	    sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
291 	if (error == 0 && changed != 0) {
292 		if (new_value < -1 || new_value >= PELS_NPOS) {
293 			return EINVAL;
294 		}
295 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
296 		proc_exit_lpexit_spin_pos = new_value;
297 		wakeup(&proc_exit_lpexit_spin_pid);
298 		proc_exit_lpexit_spinning = 0;
299 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
300 	}
301 	return error;
302 }
303 
304 static int
305 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
306 {
307 #pragma unused(oidp, arg1, arg2)
308 	int new_value;
309 	int changed;
310 	int error;
311 
312 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
313 		return ENOENT;
314 	}
315 
316 	error = sysctl_io_number(req, proc_exit_lpexit_spinning,
317 	    sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
318 	if (error == 0 && changed != 0) {
319 		return EINVAL;
320 	}
321 	return error;
322 }
323 
324 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
325     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
326     NULL, sizeof(pid_t),
327     proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
328 
329 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
330     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
331     NULL, sizeof(int),
332     proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
333 
334 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
335     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
336     NULL, sizeof(int),
337     proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
338 
339 static inline void
340 proc_exit_lpexit_check(pid_t pid, int pos)
341 {
342 	if (proc_exit_lpexit_spin_pid == pid) {
343 		bool slept = false;
344 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
345 		while (proc_exit_lpexit_spin_pid == pid &&
346 		    proc_exit_lpexit_spin_pos == pos) {
347 			if (!slept) {
348 				os_log(OS_LOG_DEFAULT,
349 				    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
350 				slept = true;
351 			}
352 			proc_exit_lpexit_spinning = 1;
353 			msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
354 			    PWAIT, "proc_exit_lpexit_check", NULL);
355 			proc_exit_lpexit_spinning = 0;
356 		}
357 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
358 		if (slept) {
359 			os_log(OS_LOG_DEFAULT,
360 			    "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
361 		}
362 	}
363 }
364 #endif /* DEVELOPMENT || DEBUG */
365 
366 /*
367  * NOTE: Source and target may *NOT* overlap!
368  * XXX Should share code with bsd/dev/ppc/unix_signal.c
369  */
370 void
371 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
372 {
373 	out->si_signo   = in->si_signo;
374 	out->si_errno   = in->si_errno;
375 	out->si_code    = in->si_code;
376 	out->si_pid     = in->si_pid;
377 	out->si_uid     = in->si_uid;
378 	out->si_status  = in->si_status;
379 	out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
380 	/* following cast works for sival_int because of padding */
381 	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
382 	out->si_band    = (user32_long_t)in->si_band;                  /* range reduction */
383 }
384 
385 void
386 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
387 {
388 	out->si_signo   = in->si_signo;
389 	out->si_errno   = in->si_errno;
390 	out->si_code    = in->si_code;
391 	out->si_pid     = in->si_pid;
392 	out->si_uid     = in->si_uid;
393 	out->si_status  = in->si_status;
394 	out->si_addr    = in->si_addr;
395 	/* following cast works for sival_int because of padding */
396 	out->si_value.sival_ptr = in->si_value.sival_ptr;
397 	out->si_band    = in->si_band;                  /* range reduction */
398 }
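/*
 * Copy a kernel-native siginfo out to user space, using the 64- or 32-bit
 * layout as indicated by `is64`.
 */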
399 
400 static int
401 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
402 {
403 	if (is64) {
404 		user64_siginfo_t sinfo64;
405 
406 		bzero(&sinfo64, sizeof(sinfo64));
407 		siginfo_user_to_user64(native, &sinfo64);
408 		return copyout(&sinfo64, uaddr, sizeof(sinfo64));
409 	} else {
410 		user32_siginfo_t sinfo32;
411 
412 		bzero(&sinfo32, sizeof(sinfo32));
413 		siginfo_user_to_user32(native, &sinfo32);
414 		return copyout(&sinfo32, uaddr, sizeof(sinfo32));
415 	}
416 }
417 
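/*
 * Gather the current rusage for `p` and populate the corpse's crash info
 * with it; the incoming exception code is also passed as the saved code.
 */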
418 void
419 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
420     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
421     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
422 {
423 	struct rusage_superset rup;
424 
425 	gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
426 	rup.ri.ri_phys_footprint = 0;
427 	populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
428 	    udata_buffer, num_udata, reason, etype, code);
429 }
430 
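/*
 * For jetsam per-process-limit kills, rewrite the exception code/subcode so
 * the corpse reports an EXC_RESOURCE high-watermark event carrying the
 * task's footprint limit (in MB).
 */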
431 static void
432 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
433 {
434 	mach_exception_data_type_t code_update = *code;
435 	mach_exception_data_type_t subcode_update = *subcode;
436 	if (p->p_exit_reason == OS_REASON_NULL) {
437 		return;
438 	}
439 
440 	switch (p->p_exit_reason->osr_namespace) {
441 	case OS_REASON_JETSAM:
442 		if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
443 			/* Update the code with EXC_RESOURCE code for high memory watermark */
444 			EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
445 			EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
446 			EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
447 			subcode_update = 0;
448 			break;
449 		}
450 
451 		break;
452 	default:
453 		break;
454 	}
455 
456 	*code = code_update;
457 	*subcode = subcode_update;
458 	return;
459 }
460 
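/*
 * Pack the exit reason's namespace and code into a single mach exception
 * subcode value; returns 0 if the process has no exit reason.
 */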
461 mach_exception_data_type_t
462 proc_encode_exit_exception_code(proc_t p)
463 {
464 	uint64_t subcode = 0;
465 
466 	if (p->p_exit_reason == OS_REASON_NULL) {
467 		return 0;
468 	}
469 
470 	/* Embed first 32 bits of osr_namespace and osr_code in exception code */
471 	ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
472 	ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
473 	return (mach_exception_data_type_t)subcode;
474 }
475 
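/*
 * Fill the corpse task's kcdata crash-info buffer with process identity,
 * resource usage, ledger values, code-signing state, the exit reason
 * snapshot, and any caller-supplied user data pointers.
 */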
476 static void
477 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
478     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
479     uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype,
480     __unused mach_exception_data_type_t saved_code)
481 {
482 	mach_vm_address_t uaddr = 0;
483 	mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
484 	exc_codes[0] = code;
485 	exc_codes[1] = subcode;
486 	cpu_type_t cputype;
487 	struct proc_uniqidentifierinfo p_uniqidinfo;
488 	struct proc_workqueueinfo pwqinfo;
489 	int retval = 0;
490 	uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
491 	boolean_t is_corpse_fork;
492 	uint32_t csflags;
493 	unsigned int pflags = 0;
494 	uint64_t max_footprint_mb;
495 	uint64_t max_footprint;
496 
497 	uint64_t ledger_internal;
498 	uint64_t ledger_internal_compressed;
499 	uint64_t ledger_iokit_mapped;
500 	uint64_t ledger_alternate_accounting;
501 	uint64_t ledger_alternate_accounting_compressed;
502 	uint64_t ledger_purgeable_nonvolatile;
503 	uint64_t ledger_purgeable_nonvolatile_compressed;
504 	uint64_t ledger_page_table;
505 	uint64_t ledger_phys_footprint;
506 	uint64_t ledger_phys_footprint_lifetime_max;
507 	uint64_t ledger_network_nonvolatile;
508 	uint64_t ledger_network_nonvolatile_compressed;
509 	uint64_t ledger_wired_mem;
510 	uint64_t ledger_tagged_footprint;
511 	uint64_t ledger_tagged_footprint_compressed;
512 	uint64_t ledger_media_footprint;
513 	uint64_t ledger_media_footprint_compressed;
514 	uint64_t ledger_graphics_footprint;
515 	uint64_t ledger_graphics_footprint_compressed;
516 	uint64_t ledger_neural_footprint;
517 	uint64_t ledger_neural_footprint_compressed;
518 
519 	void *crash_info_ptr = task_get_corpseinfo(corpse_task);
520 
521 #if CONFIG_MEMORYSTATUS
522 	int memstat_dirty_flags = 0;
523 #endif
524 
525 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
526 		kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
527 	}
528 
529 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
530 		pid_t pid = proc_getpid(p);
531 		kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
532 	}
533 
534 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
535 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
536 	}
537 
538 	/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
539 	if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
540 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
541 			kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
542 		}
543 	}
544 
545 	static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
546 	if (KERN_SUCCESS ==
547 	    kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
548 		proc_piduniqidentifierinfo(p, &p_uniqidinfo);
549 		kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
550 	}
551 
552 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
553 		kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
554 	}
555 
556 	csflags = (uint32_t)proc_getcsflags(p);
557 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
558 		kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
559 	}
560 
561 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
562 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
563 	}
564 
565 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
566 		struct timeval64 t64;
567 		t64.tv_sec = (int64_t)p->p_start.tv_sec;
568 		t64.tv_usec = (int64_t)p->p_start.tv_usec;
569 		kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
570 	}
571 
572 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
573 		kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
574 	}
575 
576 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
577 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
578 	}
579 
580 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
581 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
582 	}
583 
584 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
585 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
586 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
587 		kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
588 		zfree(ZV_NAMEI, buf);
589 	}
590 
591 	pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
592 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
593 		kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
594 	}
595 
596 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
597 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
598 	}
599 
600 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
601 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
602 	}
603 
604 	cputype = cpu_type() & ~CPU_ARCH_MASK;
605 	if (IS_64BIT_PROCESS(p)) {
606 		cputype |= CPU_ARCH_ABI64;
607 	} else if (proc_is64bit_data(p)) {
608 		cputype |= CPU_ARCH_ABI64_32;
609 	}
610 
611 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
612 		kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
613 	}
614 
615 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
616 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
617 	}
618 
619 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
620 		max_footprint = get_task_phys_footprint_limit(proc_task(p));
621 		max_footprint_mb = max_footprint >> 20;
622 		kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
623 	}
624 
625 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
626 		ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
627 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
628 	}
629 
630 	// In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
631 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
632 		ledger_internal = get_task_internal(corpse_task);
633 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
634 	}
635 
636 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
637 		ledger_internal_compressed = get_task_internal_compressed(corpse_task);
638 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
639 	}
640 
641 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
642 		ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
643 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
644 	}
645 
646 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
647 		ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
648 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
649 	}
650 
651 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
652 		ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
653 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
654 	}
655 
656 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
657 		ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
658 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
659 	}
660 
661 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
662 		ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
663 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
664 	}
665 
666 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
667 		ledger_page_table = get_task_page_table(corpse_task);
668 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
669 	}
670 
671 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
672 		ledger_phys_footprint = get_task_phys_footprint(corpse_task);
673 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
674 	}
675 
676 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
677 		ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
678 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
679 	}
680 
681 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
682 		ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
683 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
684 	}
685 
686 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
687 		ledger_wired_mem = get_task_wired_mem(corpse_task);
688 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
689 	}
690 
691 	bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
692 	retval = fill_procworkqueue(p, &pwqinfo);
693 	if (retval == 0) {
694 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
695 			kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
696 		}
697 	}
698 
699 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
700 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
701 	}
702 
703 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
704 		uid_t persona_id = proc_persona_id(p);
705 		kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
706 	}
707 
708 #if CONFIG_COALITIONS
709 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
710 		uint64_t coalition_ids[COALITION_NUM_TYPES];
711 		task_coalition_ids(proc_task(p), coalition_ids);
712 		kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
713 	}
714 #endif /* CONFIG_COALITIONS */
715 
716 #if CONFIG_MEMORYSTATUS
717 	memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
718 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
719 		kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
720 	}
721 #endif
722 
723 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
724 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
725 	}
726 
727 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
728 		ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
729 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
730 	}
731 
732 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
733 		ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
734 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
735 	}
736 
737 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
738 		ledger_media_footprint = get_task_media_footprint(corpse_task);
739 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
740 	}
741 
742 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
743 		ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
744 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
745 	}
746 
747 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
748 		ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
749 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
750 	}
751 
752 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
753 		ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
754 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
755 	}
756 
757 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
758 		ledger_neural_footprint = get_task_neural_footprint(corpse_task);
759 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
760 	}
761 
762 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
763 		ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
764 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
765 	}
766 
767 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
768 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
769 	}
770 
771 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
772 		char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
773 		ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
774 		kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
775 	}
776 
777 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
778 		is_corpse_fork = is_corpsefork(corpse_task);
779 		kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
780 	}
781 
782 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
783 		kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
784 	}
785 
786 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
787 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
788 	}
789 
790 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
791 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
792 	}
793 
794 	char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
795 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
796 		const char * id = cs_identity_get(p);
797 		if (id) {
798 			strlcpy(signing_id, id, sizeof(signing_id));
799 		}
800 		kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
801 	}
802 	char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
803 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
804 		const char * id = csproc_get_teamid(p);
805 		if (id) {
806 			strlcpy(team_id, id, sizeof(team_id));
807 		}
808 		kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
809 	}
810 
811 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
812 		uint32_t category = 0;
813 		if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
814 			category = CS_VALIDATION_CATEGORY_INVALID;
815 		}
816 		kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
817 	}
818 
819 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
820 		uint32_t trust = 0;
821 		kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
822 		if (ret != KERN_SUCCESS) {
823 			trust = KCDATA_INVALID_CS_TRUST_LEVEL;
824 		}
825 		kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
826 	}
827 
828 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_SECURITY_CONFIG, sizeof(uint32_t), &uaddr)) {
829 		struct crashinfo_task_security_config task_security;
830 		task_security.task_security_config = task_get_security_config(corpse_task);
831 		kcdata_memcpy(crash_info_ptr, uaddr, &task_security, sizeof(task_security));
832 	}
833 
834 	uint64_t jit_start_addr = 0;
835 	uint64_t jit_end_addr = 0;
836 	kern_return_t ret = get_jit_address_range_kdp(get_task_pmap(corpse_task), (uintptr_t*)&jit_start_addr, (uintptr_t*)&jit_end_addr);
837 	if (KERN_SUCCESS == ret) {
838 		struct crashinfo_jit_address_range range = {};
839 		range.start_address = jit_start_addr;
840 		range.end_address = jit_end_addr;
841 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_JIT_ADDRESS_RANGE, sizeof(struct crashinfo_jit_address_range), &uaddr)) {
842 			kcdata_memcpy(crash_info_ptr, uaddr, &range, sizeof(range));
843 		}
844 	}
845 
846 	uint64_t cs_auxiliary_info = task_get_cs_auxiliary_info_kdp(corpse_task);
847 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_AUXILIARY_INFO, sizeof(cs_auxiliary_info), &uaddr)) {
848 		kcdata_memcpy(crash_info_ptr, uaddr, &cs_auxiliary_info, sizeof(cs_auxiliary_info));
849 	}
850 
851 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RLIM_CORE, sizeof(rlim_t), &uaddr)) {
852 		const rlim_t lim = proc_limitgetcur(p, RLIMIT_CORE);
853 		kcdata_memcpy(crash_info_ptr, uaddr, &lim, sizeof(lim));
854 	}
855 
856 #if CONFIG_UCOREDUMP
857 	if (do_ucoredump && !task_is_driver(proc_task(p)) &&
858 	    KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CORE_ALLOWED, sizeof(uint8_t), &uaddr)) {
859 		const uint8_t allow = is_coredump_eligible(p) == 0;
860 		kcdata_memcpy(crash_info_ptr, uaddr, &allow, sizeof(allow));
861 	}
862 #endif /* CONFIG_UCOREDUMP */
863 
864 	if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
865 		reason = p->p_exit_reason;
866 	}
867 
868 #if HAS_MTE
869 	/* For MTE-related failures, we add the tag data for the whole page containing the faulting address */
870 	uint32_t guard_type = EXC_GUARD_DECODE_GUARD_TYPE(saved_code);
871 	uint32_t guard_flavor = EXC_GUARD_DECODE_GUARD_FLAVOR(saved_code);
872 
873 	/*
874 	 * Extract tag data information for any possible synchronous or asynchronous MTE
875 	 * failure (both in hard and soft mode).
876 	 */
877 	if ((reason != OS_REASON_NULL && reason->osr_namespace == OS_REASON_MTE_FAIL) ||
878 	    (etype == EXC_GUARD && guard_type == GUARD_TYPE_VIRT_MEMORY && vm_guard_is_mte_fault(guard_flavor))) {
879 		vm_map_t task_map = get_task_map(corpse_task);
880 		if (task_map != kernel_map) {
881 			/* subcode is the faulting address, see propagation from handle_user_abort */
882 			vm_address_t page_addr = vm_map_trunc_page_mask(subcode, PAGE_MASK);
883 			vm_address_t canonicalized_page_addr = vm_memtag_canonicalize(task_map, page_addr);
884 			struct crashinfo_mb tag_info = {
885 				.start_address = canonicalized_page_addr,
886 				.data = {0},
887 			};
888 			kern_return_t res = vm_map_page_tags_get(task_map, canonicalized_page_addr, tag_info.data, (sizeof(tag_info.data)));
889 			if (KERN_SUCCESS == res &&
890 			    KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MB, sizeof(struct crashinfo_mb), &uaddr)) {
891 				kcdata_memcpy(crash_info_ptr, uaddr, &tag_info, sizeof(tag_info));
892 			}
893 		}
894 	}
895 #endif /* HAS_MTE */
896 
897 	if (reason != OS_REASON_NULL) {
898 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
899 			struct exit_reason_snapshot ers = {
900 				.ers_namespace = reason->osr_namespace,
901 				.ers_code = reason->osr_code,
902 				.ers_flags = reason->osr_flags
903 			};
904 
905 			kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
906 		}
907 
908 		if (reason->osr_kcd_buf != 0) {
909 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
910 			assert(reason_buf_size != 0);
911 
912 			if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
913 				kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
914 			}
915 		}
916 	}
917 
918 	if (num_udata > 0) {
919 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
920 		    sizeof(uint64_t), num_udata, &uaddr)) {
921 			kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
922 		}
923 	}
924 
925 #if CONFIG_EXCLAVES
926 	task_add_conclave_crash_info(corpse_task, crash_info_ptr);
927 #endif /* CONFIG_EXCLAVES */
928 }
929 
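/*
 * Walk a corpse's kcdata crash-info buffer and return the exception type
 * recorded under TASK_CRASHINFO_EXCEPTION_TYPE.
 */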
930 exception_type_t
931 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
932 {
933 	kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
934 	    corpse_info->kcd_length);
935 	__assert_only uint32_t type = kcdata_iter_type(iter);
936 	assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
937 
938 	iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
939 	exception_type_t *etype = kcdata_iter_payload(iter);
940 	return *etype;
941 }
942 
943 /*
944  * Collect information required for generating lightweight corpse for current
945  * task, which can be terminating.
946  */
947 kern_return_t
948 current_thread_collect_backtrace_info(
949 	kcdata_descriptor_t *new_desc,
950 	exception_type_t etype,
951 	mach_exception_data_t code,
952 	mach_msg_type_number_t codeCnt,
953 	void *reasonp)
954 {
955 	kcdata_descriptor_t kcdata;
956 	kern_return_t kr;
957 	int frame_count = 0, max_frames = 100;
958 	mach_vm_address_t uuid_info_addr = 0;
959 	uint32_t uuid_info_count         = 0;
960 	uint32_t btinfo_flag             = 0;
961 	mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
962 	natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
963 	mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
964 	thread_identifier_info_data_t th_info;
965 	char threadname[MAXTHREADNAMESIZE];
966 	void *btdata_kernel = NULL;
967 	typedef uintptr_t user_btframe_t __kernel_data_semantics;
968 	user_btframe_t *btframes = NULL;
969 	os_reason_t reason = (os_reason_t)reasonp;
970 	struct backtrace_user_info info = BTUINFO_INIT;
971 	struct rusage_superset rup;
972 	uint32_t platform;
973 
974 	task_t task = current_task();
975 	proc_t p = current_proc();
976 
977 	bool has_64bit_addr = task_get_64bit_addr(current_task());
978 	bool has_64bit_data = task_get_64bit_data(current_task());
979 
980 	if (new_desc == NULL) {
981 		return KERN_INVALID_ARGUMENT;
982 	}
983 
984 	/* First, collect backtrace frames */
985 	btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
986 	if (!btframes) {
987 		return KERN_RESOURCE_SHORTAGE;
988 	}
989 
990 	frame_count = backtrace_user(btframes, max_frames, NULL, &info);
991 	if (info.btui_error || frame_count == 0) {
992 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
993 		return KERN_FAILURE;
994 	}
995 
996 	if ((info.btui_info & BTI_TRUNCATED) != 0) {
997 		btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
998 	}
999 
1000 	/* Captured in kcdata descriptor below */
1001 	btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
1002 	if (!btdata_kernel) {
1003 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
1004 		return KERN_RESOURCE_SHORTAGE;
1005 	}
1006 
1007 	kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
1008 	if (!kcdata) {
1009 		kfree_data(btdata_kernel, alloc_size);
1010 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
1011 		return KERN_RESOURCE_SHORTAGE;
1012 	}
1013 
1014 	/* First reserve space in kcdata blob for the btinfo flag fields */
1015 	if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
1016 	    sizeof(uint32_t), &btinfo_flag_addr)) {
1017 		kfree_data(btdata_kernel, alloc_size);
1018 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
1019 		kcdata_memory_destroy(kcdata);
1020 		return KERN_RESOURCE_SHORTAGE;
1021 	}
1022 
1023 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1024 	    (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
1025 	    sizeof(uintptr_t), frame_count, &kaddr)) {
1026 		kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
1027 	}
1028 
1029 #if __LP64__
1030 	/* We only support async stacks on 64-bit kernels */
1031 	frame_count = 0;
1032 
1033 	if (info.btui_async_frame_addr != 0) {
1034 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
1035 		    sizeof(uint32_t), &kaddr)) {
1036 			uint32_t idx = info.btui_async_start_index;
1037 			kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
1038 		}
1039 		struct backtrace_control ctl = {
1040 			.btc_frame_addr = info.btui_async_frame_addr,
1041 			.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
1042 		};
1043 
1044 		info = BTUINFO_INIT;
1045 		frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
1046 		if (info.btui_error == 0 && frame_count > 0) {
1047 			if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1048 			    TASK_BTINFO_ASYNC_BACKTRACE64,
1049 			    sizeof(uintptr_t), frame_count, &kaddr)) {
1050 				kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
1051 			}
1052 		}
1053 
1054 		if ((info.btui_info & BTI_TRUNCATED) != 0) {
1055 			btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
1056 		}
1057 	}
1058 #endif
1059 
1060 	/* Backtrace collection done, free the frames buffer */
1061 	kfree_data(btframes, max_frames * sizeof(btframes[0]));
1062 	btframes = NULL;
1063 
1064 	thread_set_exec_promotion(current_thread());
1065 	/* Next, suspend the task briefly and collect image load infos */
1066 	task_suspend_internal(task);
1067 
1068 	/* all_image_info struct is ABI, in agreement with address width */
1069 	if (has_64bit_addr) {
1070 		struct user64_dyld_all_image_infos task_image_infos = {};
1071 		struct btinfo_sc_load_info64 sc_info;
1072 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1073 		    sizeof(struct user64_dyld_all_image_infos));
1074 		uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1075 		uuid_info_addr = task_image_infos.uuidArray;
1076 
1077 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1078 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1079 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1080 		    sizeof(task_image_infos.sharedCacheUUID));
1081 
1082 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1083 		    TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1084 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1085 		}
1086 	} else {
1087 		struct user32_dyld_all_image_infos task_image_infos = {};
1088 		struct btinfo_sc_load_info sc_info;
1089 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1090 		    sizeof(struct user32_dyld_all_image_infos));
1091 		uuid_info_count = task_image_infos.uuidArrayCount;
1092 		uuid_info_addr = task_image_infos.uuidArray;
1093 
1094 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1095 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1096 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1097 		    sizeof(task_image_infos.sharedCacheUUID));
1098 
1099 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1100 		    TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1101 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1102 		}
1103 	}
1104 
1105 	if (!uuid_info_addr) {
1106 		/*
1107 		 * Can happen when we catch dyld in the middle of updating
1108 		 * this data structure, or copyin of all_image_info struct failed.
1109 		 */
1110 		task_resume_internal(task);
1111 		thread_clear_exec_promotion(current_thread());
1112 		kfree_data(btdata_kernel, alloc_size);
1113 		kcdata_memory_destroy(kcdata);
1114 		return KERN_MEMORY_ERROR;
1115 	}
1116 
1117 	if (uuid_info_count > 0) {
1118 		uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1119 		    sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1120 
1121 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1122 		    (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1123 		    uuid_info_size, uuid_info_count, &kaddr)) {
1124 			if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1125 				task_resume_internal(task);
1126 				thread_clear_exec_promotion(current_thread());
1127 				kfree_data(btdata_kernel, alloc_size);
1128 				kcdata_memory_destroy(kcdata);
1129 				return KERN_MEMORY_ERROR;
1130 			}
1131 		}
1132 	}
1133 
1134 	task_resume_internal(task);
1135 	thread_clear_exec_promotion(current_thread());
1136 
1137 	/* Next, collect all other information */
1138 	thread_flavor_t tsflavor;
1139 	mach_msg_type_number_t tscount;
1140 
1141 #if defined(__x86_64__) || defined(__i386__)
1142 	tsflavor = x86_THREAD_STATE;      /* unified */
1143 	tscount  = x86_THREAD_STATE_COUNT;
1144 #else
1145 	tsflavor = ARM_THREAD_STATE;      /* unified */
1146 	tscount  = ARM_UNIFIED_THREAD_STATE_COUNT;
1147 #endif
1148 
1149 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1150 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1151 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1152 		bt_thread_state->flavor = tsflavor;
1153 		bt_thread_state->count = tscount;
1154 		/* variable-sized tstate array follows */
1155 
1156 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1157 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1158 		if (kr != KERN_SUCCESS) {
1159 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1160 			if (kr == KERN_TERMINATED) {
1161 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1162 			}
1163 		}
1164 	}
1165 
1166 #if defined(__x86_64__) || defined(__i386__)
1167 	tsflavor = x86_EXCEPTION_STATE;       /* unified */
1168 	tscount  = x86_EXCEPTION_STATE_COUNT;
1169 #else
1170 #if defined(__arm64__)
1171 	if (has_64bit_data) {
1172 		tsflavor = ARM_EXCEPTION_STATE64;
1173 		tscount  = ARM_EXCEPTION_STATE64_COUNT;
1174 	} else
1175 #endif /* defined(__arm64__) */
1176 	{
1177 		tsflavor = ARM_EXCEPTION_STATE;
1178 		tscount  = ARM_EXCEPTION_STATE_COUNT;
1179 	}
1180 #endif /* defined(__x86_64__) || defined(__i386__) */
1181 
1182 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1183 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1184 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1185 		bt_thread_state->flavor = tsflavor;
1186 		bt_thread_state->count = tscount;
1187 		/* variable-sized tstate array follows */
1188 
1189 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1190 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1191 		if (kr != KERN_SUCCESS) {
1192 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1193 			if (kr == KERN_TERMINATED) {
1194 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1195 			}
1196 		}
1197 	}
1198 
1199 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1200 		pid_t pid = proc_getpid(p);
1201 		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1202 	}
1203 
1204 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1205 		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1206 	}
1207 
1208 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1209 		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1210 	}
1211 
1212 #if CONFIG_COALITIONS
1213 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1214 		uint64_t coalition_ids[COALITION_NUM_TYPES];
1215 		task_coalition_ids(proc_task(p), coalition_ids);
1216 		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1217 	}
1218 #endif /* CONFIG_COALITIONS */
1219 
1220 	/* V0 is sufficient for ReportCrash */
1221 	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1222 	rup.ri.ri_phys_footprint = 0;
1223 	/* Soft crash, proc did not exit */
1224 	rup.ri.ri_proc_exit_abstime = 0;
1225 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1226 		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1227 	}
1228 
1229 	platform = proc_platform(current_proc());
1230 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1231 		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1232 	}
1233 
1234 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1235 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1236 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1237 		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1238 		zfree(ZV_NAMEI, buf);
1239 	}
1240 
1241 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1242 		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1243 	}
1244 
1245 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1246 		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1247 	}
1248 
1249 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1250 		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1251 		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1252 	}
1253 
1254 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1255 		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1256 		if (has_64bit_addr) {
1257 			cputype |= CPU_ARCH_ABI64;
1258 		} else if (has_64bit_data) {
1259 			cputype |= CPU_ARCH_ABI64_32;
1260 		}
1261 		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1262 	}
1263 
1264 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1265 		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1266 	}
1267 
1268 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1269 		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1270 	}
1271 
1272 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1273 		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1274 	}
1275 
1276 	assert(codeCnt <= EXCEPTION_CODE_MAX);
1277 
1278 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1279 	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1280 		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1281 	}
1282 
1283 	if (reason != OS_REASON_NULL) {
1284 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1285 			struct exit_reason_snapshot ers = {
1286 				.ers_namespace = reason->osr_namespace,
1287 				.ers_code = reason->osr_code,
1288 				.ers_flags = reason->osr_flags
1289 			};
1290 
1291 			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1292 		}
1293 
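		/* Embed the exit reason's own kcdata buffer verbatim as a nested kcdata item. */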
1294 		if (reason->osr_kcd_buf != 0) {
1295 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1296 			assert(reason_buf_size != 0);
1297 
1298 			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1299 				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1300 			}
1301 		}
1302 	}
1303 
1304 	threadname[0] = '\0';
1305 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1306 	    sizeof(threadname), &kaddr)) {
1307 		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1308 		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1309 	}
1310 
1311 	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1312 	if (kr == KERN_TERMINATED) {
1313 		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1314 	}
1315 
1316 
1317 	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1318 	    sizeof(uint64_t), &kaddr);
1319 
1320 	/*
1321 	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to exception
1322 	 * handler (ReportCrash) that lightweight corpse collection ran out of space and the
1323 	 * result is incomplete.
1324 	 */
1325 	if (last_kr != KERN_SUCCESS) {
1326 		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1327 	}
1328 
1329 	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1330 		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1331 	}
1332 
1333 	/* Lastly, copy the flags to the address we reserved at the beginning. */
1334 	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1335 
1336 	*new_desc = kcdata;
1337 
1338 	return KERN_SUCCESS;
1339 }
1340 
1341 /*
1342  * We only parse exit reason kcdata blobs for critical processes before they die
1343  * and we're going to panic, or for opt-in, limited diagnostic tools.
1344  *
1345  * Meant to be called immediately before panicking or in limited diagnostic
1346  * scenarios.
1347  */
1348 char *
1349 exit_reason_get_string_desc(os_reason_t exit_reason)
1350 {
1351 	kcdata_iter_t iter;
1352 
1353 	if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1354 	    exit_reason->osr_bufsize == 0) {
1355 		return NULL;
1356 	}
1357 
1358 	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1359 	if (!kcdata_iter_valid(iter)) {
1360 #if DEBUG || DEVELOPMENT
1361 		printf("exit reason has invalid exit reason buffer\n");
1362 #endif
1363 		return NULL;
1364 	}
1365 
1366 	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1367 #if DEBUG || DEVELOPMENT
1368 		printf("exit reason buffer type mismatch, expected %d got %d\n",
1369 		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1370 #endif
1371 		return NULL;
1372 	}
1373 
1374 	iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1375 	if (!kcdata_iter_valid(iter)) {
1376 		return NULL;
1377 	}
1378 
1379 	return (char *)kcdata_iter_payload(iter);
1380 }
1381 
1382 static int initproc_spawned = 0;
1383 
1384 static int
1385 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1386 {
1387 	if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1388 		// Can only ever be set by launchd, and only once at boot
1389 		return EPERM;
1390 	}
1391 	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1392 }
1393 
1394 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1395     CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1396     sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
1397 
1398 #if DEVELOPMENT || DEBUG
1399 
1400 /* disable user faults */
1401 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1402 #endif /* DEVELOPMENT || DEBUG */
1403 
1404 #define OS_REASON_IFLAG_USER_FAULT 0x1
1405 
1406 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC  5
1407 
1408 static int
1409 abort_with_payload_internal(proc_t p,
1410     uint32_t reason_namespace, uint64_t reason_code,
1411     user_addr_t payload, uint32_t payload_size,
1412     user_addr_t reason_string, uint64_t reason_flags,
1413     uint32_t internal_flags)
1414 {
1415 	os_reason_t exit_reason = OS_REASON_NULL;
1416 	kern_return_t kr = KERN_SUCCESS;
1417 
1418 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1419 		uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1420 		    memory_order_relaxed);
1421 
1422 #if DEVELOPMENT || DEBUG
1423 		if (bootarg_disable_user_faults) {
1424 			return EQFULL;
1425 		}
1426 #endif /* DEVELOPMENT || DEBUG */
1427 
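		/* CAS loop: bump p_user_faults unless the per-process cap has already been reached. */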
1428 		for (;;) {
1429 			if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1430 				return EQFULL;
1431 			}
1432 			// this reloads the value in old_value
1433 			if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1434 			    &old_value, old_value + 1, memory_order_relaxed,
1435 			    memory_order_relaxed)) {
1436 				break;
1437 			}
1438 		}
1439 	}
1440 
1441 	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1442 	    proc_getpid(p), reason_namespace,
1443 	    reason_code, 0, 0);
1444 
1445 	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1446 	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1447 
1448 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1449 		mach_exception_code_t code = 0;
1450 
1451 		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1452 		EXC_GUARD_ENCODE_FLAVOR(code, 0);
1453 		EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1454 
1455 		if (exit_reason == OS_REASON_NULL) {
1456 			kr = KERN_RESOURCE_SHORTAGE;
1457 		} else {
1458 			kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1459 		}
1460 		os_reason_free(exit_reason);
1461 	} else {
1462 		/*
1463 		 * We use SIGABRT (rather than calling exit directly from here) so that
1464 		 * the debugger can catch abort_with_{reason,payload} calls.
1465 		 */
1466 		psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1467 	}
1468 
1469 	switch (kr) {
1470 	case KERN_SUCCESS:
1471 		return 0;
1472 	case KERN_NOT_SUPPORTED:
1473 		return ENOTSUP;
1474 	case KERN_INVALID_ARGUMENT:
1475 		return EINVAL;
1476 	case KERN_RESOURCE_SHORTAGE:
1477 	default:
1478 		return EBUSY;
1479 	}
1480 }
1481 
1482 int
1483 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1484     __unused void *retval)
1485 {
1486 	abort_with_payload_internal(cur_proc, args->reason_namespace,
1487 	    args->reason_code, args->payload, args->payload_size,
1488 	    args->reason_string, args->reason_flags, 0);
1489 
1490 	return 0;
1491 }
1492 
1493 int
1494 os_fault_with_payload(struct proc *cur_proc,
1495     struct os_fault_with_payload_args *args, __unused int *retval)
1496 {
1497 	return abort_with_payload_internal(cur_proc, args->reason_namespace,
1498 	           args->reason_code, args->payload, args->payload_size,
1499 	           args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1500 }
1501 
1502 
1503 /*
1504  * exit --
1505  *	Death of process.
1506  */
1507 __attribute__((noreturn))
1508 void
1509 exit(proc_t p, struct exit_args *uap, int *retval)
1510 {
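	/* Stash the top byte of the 32-bit exit status; classic wait() status keeps only the low 8 bits. */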
1511 	p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1512 	exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1513 
1514 	thread_exception_return();
1515 	/* NOTREACHED */
1516 	while (TRUE) {
1517 		thread_block(THREAD_CONTINUE_NULL);
1518 	}
1519 	/* NOTREACHED */
1520 }
1521 
1522 /*
1523  * Exit: deallocate address space and other resources, change proc state
1524  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
1525  * status and rusage for wait().  Check for child processes and orphan them.
1526  */
1527 int
1528 exit1(proc_t p, int rv, int *retval)
1529 {
1530 	return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1531 }
1532 
1533 int
1534 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1535     int jetsam_flags)
1536 {
1537 	return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1538 }
1539 
1540 /*
1541  * NOTE: exit_with_reason drops a reference on the passed exit_reason
1542  */
1543 int
1544 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1545     int jetsam_flags, struct os_reason *exit_reason)
1546 {
1547 	thread_t self = current_thread();
1548 	struct task *task = proc_task(p);
1549 	struct uthread *ut;
1550 	int error = 0;
1551 	bool proc_exiting = false;
1552 
1553 #if DEVELOPMENT || DEBUG
1554 	/*
1555 	 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1556 	 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1557 	 */
1558 	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1559 		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1560 		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1561 	}
1562 #endif
1563 
1564 	/*
1565 	 * If a thread in this task has already
1566 	 * called exit(), then halt any others
1567 	 * right here.
1568 	 */
1569 
1570 	ut = get_bsdthread_info(self);
1571 	(void)retval;
1572 
1573 	/*
1574 	 * The parameter list of audit_syscall_exit() was augmented to
1575 	 * take the Darwin syscall number as the first parameter,
1576 	 * which is currently required by mac_audit_postselect().
1577 	 */
1578 
1579 	/*
1580 	 * The BSM token contains two components: an exit status as passed
1581 	 * to exit(), and a return value to indicate what sort of exit it
1582 	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
1583 	 * what the return value is.
1584 	 */
1585 	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1586 	/*
1587 	 * TODO: what to audit here when jetsam calls exit and the uthread,
1588 	 * 'ut' does not belong to the proc, 'p'.
1589 	 */
1590 	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1591 
1592 	DTRACE_PROC1(exit, int, CLD_EXITED);
1593 
1594 	/* mark that the process is going to exit and pull it out of DBG/disk throttle */
1595 	/* TODO: This should be done after becoming exit thread */
1596 	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1597 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1598 
1599 	proc_lock(p);
1600 	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1601 	if (error == EDEADLK) {
1602 		/*
1603 		 * If proc_transstart() returns EDEADLK, then another thread
1604 		 * is either exec'ing or exiting. Return an error and allow
1605 		 * the other thread to continue.
1606 		 */
1607 		proc_unlock(p);
1608 		os_reason_free(exit_reason);
1609 		if (current_proc() == p) {
1610 			if (p->exit_thread == self) {
1611 				panic("exit_thread failed to exit");
1612 			}
1613 
1614 			if (thread_can_terminate) {
1615 				thread_exception_return();
1616 			}
1617 		}
1618 
1619 		return error;
1620 	}
1621 
1622 	proc_exiting = !!(p->p_lflag & P_LEXIT);
1623 
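	/*
	 * Race to become the exiting thread. If another thread already owns the
	 * exit (or the proc is already exiting), back out and terminate only the
	 * calling thread.
	 */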
1624 	while (proc_exiting || p->exit_thread != self) {
1625 		if (proc_exiting || sig_try_locked(p) <= 0) {
1626 			proc_transend(p, 1);
1627 			os_reason_free(exit_reason);
1628 
1629 			if (get_threadtask(self) != task) {
1630 				proc_unlock(p);
1631 				return 0;
1632 			}
1633 			proc_unlock(p);
1634 
1635 			thread_terminate(self);
1636 			if (!thread_can_terminate) {
1637 				return 0;
1638 			}
1639 
1640 			thread_exception_return();
1641 			/* NOTREACHED */
1642 		}
1643 		sig_lock_to_exit(p);
1644 	}
1645 
1646 	if (exit_reason != OS_REASON_NULL) {
1647 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1648 		    proc_getpid(p), exit_reason->osr_namespace,
1649 		    exit_reason->osr_code, 0, 0);
1650 	}
1651 
1652 	assert(p->p_exit_reason == OS_REASON_NULL);
1653 	p->p_exit_reason = exit_reason;
1654 
1655 	p->p_lflag |= P_LEXIT;
1656 	p->p_xstat = rv;
1657 	p->p_lflag |= jetsam_flags;
1658 
1659 	proc_transend(p, 1);
1660 	proc_unlock(p);
1661 
1662 	proc_prepareexit(p, rv, perf_notify);
1663 
1664 	/* Last thread to terminate will call proc_exit() */
1665 	task_terminate_internal(task);
1666 
1667 	return 0;
1668 }
1669 
1670 #if CONFIG_MEMORYSTATUS
1671 /*
1672  * Remove this process from jetsam bands for freezing or exiting. Note this will block if the process
1673  * is currently being frozen.
1674  * The proc_list_lock is held by the caller.
1675  * NB: If the process should be ineligible for future freezing or jetsaming the caller should first set
1676  * the p_refcount P_REF_DEAD bit.
1677  */
1678 static void
1679 proc_memorystatus_remove(proc_t p)
1680 {
1681 	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1682 	while (memorystatus_remove(p) == EAGAIN) {
1683 		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1684 		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1685 	}
1686 }
1687 #endif
1688 
1689 #if DEVELOPMENT
1690 boolean_t crash_behavior_test_mode = FALSE;
1691 boolean_t crash_behavior_test_would_panic = FALSE;
1692 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1693 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1694 #endif /* DEVELOPMENT */
1695 
1696 static bool
1697 _proc_is_crashing_signal(int sig)
1698 {
1699 	bool result = false;
1700 	switch (sig) {
1701 	case SIGILL:
1702 	case SIGABRT:
1703 	case SIGFPE:
1704 	case SIGBUS:
1705 	case SIGSEGV:
1706 	case SIGSYS:
1707 	/*
1708 	 * If SIGTRAP is the terminating signal, then we can safely assume the
1709 	 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1710 	 * a process calls __builtin_trap(), which will abort.)
1711 	 */
1712 	case SIGTRAP:
1713 		result = true;
1714 	}
1715 
1716 	return result;
1717 }
1718 
1719 static bool
1720 _proc_is_fatal_reason(os_reason_t reason)
1721 {
1722 	if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1723 		/* Abort is always fatal even if there is no crash report generated */
1724 		return true;
1725 	}
1726 	if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1727 		/*
1728 		 * No crash report means this reason shouldn't be considered fatal
1729 		 * unless we are in test mode
1730 		 */
1731 #if DEVELOPMENT
1732 		if (crash_behavior_test_mode) {
1733 			return true;
1734 		}
1735 #endif /* DEVELOPMENT */
1736 		return false;
1737 	}
1738 	// By default all OS_REASON are fatal
1739 	return true;
1740 }
1741 
1742 static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1743 
1744 static bool
1745 proc_should_trigger_panic(proc_t p, int rv)
1746 {
1747 	if (p == initproc) {
1748 		/* Always panic for launchd */
1749 		return true;
1750 	}
1751 
1752 	if (panic_on_crash_disabled) {
1753 		printf("panic-on-crash disabled via boot-arg\n");
1754 		return false;
1755 	}
1756 
1757 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1758 		return true;
1759 	}
1760 
1761 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1762 		return true;
1763 	}
1764 
1765 	if (p->p_posix_spawn_failed) {
1766 		/* posix_spawn failures normally don't qualify for panics */
1767 		return false;
1768 	}
1769 
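	/* A non-zero deadline bounds the panic-on-crash window; once it has passed, do not panic. */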
1770 	bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1771 	if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1772 		return false;
1773 	}
1774 
1775 	if (WIFEXITED(rv)) {
1776 		int code = WEXITSTATUS(rv);
1777 
1778 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1779 			if (code == 0) {
1780 				/* No panic if we exit 0 */
1781 				return false;
1782 			} else {
1783 				/* Panic on non-zero exit */
1784 				return true;
1785 			}
1786 		} else {
1787 			/* No panic on normal exit if the process doesn't have the non-zero flag set */
1788 			return false;
1789 		}
1790 	} else if (WIFSIGNALED(rv)) {
1791 		int signal = WTERMSIG(rv);
1792 		/* This is a crash (non-normal exit) */
1793 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1794 			os_reason_t reason = p->p_exit_reason;
1795 			if (reason != OS_REASON_NULL) {
1796 				if (!_proc_is_fatal_reason(reason)) {
1797 					// Skip non-fatal terminate_with_reason
1798 					return false;
1799 				}
1800 				if (reason->osr_namespace == OS_REASON_SIGNAL) {
1801 					/*
1802 					 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
1803 					 * in osr_code, so we should check that signal here
1804 					 */
1805 					return _proc_is_crashing_signal((int)reason->osr_code);
1806 				} else {
1807 					/*
1808 					 * This branch covers the case of terminate_with_reason which
1809 					 * delivers a SIGTERM which is still considered a crash even
1810 					 * though the signal is not considered a crashing signal
1811 					 */
1812 					return true;
1813 				}
1814 			}
1815 			return _proc_is_crashing_signal(signal);
1816 		} else {
1817 			return false;
1818 		}
1819 	} else {
1820 		/*
1821 		 * This branch implies that we didn't exit normally nor did we receive
1822 		 * a signal. This should be unreachable.
1823 		 */
1824 		return true;
1825 	}
1826 }
1827 
1828 static void
1829 proc_crash_coredump(proc_t p)
1830 {
1831 	(void)p;
1832 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1833 	/*
1834 	 * For debugging purposes, generate a core file of initproc before
1835 	 * panicking. Leave at least 300 MB free on the root volume, and ignore
1836 	 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1837 	 * before the panic hits.
1838 	 */
1839 
1840 	int             err;
1841 	uint64_t        coredump_start = mach_absolute_time();
1842 	uint64_t        coredump_end;
1843 	clock_sec_t     tv_sec;
1844 	clock_usec_t    tv_usec;
1845 	uint32_t        tv_msec;
1846 
1847 
1848 	err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1849 
1850 	coredump_end = mach_absolute_time();
1851 
1852 	absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1853 
1854 	tv_msec = tv_usec / 1000;
1855 
1856 	if (err != 0) {
1857 		printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1858 		    proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1859 	} else {
1860 		printf("Generated core file for pid: %d in %d.%03d seconds\n",
1861 		    proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1862 	}
1863 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1864 }
1865 
1866 static void
1867 proc_handle_critical_exit(proc_t p, int rv)
1868 {
1869 	if (!proc_should_trigger_panic(p, rv)) {
1870 		// No panic, bail out
1871 		return;
1872 	}
1873 
1874 #if DEVELOPMENT
1875 	if (crash_behavior_test_mode) {
1876 		crash_behavior_test_would_panic = TRUE;
1877 		// Force test mode off after hitting a panic
1878 		crash_behavior_test_mode = FALSE;
1879 		return;
1880 	}
1881 #endif /* DEVELOPMENT */
1882 
1883 	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1884 
1885 	if (p->p_exit_reason == OS_REASON_NULL) {
1886 		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1887 		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1888 	} else {
1889 		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1890 		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1891 		    exit_reason_desc : "none");
1892 	}
1893 
1894 	const char *prefix_str;
1895 	char prefix_str_buf[128];
1896 
1897 	if (p == initproc) {
1898 		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1899 			prefix_str = "LTE preinit process exited";
1900 		} else if (initproc_spawned) {
1901 			prefix_str = "initproc exited";
1902 		} else {
1903 			prefix_str = "initproc failed to start";
1904 		}
1905 	} else {
1906 		/* For processes that aren't launchd, just use the process name and pid */
1907 		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1908 		prefix_str = prefix_str_buf;
1909 	}
1910 
1911 	proc_crash_coredump(p);
1912 
1913 	sync(p, (void *)NULL, (int *)NULL);
1914 	const uint64_t panic_options_mask = DEBUGGER_OPTION_INITPROC_PANIC | DEBUGGER_OPTION_USERSPACE_INITIATED_PANIC;
1915 
1916 	if (p->p_exit_reason == OS_REASON_NULL) {
1917 		panic_with_options(0, NULL, panic_options_mask, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1918 		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1919 	} else {
1920 		panic_with_options(0, NULL, panic_options_mask, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1921 		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1922 		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1923 		    exit_reason_desc ? exit_reason_desc : "none");
1924 	}
1925 }
1926 
1927 void
1928 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1929 {
1930 	mach_exception_data_type_t code = 0, subcode = 0, saved_code = 0;
1931 	exception_type_t etype;
1932 
1933 	struct uthread *ut;
1934 	thread_t self = current_thread();
1935 	ut = get_bsdthread_info(self);
1936 	struct rusage_superset *rup;
1937 	int kr = 0;
1938 	int create_corpse = FALSE;
1939 	bool corpse_source = false;
1940 	task_t task = proc_task(p);
1941 
1942 
1943 	if (p->p_crash_behavior != 0 || p == initproc) {
1944 		proc_handle_critical_exit(p, rv);
1945 	}
1946 
1947 	if (task) {
1948 		corpse_source = vm_map_is_corpse_source(get_task_map(task));
1949 	}
1950 
1951 	/*
1952 	 * Generate a corefile/crashlog if:
1953 	 *      The process doesn't have an exit reason that indicates no crash report should be created
1954 	 *      AND any of the following are true:
1955 	 *	- The process was terminated due to a fatal signal that generates a core
1956 	 *	- The process was killed due to a code signing violation
1957 	 *	- The process has an exit reason that indicates we should generate a crash report
1958 	 *
1959 	 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1960 	 * (which normally triggers a core) but may indicate that no crash report should be created.
1961 	 */
1962 	if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1963 	    (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1964 	    (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1965 	    OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1966 		/*
1967 		 * Workaround for processes checking up on PT_DENY_ATTACH:
1968 		 * should be backed out post-Leopard (details in 5431025).
1969 		 */
1970 		if ((SIGSEGV == WTERMSIG(rv)) &&
1971 		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
1972 			goto skipcheck;
1973 		}
1974 
1975 		/*
1976 		 * Crash Reporter looks for the signal value, original exception
1977 		 * type, and low 20 bits of the original code in code[0]
1978 		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1979 		 * We still pass down to populate_corpse_crashinfo() the original
1980 		 * code, to parse flavor/type in case of an EXC_GUARD.
1981 		 */
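		/* e.g. SIGSEGV (11), EXC_BAD_ACCESS (1), code 1 pack to 0x0b100001 in code[0]. */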
1982 		code = ((WTERMSIG(rv) & 0xff) << 24) |
1983 		    ((ut->uu_exception & 0x0f) << 20) |
1984 		    ((int)ut->uu_code & 0xfffff);
1985 
1986 		saved_code = ut->uu_code;
1987 		subcode = ut->uu_subcode;
1988 		etype = ut->uu_exception;
1989 
1990 		/* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1991 		if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1992 			etype = EXC_CRASH;
1993 		}
1994 
1995 #if (DEVELOPMENT || DEBUG)
1996 		if (p->p_pid <= exception_log_max_pid) {
1997 			const char *proc_name = proc_best_name(p);
1998 			if (PROC_HAS_EXITREASON(p)) {
1999 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
2000 				    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
2001 				    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
2002 				    exit_reason_get_string_desc(p->p_exit_reason));
2003 			} else {
2004 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
2005 				    "pid: %d -- process name: %s -- exit status %d",
2006 				    proc_getpid(p), proc_name, WEXITSTATUS(rv));
2007 			}
2008 		}
2009 #endif
2010 		const bool fatal = false;
2011 		kr = task_exception_notify(EXC_CRASH, code, subcode, fatal);
2012 		/* Nobody handled EXC_CRASH?? remember to make corpse */
2013 		if ((kr != 0 || corpse_source) && p == current_proc()) {
2014 			/*
2015 			 * Do not create corpse when exit is called from jetsam thread.
2016 			 * Corpse creation code requires that proc_prepareexit is
2017 			 * called by the exiting proc and not the kernel_proc.
2018 			 */
2019 			create_corpse = TRUE;
2020 		}
2021 
2022 		/*
2023 		 * Revalidate the code signing of the text pages around current PC.
2024 		 * This is an attempt to detect and repair faults due to memory
2025 		 * corruption of text pages.
2026 		 *
2027 		 * The goal here is to fixup infrequent memory corruptions due to
2028 		 * things like aging RAM bit flips. So the approach is to only expect
2029 		 * to have to fixup one thing per crash. This also limits the amount
2030 		 * of extra work we cause in case this is a development kernel with an
2031 		 * active memory stomp happening.
2032 		 */
2033 		uintptr_t bt[2];
2034 		struct backtrace_user_info btinfo = BTUINFO_INIT;
2035 		unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
2036 		int bt_err = btinfo.btui_error;
2037 		if (bt_err == 0 && frame_count >= 1) {
2038 			/*
2039 			 * First check the page containing the current PC.
2040 			 * This passes if the page code signs -or- if we can't figure out
2041 			 * what is at that address. The latter action is so we continue checking
2042 			 * previous pages which may be corrupt and caused a wild branch.
2043 			 */
2044 			kr = revalidate_text_page(task, bt[0]);
2045 
2046 			/* No corruption found, check the previous sequential page */
2047 			if (kr == KERN_SUCCESS) {
2048 				kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
2049 			}
2050 
2051 			/* Still no corruption found, check the current function's caller */
2052 			if (kr == KERN_SUCCESS) {
2053 				if (frame_count > 1 &&
2054 				    atop(bt[0]) != atop(bt[1]) &&           /* don't recheck PC page */
2055 				    atop(bt[0]) - 1 != atop(bt[1])) {       /* don't recheck page before */
2056 					kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
2057 				}
2058 			}
2059 
2060 			/*
2061 			 * Log that we found a corruption.
2062 			 */
2063 			if (kr != KERN_SUCCESS) {
2064 				os_log(OS_LOG_DEFAULT,
2065 				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
2066 			}
2067 		}
2068 	}
2069 
2070 skipcheck:
2071 	if (task_is_driver(task) && PROC_HAS_EXITREASON(p)) {
2072 		IOUserServerRecordExitReason(task, p->p_exit_reason);
2073 	}
2074 
2075 	/* Notify the perf server? */
2076 	if (perf_notify) {
2077 		(void)sys_perf_notify(self, proc_getpid(p));
2078 	}
2079 
2080 
2081 	/* stash the usage into corpse data if create_corpse == TRUE */
2082 	if (create_corpse == TRUE) {
2083 		kr = task_mark_corpse(task);
2084 		if (kr != KERN_SUCCESS) {
2085 			if (kr == KERN_NO_SPACE) {
2086 				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
2087 			} else if (kr == KERN_NOT_SUPPORTED) {
2088 				printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n", proc_getpid(p));
2089 			} else if (kr == KERN_TERMINATED) {
2090 				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
2091 			} else {
2092 				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
2093 			}
2094 			create_corpse = FALSE;
2095 		}
2096 	}
2097 
2098 	if (corpse_source && !create_corpse) {
2099 		/* vm_map was marked for corpse, but we decided to not create one, unmark the vmmap */
2100 		vm_map_unset_corpse_source(get_task_map(task));
2101 	}
2102 
2103 	if (!proc_is_shadow(p)) {
2104 		/*
2105 		 * Before this process becomes a zombie, stash resource usage
2106 		 * stats in the proc for external observers to query
2107 		 * via proc_pid_rusage().
2108 		 *
2109 		 * If the zombie allocation fails, just punt the stats.
2110 		 */
2111 		rup = zalloc(zombie_zone);
2112 		gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2113 		rup->ri.ri_phys_footprint = 0;
2114 		rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2115 		/*
2116 		 * Make the rusage_info visible to external observers
2117 		 * only after it has been completely filled in.
2118 		 */
2119 		p->p_ru = rup;
2120 	}
2121 
2122 	if (create_corpse) {
2123 		int est_knotes = 0, num_knotes = 0;
2124 		uint64_t *buffer = NULL;
2125 		uint32_t buf_size = 0;
2126 
2127 		/* Get all the udata pointers from kqueue */
2128 		est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2129 		if (est_knotes > 0) {
2130 			buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2131 			buffer = kalloc_data(buf_size, Z_WAITOK);
2132 			if (buffer) {
2133 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2134 				if (num_knotes > est_knotes + 32) {
2135 					num_knotes = est_knotes + 32;
2136 				}
2137 			}
2138 		}
2139 
2140 		/*
2141 		 * Hack: propagate ktriage information from p_lflag. This should be
2142 		 * removed in favor of something with proper ReportCrash integration.
2143 		 * rdar://163281838 (Remove P_LWASSOFT)
2144 		 */
2145 		if (proc_was_ptraced_during_soft_mode(p)) {
2146 			ktriage_record(thread_tid(current_thread()),
2147 			    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED,
2148 			    KDBG_TRIAGE_VM_SOFT_MODE_DISABLE_TRACED), 0);
2149 		}
2150 
2151 		/* Update the code, subcode based on exit reason */
2152 		proc_update_corpse_exception_codes(p, &code, &subcode);
2153 		populate_corpse_crashinfo(p, task, rup,
2154 		    code, subcode, buffer, num_knotes, NULL, etype, saved_code);
2155 		kfree_data(buffer, buf_size);
2156 	}
2157 	/*
2158 	 * Remove proc from allproc queue and from pidhash chain.
2159 	 * Need to do this before we do anything that can block.
2160 	 * Not doing causes things like mount() find this on allproc
2161 	 * in partially cleaned state.
2162 	 */
2163 
2164 	proc_list_lock();
2165 
2166 #if CONFIG_MEMORYSTATUS
2167 	proc_memorystatus_remove(p);
2168 #endif
2169 
2170 	LIST_REMOVE(p, p_list);
2171 	LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2172 	/* will not be visible via proc_find */
2173 	os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2174 
2175 	proc_list_unlock();
2176 
2177 	/*
2178 	 * If parent is waiting for us to exit or exec,
2179 	 * P_LPPWAIT is set; we will wakeup the parent below.
2180 	 */
2181 	proc_lock(p);
2182 	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2183 	p->p_sigignore = ~(sigcantmask);
2184 
2185 	/*
2186 	 * If a thread is already waiting for us in proc_exit,
2187 	 * P_LTERM is set, wakeup the thread.
2188 	 */
2189 	if (p->p_lflag & P_LTERM) {
2190 		wakeup(&p->exit_thread);
2191 	} else {
2192 		p->p_lflag |= P_LTERM;
2193 	}
2194 
2195 	/* If current proc is exiting, ignore signals on the exit thread */
2196 	if (p == current_proc()) {
2197 		ut->uu_siglist = 0;
2198 	}
2199 	proc_unlock(p);
2200 }
2201 
2202 void
proc_exit(proc_t p)2203 proc_exit(proc_t p)
2204 {
2205 	proc_t q;
2206 	proc_t pp;
2207 	struct task *task = proc_task(p);
2208 	vnode_t tvp = NULLVP;
2209 	struct pgrp * pg;
2210 	struct session *sessp;
2211 	struct uthread * uth;
2212 	pid_t pid;
2213 	int exitval;
2214 	int knote_hint;
2215 
2216 	uth = current_uthread();
2217 
2218 	proc_lock(p);
2219 	proc_transstart(p, 1, 0);
2220 	if (!(p->p_lflag & P_LEXIT)) {
2221 		/*
2222 		 * This can happen if a thread_terminate() occurs
2223 		 * in a single-threaded process.
2224 		 */
2225 		p->p_lflag |= P_LEXIT;
2226 		proc_transend(p, 1);
2227 		proc_unlock(p);
2228 		proc_prepareexit(p, 0, TRUE);
2229 		(void) task_terminate_internal(task);
2230 		proc_lock(p);
2231 	} else if (!(p->p_lflag & P_LTERM)) {
2232 		proc_transend(p, 1);
2233 		/* Jetsam is in the middle of calling proc_prepareexit, wait for it */
2234 		p->p_lflag |= P_LTERM;
2235 		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2236 	} else {
2237 		proc_transend(p, 1);
2238 	}
2239 
2240 	p->p_lflag |= P_LPEXIT;
2241 
2242 	/*
2243 	 * Other kernel threads may be in the middle of signalling this process.
2244 	 * Wait for those threads to wrap it up before making the process
2245 	 * disappear on them.
2246 	 */
2247 	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2248 		p->p_sigwaitcnt++;
2249 		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2250 			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2251 		}
2252 		p->p_sigwaitcnt--;
2253 	}
2254 
2255 	proc_unlock(p);
2256 	pid = proc_getpid(p);
2257 	exitval = p->p_xstat;
2258 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2259 	    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2260 	    pid, exitval, 0, 0, 0);
2261 
2262 #if DEVELOPMENT || DEBUG
2263 	proc_exit_lpexit_check(pid, PELS_POS_START);
2264 #endif
2265 
2266 #if CONFIG_DTRACE
2267 	dtrace_proc_exit(p);
2268 #endif
2269 
2270 	proc_refdrain(p);
2271 	/* We now have unique ref to the proc */
2272 
2273 	/* if any pending cpu limits action, clear it */
2274 	task_clear_cpuusage(proc_task(p), TRUE);
2275 
2276 	workq_mark_exiting(p);
2277 
2278 	/*
2279 	 * need to cancel async IO requests that can be cancelled and wait for those
2280 	 * already active.  MAY BLOCK!
2281 	 */
2282 	_aio_exit( p );
2283 
2284 	/*
2285 	 * Close open files and release open-file table.
2286 	 * This may block!
2287 	 */
2288 	fdt_invalidate(p);
2289 
2290 	/*
2291 	 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2292 	 * workqueue.
2293 	 */
2294 	workq_exit(p);
2295 
2296 	if (uth->uu_lowpri_window) {
2297 		/*
2298 		 * task is marked as a low priority I/O type
2299 		 * and the I/O we issued while flushing files on close
2300 		 * collided with normal I/O operations...
2301 		 * no need to throttle this thread since it's going away
2302 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2303 		 */
2304 		throttle_lowpri_io(0);
2305 	}
2306 
2307 	if (p->p_lflag & P_LNSPACE_RESOLVER) {
2308 		/*
2309 		 * The namespace resolver is exiting; there may be
2310 		 * outstanding materialization requests to clean up.
2311 		 */
2312 		nspace_resolver_exited(p);
2313 	}
2314 
2315 #if SYSV_SHM
2316 	/* Close ref SYSV Shared memory */
2317 	if (p->vm_shm) {
2318 		shmexit(p);
2319 	}
2320 #endif
2321 #if SYSV_SEM
2322 	/* Release SYSV semaphores */
2323 	semexit(p);
2324 #endif
2325 
2326 #if PSYNCH
2327 	pth_proc_hashdelete(p);
2328 #endif /* PSYNCH */
2329 
2330 	pg = proc_pgrp(p, &sessp);
2331 	if (SESS_LEADER(p, sessp)) {
2332 		if (sessp->s_ttyvp != NULLVP) {
2333 			struct vnode *ttyvp;
2334 			int ttyvid;
2335 			int cttyflag = 0;
2336 			struct vfs_context context;
2337 			struct tty *tp;
2338 			struct pgrp *tpgrp = PGRP_NULL;
2339 
2340 			/*
2341 			 * Controlling process.
2342 			 * Signal foreground pgrp,
2343 			 * drain controlling terminal
2344 			 * and revoke access to controlling terminal.
2345 			 */
2346 
2347 			proc_list_lock(); /* prevent any t_pgrp from changing */
2348 			session_lock(sessp);
2349 			if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2350 				tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2351 			}
2352 			proc_list_unlock();
2353 
2354 			if (tpgrp != PGRP_NULL) {
2355 				session_unlock(sessp);
2356 				pgsignal(tpgrp, SIGHUP, 1);
2357 				pgrp_rele(tpgrp);
2358 				session_lock(sessp);
2359 			}
2360 
2361 			cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2362 			    S_CTTYREF, relaxed) & S_CTTYREF);
2363 			ttyvp = sessp->s_ttyvp;
2364 			ttyvid = sessp->s_ttyvid;
2365 			tp = session_clear_tty_locked(sessp);
2366 			if (ttyvp) {
2367 				vnode_hold(ttyvp);
2368 			}
2369 			session_unlock(sessp);
2370 
2371 			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2372 				if (tp != TTY_NULL) {
2373 					tty_lock(tp);
2374 					(void) ttywait(tp);
2375 					tty_unlock(tp);
2376 				}
2377 
2378 				context.vc_thread = NULL;
2379 				context.vc_ucred = kauth_cred_proc_ref(p);
2380 				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2381 				if (cttyflag) {
2382 					/*
2383 					 * Release the extra usecount taken in cttyopen.
2384 					 * usecount should be released after VNOP_REVOKE is called.
2385 					 * This usecount was taken to ensure that
2386 					 * the VNOP_REVOKE results in a close to
2387 					 * the tty since cttyclose is a no-op.
2388 					 */
2389 					vnode_rele(ttyvp);
2390 				}
2391 				vnode_put(ttyvp);
2392 				kauth_cred_unref(&context.vc_ucred);
2393 				vnode_drop(ttyvp);
2394 				ttyvp = NULLVP;
2395 			}
2396 			if (ttyvp) {
2397 				vnode_drop(ttyvp);
2398 			}
2399 			if (tp) {
2400 				ttyfree(tp);
2401 			}
2402 		}
2403 		session_lock(sessp);
2404 		sessp->s_leader = NULL;
2405 		session_unlock(sessp);
2406 	}
2407 
2408 	if (!proc_is_shadow(p)) {
2409 		fixjobc(p, pg, 0);
2410 	}
2411 	pgrp_rele(pg);
2412 
2413 	/*
2414 	 * Change RLIMIT_FSIZE for accounting/debugging.
2415 	 */
2416 	proc_limitsetcur_fsize(p, RLIM_INFINITY);
2417 
2418 	(void)acct_process(p);
2419 
2420 	proc_list_lock();
2421 
2422 	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2423 		p->p_listflag &= ~P_LIST_EXITCOUNT;
2424 		proc_shutdown_exitcount--;
2425 		if (proc_shutdown_exitcount == 0) {
2426 			wakeup(&proc_shutdown_exitcount);
2427 		}
2428 	}
2429 
2430 	/* wait till parentrefs are dropped and grant no more */
2431 	proc_childdrainstart(p);
2432 	while ((q = p->p_children.lh_first) != NULL) {
2433 		if (q->p_stat == SZOMB) {
2434 			if (p != q->p_pptr) {
2435 				panic("parent child linkage broken");
2436 			}
2437 			/* check for sysctl zomb lookup */
2438 			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2439 				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2440 			}
2441 			q->p_listflag |= P_LIST_WAITING;
2442 			/*
2443 			 * This is a named reference and it is not granted
2444 			 * if the reap is already in progress. So we get
2445 			 * the reference here exclusively and there can be
2446 			 * no waiters. So there is no need for a wakeup
2447 			 * after we are done.  Also the reap frees the structure
2448 			 * and the proc struct cannot be used for wakeups as well.
2449 			 * It is safe to use q here as this is a system reap
2450 			 */
2451 			reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2452 			    REAP_REPARENTED_TO_INIT : 0;
2453 			reap_child_locked(p, q,
2454 			    REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2455 		} else {
2456 			/*
2457 			 * Traced processes are killed
2458 			 * since their existence means someone is messing up.
2459 			 */
2460 			if (q->p_lflag & P_LTRACED) {
2461 				struct proc *opp;
2462 
2463 				/*
2464 				 * Take a reference on the child process to
2465 				 * ensure it doesn't exit and disappear between
2466 				 * the time we drop the list_lock and attempt
2467 				 * to acquire its proc_lock.
2468 				 */
2469 				if (proc_ref(q, true) != q) {
2470 					continue;
2471 				}
2472 
2473 				proc_list_unlock();
2474 
2475 				opp = proc_find(q->p_oppid);
2476 				if (opp != PROC_NULL) {
2477 					proc_list_lock();
2478 					q->p_oppid = 0;
2479 					proc_list_unlock();
2480 					proc_reparentlocked(q, opp, 0, 0);
2481 					proc_rele(opp);
2482 				} else {
2483 					/* original parent exited while traced */
2484 					proc_list_lock();
2485 					q->p_listflag |= P_LIST_DEADPARENT;
2486 					q->p_oppid = 0;
2487 					proc_list_unlock();
2488 					proc_reparentlocked(q, initproc, 0, 0);
2489 				}
2490 
2491 				proc_lock(q);
2492 				q->p_lflag &= ~P_LTRACED;
2493 
2494 				if (q->sigwait_thread) {
2495 					thread_t thread = q->sigwait_thread;
2496 
2497 					proc_unlock(q);
2498 					/*
2499 					 * The sigwait_thread could be stopped at a
2500 					 * breakpoint. Wake it up to kill.
2501 					 * Need to do this as it could be a thread which is not
2502 					 * the first thread in the task. So any attempts to kill
2503 					 * the process would result in a deadlock on q->sigwait.
2504 					 */
2505 					thread_resume(thread);
2506 					clear_wait(thread, THREAD_INTERRUPTED);
2507 					threadsignal(thread, SIGKILL, 0, TRUE);
2508 				} else {
2509 					proc_unlock(q);
2510 				}
2511 
2512 				psignal(q, SIGKILL);
2513 				proc_list_lock();
2514 				proc_rele(q);
2515 			} else {
2516 				q->p_listflag |= P_LIST_DEADPARENT;
2517 				proc_reparentlocked(q, initproc, 0, 1);
2518 			}
2519 		}
2520 	}
2521 
2522 	proc_childdrainend(p);
2523 	proc_list_unlock();
2524 
2525 #if CONFIG_MACF
2526 	if (!proc_is_shadow(p)) {
2527 		/*
2528 		 * Notify MAC policies that proc is dead.
2529 		 * This should be replaced with proper label management
2530 		 * (rdar://problem/32126399).
2531 		 */
2532 		mac_proc_notify_exit(p);
2533 	}
2534 #endif
2535 
2536 	/*
2537 	 * Release reference to text vnode
2538 	 */
2539 	tvp = p->p_textvp;
2540 	p->p_textvp = NULL;
2541 	if (tvp != NULLVP) {
2542 		vnode_rele(tvp);
2543 	}
2544 
2545 	/*
2546 	 * Save exit status and final rusage info, adding in child rusage
2547 	 * info and self times.  If we were unable to allocate a zombie
2548 	 * structure, this information is lost.
2549 	 */
2550 	if (p->p_ru != NULL) {
2551 		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2552 		p->p_ru->ru = p->p_stats->p_ru;
2553 
2554 		ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2555 	}
2556 
2557 	/*
2558 	 * Free up profiling buffers.
2559 	 */
2560 	{
2561 		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2562 
2563 		p1 = p0->pr_next;
2564 		p0->pr_next = NULL;
2565 		p0->pr_scale = 0;
2566 
2567 		for (; p1 != NULL; p1 = pn) {
2568 			pn = p1->pr_next;
2569 			kfree_type(struct uprof, p1);
2570 		}
2571 	}
2572 
2573 	proc_free_realitimer(p);
2574 
2575 	/*
2576 	 * Other substructures are freed from wait().
2577 	 */
2578 	zfree(proc_stats_zone, p->p_stats);
2579 	p->p_stats = NULL;
2580 
2581 	if (p->p_subsystem_root_path) {
2582 		zfree(ZV_NAMEI, p->p_subsystem_root_path);
2583 		p->p_subsystem_root_path = NULL;
2584 	}
2585 
2586 	proc_limitdrop(p);
2587 
2588 #if DEVELOPMENT || DEBUG
2589 	proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2590 #endif
2591 
2592 	/*
2593 	 * Finish up by terminating the task
2594 	 * and halt this thread (only if a
2595 	 * member of the task exiting).
2596 	 */
2597 	proc_set_task(p, TASK_NULL);
2598 	set_bsdtask_info(task, NULL);
2599 	clear_thread_ro_proc(get_machthread(uth));
2600 
2601 #if DEVELOPMENT || DEBUG
2602 	proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2603 #endif
2604 
2605 	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2606 	proc_knote(p, knote_hint);
2607 
2608 	/* mark the thread as the one that is doing proc_exit
2609 	 * no need to hold proc lock in uthread_free
2610 	 */
2611 	uth->uu_flag |= UT_PROCEXIT;
2612 	/*
2613 	 * Notify parent that we're gone.
2614 	 */
2615 	pp = proc_parent(p);
2616 	if (proc_is_shadow(p)) {
2617 		/* kernel can reap this one, no need to move it to launchd */
2618 		proc_list_lock();
2619 		p->p_listflag |= P_LIST_DEADPARENT;
2620 		proc_list_unlock();
2621 	} else if (pp->p_flag & P_NOCLDWAIT) {
2622 		if (p->p_ru != NULL) {
2623 			proc_lock(pp);
2624 #if 3839178
2625 			/*
2626 			 * If the parent is ignoring SIGCHLD, then POSIX requires
2627 			 * us to not add the resource usage to the parent process -
2628 			 * we are only going to hand it off to init to get reaped.
2629 			 * We should contest the standard in this case on the basis
2630 			 * of RLIMIT_CPU.
2631 			 */
2632 #else   /* !3839178 */
2633 			/*
2634 			 * Add child resource usage to parent before giving
2635 			 * zombie to init.  If we were unable to allocate a
2636 			 * zombie structure, this information is lost.
2637 			 */
2638 			ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2639 #endif  /* !3839178 */
2640 			update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2641 			proc_unlock(pp);
2642 		}
2643 
2644 		/* kernel can reap this one, no need to move it to launchd */
2645 		proc_list_lock();
2646 		p->p_listflag |= P_LIST_DEADPARENT;
2647 		proc_list_unlock();
2648 	}
2649 	if (!proc_is_shadow(p) &&
2650 	    ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2651 		if (pp != initproc) {
2652 			proc_lock(pp);
2653 			pp->si_pid = proc_getpid(p);
2654 			pp->p_xhighbits = p->p_xhighbits;
2655 			p->p_xhighbits = 0;
2656 			pp->si_status = p->p_xstat;
2657 			pp->si_code = CLD_EXITED;
2658 			/*
2659 			 * p_ucred usage is safe as it is an exiting process
2660 			 * and reference is dropped in reap
2661 			 */
2662 			pp->si_uid = kauth_cred_getruid(proc_ucred_unsafe(p));
2663 			proc_unlock(pp);
2664 		}
2665 		/* mark as a zombie */
2666 		/* No need to take proc lock as all refs are drained and
2667 		 * no one except parent (reaping) can look at this.
2668 		 * The write is to an int and is coherent. Also parent is
2669 		 *  keyed off of list lock for reaping
2670 		 */
2671 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2672 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2673 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2674 		    pid, exitval, 0, 0, 0);
2675 		p->p_stat = SZOMB;
2676 		/*
2677 		 * The current process can be reaped, so no one
2678 		 * can depend on this
2679 		 */
2680 
2681 		psignal(pp, SIGCHLD);
2682 
2683 		/* and now wakeup the parent */
2684 		proc_list_lock();
2685 		wakeup((caddr_t)pp);
2686 		proc_list_unlock();
2687 	} else {
2688 		/* should be fine as parent proc would be initproc */
2689 		/* mark as a zombie */
2690 		/* No need to take proc lock as all refs are drained and
2691 		 * no one except parent (reaping) can look at this.
2692 		 * The write is to an int and is coherent. Also parent is
2693 		 *  keyed off of list lock for reaping
2694 		 */
2695 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2696 		proc_list_lock();
2697 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2698 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2699 		    pid, exitval, 0, 0, 0);
2700 		/* check for sysctl zomb lookup */
2701 		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2702 			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2703 		}
2704 		/* safe to use p as this is a system reap */
2705 		p->p_stat = SZOMB;
2706 		p->p_listflag |= P_LIST_WAITING;
2707 
2708 		/*
2709 		 * This is a named reference and it is not granted
2710 		 * if the reap is already in progress. So we get
2711 		 * the reference here exclusively and there can be
2712 		 * no waiters. So there is no need for a wakeup
2713 		 * after we are done. Also the reap frees the structure
2714 		 * and the proc struct cannot be used for wakeups as well.
2715 		 * It is safe to use p here as this is a system reap
2716 		 */
2717 		reap_child_locked(pp, p,
2718 		    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2719 	}
2720 	if (uth->uu_lowpri_window) {
2721 		/*
2722 		 * task is marked as a low priority I/O type and we've
2723 		 * somehow picked up another throttle during exit processing...
2724 		 * no need to throttle this thread since its going away
2725 		 * but we do need to update our bookeeping w/r to throttled threads
2726 		 */
2727 		throttle_lowpri_io(0);
2728 	}
2729 
2730 	proc_rele(pp);
2731 #if DEVELOPMENT || DEBUG
2732 	proc_exit_lpexit_check(pid, PELS_POS_END);
2733 #endif
2734 }
2735 
2736 
2737 /*
2738  * reap_child_locked
2739  *
2740  * Finalize a child exit once its status has been saved.
2741  *
2742  * If ptrace has attached, detach it and return it to its real parent.  Free any
2743  * remaining resources.
2744  *
2745  * Parameters:
2746  * - proc_t parent      Parent of process being reaped
2747  * - proc_t child       Process to reap
2748  * - reap_flags_t flags Control locking and re-parenting behavior
2749  */
2750 static void
2751 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2752 {
2753 	struct pgrp *pg;
2754 	boolean_t shadow_proc = proc_is_shadow(child);
2755 
2756 	if (flags & REAP_LOCKED) {
2757 		proc_list_unlock();
2758 	}
2759 
2760 	/*
2761 	 * Under ptrace, the child should now be re-parented back to its original
2762 	 * parent, unless that parent is initproc and the child only reached
2763 	 * initproc through re-parenting.
2764 	 */
2765 	bool child_ptraced = child->p_oppid != 0;
2766 	if (!shadow_proc && child_ptraced) {
2767 		int knote_hint;
2768 		pid_t orig_ppid = 0;
2769 		proc_t orig_parent = PROC_NULL;
2770 
2771 		proc_lock(child);
2772 		orig_ppid = child->p_oppid;
2773 		child->p_oppid = 0;
2774 		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2775 		proc_unlock(child);
2776 
2777 		orig_parent = proc_find(orig_ppid);
2778 		if (orig_parent) {
2779 			/*
2780 			 * Re-parent the process unless its original parent is initproc
2781 			 * and it came to initproc through re-parenting.
2782 			 */
2783 			bool reparenting = orig_parent != initproc ||
2784 			    (flags & REAP_REPARENTED_TO_INIT) == 0;
2785 			if (reparenting) {
2786 				if (orig_parent != initproc) {
2787 					/*
2788 					 * Internal fields should be safe to access here because the
2789 					 * child is exited and not reaped or re-parented yet.
2790 					 */
2791 					proc_lock(orig_parent);
2792 					orig_parent->si_pid = proc_getpid(child);
2793 					orig_parent->si_status = child->p_xstat;
2794 					orig_parent->si_code = CLD_CONTINUED;
2795 					orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2796 					proc_unlock(orig_parent);
2797 				}
2798 				proc_reparentlocked(child, orig_parent, 1, 0);
2799 
2800 				/*
2801 				 * After re-parenting, re-send the child's NOTE_EXIT to the
2802 				 * original parent.
2803 				 */
2804 				proc_knote(child, knote_hint);
2805 				psignal(orig_parent, SIGCHLD);
2806 
2807 				proc_list_lock();
2808 				wakeup((caddr_t)orig_parent);
2809 				child->p_listflag &= ~P_LIST_WAITING;
2810 				wakeup(&child->p_stat);
2811 				proc_list_unlock();
2812 
2813 				proc_rele(orig_parent);
2814 				if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2815 					proc_list_lock();
2816 				}
2817 				return;
2818 			} else {
2819 				/*
2820 				 * Satisfy the knote lifecycle because ptraced processes don't
2821 				 * broadcast NOTE_EXIT during initial child termination.
2822 				 */
2823 				proc_knote(child, knote_hint);
2824 				proc_rele(orig_parent);
2825 			}
2826 		}
2827 	}
2828 
2829 #pragma clang diagnostic push
2830 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2831 	proc_knote(child, NOTE_REAP);
2832 #pragma clang diagnostic pop
2833 
2834 	proc_knote_drain(child);
2835 
2836 	child->p_xstat = 0;
2837 	if (!shadow_proc && child->p_ru) {
2838 		/*
2839 		 * Roll up the rusage statistics to the parent, unless the parent is
2840 		 * ignoring SIGCHLD.  POSIX requires the children's resources of such a
2841 		 * parent to not be included in the parent's usage (seems odd given
2842 		 * RLIMIT_CPU, though).
2843 		 */
2844 		proc_lock(parent);
2845 		bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2846 		if (rollup_child) {
2847 			ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2848 		}
2849 		update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2850 		proc_unlock(parent);
2851 		zfree(zombie_zone, child->p_ru);
2852 		child->p_ru = NULL;
2853 	} else if (!shadow_proc) {
2854 		printf("Warning : lost p_ru for %s\n", child->p_comm);
2855 	} else {
2856 		assert(child->p_ru == NULL);
2857 	}
2858 
2859 	AUDIT_SESSION_PROCEXIT(child);
2860 
2861 #if CONFIG_PERSONAS
2862 	persona_proc_drop(child);
2863 #endif /* CONFIG_PERSONAS */
2864 	/* proc_ucred_unsafe is safe, because child is not running */
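	/* Drop this ruid's process count, which was charged when the process was created. */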
2865 	(void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2866 
2867 	os_reason_free(child->p_exit_reason);
2868 
2869 	proc_list_lock();
2870 
2871 	pg = pgrp_leave_locked(child);
2872 	LIST_REMOVE(child, p_list);
2873 	parent->p_childrencnt--;
2874 	LIST_REMOVE(child, p_sibling);
2875 	bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2876 	    LIST_EMPTY(&parent->p_children);
2877 	if (no_more_children) {
2878 		wakeup((caddr_t)parent);
2879 	}
2880 	child->p_listflag &= ~P_LIST_WAITING;
2881 	wakeup(&child->p_stat);
2882 
2883 	/* Take it out of process hash */
2884 	if (!shadow_proc) {
2885 		phash_remove_locked(child);
2886 	}
2887 	proc_checkdeadrefs(child);
2888 	nprocs--;
2889 	if (flags & REAP_DEAD_PARENT) {
2890 		child->p_listflag |= P_LIST_DEADPARENT;
2891 	}
2892 
2893 	proc_list_unlock();
2894 
2895 	pgrp_rele(pg);
2896 	fdt_destroy(child);
2897 	lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2898 	lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2899 #if CONFIG_AUDIT
2900 	lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
2901 #endif /* CONFIG_AUDIT */
2902 #if CONFIG_DTRACE
2903 	lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2904 #endif
2905 	lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2906 	proc_wait_release(child);
2907 
2908 	if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2909 		proc_list_lock();
2910 	}
2911 }
2912 
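/*
 * Continuation for wait4(): runs when a thread that blocked in
 * wait4_nocancel() via msleep0() is woken.  It recovers the stashed
 * syscall arguments from the uthread and re-enters wait4_nocancel().
 */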
2913 int
2914 wait1continue(int result)
2915 {
2916 	proc_t p;
2917 	thread_t thread;
2918 	uthread_t uth;
2919 	struct _wait4_data *wait4_data;
2920 	struct wait4_nocancel_args *uap;
2921 	int *retval;
2922 
2923 	if (result) {
2924 		return result;
2925 	}
2926 
2927 	p = current_proc();
2928 	thread = current_thread();
2929 	uth = (struct uthread *)get_bsdthread_info(thread);
2930 
2931 	wait4_data = &uth->uu_save.uus_wait4_data;
2932 	uap = wait4_data->args;
2933 	retval = wait4_data->retval;
2934 	return wait4_nocancel(p, uap, retval);
2935 }
2936 
2937 int
2938 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2939 {
2940 	__pthread_testcancel(1);
2941 	return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2942 }
2943 
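/*
 * Common implementation of wait4()/wait4_nocancel(): scan the caller's
 * children for one matching uap->pid (a specific pid, a process group when
 * negative, or any child for WAIT_ANY) that is a zombie, stopped (when
 * traced or WUNTRACED is set), or continued (when WCONTINUED is set).
 * A child being examined is marked P_LIST_WAITING so only one thread
 * reports its status; if nothing is ready and WNOHANG is not set, the
 * thread blocks on the parent and resumes via wait1continue().
 */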
2944 int
2945 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2946 {
2947 	int nfound;
2948 	int sibling_count;
2949 	proc_t p;
2950 	int status, error;
2951 	uthread_t uth;
2952 	struct _wait4_data *wait4_data;
2953 
2954 	AUDIT_ARG(pid, uap->pid);
2955 
2956 	if (uap->pid == 0) {
2957 		uap->pid = -q->p_pgrpid;
2958 	}
2959 
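	/*
	 * INT_MIN cannot be negated without overflow when it is interpreted
	 * as a process group id below, so reject it outright.
	 */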
2960 	if (uap->pid == INT_MIN) {
2961 		return EINVAL;
2962 	}
2963 
2964 loop:
2965 	proc_list_lock();
2966 loop1:
2967 	nfound = 0;
2968 	sibling_count = 0;
2969 
2970 	PCHILDREN_FOREACH(q, p) {
2971 		if (p->p_sibling.le_next != 0) {
2972 			sibling_count++;
2973 		}
2974 		if (uap->pid != WAIT_ANY &&
2975 		    proc_getpid(p) != uap->pid &&
2976 		    p->p_pgrpid != -(uap->pid)) {
2977 			continue;
2978 		}
2979 
2980 		if (proc_is_shadow(p)) {
2981 			continue;
2982 		}
2983 
2984 		nfound++;
2985 
2986 		/* XXX This is racy because we don't get the lock!!!! */
2987 
2988 		if (p->p_listflag & P_LIST_WAITING) {
2989 			/* We're not using a continuation here, but we still need to
2990 			 * stash the args for stackshot. */
2991 			uth = current_uthread();
2992 			wait4_data = &uth->uu_save.uus_wait4_data;
2993 			wait4_data->args = uap;
2994 			thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2995 
2996 			(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2997 			goto loop1;
2998 		}
2999 		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */
3000 
3001 
3002 		if (p->p_stat == SZOMB) {
3003 			reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
3004 			    REAP_REPARENTED_TO_INIT : 0;
3005 
3006 			proc_list_unlock();
3007 #if CONFIG_MACF
3008 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3009 				goto out;
3010 			}
3011 #endif
3012 			retval[0] = proc_getpid(p);
3013 			if (uap->status) {
3014 				/* Legacy apps expect only 8 bits of status */
3015 				status = 0xffff & p->p_xstat;   /* convert to int */
3016 				error = copyout((caddr_t)&status,
3017 				    uap->status,
3018 				    sizeof(status));
3019 				if (error) {
3020 					goto out;
3021 				}
3022 			}
3023 			if (uap->rusage) {
3024 				if (p->p_ru == NULL) {
3025 					error = ENOMEM;
3026 				} else {
3027 					if (IS_64BIT_PROCESS(q)) {
3028 						struct user64_rusage    my_rusage = {};
3029 						munge_user64_rusage(&p->p_ru->ru, &my_rusage);
3030 						error = copyout((caddr_t)&my_rusage,
3031 						    uap->rusage,
3032 						    sizeof(my_rusage));
3033 					} else {
3034 						struct user32_rusage    my_rusage = {};
3035 						munge_user32_rusage(&p->p_ru->ru, &my_rusage);
3036 						error = copyout((caddr_t)&my_rusage,
3037 						    uap->rusage,
3038 						    sizeof(my_rusage));
3039 					}
3040 				}
3041 				/* information unavailable? */
3042 				if (error) {
3043 					goto out;
3044 				}
3045 			}
3046 
3047 			/* Conformance change for 6577252.
3048 			 * When SIGCHLD is blocked and wait() returns because the status
3049 			 * of a child process is available and there are no other
3050 			 * child processes, then any pending SIGCHLD signal is cleared.
3051 			 */
3052 			if (sibling_count == 0) {
3053 				int mask = sigmask(SIGCHLD);
3054 				uth = current_uthread();
3055 
3056 				if ((uth->uu_sigmask & mask) != 0) {
3057 					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
3058 					 * This locking looks funny but it is protecting access to the
3059 					 * thread via p_uthlist.
3060 					 */
3061 					proc_lock(q);
3062 					uth->uu_siglist &= ~mask;       /* clear pending signal */
3063 					proc_unlock(q);
3064 				}
3065 			}
3066 
3067 			/* Clean up */
3068 			(void)reap_child_locked(q, p, reap_flags);
3069 
3070 			return 0;
3071 		}
3072 		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
3073 		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
3074 			proc_list_unlock();
3075 #if CONFIG_MACF
3076 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3077 				goto out;
3078 			}
3079 #endif
3080 			proc_lock(p);
3081 			p->p_lflag |= P_LWAITED;
3082 			proc_unlock(p);
3083 			retval[0] = proc_getpid(p);
3084 			if (uap->status) {
3085 				status = W_STOPCODE(p->p_xstat);
3086 				error = copyout((caddr_t)&status,
3087 				    uap->status,
3088 				    sizeof(status));
3089 			} else {
3090 				error = 0;
3091 			}
3092 			goto out;
3093 		}
3094 		/*
3095 		 * If we are waiting for continued processes, and this
3096 		 * process was continued.
3097 		 */
3098 		if ((uap->options & WCONTINUED) &&
3099 		    (p->p_flag & P_CONTINUED)) {
3100 			proc_list_unlock();
3101 #if CONFIG_MACF
3102 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3103 				goto out;
3104 			}
3105 #endif
3106 
3107 			/* Prevent other processes from waiting for this event */
3108 			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3109 			retval[0] = proc_getpid(p);
3110 			if (uap->status) {
3111 				status = W_STOPCODE(SIGCONT);
3112 				error = copyout((caddr_t)&status,
3113 				    uap->status,
3114 				    sizeof(status));
3115 			} else {
3116 				error = 0;
3117 			}
3118 			goto out;
3119 		}
3120 		p->p_listflag &= ~P_LIST_WAITING;
3121 		wakeup(&p->p_stat);
3122 	}
3123 	/* list lock is held when we get here any which way */
3124 	if (nfound == 0) {
3125 		proc_list_unlock();
3126 		return ECHILD;
3127 	}
3128 
3129 	if (uap->options & WNOHANG) {
3130 		retval[0] = 0;
3131 		proc_list_unlock();
3132 		return 0;
3133 	}
3134 
3135 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3136 	uth = current_uthread();
3137 	wait4_data = &uth->uu_save.uus_wait4_data;
3138 	wait4_data->args = uap;
3139 	wait4_data->retval = retval;
3140 
3141 	thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
3142 	if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3143 		return error;
3144 	}
3145 
3146 	goto loop;
3147 out:
3148 	proc_list_lock();
3149 	p->p_listflag &= ~P_LIST_WAITING;
3150 	wakeup(&p->p_stat);
3151 	proc_list_unlock();
3152 	return error;
3153 }
3154 
3155 #if DEBUG
3156 #define ASSERT_LCK_MTX_OWNED(lock)      \
3157 	                        lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3158 #else
3159 #define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */
3160 #endif
3161 
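/*
 * Continuation for waitid(): restores the stashed arguments from the
 * uthread and re-enters waitid_nocancel() after the blocked thread is
 * woken.
 */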
3162 int
3163 waitidcontinue(int result)
3164 {
3165 	proc_t p;
3166 	thread_t thread;
3167 	uthread_t uth;
3168 	struct _waitid_data *waitid_data;
3169 	struct waitid_nocancel_args *uap;
3170 	int *retval;
3171 
3172 	if (result) {
3173 		return result;
3174 	}
3175 
3176 	p = current_proc();
3177 	thread = current_thread();
3178 	uth = (struct uthread *)get_bsdthread_info(thread);
3179 
3180 	waitid_data = &uth->uu_save.uus_waitid_data;
3181 	uap = waitid_data->args;
3182 	retval = waitid_data->retval;
3183 	return waitid_nocancel(p, uap, retval);
3184 }
3185 
3186 /*
3187  * Description:	Suspend the calling thread until one child of the process
3188  *		containing the calling thread changes state.
3189  *
3190  * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
3191  *		uap->id			pid_t or gid_t or ignored
3192  *		uap->infop		Address of siginfo_t struct in
3193  *					user space into which to return status
3194  *		uap->options		flag values
3195  *
3196  * Returns:	0			Success
3197  *		!0			Error returning status to user space
3198  */
3199 int
3200 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3201 {
3202 	__pthread_testcancel(1);
3203 	return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3204 }
3205 
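/*
 * Common implementation of waitid()/waitid_nocancel(): scan the caller's
 * children for one matching the (idtype, id) pair whose state matches the
 * requested WEXITED/WSTOPPED/WCONTINUED options, copy out a siginfo for it,
 * and, unless WNOWAIT is set, consume the state change (reaping a zombie).
 */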
3206 int
3207 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3208     __unused int32_t *retval)
3209 {
3210 	user_siginfo_t  siginfo;        /* siginfo data to return to caller */
3211 	boolean_t caller64 = IS_64BIT_PROCESS(q);
3212 	int nfound;
3213 	proc_t p;
3214 	int error;
3215 	uthread_t uth;
3216 	struct _waitid_data *waitid_data;
3217 
3218 	if (uap->options == 0 ||
3219 	    (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3220 		return EINVAL;        /* bits set that aren't recognized */
3221 	}
3222 	switch (uap->idtype) {
3223 	case P_PID:     /* child with process ID equal to... */
3224 	case P_PGID:    /* child with process group ID equal to... */
3225 		if (((int)uap->id) < 0) {
3226 			return EINVAL;
3227 		}
3228 		break;
3229 	case P_ALL:     /* any child */
3230 		break;
3231 	}
3232 
3233 loop:
3234 	proc_list_lock();
3235 loop1:
3236 	nfound = 0;
3237 
3238 	PCHILDREN_FOREACH(q, p) {
3239 		switch (uap->idtype) {
3240 		case P_PID:     /* child with process ID equal to... */
3241 			if (proc_getpid(p) != (pid_t)uap->id) {
3242 				continue;
3243 			}
3244 			break;
3245 		case P_PGID:    /* child with process group ID equal to... */
3246 			if (p->p_pgrpid != (pid_t)uap->id) {
3247 				continue;
3248 			}
3249 			break;
3250 		case P_ALL:     /* any child */
3251 			break;
3252 		}
3253 
3254 		if (proc_is_shadow(p)) {
3255 			continue;
3256 		}
3257 		/* XXX This is racy because we don't get the lock!!!! */
3258 
3259 		/*
3260 		 * Wait collision; go to sleep and restart; used to maintain
3261 		 * the single return for waited process guarantee.
3262 		 */
3263 		if (p->p_listflag & P_LIST_WAITING) {
3264 			(void) msleep(&p->p_stat, &proc_list_mlock,
3265 			    PWAIT, "waitidcoll", 0);
3266 			goto loop1;
3267 		}
3268 		p->p_listflag |= P_LIST_WAITING;                /* mark busy */
3269 
3270 		nfound++;
3271 
3272 		bzero(&siginfo, sizeof(siginfo));
3273 
3274 		switch (p->p_stat) {
3275 		case SZOMB:             /* Exited */
3276 			if (!(uap->options & WEXITED)) {
3277 				break;
3278 			}
3279 			proc_list_unlock();
3280 #if CONFIG_MACF
3281 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3282 				goto out;
3283 			}
3284 #endif
3285 			siginfo.si_signo = SIGCHLD;
3286 			siginfo.si_pid = proc_getpid(p);
3287 
3288 			/* If the child terminated abnormally due to a signal, the signum
3289 			 * needs to be preserved in the exit status.
3290 			 */
3291 			if (WIFSIGNALED(p->p_xstat)) {
3292 				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
3293 				    CLD_DUMPED : CLD_KILLED;
3294 				siginfo.si_status = WTERMSIG(p->p_xstat);
3295 			} else {
3296 				siginfo.si_code = CLD_EXITED;
3297 				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
3298 			}
3299 			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
3300 			p->p_xhighbits = 0;
3301 
3302 			if ((error = copyoutsiginfo(&siginfo,
3303 			    caller64, uap->infop)) != 0) {
3304 				goto out;
3305 			}
3306 
3307 			/* Prevent other processes from waiting for this event? */
3308 			if (!(uap->options & WNOWAIT)) {
3309 				reap_child_locked(q, p, 0);
3310 				return 0;
3311 			}
3312 			goto out;
3313 
3314 		case SSTOP:             /* Stopped */
3315 			/*
3316 			 * If we are not interested in stopped processes, then
3317 			 * ignore this one.
3318 			 */
3319 			if (!(uap->options & WSTOPPED)) {
3320 				break;
3321 			}
3322 
3323 			/*
3324 			 * If someone has already waited on it, we lost a race
3325 			 * to be the one to return status.
3326 			 */
3327 			if ((p->p_lflag & P_LWAITED) != 0) {
3328 				break;
3329 			}
3330 			proc_list_unlock();
3331 #if CONFIG_MACF
3332 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3333 				goto out;
3334 			}
3335 #endif
3336 			siginfo.si_signo = SIGCHLD;
3337 			siginfo.si_pid = proc_getpid(p);
3338 			siginfo.si_status = p->p_xstat; /* signal number */
3339 			siginfo.si_code = CLD_STOPPED;
3340 
3341 			if ((error = copyoutsiginfo(&siginfo,
3342 			    caller64, uap->infop)) != 0) {
3343 				goto out;
3344 			}
3345 
3346 			/* Prevent other processes from waiting for this event? */
3347 			if (!(uap->options & WNOWAIT)) {
3348 				proc_lock(p);
3349 				p->p_lflag |= P_LWAITED;
3350 				proc_unlock(p);
3351 			}
3352 			goto out;
3353 
3354 		default:                /* All other states => Continued */
3355 			if (!(uap->options & WCONTINUED)) {
3356 				break;
3357 			}
3358 
3359 			/*
3360 			 * If the flag isn't set, then this process has not
3361 			 * been stopped and continued, or the status has
3362 			 * already been reaped by another caller of waitid().
3363 			 */
3364 			if ((p->p_flag & P_CONTINUED) == 0) {
3365 				break;
3366 			}
3367 			proc_list_unlock();
3368 #if CONFIG_MACF
3369 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3370 				goto out;
3371 			}
3372 #endif
3373 			siginfo.si_signo = SIGCHLD;
3374 			siginfo.si_code = CLD_CONTINUED;
3375 			proc_lock(p);
3376 			siginfo.si_pid = p->p_contproc;
3377 			siginfo.si_status = p->p_xstat;
3378 			proc_unlock(p);
3379 
3380 			if ((error = copyoutsiginfo(&siginfo,
3381 			    caller64, uap->infop)) != 0) {
3382 				goto out;
3383 			}
3384 
3385 			/* Prevent other processes from waiting for this event? */
3386 			if (!(uap->options & WNOWAIT)) {
3387 				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
3388 				    &p->p_flag);
3389 			}
3390 			goto out;
3391 		}
3392 		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3393 
3394 		/* Not a process we are interested in; go on to next child */
3395 
3396 		p->p_listflag &= ~P_LIST_WAITING;
3397 		wakeup(&p->p_stat);
3398 	}
3399 	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3400 
3401 	/* No child processes that could possibly satisfy the request? */
3402 
3403 	if (nfound == 0) {
3404 		proc_list_unlock();
3405 		return ECHILD;
3406 	}
3407 
3408 	if (uap->options & WNOHANG) {
3409 		proc_list_unlock();
3410 #if CONFIG_MACF
3411 		if ((error = mac_proc_check_wait(q, p)) != 0) {
3412 			return error;
3413 		}
3414 #endif
3415 		/*
3416 		 * The state of the siginfo structure in this case
3417 		 * is undefined.  Some implementations bzero it, some
3418 		 * (like here) leave it untouched for efficiency.
3419 		 *
3420 		 * Thus the most portable check for "no matching pid with
3421 		 * WNOHANG" is to store a zero into si_pid before
3422 		 * invocation, then check for a non-zero value afterwards.
3423 		 */
3424 		return 0;
3425 	}
3426 
3427 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3428 	uth = current_uthread();
3429 	waitid_data = &uth->uu_save.uus_waitid_data;
3430 	waitid_data->args = uap;
3431 	waitid_data->retval = retval;
3432 
3433 	if ((error = msleep0(q, &proc_list_mlock,
3434 	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
3435 		return error;
3436 	}
3437 
3438 	goto loop;
3439 out:
3440 	proc_list_lock();
3441 	p->p_listflag &= ~P_LIST_WAITING;
3442 	wakeup(&p->p_stat);
3443 	proc_list_unlock();
3444 	return error;
3445 }
3446 
3447 /*
3448  * Make process 'parent' the new parent of process 'child'.
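 * If 'locked' is non-zero the caller already holds the proc list lock (the
 * lock is dropped and re-taken internally).  If 'signallable' is non-zero
 * and a zombie child is being handed to initproc, SIGCHLD is posted to
 * initproc.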
3449  */
3450 void
3451 proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
3452 {
3453 	proc_t oldparent = PROC_NULL;
3454 
3455 	if (child->p_pptr == parent) {
3456 		return;
3457 	}
3458 
3459 	if (locked == 0) {
3460 		proc_list_lock();
3461 	}
3462 
3463 	oldparent = child->p_pptr;
3464 #if __PROC_INTERNAL_DEBUG
3465 	if (oldparent == PROC_NULL) {
3466 		panic("proc_reparent: process %p does not have a parent", child);
3467 	}
3468 #endif
3469 
3470 	LIST_REMOVE(child, p_sibling);
3471 #if __PROC_INTERNAL_DEBUG
3472 	if (oldparent->p_childrencnt == 0) {
3473 		panic("process children count already 0");
3474 	}
3475 #endif
3476 	oldparent->p_childrencnt--;
3477 #if __PROC_INTERNAL_DEBUG
3478 	if (oldparent->p_childrencnt < 0) {
3479 		panic("process children count -ve");
3480 	}
3481 #endif
3482 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
3483 	parent->p_childrencnt++;
3484 	child->p_pptr = parent;
3485 	child->p_ppid = proc_getpid(parent);
3486 
3487 	proc_list_unlock();
3488 
3489 	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
3490 		psignal(initproc, SIGCHLD);
3491 	}
3492 	if (locked == 1) {
3493 		proc_list_lock();
3494 	}
3495 }
3496 
3497 /*
3498  * Exit: deallocate address space and other resources, change proc state
3499  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
3500  * status and rusage for wait().  Check for child processes and orphan them.
3501  */
3502 
3503 
3504 /*
3505  * munge_rusage
3506  *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
3507  *	process.  We munge the kernel version of rusage into the
3508  *	64 bit version.
3509  */
3510 __private_extern__  void
3511 munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
3512 {
3513 	/* Zero-out struct so that padding is cleared */
3514 	bzero(a_user_rusage_p, sizeof(struct user64_rusage));
3515 
3516 	/* timeval changes size, so utime and stime need special handling */
3517 	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
3518 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3519 	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
3520 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3521 	/*
3522 	 * Everything else can be a direct assign, since there is no loss
3523 	 * of precision implied going from 32 to 64 bits.
3524 	 */
3525 	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
3526 	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
3527 	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
3528 	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
3529 	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
3530 	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
3531 	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
3532 	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
3533 	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
3534 	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
3535 	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
3536 	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
3537 	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
3538 	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
3539 }
3540 
3541 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */
3542 __private_extern__  void
3543 munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
3544 {
3545 	bzero(a_user_rusage_p, sizeof(struct user32_rusage));
3546 
3547 	/* timeval changes size, so utime and stime need special handling */
3548 	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
3549 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3550 	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
3551 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3552 	/*
3553 	 * Everything else can be a direct assign. We currently ignore
3554 	 * the loss of precision.
3555 	 */
3556 	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
3557 	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
3558 	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
3559 	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
3560 	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
3561 	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
3562 	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
3563 	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
3564 	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
3565 	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
3566 	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
3567 	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
3568 	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
3569 	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
3570 }
3571 
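/*
 * Stackshot/kdp helper: describe a thread blocked in wait4() by reporting
 * the wait4() pid argument (stashed in the uthread) as the wait owner.
 */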
3572 void
3573 kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
3574 {
3575 	assert(thread != NULL);
3576 	assert(waitinfo != NULL);
3577 
3578 	struct uthread *ut = get_bsdthread_info(thread);
3579 	waitinfo->context = 0;
3580 	// ensure wmesg is consistent with a thread waiting in wait4
3581 	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
3582 	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
3583 	// May not actually contain a pid; this is just the argument to wait4.
3584 	// See man wait4 for other valid wait4 arguments.
3585 	waitinfo->owner = args->pid;
3586 }
3587 
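/*
 * Common path for terminating a process because of an exception.  Unless
 * PX_DEBUG_NO_HONOR is set, the exit is skipped while the address space is
 * being debugged.  PX_KTRIAGE leaves a ktriage record, PX_PSIGNAL delivers
 * a signal (default SIGKILL) instead of force-exiting, and
 * PX_NO_EXCEPTION_UTHREAD suppresses recording the exception on the
 * current uthread.
 */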
3588 static int
3589 exit_with_exception_internal(
3590 	struct proc *p,
3591 	exception_info_t exception,
3592 	uint32_t flags)
3593 {
3594 	os_reason_t reason = OS_REASON_NULL;
3595 	struct uthread *ut = NULL;
3596 
3597 	if (p == PROC_NULL) {
3598 		panic("exception type %d without a valid proc",
3599 		    exception.os_reason);
3600 	}
3601 
3602 	if (!(flags & PX_DEBUG_NO_HONOR)
3603 	    && is_address_space_debugged(p)) {
3604 		return 0;
3605 	}
3606 
3607 	if ((flags & PX_KTRIAGE)) {
3608 		/* Leave a ktriage record */
3609 		ktriage_record(
3610 			thread_tid(current_thread()),
3611 			KDBG_TRIAGE_EVENTID(
3612 				exception.kt_info.kt_subsys,
3613 				KDBG_TRIAGE_RESERVED,
3614 				exception.kt_info.kt_error),
3615 			0);
3616 	}
3617 
3618 	if ((flags & PX_PSIGNAL)) {
3619 		int signal = (exception.signal > 0) ? exception.signal : SIGKILL;
3620 
3621 		printf("[%s%s] sending signal %d to process\n", proc_best_name(p),
3622 		    (signal == SIGKILL) ? ": killed" : "", signal);
3623 		psignal(p, signal);
3624 		return 0;
3625 	} else {
3626 		assert(exception.exception_type > 0);
3627 
3628 		reason = os_reason_create(
3629 			exception.os_reason,
3630 			(uint64_t)exception.mx_code);
3631 		assert(reason != OS_REASON_NULL);
3632 		reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
3633 
3634 		if (!(flags & PX_NO_EXCEPTION_UTHREAD)) {
3635 			ut = get_bsdthread_info(current_thread());
3636 			ut->uu_exception = exception.exception_type;
3637 			ut->uu_code = exception.mx_code;
3638 			ut->uu_subcode = exception.mx_subcode;
3639 		}
3640 
3641 		printf("[%s: killed] sending signal %d and force exiting process\n",
3642 		    proc_best_name(p), SIGKILL);
3643 		return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
3644 		           FALSE, FALSE, 0, reason);
3645 	}
3646 }
3647 
3648 /*
3649  * Use a separate function call for Mach and exclave exceptions so that the
3650  * exception's origin shows up clearly in the backtrace on dev kernels.
3651  */
3652 
3653 int
3654 exit_with_mach_exception(
3655 	struct proc *p,
3656 	exception_info_t exception,
3657 	uint32_t flags)
3658 {
3659 	return exit_with_exception_internal(p, exception, flags);
3660 }
3661 
3662 
3663 #if CONFIG_EXCLAVES
3664 int
3665 exit_with_exclave_exception(
3666 	struct proc *p,
3667 	exception_info_t exception,
3668 	uint32_t flags)
3669 {
3670 	return exit_with_exception_internal(p, exception, flags);
3671 }
3672 #endif /* CONFIG_EXCLAVES */
3673 
3674 /**
3675  * Causes the current process to exit with a Mach exception.
3676  *
3677  * Compared to exit_with_mach_exception(), exit_with_mach_exception_using_ast()
3678  * can be called in a preemption-disabled context.  This function defers
3679  * updating the process state until an AST.
3680  *
3681  * @note Currently only the PX_KTRIAGE flag is implemented.
3682  *
3683  * @param exception information about the exception
3684  * @param flags a bitmask of PX_* flags describing how to deliver the exception
3685  */
3686 void
3687 exit_with_mach_exception_using_ast(
3688 	exception_info_t exception,
3689 	uint32_t flags,
3690 	bool fatal)
3691 {
3692 	const uint32_t __assert_only supported_flags = PX_KTRIAGE;
3693 	assert((flags & ~supported_flags) == 0);
3694 
3695 	bool ktriage = flags & PX_KTRIAGE;
3696 	thread_ast_mach_exception(current_thread(), exception.os_reason, exception.exception_type,
3697 	    exception.mx_code, exception.mx_subcode, fatal, ktriage);
3698 }
3699