xref: /xnu-12377.1.9/bsd/kern/kern_exit.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113 
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116 
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120 
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134 #include <vm/vm_map_xnu.h>
135 
136 #include <vm/vm_protos.h>
137 #include <os/log.h>
138 #include <os/system_event_log.h>
139 
140 #include <pexpert/pexpert.h>
141 
142 #include <kdp/kdp_dyld.h>
143 
144 #if SYSV_SHM
145 #include <sys/shm_internal.h>   /* shmexit */
146 #endif /* SYSV_SHM */
147 #if CONFIG_PERSONAS
148 #include <sys/persona.h>
149 #endif /* CONFIG_PERSONAS */
150 #if CONFIG_MEMORYSTATUS
151 #include <sys/kern_memorystatus.h>
152 #endif /* CONFIG_MEMORYSTATUS */
153 #if CONFIG_DTRACE
154 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
155 void dtrace_proc_exit(proc_t p);
156 #include <sys/dtrace_ptss.h>
157 #endif /* CONFIG_DTRACE */
158 #if CONFIG_MACF
159 #include <security/mac_framework.h>
160 #include <security/mac_mach_internal.h>
161 #include <sys/syscall.h>
162 #endif /* CONFIG_MACF */
163 
164 #ifdef CONFIG_EXCLAVES
165 void
166 task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
167 #endif /* CONFIG_EXCLAVES */
168 
169 #if CONFIG_MEMORYSTATUS
170 static void proc_memorystatus_remove(proc_t p);
171 #endif /* CONFIG_MEMORYSTATUS */
172 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
173 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
174     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
175     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
176 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
177 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
178 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
179 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
180 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
181     struct rusage_superset *rup, mach_exception_data_type_t code,
182     mach_exception_data_type_t subcode, uint64_t *udata_buffer,
183     int num_udata, os_reason_t reason, exception_type_t etype);
184 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
185 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
186 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
187 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
188 extern uint64_t get_task_phys_footprint_limit(task_t);
189 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
190 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
191 
192 extern unsigned int exception_log_max_pid;
193 
194 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
195 
196 /*
197  * Flags for `reap_child_locked`.
198  */
199 __options_decl(reap_flags_t, uint32_t, {
200 	/*
201 	 * Parent is exiting, so the kernel is responsible for reaping children.
202 	 */
203 	REAP_DEAD_PARENT = 0x01,
204 	/*
205 	 * Childr process was re-parented to initproc.
206 	 */
207 	REAP_REPARENTED_TO_INIT = 0x02,
208 	/*
209 	 * `proc_list_lock` is held on entry.
210 	 */
211 	REAP_LOCKED = 0x04,
212 	/*
213 	 * Drop the `proc_list_lock` on return.  Note that the `proc_list_lock` will
214 	 * be dropped internally by the function regardless.
215 	 */
216 	REAP_DROP_LOCK = 0x08,
217 });
218 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
219 
220 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
221 
222 /*
223  * Things which should have prototypes in headers, but don't
224  */
225 void    proc_exit(proc_t p);
226 int     wait1continue(int result);
227 int     waitidcontinue(int result);
228 kern_return_t sys_perf_notify(thread_t thread, int pid);
229 kern_return_t task_exception_notify(exception_type_t exception,
230     mach_exception_data_type_t code, mach_exception_data_type_t subcode, bool fatal);
231 void    delay(int);
232 
233 #if DEVELOPMENT || DEBUG
234 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
235 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
236 static pid_t proc_exit_lpexit_spin_pid = -1;            /* wakeup point */
237 static int proc_exit_lpexit_spin_pos = -1;              /* point to block */
238 static int proc_exit_lpexit_spinning = 0;
239 enum {
240 	PELS_POS_START = 0,             /* beginning of proc_exit */
241 	PELS_POS_PRE_TASK_DETACH,       /* before task/proc detach */
242 	PELS_POS_POST_TASK_DETACH,      /* after task/proc detach */
243 	PELS_POS_END,                   /* end of proc_exit */
244 	PELS_NPOS                       /* # valid values */
245 };
246 
247 /* Panic if matching processes (delimited by ',') exit on error. */
248 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
249 
250 static int
251 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
252 {
253 #pragma unused(oidp, arg1, arg2)
254 	pid_t new_value;
255 	int changed;
256 	int error;
257 
258 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
259 		return ENOENT;
260 	}
261 
262 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
263 	    sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
264 	if (error == 0 && changed != 0) {
265 		if (new_value < -1) {
266 			return EINVAL;
267 		}
268 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
269 		proc_exit_lpexit_spin_pid = new_value;
270 		wakeup(&proc_exit_lpexit_spin_pid);
271 		proc_exit_lpexit_spinning = 0;
272 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
273 	}
274 	return error;
275 }
276 
277 static int
278 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
279 {
280 #pragma unused(oidp, arg1, arg2)
281 	int new_value;
282 	int changed;
283 	int error;
284 
285 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
286 		return ENOENT;
287 	}
288 
289 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
290 	    sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
291 	if (error == 0 && changed != 0) {
292 		if (new_value < -1 || new_value >= PELS_NPOS) {
293 			return EINVAL;
294 		}
295 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
296 		proc_exit_lpexit_spin_pos = new_value;
297 		wakeup(&proc_exit_lpexit_spin_pid);
298 		proc_exit_lpexit_spinning = 0;
299 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
300 	}
301 	return error;
302 }
303 
304 static int
305 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
306 {
307 #pragma unused(oidp, arg1, arg2)
308 	int new_value;
309 	int changed;
310 	int error;
311 
312 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
313 		return ENOENT;
314 	}
315 
316 	error = sysctl_io_number(req, proc_exit_lpexit_spinning,
317 	    sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
318 	if (error == 0 && changed != 0) {
319 		return EINVAL;
320 	}
321 	return error;
322 }
323 
324 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
325     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
326     NULL, sizeof(pid_t),
327     proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
328 
329 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
330     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
331     NULL, sizeof(int),
332     proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
333 
334 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
335     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
336     NULL, sizeof(int),
337     proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
338 
339 static inline void
proc_exit_lpexit_check(pid_t pid,int pos)340 proc_exit_lpexit_check(pid_t pid, int pos)
341 {
342 	if (proc_exit_lpexit_spin_pid == pid) {
343 		bool slept = false;
344 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
345 		while (proc_exit_lpexit_spin_pid == pid &&
346 		    proc_exit_lpexit_spin_pos == pos) {
347 			if (!slept) {
348 				os_log(OS_LOG_DEFAULT,
349 				    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
350 				slept = true;
351 			}
352 			proc_exit_lpexit_spinning = 1;
353 			msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
354 			    PWAIT, "proc_exit_lpexit_check", NULL);
355 			proc_exit_lpexit_spinning = 0;
356 		}
357 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
358 		if (slept) {
359 			os_log(OS_LOG_DEFAULT,
360 			    "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
361 		}
362 	}
363 }
364 #endif /* DEVELOPMENT || DEBUG */
365 
366 /*
367  * NOTE: Source and target may *NOT* overlap!
368  * XXX Should share code with bsd/dev/ppc/unix_signal.c
369  */
370 void
siginfo_user_to_user32(user_siginfo_t * in,user32_siginfo_t * out)371 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
372 {
373 	out->si_signo   = in->si_signo;
374 	out->si_errno   = in->si_errno;
375 	out->si_code    = in->si_code;
376 	out->si_pid     = in->si_pid;
377 	out->si_uid     = in->si_uid;
378 	out->si_status  = in->si_status;
379 	out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
380 	/* following cast works for sival_int because of padding */
381 	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
382 	out->si_band    = (user32_long_t)in->si_band;                  /* range reduction */
383 }
384 
385 void
siginfo_user_to_user64(user_siginfo_t * in,user64_siginfo_t * out)386 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
387 {
388 	out->si_signo   = in->si_signo;
389 	out->si_errno   = in->si_errno;
390 	out->si_code    = in->si_code;
391 	out->si_pid     = in->si_pid;
392 	out->si_uid     = in->si_uid;
393 	out->si_status  = in->si_status;
394 	out->si_addr    = in->si_addr;
395 	/* following cast works for sival_int because of padding */
396 	out->si_value.sival_ptr = in->si_value.sival_ptr;
397 	out->si_band    = in->si_band;                  /* range reduction */
398 }
399 
400 static int
copyoutsiginfo(user_siginfo_t * native,boolean_t is64,user_addr_t uaddr)401 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
402 {
403 	if (is64) {
404 		user64_siginfo_t sinfo64;
405 
406 		bzero(&sinfo64, sizeof(sinfo64));
407 		siginfo_user_to_user64(native, &sinfo64);
408 		return copyout(&sinfo64, uaddr, sizeof(sinfo64));
409 	} else {
410 		user32_siginfo_t sinfo32;
411 
412 		bzero(&sinfo32, sizeof(sinfo32));
413 		siginfo_user_to_user32(native, &sinfo32);
414 		return copyout(&sinfo32, uaddr, sizeof(sinfo32));
415 	}
416 }
417 
418 void
gather_populate_corpse_crashinfo(proc_t p,task_t corpse_task,mach_exception_data_type_t code,mach_exception_data_type_t subcode,uint64_t * udata_buffer,int num_udata,void * reason,exception_type_t etype)419 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
420     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
421     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
422 {
423 	struct rusage_superset rup;
424 
425 	gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
426 	rup.ri.ri_phys_footprint = 0;
427 	populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
428 	    udata_buffer, num_udata, reason, etype);
429 }
430 
431 static void
proc_update_corpse_exception_codes(proc_t p,mach_exception_data_type_t * code,mach_exception_data_type_t * subcode)432 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
433 {
434 	mach_exception_data_type_t code_update = *code;
435 	mach_exception_data_type_t subcode_update = *subcode;
436 	if (p->p_exit_reason == OS_REASON_NULL) {
437 		return;
438 	}
439 
440 	switch (p->p_exit_reason->osr_namespace) {
441 	case OS_REASON_JETSAM:
442 		if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
443 			/* Update the code with EXC_RESOURCE code for high memory watermark */
444 			EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
445 			EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
446 			EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
447 			subcode_update = 0;
448 			break;
449 		}
450 
451 		break;
452 	default:
453 		break;
454 	}
455 
456 	*code = code_update;
457 	*subcode = subcode_update;
458 	return;
459 }
460 
461 mach_exception_data_type_t
proc_encode_exit_exception_code(proc_t p)462 proc_encode_exit_exception_code(proc_t p)
463 {
464 	uint64_t subcode = 0;
465 
466 	if (p->p_exit_reason == OS_REASON_NULL) {
467 		return 0;
468 	}
469 
470 	/* Embed first 32 bits of osr_namespace and osr_code in exception code */
471 	ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
472 	ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
473 	return (mach_exception_data_type_t)subcode;
474 }
475 
476 static void
populate_corpse_crashinfo(proc_t p,task_t corpse_task,struct rusage_superset * rup,mach_exception_data_type_t code,mach_exception_data_type_t subcode,uint64_t * udata_buffer,int num_udata,os_reason_t reason,exception_type_t etype)477 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
478     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
479     uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
480 {
481 	mach_vm_address_t uaddr = 0;
482 	mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
483 	exc_codes[0] = code;
484 	exc_codes[1] = subcode;
485 	cpu_type_t cputype;
486 	struct proc_uniqidentifierinfo p_uniqidinfo;
487 	struct proc_workqueueinfo pwqinfo;
488 	int retval = 0;
489 	uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
490 	boolean_t is_corpse_fork;
491 	uint32_t csflags;
492 	unsigned int pflags = 0;
493 	uint64_t max_footprint_mb;
494 	uint64_t max_footprint;
495 
496 	uint64_t ledger_internal;
497 	uint64_t ledger_internal_compressed;
498 	uint64_t ledger_iokit_mapped;
499 	uint64_t ledger_alternate_accounting;
500 	uint64_t ledger_alternate_accounting_compressed;
501 	uint64_t ledger_purgeable_nonvolatile;
502 	uint64_t ledger_purgeable_nonvolatile_compressed;
503 	uint64_t ledger_page_table;
504 	uint64_t ledger_phys_footprint;
505 	uint64_t ledger_phys_footprint_lifetime_max;
506 	uint64_t ledger_network_nonvolatile;
507 	uint64_t ledger_network_nonvolatile_compressed;
508 	uint64_t ledger_wired_mem;
509 	uint64_t ledger_tagged_footprint;
510 	uint64_t ledger_tagged_footprint_compressed;
511 	uint64_t ledger_media_footprint;
512 	uint64_t ledger_media_footprint_compressed;
513 	uint64_t ledger_graphics_footprint;
514 	uint64_t ledger_graphics_footprint_compressed;
515 	uint64_t ledger_neural_footprint;
516 	uint64_t ledger_neural_footprint_compressed;
517 
518 	void *crash_info_ptr = task_get_corpseinfo(corpse_task);
519 
520 #if CONFIG_MEMORYSTATUS
521 	int memstat_dirty_flags = 0;
522 #endif
523 
524 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
525 		kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
526 	}
527 
528 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
529 		pid_t pid = proc_getpid(p);
530 		kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
531 	}
532 
533 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
534 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
535 	}
536 
537 	/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
538 	if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
539 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
540 			kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
541 		}
542 	}
543 
544 	static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
545 	if (KERN_SUCCESS ==
546 	    kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
547 		proc_piduniqidentifierinfo(p, &p_uniqidinfo);
548 		kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
549 	}
550 
551 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
552 		kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
553 	}
554 
555 	csflags = (uint32_t)proc_getcsflags(p);
556 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
557 		kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
558 	}
559 
560 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
561 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
562 	}
563 
564 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
565 		struct timeval64 t64;
566 		t64.tv_sec = (int64_t)p->p_start.tv_sec;
567 		t64.tv_usec = (int64_t)p->p_start.tv_usec;
568 		kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
569 	}
570 
571 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
572 		kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
573 	}
574 
575 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
576 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
577 	}
578 
579 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
580 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
581 	}
582 
583 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
584 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
585 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
586 		kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
587 		zfree(ZV_NAMEI, buf);
588 	}
589 
590 	pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
591 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
592 		kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
593 	}
594 
595 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
596 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
597 	}
598 
599 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
600 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
601 	}
602 
603 	cputype = cpu_type() & ~CPU_ARCH_MASK;
604 	if (IS_64BIT_PROCESS(p)) {
605 		cputype |= CPU_ARCH_ABI64;
606 	} else if (proc_is64bit_data(p)) {
607 		cputype |= CPU_ARCH_ABI64_32;
608 	}
609 
610 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
611 		kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
612 	}
613 
614 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
615 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
616 	}
617 
618 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
619 		max_footprint = get_task_phys_footprint_limit(proc_task(p));
620 		max_footprint_mb = max_footprint >> 20;
621 		kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
622 	}
623 
624 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
625 		ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
626 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
627 	}
628 
629 	// In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
630 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
631 		ledger_internal = get_task_internal(corpse_task);
632 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
633 	}
634 
635 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
636 		ledger_internal_compressed = get_task_internal_compressed(corpse_task);
637 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
638 	}
639 
640 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
641 		ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
642 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
643 	}
644 
645 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
646 		ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
647 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
648 	}
649 
650 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
651 		ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
652 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
653 	}
654 
655 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
656 		ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
657 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
658 	}
659 
660 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
661 		ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
662 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
663 	}
664 
665 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
666 		ledger_page_table = get_task_page_table(corpse_task);
667 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
668 	}
669 
670 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
671 		ledger_phys_footprint = get_task_phys_footprint(corpse_task);
672 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
673 	}
674 
675 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
676 		ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
677 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
678 	}
679 
680 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
681 		ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
682 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
683 	}
684 
685 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
686 		ledger_wired_mem = get_task_wired_mem(corpse_task);
687 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
688 	}
689 
690 	bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
691 	retval = fill_procworkqueue(p, &pwqinfo);
692 	if (retval == 0) {
693 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
694 			kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
695 		}
696 	}
697 
698 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
699 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
700 	}
701 
702 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
703 		uid_t persona_id = proc_persona_id(p);
704 		kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
705 	}
706 
707 #if CONFIG_COALITIONS
708 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
709 		uint64_t coalition_ids[COALITION_NUM_TYPES];
710 		task_coalition_ids(proc_task(p), coalition_ids);
711 		kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
712 	}
713 #endif /* CONFIG_COALITIONS */
714 
715 #if CONFIG_MEMORYSTATUS
716 	memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
717 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
718 		kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
719 	}
720 #endif
721 
722 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
723 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
724 	}
725 
726 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
727 		ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
728 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
729 	}
730 
731 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
732 		ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
733 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
734 	}
735 
736 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
737 		ledger_media_footprint = get_task_media_footprint(corpse_task);
738 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
739 	}
740 
741 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
742 		ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
743 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
744 	}
745 
746 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
747 		ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
748 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
749 	}
750 
751 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
752 		ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
753 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
754 	}
755 
756 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
757 		ledger_neural_footprint = get_task_neural_footprint(corpse_task);
758 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
759 	}
760 
761 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
762 		ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
763 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
764 	}
765 
766 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
767 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
768 	}
769 
770 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
771 		char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
772 		ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
773 		kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
774 	}
775 
776 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
777 		is_corpse_fork = is_corpsefork(corpse_task);
778 		kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
779 	}
780 
781 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
782 		kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
783 	}
784 
785 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
786 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
787 	}
788 
789 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
790 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
791 	}
792 
793 	char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
794 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
795 		const char * id = cs_identity_get(p);
796 		if (id) {
797 			strlcpy(signing_id, id, sizeof(signing_id));
798 		}
799 		kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
800 	}
801 	char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
802 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
803 		const char * id = csproc_get_teamid(p);
804 		if (id) {
805 			strlcpy(team_id, id, sizeof(team_id));
806 		}
807 		kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
808 	}
809 
810 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
811 		uint32_t category = 0;
812 		if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
813 			category = CS_VALIDATION_CATEGORY_INVALID;
814 		}
815 		kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
816 	}
817 
818 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
819 		uint32_t trust = 0;
820 		kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
821 		if (ret != KERN_SUCCESS) {
822 			trust = KCDATA_INVALID_CS_TRUST_LEVEL;
823 		}
824 		kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
825 	}
826 
827 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_SECURITY_CONFIG, sizeof(uint32_t), &uaddr)) {
828 		struct crashinfo_task_security_config task_security;
829 		task_security.task_security_config = task_get_security_config(corpse_task);
830 		kcdata_memcpy(crash_info_ptr, uaddr, &task_security, sizeof(task_security));
831 	}
832 
833 	uint64_t jit_start_addr = 0;
834 	uint64_t jit_end_addr = 0;
835 	kern_return_t ret = get_jit_address_range_kdp(get_task_pmap(corpse_task), (uintptr_t*)&jit_start_addr, (uintptr_t*)&jit_end_addr);
836 	if (KERN_SUCCESS == ret) {
837 		struct crashinfo_jit_address_range range = {};
838 		range.start_address = jit_start_addr;
839 		range.end_address = jit_end_addr;
840 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_JIT_ADDRESS_RANGE, sizeof(struct crashinfo_jit_address_range), &uaddr)) {
841 			kcdata_memcpy(crash_info_ptr, uaddr, &range, sizeof(range));
842 		}
843 	}
844 
845 	uint64_t cs_auxiliary_info = task_get_cs_auxiliary_info_kdp(corpse_task);
846 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_AUXILIARY_INFO, sizeof(cs_auxiliary_info), &uaddr)) {
847 		kcdata_memcpy(crash_info_ptr, uaddr, &cs_auxiliary_info, sizeof(cs_auxiliary_info));
848 	}
849 
850 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RLIM_CORE, sizeof(rlim_t), &uaddr)) {
851 		const rlim_t lim = proc_limitgetcur(p, RLIMIT_CORE);
852 		kcdata_memcpy(crash_info_ptr, uaddr, &lim, sizeof(lim));
853 	}
854 
855 #if CONFIG_UCOREDUMP
856 	if (do_ucoredump && !task_is_driver(proc_task(p)) &&
857 	    KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CORE_ALLOWED, sizeof(uint8_t), &uaddr)) {
858 		const uint8_t allow = is_coredump_eligible(p) == 0;
859 		kcdata_memcpy(crash_info_ptr, uaddr, &allow, sizeof(allow));
860 	}
861 #endif /* CONFIG_UCOREDUMP */
862 
863 	if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
864 		reason = p->p_exit_reason;
865 	}
866 
867 
868 	if (reason != OS_REASON_NULL) {
869 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
870 			struct exit_reason_snapshot ers = {
871 				.ers_namespace = reason->osr_namespace,
872 				.ers_code = reason->osr_code,
873 				.ers_flags = reason->osr_flags
874 			};
875 
876 			kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
877 		}
878 
879 		if (reason->osr_kcd_buf != 0) {
880 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
881 			assert(reason_buf_size != 0);
882 
883 			if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
884 				kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
885 			}
886 		}
887 	}
888 
889 	if (num_udata > 0) {
890 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
891 		    sizeof(uint64_t), num_udata, &uaddr)) {
892 			kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
893 		}
894 	}
895 
896 #if CONFIG_EXCLAVES
897 	task_add_conclave_crash_info(corpse_task, crash_info_ptr);
898 #endif /* CONFIG_EXCLAVES */
899 }
900 
901 exception_type_t
get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)902 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
903 {
904 	kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
905 	    corpse_info->kcd_length);
906 	__assert_only uint32_t type = kcdata_iter_type(iter);
907 	assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
908 
909 	iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
910 	exception_type_t *etype = kcdata_iter_payload(iter);
911 	return *etype;
912 }
913 
914 /*
915  * Collect information required for generating lightweight corpse for current
916  * task, which can be terminating.
917  */
918 kern_return_t
current_thread_collect_backtrace_info(kcdata_descriptor_t * new_desc,exception_type_t etype,mach_exception_data_t code,mach_msg_type_number_t codeCnt,void * reasonp)919 current_thread_collect_backtrace_info(
920 	kcdata_descriptor_t *new_desc,
921 	exception_type_t etype,
922 	mach_exception_data_t code,
923 	mach_msg_type_number_t codeCnt,
924 	void *reasonp)
925 {
926 	kcdata_descriptor_t kcdata;
927 	kern_return_t kr;
928 	int frame_count = 0, max_frames = 100;
929 	mach_vm_address_t uuid_info_addr = 0;
930 	uint32_t uuid_info_count         = 0;
931 	uint32_t btinfo_flag             = 0;
932 	mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
933 	natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
934 	mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
935 	thread_identifier_info_data_t th_info;
936 	char threadname[MAXTHREADNAMESIZE];
937 	void *btdata_kernel = NULL;
938 	typedef uintptr_t user_btframe_t __kernel_data_semantics;
939 	user_btframe_t *btframes = NULL;
940 	os_reason_t reason = (os_reason_t)reasonp;
941 	struct backtrace_user_info info = BTUINFO_INIT;
942 	struct rusage_superset rup;
943 	uint32_t platform;
944 
945 	task_t task = current_task();
946 	proc_t p = current_proc();
947 
948 	bool has_64bit_addr = task_get_64bit_addr(current_task());
949 	bool has_64bit_data = task_get_64bit_data(current_task());
950 
951 	if (new_desc == NULL) {
952 		return KERN_INVALID_ARGUMENT;
953 	}
954 
955 	/* First, collect backtrace frames */
956 	btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
957 	if (!btframes) {
958 		return KERN_RESOURCE_SHORTAGE;
959 	}
960 
961 	frame_count = backtrace_user(btframes, max_frames, NULL, &info);
962 	if (info.btui_error || frame_count == 0) {
963 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
964 		return KERN_FAILURE;
965 	}
966 
967 	if ((info.btui_info & BTI_TRUNCATED) != 0) {
968 		btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
969 	}
970 
971 	/* Captured in kcdata descriptor below */
972 	btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
973 	if (!btdata_kernel) {
974 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
975 		return KERN_RESOURCE_SHORTAGE;
976 	}
977 
978 	kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
979 	if (!kcdata) {
980 		kfree_data(btdata_kernel, alloc_size);
981 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
982 		return KERN_RESOURCE_SHORTAGE;
983 	}
984 
985 	/* First reserve space in kcdata blob for the btinfo flag fields */
986 	if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
987 	    sizeof(uint32_t), &btinfo_flag_addr)) {
988 		kfree_data(btdata_kernel, alloc_size);
989 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
990 		kcdata_memory_destroy(kcdata);
991 		return KERN_RESOURCE_SHORTAGE;
992 	}
993 
994 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
995 	    (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
996 	    sizeof(uintptr_t), frame_count, &kaddr)) {
997 		kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
998 	}
999 
1000 #if __LP64__
1001 	/* We only support async stacks on 64-bit kernels */
1002 	frame_count = 0;
1003 
1004 	if (info.btui_async_frame_addr != 0) {
1005 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
1006 		    sizeof(uint32_t), &kaddr)) {
1007 			uint32_t idx = info.btui_async_start_index;
1008 			kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
1009 		}
1010 		struct backtrace_control ctl = {
1011 			.btc_frame_addr = info.btui_async_frame_addr,
1012 			.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
1013 		};
1014 
1015 		info = BTUINFO_INIT;
1016 		frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
1017 		if (info.btui_error == 0 && frame_count > 0) {
1018 			if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1019 			    TASK_BTINFO_ASYNC_BACKTRACE64,
1020 			    sizeof(uintptr_t), frame_count, &kaddr)) {
1021 				kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
1022 			}
1023 		}
1024 
1025 		if ((info.btui_info & BTI_TRUNCATED) != 0) {
1026 			btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
1027 		}
1028 	}
1029 #endif
1030 
1031 	/* Backtrace collection done, free the frames buffer */
1032 	kfree_data(btframes, max_frames * sizeof(btframes[0]));
1033 	btframes = NULL;
1034 
1035 	thread_set_exec_promotion(current_thread());
1036 	/* Next, suspend the task briefly and collect image load infos */
1037 	task_suspend_internal(task);
1038 
1039 	/* all_image_info struct is ABI, in agreement with address width */
1040 	if (has_64bit_addr) {
1041 		struct user64_dyld_all_image_infos task_image_infos = {};
1042 		struct btinfo_sc_load_info64 sc_info;
1043 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1044 		    sizeof(struct user64_dyld_all_image_infos));
1045 		uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1046 		uuid_info_addr = task_image_infos.uuidArray;
1047 
1048 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1049 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1050 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1051 		    sizeof(task_image_infos.sharedCacheUUID));
1052 
1053 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1054 		    TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1055 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1056 		}
1057 	} else {
1058 		struct user32_dyld_all_image_infos task_image_infos = {};
1059 		struct btinfo_sc_load_info sc_info;
1060 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1061 		    sizeof(struct user32_dyld_all_image_infos));
1062 		uuid_info_count = task_image_infos.uuidArrayCount;
1063 		uuid_info_addr = task_image_infos.uuidArray;
1064 
1065 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1066 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1067 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1068 		    sizeof(task_image_infos.sharedCacheUUID));
1069 
1070 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1071 		    TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1072 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1073 		}
1074 	}
1075 
1076 	if (!uuid_info_addr) {
1077 		/*
1078 		 * Can happen when we catch dyld in the middle of updating
1079 		 * this data structure, or copyin of all_image_info struct failed.
1080 		 */
1081 		task_resume_internal(task);
1082 		thread_clear_exec_promotion(current_thread());
1083 		kfree_data(btdata_kernel, alloc_size);
1084 		kcdata_memory_destroy(kcdata);
1085 		return KERN_MEMORY_ERROR;
1086 	}
1087 
1088 	if (uuid_info_count > 0) {
1089 		uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1090 		    sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1091 
1092 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1093 		    (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1094 		    uuid_info_size, uuid_info_count, &kaddr)) {
1095 			if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1096 				task_resume_internal(task);
1097 				thread_clear_exec_promotion(current_thread());
1098 				kfree_data(btdata_kernel, alloc_size);
1099 				kcdata_memory_destroy(kcdata);
1100 				return KERN_MEMORY_ERROR;
1101 			}
1102 		}
1103 	}
1104 
1105 	task_resume_internal(task);
1106 	thread_clear_exec_promotion(current_thread());
1107 
1108 	/* Next, collect all other information */
1109 	thread_flavor_t tsflavor;
1110 	mach_msg_type_number_t tscount;
1111 
1112 #if defined(__x86_64__) || defined(__i386__)
1113 	tsflavor = x86_THREAD_STATE;      /* unified */
1114 	tscount  = x86_THREAD_STATE_COUNT;
1115 #else
1116 	tsflavor = ARM_THREAD_STATE;      /* unified */
1117 	tscount  = ARM_UNIFIED_THREAD_STATE_COUNT;
1118 #endif
1119 
1120 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1121 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1122 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1123 		bt_thread_state->flavor = tsflavor;
1124 		bt_thread_state->count = tscount;
1125 		/* variable-sized tstate array follows */
1126 
1127 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1128 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1129 		if (kr != KERN_SUCCESS) {
1130 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1131 			if (kr == KERN_TERMINATED) {
1132 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1133 			}
1134 		}
1135 	}
1136 
1137 #if defined(__x86_64__) || defined(__i386__)
1138 	tsflavor = x86_EXCEPTION_STATE;       /* unified */
1139 	tscount  = x86_EXCEPTION_STATE_COUNT;
1140 #else
1141 #if defined(__arm64__)
1142 	if (has_64bit_data) {
1143 		tsflavor = ARM_EXCEPTION_STATE64;
1144 		tscount  = ARM_EXCEPTION_STATE64_COUNT;
1145 	} else
1146 #endif /* defined(__arm64__) */
1147 	{
1148 		tsflavor = ARM_EXCEPTION_STATE;
1149 		tscount  = ARM_EXCEPTION_STATE_COUNT;
1150 	}
1151 #endif /* defined(__x86_64__) || defined(__i386__) */
1152 
1153 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1154 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1155 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1156 		bt_thread_state->flavor = tsflavor;
1157 		bt_thread_state->count = tscount;
1158 		/* variable-sized tstate array follows */
1159 
1160 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1161 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1162 		if (kr != KERN_SUCCESS) {
1163 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1164 			if (kr == KERN_TERMINATED) {
1165 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1166 			}
1167 		}
1168 	}
1169 
1170 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1171 		pid_t pid = proc_getpid(p);
1172 		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1173 	}
1174 
1175 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1176 		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1177 	}
1178 
1179 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1180 		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1181 	}
1182 
1183 #if CONFIG_COALITIONS
1184 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1185 		uint64_t coalition_ids[COALITION_NUM_TYPES];
1186 		task_coalition_ids(proc_task(p), coalition_ids);
1187 		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1188 	}
1189 #endif /* CONFIG_COALITIONS */
1190 
1191 	/* V0 is sufficient for ReportCrash */
1192 	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1193 	rup.ri.ri_phys_footprint = 0;
1194 	/* Soft crash, proc did not exit */
1195 	rup.ri.ri_proc_exit_abstime = 0;
1196 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1197 		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1198 	}
1199 
1200 	platform = proc_platform(current_proc());
1201 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1202 		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1203 	}
1204 
1205 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1206 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1207 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1208 		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1209 		zfree(ZV_NAMEI, buf);
1210 	}
1211 
1212 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1213 		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1214 	}
1215 
1216 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1217 		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1218 	}
1219 
1220 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1221 		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1222 		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1223 	}
1224 
1225 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1226 		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1227 		if (has_64bit_addr) {
1228 			cputype |= CPU_ARCH_ABI64;
1229 		} else if (has_64bit_data) {
1230 			cputype |= CPU_ARCH_ABI64_32;
1231 		}
1232 		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1233 	}
1234 
1235 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1236 		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1237 	}
1238 
1239 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1240 		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1241 	}
1242 
1243 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1244 		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1245 	}
1246 
1247 	assert(codeCnt <= EXCEPTION_CODE_MAX);
1248 
1249 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1250 	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1251 		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1252 	}
1253 
1254 	if (reason != OS_REASON_NULL) {
1255 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1256 			struct exit_reason_snapshot ers = {
1257 				.ers_namespace = reason->osr_namespace,
1258 				.ers_code = reason->osr_code,
1259 				.ers_flags = reason->osr_flags
1260 			};
1261 
1262 			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1263 		}
1264 
1265 		if (reason->osr_kcd_buf != 0) {
1266 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1267 			assert(reason_buf_size != 0);
1268 
1269 			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1270 				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1271 			}
1272 		}
1273 	}
1274 
1275 	threadname[0] = '\0';
1276 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1277 	    sizeof(threadname), &kaddr)) {
1278 		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1279 		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1280 	}
1281 
1282 	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1283 	if (kr == KERN_TERMINATED) {
1284 		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1285 	}
1286 
1287 
1288 	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1289 	    sizeof(uint64_t), &kaddr);
1290 
1291 	/*
1292 	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to exception
1293 	 * handler (ReportCrash) that lw corpse collection ran out of space and the
1294 	 * result is incomplete.
1295 	 */
1296 	if (last_kr != KERN_SUCCESS) {
1297 		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1298 	}
1299 
1300 	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1301 		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1302 	}
1303 
1304 	/* Lastly, copy the flags to the address we reserved at the beginning. */
1305 	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1306 
1307 	*new_desc = kcdata;
1308 
1309 	return KERN_SUCCESS;
1310 }
1311 
1312 /*
1313  * We only parse exit reason kcdata blobs for critical process before they die
1314  * and we're going to panic or for opt-in, limited diagnostic tools.
1315  *
1316  * Meant to be called immediately before panicking or limited diagnostic
1317  * scenarios.
1318  */
1319 char *
exit_reason_get_string_desc(os_reason_t exit_reason)1320 exit_reason_get_string_desc(os_reason_t exit_reason)
1321 {
1322 	kcdata_iter_t iter;
1323 
1324 	if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1325 	    exit_reason->osr_bufsize == 0) {
1326 		return NULL;
1327 	}
1328 
1329 	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1330 	if (!kcdata_iter_valid(iter)) {
1331 #if DEBUG || DEVELOPMENT
1332 		printf("exit reason has invalid exit reason buffer\n");
1333 #endif
1334 		return NULL;
1335 	}
1336 
1337 	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1338 #if DEBUG || DEVELOPMENT
1339 		printf("exit reason buffer type mismatch, expected %d got %d\n",
1340 		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1341 #endif
1342 		return NULL;
1343 	}
1344 
1345 	iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1346 	if (!kcdata_iter_valid(iter)) {
1347 		return NULL;
1348 	}
1349 
1350 	return (char *)kcdata_iter_payload(iter);
1351 }
1352 
1353 static int initproc_spawned = 0;
1354 
1355 static int
sysctl_initproc_spawned(struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)1356 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1357 {
1358 	if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1359 		// Can only ever be set by launchd, and only once at boot
1360 		return EPERM;
1361 	}
1362 	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1363 }
1364 
1365 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1366     CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1367     sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
1368 
1369 #if DEVELOPMENT || DEBUG
1370 
1371 /* disable user faults */
1372 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1373 #endif /* DEVELOPMENT || DEBUG */
1374 
1375 #define OS_REASON_IFLAG_USER_FAULT 0x1
1376 
1377 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC  5
1378 
1379 static int
abort_with_payload_internal(proc_t p,uint32_t reason_namespace,uint64_t reason_code,user_addr_t payload,uint32_t payload_size,user_addr_t reason_string,uint64_t reason_flags,uint32_t internal_flags)1380 abort_with_payload_internal(proc_t p,
1381     uint32_t reason_namespace, uint64_t reason_code,
1382     user_addr_t payload, uint32_t payload_size,
1383     user_addr_t reason_string, uint64_t reason_flags,
1384     uint32_t internal_flags)
1385 {
1386 	os_reason_t exit_reason = OS_REASON_NULL;
1387 	kern_return_t kr = KERN_SUCCESS;
1388 
1389 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1390 		uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1391 		    memory_order_relaxed);
1392 
1393 #if DEVELOPMENT || DEBUG
1394 		if (bootarg_disable_user_faults) {
1395 			return EQFULL;
1396 		}
1397 #endif /* DEVELOPMENT || DEBUG */
1398 
1399 		for (;;) {
1400 			if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1401 				return EQFULL;
1402 			}
1403 			// this reloads the value in old_value
1404 			if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1405 			    &old_value, old_value + 1, memory_order_relaxed,
1406 			    memory_order_relaxed)) {
1407 				break;
1408 			}
1409 		}
1410 	}
1411 
1412 	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1413 	    proc_getpid(p), reason_namespace,
1414 	    reason_code, 0, 0);
1415 
1416 	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1417 	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1418 
1419 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1420 		mach_exception_code_t code = 0;
1421 
1422 		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1423 		EXC_GUARD_ENCODE_FLAVOR(code, 0);
1424 		EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1425 
1426 		if (exit_reason == OS_REASON_NULL) {
1427 			kr = KERN_RESOURCE_SHORTAGE;
1428 		} else {
1429 			kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1430 		}
1431 		os_reason_free(exit_reason);
1432 	} else {
1433 		/*
1434 		 * We use SIGABRT (rather than calling exit directly from here) so that
1435 		 * the debugger can catch abort_with_{reason,payload} calls.
1436 		 */
1437 		psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1438 	}
1439 
1440 	switch (kr) {
1441 	case KERN_SUCCESS:
1442 		return 0;
1443 	case KERN_NOT_SUPPORTED:
1444 		return ENOTSUP;
1445 	case KERN_INVALID_ARGUMENT:
1446 		return EINVAL;
1447 	case KERN_RESOURCE_SHORTAGE:
1448 	default:
1449 		return EBUSY;
1450 	}
1451 }
1452 
1453 int
abort_with_payload(struct proc * cur_proc,struct abort_with_payload_args * args,__unused void * retval)1454 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1455     __unused void *retval)
1456 {
1457 	abort_with_payload_internal(cur_proc, args->reason_namespace,
1458 	    args->reason_code, args->payload, args->payload_size,
1459 	    args->reason_string, args->reason_flags, 0);
1460 
1461 	return 0;
1462 }
1463 
1464 int
os_fault_with_payload(struct proc * cur_proc,struct os_fault_with_payload_args * args,__unused int * retval)1465 os_fault_with_payload(struct proc *cur_proc,
1466     struct os_fault_with_payload_args *args, __unused int *retval)
1467 {
1468 	return abort_with_payload_internal(cur_proc, args->reason_namespace,
1469 	           args->reason_code, args->payload, args->payload_size,
1470 	           args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1471 }
1472 
1473 
1474 /*
1475  * exit --
1476  *	Death of process.
1477  */
1478 __attribute__((noreturn))
1479 void
exit(proc_t p,struct exit_args * uap,int * retval)1480 exit(proc_t p, struct exit_args *uap, int *retval)
1481 {
1482 	p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1483 	exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1484 
1485 	thread_exception_return();
1486 	/* NOTREACHED */
1487 	while (TRUE) {
1488 		thread_block(THREAD_CONTINUE_NULL);
1489 	}
1490 	/* NOTREACHED */
1491 }
1492 
1493 /*
1494  * Exit: deallocate address space and other resources, change proc state
1495  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
1496  * status and rusage for wait().  Check for child processes and orphan them.
1497  */
1498 int
exit1(proc_t p,int rv,int * retval)1499 exit1(proc_t p, int rv, int *retval)
1500 {
1501 	return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1502 }
1503 
1504 int
exit1_internal(proc_t p,int rv,int * retval,boolean_t thread_can_terminate,boolean_t perf_notify,int jetsam_flags)1505 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1506     int jetsam_flags)
1507 {
1508 	return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1509 }
1510 
1511 /*
1512  * NOTE: exit_with_reason drops a reference on the passed exit_reason
1513  */
1514 int
exit_with_reason(proc_t p,int rv,int * retval,boolean_t thread_can_terminate,boolean_t perf_notify,int jetsam_flags,struct os_reason * exit_reason)1515 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1516     int jetsam_flags, struct os_reason *exit_reason)
1517 {
1518 	thread_t self = current_thread();
1519 	struct task *task = proc_task(p);
1520 	struct uthread *ut;
1521 	int error = 0;
1522 	bool proc_exiting = false;
1523 
1524 #if DEVELOPMENT || DEBUG
1525 	/*
1526 	 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1527 	 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1528 	 */
1529 	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1530 		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1531 		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1532 	}
1533 #endif
1534 
1535 	/*
1536 	 * If a thread in this task has already
1537 	 * called exit(), then halt any others
1538 	 * right here.
1539 	 */
1540 
1541 	ut = get_bsdthread_info(self);
1542 	(void)retval;
1543 
1544 	/*
1545 	 * The parameter list of audit_syscall_exit() was augmented to
1546 	 * take the Darwin syscall number as the first parameter,
1547 	 * which is currently required by mac_audit_postselect().
1548 	 */
1549 
1550 	/*
1551 	 * The BSM token contains two components: an exit status as passed
1552 	 * to exit(), and a return value to indicate what sort of exit it
1553 	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
1554 	 * what the return value is.
1555 	 */
1556 	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1557 	/*
1558 	 * TODO: what to audit here when jetsam calls exit and the uthread,
1559 	 * 'ut' does not belong to the proc, 'p'.
1560 	 */
1561 	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successfull */
1562 
1563 	DTRACE_PROC1(exit, int, CLD_EXITED);
1564 
1565 	/* mark process is going to exit and pull out of DBG/disk throttle */
1566 	/* TODO: This should be done after becoming exit thread */
1567 	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1568 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1569 
1570 	proc_lock(p);
1571 	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1572 	if (error == EDEADLK) {
1573 		/*
1574 		 * If proc_transstart() returns EDEADLK, then another thread
1575 		 * is either exec'ing or exiting. Return an error and allow
1576 		 * the other thread to continue.
1577 		 */
1578 		proc_unlock(p);
1579 		os_reason_free(exit_reason);
1580 		if (current_proc() == p) {
1581 			if (p->exit_thread == self) {
1582 				panic("exit_thread failed to exit");
1583 			}
1584 
1585 			if (thread_can_terminate) {
1586 				thread_exception_return();
1587 			}
1588 		}
1589 
1590 		return error;
1591 	}
1592 
1593 	proc_exiting = !!(p->p_lflag & P_LEXIT);
1594 
1595 	while (proc_exiting || p->exit_thread != self) {
1596 		if (proc_exiting || sig_try_locked(p) <= 0) {
1597 			proc_transend(p, 1);
1598 			os_reason_free(exit_reason);
1599 
1600 			if (get_threadtask(self) != task) {
1601 				proc_unlock(p);
1602 				return 0;
1603 			}
1604 			proc_unlock(p);
1605 
1606 			thread_terminate(self);
1607 			if (!thread_can_terminate) {
1608 				return 0;
1609 			}
1610 
1611 			thread_exception_return();
1612 			/* NOTREACHED */
1613 		}
1614 		sig_lock_to_exit(p);
1615 	}
1616 
1617 	if (exit_reason != OS_REASON_NULL) {
1618 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1619 		    proc_getpid(p), exit_reason->osr_namespace,
1620 		    exit_reason->osr_code, 0, 0);
1621 	}
1622 
1623 	assert(p->p_exit_reason == OS_REASON_NULL);
1624 	p->p_exit_reason = exit_reason;
1625 
1626 	p->p_lflag |= P_LEXIT;
1627 	p->p_xstat = rv;
1628 	p->p_lflag |= jetsam_flags;
1629 
1630 	proc_transend(p, 1);
1631 	proc_unlock(p);
1632 
1633 	proc_prepareexit(p, rv, perf_notify);
1634 
1635 	/* Last thread to terminate will call proc_exit() */
1636 	task_terminate_internal(task);
1637 
1638 	return 0;
1639 }
1640 
1641 #if CONFIG_MEMORYSTATUS
1642 /*
1643  * Remove this process from jetsam bands for freezing or exiting. Note this will block, if the process
1644  * is currently being frozen.
1645  * The proc_list_lock is held by the caller.
1646  * NB: If the process should be ineligible for future freezing or jetsaming the caller should first set
1647  * the p_refcount P_REF_DEAD bit.
1648  */
1649 static void
proc_memorystatus_remove(proc_t p)1650 proc_memorystatus_remove(proc_t p)
1651 {
1652 	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1653 	while (memorystatus_remove(p) == EAGAIN) {
1654 		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1655 		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1656 	}
1657 }
1658 #endif
1659 
1660 #if DEVELOPMENT
1661 boolean_t crash_behavior_test_mode = FALSE;
1662 boolean_t crash_behavior_test_would_panic = FALSE;
1663 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1664 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1665 #endif /* DEVELOPMENT */
1666 
1667 static bool
_proc_is_crashing_signal(int sig)1668 _proc_is_crashing_signal(int sig)
1669 {
1670 	bool result = false;
1671 	switch (sig) {
1672 	case SIGILL:
1673 	case SIGABRT:
1674 	case SIGFPE:
1675 	case SIGBUS:
1676 	case SIGSEGV:
1677 	case SIGSYS:
1678 	/*
1679 	 * If SIGTRAP is the terminating signal, then we can safely assume the
1680 	 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1681 	 * a process calls __builtin_trap(), which will abort.)
1682 	 */
1683 	case SIGTRAP:
1684 		result = true;
1685 	}
1686 
1687 	return result;
1688 }
1689 
1690 static bool
_proc_is_fatal_reason(os_reason_t reason)1691 _proc_is_fatal_reason(os_reason_t reason)
1692 {
1693 	if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1694 		/* Abort is always fatal even if there is no crash report generated */
1695 		return true;
1696 	}
1697 	if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1698 		/*
1699 		 * No crash report means this reason shouldn't be considered fatal
1700 		 * unless we are in test mode
1701 		 */
1702 #if DEVELOPMENT
1703 		if (crash_behavior_test_mode) {
1704 			return true;
1705 		}
1706 #endif /* DEVELOPMENT */
1707 		return false;
1708 	}
1709 	// By default all OS_REASON are fatal
1710 	return true;
1711 }
1712 
1713 static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1714 
1715 static bool
proc_should_trigger_panic(proc_t p,int rv)1716 proc_should_trigger_panic(proc_t p, int rv)
1717 {
1718 	if (p == initproc) {
1719 		/* Always panic for launchd */
1720 		return true;
1721 	}
1722 
1723 	if (panic_on_crash_disabled) {
1724 		printf("panic-on-crash disabled via boot-arg\n");
1725 		return false;
1726 	}
1727 
1728 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1729 		return true;
1730 	}
1731 
1732 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1733 		return true;
1734 	}
1735 
1736 	if (p->p_posix_spawn_failed) {
1737 		/* posix_spawn failures normally don't qualify for panics */
1738 		return false;
1739 	}
1740 
1741 	bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1742 	if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1743 		return false;
1744 	}
1745 
1746 	if (WIFEXITED(rv)) {
1747 		int code = WEXITSTATUS(rv);
1748 
1749 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1750 			if (code == 0) {
1751 				/* No panic if we exit 0 */
1752 				return false;
1753 			} else {
1754 				/* Panic on non-zero exit */
1755 				return true;
1756 			}
1757 		} else {
1758 			/* No panic on normal exit if the process doesn't have the non-zero flag set */
1759 			return false;
1760 		}
1761 	} else if (WIFSIGNALED(rv)) {
1762 		int signal = WTERMSIG(rv);
1763 		/* This is a crash (non-normal exit) */
1764 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1765 			os_reason_t reason = p->p_exit_reason;
1766 			if (reason != OS_REASON_NULL) {
1767 				if (!_proc_is_fatal_reason(reason)) {
1768 					// Skip non-fatal terminate_with_reason
1769 					return false;
1770 				}
1771 				if (reason->osr_namespace == OS_REASON_SIGNAL) {
1772 					/*
1773 					 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
1774 					 * in osr_code, so we should check that signal here
1775 					 */
1776 					return _proc_is_crashing_signal((int)reason->osr_code);
1777 				} else {
1778 					/*
1779 					 * This branch covers the case of terminate_with_reason which
1780 					 * delivers a SIGTERM which is still considered a crash even
1781 					 * thought the signal is not considered a crashing signal
1782 					 */
1783 					return true;
1784 				}
1785 			}
1786 			return _proc_is_crashing_signal(signal);
1787 		} else {
1788 			return false;
1789 		}
1790 	} else {
1791 		/*
1792 		 * This branch implies that we didn't exit normally nor did we receive
1793 		 * a signal. This should be unreachable.
1794 		 */
1795 		return true;
1796 	}
1797 }
1798 
1799 static void
proc_crash_coredump(proc_t p)1800 proc_crash_coredump(proc_t p)
1801 {
1802 	(void)p;
1803 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1804 	/*
1805 	 * For debugging purposes, generate a core file of initproc before
1806 	 * panicking. Leave at least 300 MB free on the root volume, and ignore
1807 	 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1808 	 * before the panic hits.
1809 	 */
1810 
1811 	int             err;
1812 	uint64_t        coredump_start = mach_absolute_time();
1813 	uint64_t        coredump_end;
1814 	clock_sec_t     tv_sec;
1815 	clock_usec_t    tv_usec;
1816 	uint32_t        tv_msec;
1817 
1818 
1819 	err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1820 
1821 	coredump_end = mach_absolute_time();
1822 
1823 	absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1824 
1825 	tv_msec = tv_usec / 1000;
1826 
1827 	if (err != 0) {
1828 		printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1829 		    proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1830 	} else {
1831 		printf("Generated core file for pid: %d in %d.%03d seconds\n",
1832 		    proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1833 	}
1834 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1835 }
1836 
1837 static void
proc_handle_critical_exit(proc_t p,int rv)1838 proc_handle_critical_exit(proc_t p, int rv)
1839 {
1840 	if (!proc_should_trigger_panic(p, rv)) {
1841 		// No panic, bail out
1842 		return;
1843 	}
1844 
1845 #if DEVELOPMENT
1846 	if (crash_behavior_test_mode) {
1847 		crash_behavior_test_would_panic = TRUE;
1848 		// Force test mode off after hitting a panic
1849 		crash_behavior_test_mode = FALSE;
1850 		return;
1851 	}
1852 #endif /* DEVELOPMENT */
1853 
1854 	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1855 
1856 	if (p->p_exit_reason == OS_REASON_NULL) {
1857 		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1858 		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1859 	} else {
1860 		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1861 		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1862 		    exit_reason_desc : "none");
1863 	}
1864 
1865 	const char *prefix_str;
1866 	char prefix_str_buf[128];
1867 
1868 	if (p == initproc) {
1869 		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1870 			prefix_str = "LTE preinit process exited";
1871 		} else if (initproc_spawned) {
1872 			prefix_str = "initproc exited";
1873 		} else {
1874 			prefix_str = "initproc failed to start";
1875 		}
1876 	} else {
1877 		/* For processes that aren't launchd, just use the process name and pid */
1878 		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1879 		prefix_str = prefix_str_buf;
1880 	}
1881 
1882 	proc_crash_coredump(p);
1883 
1884 	sync(p, (void *)NULL, (int *)NULL);
1885 	const uint64_t panic_options_mask = DEBUGGER_OPTION_INITPROC_PANIC | DEBUGGER_OPTION_USERSPACE_INITIATED_PANIC;
1886 
1887 	if (p->p_exit_reason == OS_REASON_NULL) {
1888 		panic_with_options(0, NULL, panic_options_mask, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1889 		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1890 	} else {
1891 		panic_with_options(0, NULL, panic_options_mask, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1892 		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1893 		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1894 		    exit_reason_desc ? exit_reason_desc : "none");
1895 	}
1896 }
1897 
1898 void
proc_prepareexit(proc_t p,int rv,boolean_t perf_notify)1899 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1900 {
1901 	mach_exception_data_type_t code = 0, subcode = 0;
1902 	exception_type_t etype;
1903 
1904 	struct uthread *ut;
1905 	thread_t self = current_thread();
1906 	ut = get_bsdthread_info(self);
1907 	struct rusage_superset *rup;
1908 	int kr = 0;
1909 	int create_corpse = FALSE;
1910 	bool corpse_source = false;
1911 	task_t task = proc_task(p);
1912 
1913 
1914 	if (p->p_crash_behavior != 0 || p == initproc) {
1915 		proc_handle_critical_exit(p, rv);
1916 	}
1917 
1918 	if (task) {
1919 		corpse_source = vm_map_is_corpse_source(get_task_map(task));
1920 	}
1921 
1922 	/*
1923 	 * Generate a corefile/crashlog if:
1924 	 *      The process doesn't have an exit reason that indicates no crash report should be created
1925 	 *      AND any of the following are true:
1926 	 *	- The process was terminated due to a fatal signal that generates a core
1927 	 *	- The process was killed due to a code signing violation
1928 	 *	- The process has an exit reason that indicates we should generate a crash report
1929 	 *
1930 	 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1931 	 * (which normally triggers a core) but may indicate that no crash report should be created.
1932 	 */
1933 	if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1934 	    (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1935 	    (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1936 	    OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1937 		/*
1938 		 * Workaround for processes checking up on PT_DENY_ATTACH:
1939 		 * should be backed out post-Leopard (details in 5431025).
1940 		 */
1941 		if ((SIGSEGV == WTERMSIG(rv)) &&
1942 		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
1943 			goto skipcheck;
1944 		}
1945 
1946 		/*
1947 		 * Crash Reporter looks for the signal value, original exception
1948 		 * type, and low 20 bits of the original code in code[0]
1949 		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1950 		 */
1951 		code = ((WTERMSIG(rv) & 0xff) << 24) |
1952 		    ((ut->uu_exception & 0x0f) << 20) |
1953 		    ((int)ut->uu_code & 0xfffff);
1954 		subcode = ut->uu_subcode;
1955 		etype = ut->uu_exception;
1956 
1957 		/* Defualt to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1958 		if (etype != EXC_RESOURCE || etype != EXC_GUARD) {
1959 			etype = EXC_CRASH;
1960 		}
1961 
1962 #if (DEVELOPMENT || DEBUG)
1963 		if (p->p_pid <= exception_log_max_pid) {
1964 			const char *proc_name = proc_best_name(p);
1965 			if (PROC_HAS_EXITREASON(p)) {
1966 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1967 				    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1968 				    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1969 				    exit_reason_get_string_desc(p->p_exit_reason));
1970 			} else {
1971 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1972 				    "pid: %d -- process name: %s -- exit status %d",
1973 				    proc_getpid(p), proc_name, WEXITSTATUS(rv));
1974 			}
1975 		}
1976 #endif
1977 		const bool fatal = false;
1978 		kr = task_exception_notify(EXC_CRASH, code, subcode, fatal);
1979 		/* Nobody handled EXC_CRASH?? remember to make corpse */
1980 		if ((kr != 0 || corpse_source) && p == current_proc()) {
1981 			/*
1982 			 * Do not create corpse when exit is called from jetsam thread.
1983 			 * Corpse creation code requires that proc_prepareexit is
1984 			 * called by the exiting proc and not the kernel_proc.
1985 			 */
1986 			create_corpse = TRUE;
1987 		}
1988 
1989 		/*
1990 		 * Revalidate the code signing of the text pages around current PC.
1991 		 * This is an attempt to detect and repair faults due to memory
1992 		 * corruption of text pages.
1993 		 *
1994 		 * The goal here is to fixup infrequent memory corruptions due to
1995 		 * things like aging RAM bit flips. So the approach is to only expect
1996 		 * to have to fixup one thing per crash. This also limits the amount
1997 		 * of extra work we cause in case this is a development kernel with an
1998 		 * active memory stomp happening.
1999 		 */
2000 		uintptr_t bt[2];
2001 		struct backtrace_user_info btinfo = BTUINFO_INIT;
2002 		unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
2003 		int bt_err = btinfo.btui_error;
2004 		if (bt_err == 0 && frame_count >= 1) {
2005 			/*
2006 			 * First check at the page containing the current PC.
2007 			 * This passes if the page code signs -or- if we can't figure out
2008 			 * what is at that address. The latter action is so we continue checking
2009 			 * previous pages which may be corrupt and caused a wild branch.
2010 			 */
2011 			kr = revalidate_text_page(task, bt[0]);
2012 
2013 			/* No corruption found, check the previous sequential page */
2014 			if (kr == KERN_SUCCESS) {
2015 				kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
2016 			}
2017 
2018 			/* Still no corruption found, check the current function's caller */
2019 			if (kr == KERN_SUCCESS) {
2020 				if (frame_count > 1 &&
2021 				    atop(bt[0]) != atop(bt[1]) &&           /* don't recheck PC page */
2022 				    atop(bt[0]) - 1 != atop(bt[1])) {       /* don't recheck page before */
2023 					kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
2024 				}
2025 			}
2026 
2027 			/*
2028 			 * Log that we found a corruption.
2029 			 */
2030 			if (kr != KERN_SUCCESS) {
2031 				os_log(OS_LOG_DEFAULT,
2032 				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
2033 			}
2034 		}
2035 	}
2036 
2037 skipcheck:
2038 	if (task_is_driver(task) && PROC_HAS_EXITREASON(p)) {
2039 		IOUserServerRecordExitReason(task, p->p_exit_reason);
2040 	}
2041 
2042 	/* Notify the perf server? */
2043 	if (perf_notify) {
2044 		(void)sys_perf_notify(self, proc_getpid(p));
2045 	}
2046 
2047 
2048 	/* stash the usage into corpse data if making_corpse == true */
2049 	if (create_corpse == TRUE) {
2050 		kr = task_mark_corpse(task);
2051 		if (kr != KERN_SUCCESS) {
2052 			if (kr == KERN_NO_SPACE) {
2053 				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
2054 			} else if (kr == KERN_NOT_SUPPORTED) {
2055 				printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n", proc_getpid(p));
2056 			} else if (kr == KERN_TERMINATED) {
2057 				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
2058 			} else {
2059 				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
2060 			}
2061 			create_corpse = FALSE;
2062 		}
2063 	}
2064 
2065 	if (corpse_source && !create_corpse) {
2066 		/* vm_map was marked for corpse, but we decided to not create one, unmark the vmmap */
2067 		vm_map_unset_corpse_source(get_task_map(task));
2068 	}
2069 
2070 	if (!proc_is_shadow(p)) {
2071 		/*
2072 		 * Before this process becomes a zombie, stash resource usage
2073 		 * stats in the proc for external observers to query
2074 		 * via proc_pid_rusage().
2075 		 *
2076 		 * If the zombie allocation fails, just punt the stats.
2077 		 */
2078 		rup = zalloc(zombie_zone);
2079 		gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2080 		rup->ri.ri_phys_footprint = 0;
2081 		rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2082 		/*
2083 		 * Make the rusage_info visible to external observers
2084 		 * only after it has been completely filled in.
2085 		 */
2086 		p->p_ru = rup;
2087 	}
2088 
2089 	if (create_corpse) {
2090 		int est_knotes = 0, num_knotes = 0;
2091 		uint64_t *buffer = NULL;
2092 		uint32_t buf_size = 0;
2093 
2094 		/* Get all the udata pointers from kqueue */
2095 		est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2096 		if (est_knotes > 0) {
2097 			buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2098 			buffer = kalloc_data(buf_size, Z_WAITOK);
2099 			if (buffer) {
2100 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2101 				if (num_knotes > est_knotes + 32) {
2102 					num_knotes = est_knotes + 32;
2103 				}
2104 			}
2105 		}
2106 
2107 		/* Update the code, subcode based on exit reason */
2108 		proc_update_corpse_exception_codes(p, &code, &subcode);
2109 		populate_corpse_crashinfo(p, task, rup,
2110 		    code, subcode, buffer, num_knotes, NULL, etype);
2111 		kfree_data(buffer, buf_size);
2112 	}
2113 	/*
2114 	 * Remove proc from allproc queue and from pidhash chain.
2115 	 * Need to do this before we do anything that can block.
2116 	 * Not doing causes things like mount() find this on allproc
2117 	 * in partially cleaned state.
2118 	 */
2119 
2120 	proc_list_lock();
2121 
2122 #if CONFIG_MEMORYSTATUS
2123 	proc_memorystatus_remove(p);
2124 #endif
2125 
2126 	LIST_REMOVE(p, p_list);
2127 	LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2128 	/* will not be visible via proc_find */
2129 	os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2130 
2131 	proc_list_unlock();
2132 
2133 	/*
2134 	 * If parent is waiting for us to exit or exec,
2135 	 * P_LPPWAIT is set; we will wakeup the parent below.
2136 	 */
2137 	proc_lock(p);
2138 	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2139 	p->p_sigignore = ~(sigcantmask);
2140 
2141 	/*
2142 	 * If a thread is already waiting for us in proc_exit,
2143 	 * P_LTERM is set, wakeup the thread.
2144 	 */
2145 	if (p->p_lflag & P_LTERM) {
2146 		wakeup(&p->exit_thread);
2147 	} else {
2148 		p->p_lflag |= P_LTERM;
2149 	}
2150 
2151 	/* If current proc is exiting, ignore signals on the exit thread */
2152 	if (p == current_proc()) {
2153 		ut->uu_siglist = 0;
2154 	}
2155 	proc_unlock(p);
2156 }
2157 
2158 void
proc_exit(proc_t p)2159 proc_exit(proc_t p)
2160 {
2161 	proc_t q;
2162 	proc_t pp;
2163 	struct task *task = proc_task(p);
2164 	vnode_t tvp = NULLVP;
2165 	struct pgrp * pg;
2166 	struct session *sessp;
2167 	struct uthread * uth;
2168 	pid_t pid;
2169 	int exitval;
2170 	int knote_hint;
2171 
2172 	uth = current_uthread();
2173 
2174 	proc_lock(p);
2175 	proc_transstart(p, 1, 0);
2176 	if (!(p->p_lflag & P_LEXIT)) {
2177 		/*
2178 		 * This can happen if a thread_terminate() occurs
2179 		 * in a single-threaded process.
2180 		 */
2181 		p->p_lflag |= P_LEXIT;
2182 		proc_transend(p, 1);
2183 		proc_unlock(p);
2184 		proc_prepareexit(p, 0, TRUE);
2185 		(void) task_terminate_internal(task);
2186 		proc_lock(p);
2187 	} else if (!(p->p_lflag & P_LTERM)) {
2188 		proc_transend(p, 1);
2189 		/* Jetsam is in middle of calling proc_prepareexit, wait for it */
2190 		p->p_lflag |= P_LTERM;
2191 		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2192 	} else {
2193 		proc_transend(p, 1);
2194 	}
2195 
2196 	p->p_lflag |= P_LPEXIT;
2197 
2198 	/*
2199 	 * Other kernel threads may be in the middle of signalling this process.
2200 	 * Wait for those threads to wrap it up before making the process
2201 	 * disappear on them.
2202 	 */
2203 	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2204 		p->p_sigwaitcnt++;
2205 		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2206 			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2207 		}
2208 		p->p_sigwaitcnt--;
2209 	}
2210 
2211 	proc_unlock(p);
2212 	pid = proc_getpid(p);
2213 	exitval = p->p_xstat;
2214 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2215 	    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2216 	    pid, exitval, 0, 0, 0);
2217 
2218 #if DEVELOPMENT || DEBUG
2219 	proc_exit_lpexit_check(pid, PELS_POS_START);
2220 #endif
2221 
2222 #if CONFIG_DTRACE
2223 	dtrace_proc_exit(p);
2224 #endif
2225 
2226 	proc_refdrain(p);
2227 	/* We now have unique ref to the proc */
2228 
2229 	/* if any pending cpu limits action, clear it */
2230 	task_clear_cpuusage(proc_task(p), TRUE);
2231 
2232 	workq_mark_exiting(p);
2233 
2234 	/*
2235 	 * need to cancel async IO requests that can be cancelled and wait for those
2236 	 * already active.  MAY BLOCK!
2237 	 */
2238 	_aio_exit( p );
2239 
2240 	/*
2241 	 * Close open files and release open-file table.
2242 	 * This may block!
2243 	 */
2244 	fdt_invalidate(p);
2245 
2246 	/*
2247 	 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2248 	 * workqueue.
2249 	 */
2250 	workq_exit(p);
2251 
2252 	if (uth->uu_lowpri_window) {
2253 		/*
2254 		 * task is marked as a low priority I/O type
2255 		 * and the I/O we issued while in flushing files on close
2256 		 * collided with normal I/O operations...
2257 		 * no need to throttle this thread since its going away
2258 		 * but we do need to update our bookeeping w/r to throttled threads
2259 		 */
2260 		throttle_lowpri_io(0);
2261 	}
2262 
2263 	if (p->p_lflag & P_LNSPACE_RESOLVER) {
2264 		/*
2265 		 * The namespace resolver is exiting; there may be
2266 		 * outstanding materialization requests to clean up.
2267 		 */
2268 		nspace_resolver_exited(p);
2269 	}
2270 
2271 #if SYSV_SHM
2272 	/* Close ref SYSV Shared memory*/
2273 	if (p->vm_shm) {
2274 		shmexit(p);
2275 	}
2276 #endif
2277 #if SYSV_SEM
2278 	/* Release SYSV semaphores */
2279 	semexit(p);
2280 #endif
2281 
2282 #if PSYNCH
2283 	pth_proc_hashdelete(p);
2284 #endif /* PSYNCH */
2285 
2286 	pg = proc_pgrp(p, &sessp);
2287 	if (SESS_LEADER(p, sessp)) {
2288 		if (sessp->s_ttyvp != NULLVP) {
2289 			struct vnode *ttyvp;
2290 			int ttyvid;
2291 			int cttyflag = 0;
2292 			struct vfs_context context;
2293 			struct tty *tp;
2294 			struct pgrp *tpgrp = PGRP_NULL;
2295 
2296 			/*
2297 			 * Controlling process.
2298 			 * Signal foreground pgrp,
2299 			 * drain controlling terminal
2300 			 * and revoke access to controlling terminal.
2301 			 */
2302 
2303 			proc_list_lock(); /* prevent any t_pgrp from changing */
2304 			session_lock(sessp);
2305 			if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2306 				tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2307 			}
2308 			proc_list_unlock();
2309 
2310 			if (tpgrp != PGRP_NULL) {
2311 				session_unlock(sessp);
2312 				pgsignal(tpgrp, SIGHUP, 1);
2313 				pgrp_rele(tpgrp);
2314 				session_lock(sessp);
2315 			}
2316 
2317 			cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2318 			    S_CTTYREF, relaxed) & S_CTTYREF);
2319 			ttyvp = sessp->s_ttyvp;
2320 			ttyvid = sessp->s_ttyvid;
2321 			tp = session_clear_tty_locked(sessp);
2322 			if (ttyvp) {
2323 				vnode_hold(ttyvp);
2324 			}
2325 			session_unlock(sessp);
2326 
2327 			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2328 				if (tp != TTY_NULL) {
2329 					tty_lock(tp);
2330 					(void) ttywait(tp);
2331 					tty_unlock(tp);
2332 				}
2333 
2334 				context.vc_thread = NULL;
2335 				context.vc_ucred = kauth_cred_proc_ref(p);
2336 				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2337 				if (cttyflag) {
2338 					/*
2339 					 * Release the extra usecount taken in cttyopen.
2340 					 * usecount should be released after VNOP_REVOKE is called.
2341 					 * This usecount was taken to ensure that
2342 					 * the VNOP_REVOKE results in a close to
2343 					 * the tty since cttyclose is a no-op.
2344 					 */
2345 					vnode_rele(ttyvp);
2346 				}
2347 				vnode_put(ttyvp);
2348 				kauth_cred_unref(&context.vc_ucred);
2349 				vnode_drop(ttyvp);
2350 				ttyvp = NULLVP;
2351 			}
2352 			if (ttyvp) {
2353 				vnode_drop(ttyvp);
2354 			}
2355 			if (tp) {
2356 				ttyfree(tp);
2357 			}
2358 		}
2359 		session_lock(sessp);
2360 		sessp->s_leader = NULL;
2361 		session_unlock(sessp);
2362 	}
2363 
2364 	if (!proc_is_shadow(p)) {
2365 		fixjobc(p, pg, 0);
2366 	}
2367 	pgrp_rele(pg);
2368 
2369 	/*
2370 	 * Change RLIMIT_FSIZE for accounting/debugging.
2371 	 */
2372 	proc_limitsetcur_fsize(p, RLIM_INFINITY);
2373 
2374 	(void)acct_process(p);
2375 
2376 	proc_list_lock();
2377 
2378 	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2379 		p->p_listflag &= ~P_LIST_EXITCOUNT;
2380 		proc_shutdown_exitcount--;
2381 		if (proc_shutdown_exitcount == 0) {
2382 			wakeup(&proc_shutdown_exitcount);
2383 		}
2384 	}
2385 
2386 	/* wait till parentrefs are dropped and grant no more */
2387 	proc_childdrainstart(p);
2388 	while ((q = p->p_children.lh_first) != NULL) {
2389 		if (q->p_stat == SZOMB) {
2390 			if (p != q->p_pptr) {
2391 				panic("parent child linkage broken");
2392 			}
2393 			/* check for sysctl zomb lookup */
2394 			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2395 				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2396 			}
2397 			q->p_listflag |= P_LIST_WAITING;
2398 			/*
2399 			 * This is a named reference and it is not granted
2400 			 * if the reap is already in progress. So we get
2401 			 * the reference here exclusively and their can be
2402 			 * no waiters. So there is no need for a wakeup
2403 			 * after we are done.  Also the reap frees the structure
2404 			 * and the proc struct cannot be used for wakeups as well.
2405 			 * It is safe to use q here as this is system reap
2406 			 */
2407 			reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2408 			    REAP_REPARENTED_TO_INIT : 0;
2409 			reap_child_locked(p, q,
2410 			    REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2411 		} else {
2412 			/*
2413 			 * Traced processes are killed
2414 			 * since their existence means someone is messing up.
2415 			 */
2416 			if (q->p_lflag & P_LTRACED) {
2417 				struct proc *opp;
2418 
2419 				/*
2420 				 * Take a reference on the child process to
2421 				 * ensure it doesn't exit and disappear between
2422 				 * the time we drop the list_lock and attempt
2423 				 * to acquire its proc_lock.
2424 				 */
2425 				if (proc_ref(q, true) != q) {
2426 					continue;
2427 				}
2428 
2429 				proc_list_unlock();
2430 
2431 				opp = proc_find(q->p_oppid);
2432 				if (opp != PROC_NULL) {
2433 					proc_list_lock();
2434 					q->p_oppid = 0;
2435 					proc_list_unlock();
2436 					proc_reparentlocked(q, opp, 0, 0);
2437 					proc_rele(opp);
2438 				} else {
2439 					/* original parent exited while traced */
2440 					proc_list_lock();
2441 					q->p_listflag |= P_LIST_DEADPARENT;
2442 					q->p_oppid = 0;
2443 					proc_list_unlock();
2444 					proc_reparentlocked(q, initproc, 0, 0);
2445 				}
2446 
2447 				proc_lock(q);
2448 				q->p_lflag &= ~P_LTRACED;
2449 
2450 				if (q->sigwait_thread) {
2451 					thread_t thread = q->sigwait_thread;
2452 
2453 					proc_unlock(q);
2454 					/*
2455 					 * The sigwait_thread could be stopped at a
2456 					 * breakpoint. Wake it up to kill.
2457 					 * Need to do this as it could be a thread which is not
2458 					 * the first thread in the task. So any attempts to kill
2459 					 * the process would result into a deadlock on q->sigwait.
2460 					 */
2461 					thread_resume(thread);
2462 					clear_wait(thread, THREAD_INTERRUPTED);
2463 					threadsignal(thread, SIGKILL, 0, TRUE);
2464 				} else {
2465 					proc_unlock(q);
2466 				}
2467 
2468 				psignal(q, SIGKILL);
2469 				proc_list_lock();
2470 				proc_rele(q);
2471 			} else {
2472 				q->p_listflag |= P_LIST_DEADPARENT;
2473 				proc_reparentlocked(q, initproc, 0, 1);
2474 			}
2475 		}
2476 	}
2477 
2478 	proc_childdrainend(p);
2479 	proc_list_unlock();
2480 
2481 #if CONFIG_MACF
2482 	if (!proc_is_shadow(p)) {
2483 		/*
2484 		 * Notify MAC policies that proc is dead.
2485 		 * This should be replaced with proper label management
2486 		 * (rdar://problem/32126399).
2487 		 */
2488 		mac_proc_notify_exit(p);
2489 	}
2490 #endif
2491 
2492 	/*
2493 	 * Release reference to text vnode
2494 	 */
2495 	tvp = p->p_textvp;
2496 	p->p_textvp = NULL;
2497 	if (tvp != NULLVP) {
2498 		vnode_rele(tvp);
2499 	}
2500 
2501 	/*
2502 	 * Save exit status and final rusage info, adding in child rusage
2503 	 * info and self times.  If we were unable to allocate a zombie
2504 	 * structure, this information is lost.
2505 	 */
2506 	if (p->p_ru != NULL) {
2507 		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2508 		p->p_ru->ru = p->p_stats->p_ru;
2509 
2510 		ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2511 	}
2512 
2513 	/*
2514 	 * Free up profiling buffers.
2515 	 */
2516 	{
2517 		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2518 
2519 		p1 = p0->pr_next;
2520 		p0->pr_next = NULL;
2521 		p0->pr_scale = 0;
2522 
2523 		for (; p1 != NULL; p1 = pn) {
2524 			pn = p1->pr_next;
2525 			kfree_type(struct uprof, p1);
2526 		}
2527 	}
2528 
2529 	proc_free_realitimer(p);
2530 
2531 	/*
2532 	 * Other substructures are freed from wait().
2533 	 */
2534 	zfree(proc_stats_zone, p->p_stats);
2535 	p->p_stats = NULL;
2536 
2537 	if (p->p_subsystem_root_path) {
2538 		zfree(ZV_NAMEI, p->p_subsystem_root_path);
2539 		p->p_subsystem_root_path = NULL;
2540 	}
2541 
2542 	proc_limitdrop(p);
2543 
2544 #if DEVELOPMENT || DEBUG
2545 	proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2546 #endif
2547 
2548 	/*
2549 	 * Finish up by terminating the task
2550 	 * and halt this thread (only if a
2551 	 * member of the task exiting).
2552 	 */
2553 	proc_set_task(p, TASK_NULL);
2554 	set_bsdtask_info(task, NULL);
2555 	clear_thread_ro_proc(get_machthread(uth));
2556 
2557 #if DEVELOPMENT || DEBUG
2558 	proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2559 #endif
2560 
2561 	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2562 	proc_knote(p, knote_hint);
2563 
2564 	/* mark the thread as the one that is doing proc_exit
2565 	 * no need to hold proc lock in uthread_free
2566 	 */
2567 	uth->uu_flag |= UT_PROCEXIT;
2568 	/*
2569 	 * Notify parent that we're gone.
2570 	 */
2571 	pp = proc_parent(p);
2572 	if (proc_is_shadow(p)) {
2573 		/* kernel can reap this one, no need to move it to launchd */
2574 		proc_list_lock();
2575 		p->p_listflag |= P_LIST_DEADPARENT;
2576 		proc_list_unlock();
2577 	} else if (pp->p_flag & P_NOCLDWAIT) {
2578 		if (p->p_ru != NULL) {
2579 			proc_lock(pp);
2580 #if 3839178
2581 			/*
2582 			 * If the parent is ignoring SIGCHLD, then POSIX requires
2583 			 * us to not add the resource usage to the parent process -
2584 			 * we are only going to hand it off to init to get reaped.
2585 			 * We should contest the standard in this case on the basis
2586 			 * of RLIMIT_CPU.
2587 			 */
2588 #else   /* !3839178 */
2589 			/*
2590 			 * Add child resource usage to parent before giving
2591 			 * zombie to init.  If we were unable to allocate a
2592 			 * zombie structure, this information is lost.
2593 			 */
2594 			ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2595 #endif  /* !3839178 */
2596 			update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2597 			proc_unlock(pp);
2598 		}
2599 
2600 		/* kernel can reap this one, no need to move it to launchd */
2601 		proc_list_lock();
2602 		p->p_listflag |= P_LIST_DEADPARENT;
2603 		proc_list_unlock();
2604 	}
2605 	if (!proc_is_shadow(p) &&
2606 	    ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2607 		if (pp != initproc) {
2608 			proc_lock(pp);
2609 			pp->si_pid = proc_getpid(p);
2610 			pp->p_xhighbits = p->p_xhighbits;
2611 			p->p_xhighbits = 0;
2612 			pp->si_status = p->p_xstat;
2613 			pp->si_code = CLD_EXITED;
2614 			/*
2615 			 * p_ucred usage is safe as it is an exiting process
2616 			 * and reference is dropped in reap
2617 			 */
2618 			pp->si_uid = kauth_cred_getruid(proc_ucred_unsafe(p));
2619 			proc_unlock(pp);
2620 		}
2621 		/* mark as a zombie */
2622 		/* No need to take proc lock as all refs are drained and
2623 		 * no one except parent (reaping ) can look at this.
2624 		 * The write is to an int and is coherent. Also parent is
2625 		 *  keyed off of list lock for reaping
2626 		 */
2627 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2628 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2629 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2630 		    pid, exitval, 0, 0, 0);
2631 		p->p_stat = SZOMB;
2632 		/*
2633 		 * The current process can be reaped so, no one
2634 		 * can depend on this
2635 		 */
2636 
2637 		psignal(pp, SIGCHLD);
2638 
2639 		/* and now wakeup the parent */
2640 		proc_list_lock();
2641 		wakeup((caddr_t)pp);
2642 		proc_list_unlock();
2643 	} else {
2644 		/* should be fine as parent proc would be initproc */
2645 		/* mark as a zombie */
2646 		/* No need to take proc lock as all refs are drained and
2647 		 * no one except parent (reaping ) can look at this.
2648 		 * The write is to an int and is coherent. Also parent is
2649 		 *  keyed off of list lock for reaping
2650 		 */
2651 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2652 		proc_list_lock();
2653 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2654 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2655 		    pid, exitval, 0, 0, 0);
2656 		/* check for sysctl zomb lookup */
2657 		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2658 			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2659 		}
2660 		/* safe to use p as this is a system reap */
2661 		p->p_stat = SZOMB;
2662 		p->p_listflag |= P_LIST_WAITING;
2663 
2664 		/*
2665 		 * This is a named reference and it is not granted
2666 		 * if the reap is already in progress. So we get
2667 		 * the reference here exclusively and their can be
2668 		 * no waiters. So there is no need for a wakeup
2669 		 * after we are done. AlsO  the reap frees the structure
2670 		 * and the proc struct cannot be used for wakeups as well.
2671 		 * It is safe to use p here as this is system reap
2672 		 */
2673 		reap_child_locked(pp, p,
2674 		    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2675 	}
2676 	if (uth->uu_lowpri_window) {
2677 		/*
2678 		 * task is marked as a low priority I/O type and we've
2679 		 * somehow picked up another throttle during exit processing...
2680 		 * no need to throttle this thread since its going away
2681 		 * but we do need to update our bookeeping w/r to throttled threads
2682 		 */
2683 		throttle_lowpri_io(0);
2684 	}
2685 
2686 	proc_rele(pp);
2687 #if DEVELOPMENT || DEBUG
2688 	proc_exit_lpexit_check(pid, PELS_POS_END);
2689 #endif
2690 }
2691 
2692 
2693 /*
2694  * reap_child_locked
2695  *
2696  * Finalize a child exit once its status has been saved.
2697  *
2698  * If ptrace has attached, detach it and return it to its real parent.  Free any
2699  * remaining resources.
2700  *
2701  * Parameters:
2702  * - proc_t parent      Parent of process being reaped
2703  * - proc_t child       Process to reap
2704  * - reap_flags_t flags Control locking and re-parenting behavior
2705  */
2706 static void
reap_child_locked(proc_t parent,proc_t child,reap_flags_t flags)2707 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2708 {
2709 	struct pgrp *pg;
2710 	boolean_t shadow_proc = proc_is_shadow(child);
2711 
2712 	if (flags & REAP_LOCKED) {
2713 		proc_list_unlock();
2714 	}
2715 
2716 	/*
2717 	 * Under ptrace, the child should now be re-parented back to its original
2718 	 * parent, unless that parent was initproc or it didn't come to initproc
2719 	 * through re-parenting.
2720 	 */
2721 	bool child_ptraced = child->p_oppid != 0;
2722 	if (!shadow_proc && child_ptraced) {
2723 		int knote_hint;
2724 		pid_t orig_ppid = 0;
2725 		proc_t orig_parent = PROC_NULL;
2726 
2727 		proc_lock(child);
2728 		orig_ppid = child->p_oppid;
2729 		child->p_oppid = 0;
2730 		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2731 		proc_unlock(child);
2732 
2733 		orig_parent = proc_find(orig_ppid);
2734 		if (orig_parent) {
2735 			/*
2736 			 * Only re-parent the process if its original parent was not
2737 			 * initproc and it did not come to initproc from re-parenting.
2738 			 */
2739 			bool reparenting = orig_parent != initproc ||
2740 			    (flags & REAP_REPARENTED_TO_INIT) == 0;
2741 			if (reparenting) {
2742 				if (orig_parent != initproc) {
2743 					/*
2744 					 * Internal fields should be safe to access here because the
2745 					 * child is exited and not reaped or re-parented yet.
2746 					 */
2747 					proc_lock(orig_parent);
2748 					orig_parent->si_pid = proc_getpid(child);
2749 					orig_parent->si_status = child->p_xstat;
2750 					orig_parent->si_code = CLD_CONTINUED;
2751 					orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2752 					proc_unlock(orig_parent);
2753 				}
2754 				proc_reparentlocked(child, orig_parent, 1, 0);
2755 
2756 				/*
2757 				 * After re-parenting, re-send the child's NOTE_EXIT to the
2758 				 * original parent.
2759 				 */
2760 				proc_knote(child, knote_hint);
2761 				psignal(orig_parent, SIGCHLD);
2762 
2763 				proc_list_lock();
2764 				wakeup((caddr_t)orig_parent);
2765 				child->p_listflag &= ~P_LIST_WAITING;
2766 				wakeup(&child->p_stat);
2767 				proc_list_unlock();
2768 
2769 				proc_rele(orig_parent);
2770 				if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2771 					proc_list_lock();
2772 				}
2773 				return;
2774 			} else {
2775 				/*
2776 				 * Satisfy the knote lifecycle because ptraced processes don't
2777 				 * broadcast NOTE_EXIT during initial child termination.
2778 				 */
2779 				proc_knote(child, knote_hint);
2780 				proc_rele(orig_parent);
2781 			}
2782 		}
2783 	}
2784 
2785 #pragma clang diagnostic push
2786 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2787 	proc_knote(child, NOTE_REAP);
2788 #pragma clang diagnostic pop
2789 
2790 	proc_knote_drain(child);
2791 
2792 	child->p_xstat = 0;
2793 	if (!shadow_proc && child->p_ru) {
2794 		/*
2795 		 * Roll up the rusage statistics to the parent, unless the parent is
2796 		 * ignoring SIGCHLD.  POSIX requires the children's resources of such a
2797 		 * parent to not be included in the parent's usage (seems odd given
2798 		 * RLIMIT_CPU, though).
2799 		 */
2800 		proc_lock(parent);
2801 		bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2802 		if (rollup_child) {
2803 			ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2804 		}
2805 		update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2806 		proc_unlock(parent);
2807 		zfree(zombie_zone, child->p_ru);
2808 		child->p_ru = NULL;
2809 	} else if (!shadow_proc) {
2810 		printf("Warning : lost p_ru for %s\n", child->p_comm);
2811 	} else {
2812 		assert(child->p_ru == NULL);
2813 	}
2814 
2815 	AUDIT_SESSION_PROCEXIT(child);
2816 
2817 #if CONFIG_PERSONAS
2818 	persona_proc_drop(child);
2819 #endif /* CONFIG_PERSONAS */
2820 	/* proc_ucred_unsafe is safe, because child is not running */
2821 	(void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2822 
2823 	os_reason_free(child->p_exit_reason);
2824 
2825 	proc_list_lock();
2826 
2827 	pg = pgrp_leave_locked(child);
2828 	LIST_REMOVE(child, p_list);
2829 	parent->p_childrencnt--;
2830 	LIST_REMOVE(child, p_sibling);
2831 	bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2832 	    LIST_EMPTY(&parent->p_children);
2833 	if (no_more_children) {
2834 		wakeup((caddr_t)parent);
2835 	}
2836 	child->p_listflag &= ~P_LIST_WAITING;
2837 	wakeup(&child->p_stat);
2838 
2839 	/* Take it out of process hash */
2840 	if (!shadow_proc) {
2841 		phash_remove_locked(child);
2842 	}
2843 	proc_checkdeadrefs(child);
2844 	nprocs--;
2845 	if (flags & REAP_DEAD_PARENT) {
2846 		child->p_listflag |= P_LIST_DEADPARENT;
2847 	}
2848 
2849 	proc_list_unlock();
2850 
2851 	pgrp_rele(pg);
2852 	fdt_destroy(child);
2853 	lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2854 	lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2855 #if CONFIG_AUDIT
2856 	lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
2857 #endif /* CONFIG_AUDIT */
2858 #if CONFIG_DTRACE
2859 	lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2860 #endif
2861 	lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2862 	proc_wait_release(child);
2863 
2864 	if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2865 		proc_list_lock();
2866 	}
2867 }
2868 
2869 int
wait1continue(int result)2870 wait1continue(int result)
2871 {
2872 	proc_t p;
2873 	thread_t thread;
2874 	uthread_t uth;
2875 	struct _wait4_data *wait4_data;
2876 	struct wait4_nocancel_args *uap;
2877 	int *retval;
2878 
2879 	if (result) {
2880 		return result;
2881 	}
2882 
2883 	p = current_proc();
2884 	thread = current_thread();
2885 	uth = (struct uthread *)get_bsdthread_info(thread);
2886 
2887 	wait4_data = &uth->uu_save.uus_wait4_data;
2888 	uap = wait4_data->args;
2889 	retval = wait4_data->retval;
2890 	return wait4_nocancel(p, uap, retval);
2891 }
2892 
2893 int
wait4(proc_t q,struct wait4_args * uap,int32_t * retval)2894 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2895 {
2896 	__pthread_testcancel(1);
2897 	return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2898 }
2899 
2900 int
wait4_nocancel(proc_t q,struct wait4_nocancel_args * uap,int32_t * retval)2901 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2902 {
2903 	int nfound;
2904 	int sibling_count;
2905 	proc_t p;
2906 	int status, error;
2907 	uthread_t uth;
2908 	struct _wait4_data *wait4_data;
2909 
2910 	AUDIT_ARG(pid, uap->pid);
2911 
2912 	if (uap->pid == 0) {
2913 		uap->pid = -q->p_pgrpid;
2914 	}
2915 
2916 	if (uap->pid == INT_MIN) {
2917 		return EINVAL;
2918 	}
2919 
2920 loop:
2921 	proc_list_lock();
2922 loop1:
2923 	nfound = 0;
2924 	sibling_count = 0;
2925 
2926 	PCHILDREN_FOREACH(q, p) {
2927 		if (p->p_sibling.le_next != 0) {
2928 			sibling_count++;
2929 		}
2930 		if (uap->pid != WAIT_ANY &&
2931 		    proc_getpid(p) != uap->pid &&
2932 		    p->p_pgrpid != -(uap->pid)) {
2933 			continue;
2934 		}
2935 
2936 		if (proc_is_shadow(p)) {
2937 			continue;
2938 		}
2939 
2940 		nfound++;
2941 
2942 		/* XXX This is racy because we don't get the lock!!!! */
2943 
2944 		if (p->p_listflag & P_LIST_WAITING) {
2945 			/* we're not using a continuation here but we still need to stash
2946 			 * the args for stackshot. */
2947 			uth = current_uthread();
2948 			wait4_data = &uth->uu_save.uus_wait4_data;
2949 			wait4_data->args = uap;
2950 			thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2951 
2952 			(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2953 			goto loop1;
2954 		}
2955 		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */
2956 
2957 
2958 		if (p->p_stat == SZOMB) {
2959 			reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2960 			    REAP_REPARENTED_TO_INIT : 0;
2961 
2962 			proc_list_unlock();
2963 #if CONFIG_MACF
2964 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2965 				goto out;
2966 			}
2967 #endif
2968 			retval[0] = proc_getpid(p);
2969 			if (uap->status) {
2970 				/* Legacy apps expect only 8 bits of status */
2971 				status = 0xffff & p->p_xstat;   /* convert to int */
2972 				error = copyout((caddr_t)&status,
2973 				    uap->status,
2974 				    sizeof(status));
2975 				if (error) {
2976 					goto out;
2977 				}
2978 			}
2979 			if (uap->rusage) {
2980 				if (p->p_ru == NULL) {
2981 					error = ENOMEM;
2982 				} else {
2983 					if (IS_64BIT_PROCESS(q)) {
2984 						struct user64_rusage    my_rusage = {};
2985 						munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2986 						error = copyout((caddr_t)&my_rusage,
2987 						    uap->rusage,
2988 						    sizeof(my_rusage));
2989 					} else {
2990 						struct user32_rusage    my_rusage = {};
2991 						munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2992 						error = copyout((caddr_t)&my_rusage,
2993 						    uap->rusage,
2994 						    sizeof(my_rusage));
2995 					}
2996 				}
2997 				/* information unavailable? */
2998 				if (error) {
2999 					goto out;
3000 				}
3001 			}
3002 
3003 			/* Conformance change for 6577252.
3004 			 * When SIGCHLD is blocked and wait() returns because the status
3005 			 * of a child process is available and there are no other
3006 			 * children processes, then any pending SIGCHLD signal is cleared.
3007 			 */
3008 			if (sibling_count == 0) {
3009 				int mask = sigmask(SIGCHLD);
3010 				uth = current_uthread();
3011 
3012 				if ((uth->uu_sigmask & mask) != 0) {
3013 					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
3014 					 * This locking looks funny but it is protecting access to the
3015 					 * thread via p_uthlist.
3016 					 */
3017 					proc_lock(q);
3018 					uth->uu_siglist &= ~mask;       /* clear pending signal */
3019 					proc_unlock(q);
3020 				}
3021 			}
3022 
3023 			/* Clean up */
3024 			(void)reap_child_locked(q, p, reap_flags);
3025 
3026 			return 0;
3027 		}
3028 		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
3029 		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
3030 			proc_list_unlock();
3031 #if CONFIG_MACF
3032 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3033 				goto out;
3034 			}
3035 #endif
3036 			proc_lock(p);
3037 			p->p_lflag |= P_LWAITED;
3038 			proc_unlock(p);
3039 			retval[0] = proc_getpid(p);
3040 			if (uap->status) {
3041 				status = W_STOPCODE(p->p_xstat);
3042 				error = copyout((caddr_t)&status,
3043 				    uap->status,
3044 				    sizeof(status));
3045 			} else {
3046 				error = 0;
3047 			}
3048 			goto out;
3049 		}
3050 		/*
3051 		 * If we are waiting for continued processses, and this
3052 		 * process was continued
3053 		 */
3054 		if ((uap->options & WCONTINUED) &&
3055 		    (p->p_flag & P_CONTINUED)) {
3056 			proc_list_unlock();
3057 #if CONFIG_MACF
3058 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3059 				goto out;
3060 			}
3061 #endif
3062 
3063 			/* Prevent other process for waiting for this event */
3064 			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3065 			retval[0] = proc_getpid(p);
3066 			if (uap->status) {
3067 				status = W_STOPCODE(SIGCONT);
3068 				error = copyout((caddr_t)&status,
3069 				    uap->status,
3070 				    sizeof(status));
3071 			} else {
3072 				error = 0;
3073 			}
3074 			goto out;
3075 		}
3076 		p->p_listflag &= ~P_LIST_WAITING;
3077 		wakeup(&p->p_stat);
3078 	}
3079 	/* list lock is held when we get here any which way */
3080 	if (nfound == 0) {
3081 		proc_list_unlock();
3082 		return ECHILD;
3083 	}
3084 
3085 	if (uap->options & WNOHANG) {
3086 		retval[0] = 0;
3087 		proc_list_unlock();
3088 		return 0;
3089 	}
3090 
3091 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3092 	uth = current_uthread();
3093 	wait4_data = &uth->uu_save.uus_wait4_data;
3094 	wait4_data->args = uap;
3095 	wait4_data->retval = retval;
3096 
3097 	thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
3098 	if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3099 		return error;
3100 	}
3101 
3102 	goto loop;
3103 out:
3104 	proc_list_lock();
3105 	p->p_listflag &= ~P_LIST_WAITING;
3106 	wakeup(&p->p_stat);
3107 	proc_list_unlock();
3108 	return error;
3109 }
3110 
3111 #if DEBUG
3112 #define ASSERT_LCK_MTX_OWNED(lock)      \
3113 	                        lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3114 #else
3115 #define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */
3116 #endif
3117 
3118 int
waitidcontinue(int result)3119 waitidcontinue(int result)
3120 {
3121 	proc_t p;
3122 	thread_t thread;
3123 	uthread_t uth;
3124 	struct _waitid_data *waitid_data;
3125 	struct waitid_nocancel_args *uap;
3126 	int *retval;
3127 
3128 	if (result) {
3129 		return result;
3130 	}
3131 
3132 	p = current_proc();
3133 	thread = current_thread();
3134 	uth = (struct uthread *)get_bsdthread_info(thread);
3135 
3136 	waitid_data = &uth->uu_save.uus_waitid_data;
3137 	uap = waitid_data->args;
3138 	retval = waitid_data->retval;
3139 	return waitid_nocancel(p, uap, retval);
3140 }
3141 
3142 /*
3143  * Description:	Suspend the calling thread until one child of the process
3144  *		containing the calling thread changes state.
3145  *
3146  * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
3147  *		uap->id			pid_t or gid_t or ignored
3148  *		uap->infop		Address of siginfo_t struct in
3149  *					user space into which to return status
3150  *		uap->options		flag values
3151  *
3152  * Returns:	0			Success
3153  *		!0			Error returning status to user space
3154  */
3155 int
waitid(proc_t q,struct waitid_args * uap,int32_t * retval)3156 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3157 {
3158 	__pthread_testcancel(1);
3159 	return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3160 }
3161 
3162 int
waitid_nocancel(proc_t q,struct waitid_nocancel_args * uap,__unused int32_t * retval)3163 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3164     __unused int32_t *retval)
3165 {
3166 	user_siginfo_t  siginfo;        /* siginfo data to return to caller */
3167 	boolean_t caller64 = IS_64BIT_PROCESS(q);
3168 	int nfound;
3169 	proc_t p;
3170 	int error;
3171 	uthread_t uth;
3172 	struct _waitid_data *waitid_data;
3173 
3174 	if (uap->options == 0 ||
3175 	    (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3176 		return EINVAL;        /* bits set that aren't recognized */
3177 	}
3178 	switch (uap->idtype) {
3179 	case P_PID:     /* child with process ID equal to... */
3180 	case P_PGID:    /* child with process group ID equal to... */
3181 		if (((int)uap->id) < 0) {
3182 			return EINVAL;
3183 		}
3184 		break;
3185 	case P_ALL:     /* any child */
3186 		break;
3187 	}
3188 
3189 loop:
3190 	proc_list_lock();
3191 loop1:
3192 	nfound = 0;
3193 
3194 	PCHILDREN_FOREACH(q, p) {
3195 		switch (uap->idtype) {
3196 		case P_PID:     /* child with process ID equal to... */
3197 			if (proc_getpid(p) != (pid_t)uap->id) {
3198 				continue;
3199 			}
3200 			break;
3201 		case P_PGID:    /* child with process group ID equal to... */
3202 			if (p->p_pgrpid != (pid_t)uap->id) {
3203 				continue;
3204 			}
3205 			break;
3206 		case P_ALL:     /* any child */
3207 			break;
3208 		}
3209 
3210 		if (proc_is_shadow(p)) {
3211 			continue;
3212 		}
3213 		/* XXX This is racy because we don't get the lock!!!! */
3214 
3215 		/*
3216 		 * Wait collision; go to sleep and restart; used to maintain
3217 		 * the single return for waited process guarantee.
3218 		 */
3219 		if (p->p_listflag & P_LIST_WAITING) {
3220 			(void) msleep(&p->p_stat, &proc_list_mlock,
3221 			    PWAIT, "waitidcoll", 0);
3222 			goto loop1;
3223 		}
3224 		p->p_listflag |= P_LIST_WAITING;                /* mark busy */
3225 
3226 		nfound++;
3227 
3228 		bzero(&siginfo, sizeof(siginfo));
3229 
3230 		switch (p->p_stat) {
3231 		case SZOMB:             /* Exited */
3232 			if (!(uap->options & WEXITED)) {
3233 				break;
3234 			}
3235 			proc_list_unlock();
3236 #if CONFIG_MACF
3237 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3238 				goto out;
3239 			}
3240 #endif
3241 			siginfo.si_signo = SIGCHLD;
3242 			siginfo.si_pid = proc_getpid(p);
3243 
3244 			/* If the child terminated abnormally due to a signal, the signum
3245 			 * needs to be preserved in the exit status.
3246 			 */
3247 			if (WIFSIGNALED(p->p_xstat)) {
3248 				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
3249 				    CLD_DUMPED : CLD_KILLED;
3250 				siginfo.si_status = WTERMSIG(p->p_xstat);
3251 			} else {
3252 				siginfo.si_code = CLD_EXITED;
3253 				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
3254 			}
3255 			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
3256 			p->p_xhighbits = 0;
3257 
3258 			if ((error = copyoutsiginfo(&siginfo,
3259 			    caller64, uap->infop)) != 0) {
3260 				goto out;
3261 			}
3262 
3263 			/* Prevent other process for waiting for this event? */
3264 			if (!(uap->options & WNOWAIT)) {
3265 				reap_child_locked(q, p, 0);
3266 				return 0;
3267 			}
3268 			goto out;
3269 
3270 		case SSTOP:             /* Stopped */
3271 			/*
3272 			 * If we are not interested in stopped processes, then
3273 			 * ignore this one.
3274 			 */
3275 			if (!(uap->options & WSTOPPED)) {
3276 				break;
3277 			}
3278 
3279 			/*
3280 			 * If someone has already waited it, we lost a race
3281 			 * to be the one to return status.
3282 			 */
3283 			if ((p->p_lflag & P_LWAITED) != 0) {
3284 				break;
3285 			}
3286 			proc_list_unlock();
3287 #if CONFIG_MACF
3288 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3289 				goto out;
3290 			}
3291 #endif
3292 			siginfo.si_signo = SIGCHLD;
3293 			siginfo.si_pid = proc_getpid(p);
3294 			siginfo.si_status = p->p_xstat; /* signal number */
3295 			siginfo.si_code = CLD_STOPPED;
3296 
3297 			if ((error = copyoutsiginfo(&siginfo,
3298 			    caller64, uap->infop)) != 0) {
3299 				goto out;
3300 			}
3301 
3302 			/* Prevent other process for waiting for this event? */
3303 			if (!(uap->options & WNOWAIT)) {
3304 				proc_lock(p);
3305 				p->p_lflag |= P_LWAITED;
3306 				proc_unlock(p);
3307 			}
3308 			goto out;
3309 
3310 		default:                /* All other states => Continued */
3311 			if (!(uap->options & WCONTINUED)) {
3312 				break;
3313 			}
3314 
3315 			/*
3316 			 * If the flag isn't set, then this process has not
3317 			 * been stopped and continued, or the status has
3318 			 * already been reaped by another caller of waitid().
3319 			 */
3320 			if ((p->p_flag & P_CONTINUED) == 0) {
3321 				break;
3322 			}
3323 			proc_list_unlock();
3324 #if CONFIG_MACF
3325 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3326 				goto out;
3327 			}
3328 #endif
3329 			siginfo.si_signo = SIGCHLD;
3330 			siginfo.si_code = CLD_CONTINUED;
3331 			proc_lock(p);
3332 			siginfo.si_pid = p->p_contproc;
3333 			siginfo.si_status = p->p_xstat;
3334 			proc_unlock(p);
3335 
3336 			if ((error = copyoutsiginfo(&siginfo,
3337 			    caller64, uap->infop)) != 0) {
3338 				goto out;
3339 			}
3340 
3341 			/* Prevent other process for waiting for this event? */
3342 			if (!(uap->options & WNOWAIT)) {
3343 				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
3344 				    &p->p_flag);
3345 			}
3346 			goto out;
3347 		}
3348 		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3349 
3350 		/* Not a process we are interested in; go on to next child */
3351 
3352 		p->p_listflag &= ~P_LIST_WAITING;
3353 		wakeup(&p->p_stat);
3354 	}
3355 	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3356 
3357 	/* No child processes that could possibly satisfy the request? */
3358 
3359 	if (nfound == 0) {
3360 		proc_list_unlock();
3361 		return ECHILD;
3362 	}
3363 
3364 	if (uap->options & WNOHANG) {
3365 		proc_list_unlock();
3366 #if CONFIG_MACF
3367 		if ((error = mac_proc_check_wait(q, p)) != 0) {
3368 			return error;
3369 		}
3370 #endif
3371 		/*
3372 		 * The state of the siginfo structure in this case
3373 		 * is undefined.  Some implementations bzero it, some
3374 		 * (like here) leave it untouched for efficiency.
3375 		 *
3376 		 * Thus the most portable check for "no matching pid with
3377 		 * WNOHANG" is to store a zero into si_pid before
3378 		 * invocation, then check for a non-zero value afterwards.
3379 		 */
3380 		return 0;
3381 	}
3382 
3383 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3384 	uth = current_uthread();
3385 	waitid_data = &uth->uu_save.uus_waitid_data;
3386 	waitid_data->args = uap;
3387 	waitid_data->retval = retval;
3388 
3389 	if ((error = msleep0(q, &proc_list_mlock,
3390 	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
3391 		return error;
3392 	}
3393 
3394 	goto loop;
3395 out:
3396 	proc_list_lock();
3397 	p->p_listflag &= ~P_LIST_WAITING;
3398 	wakeup(&p->p_stat);
3399 	proc_list_unlock();
3400 	return error;
3401 }
3402 
3403 /*
3404  * make process 'parent' the new parent of process 'child'.
3405  */
3406 void
proc_reparentlocked(proc_t child,proc_t parent,int signallable,int locked)3407 proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
3408 {
3409 	proc_t oldparent = PROC_NULL;
3410 
3411 	if (child->p_pptr == parent) {
3412 		return;
3413 	}
3414 
3415 	if (locked == 0) {
3416 		proc_list_lock();
3417 	}
3418 
3419 	oldparent = child->p_pptr;
3420 #if __PROC_INTERNAL_DEBUG
3421 	if (oldparent == PROC_NULL) {
3422 		panic("proc_reparent: process %p does not have a parent", child);
3423 	}
3424 #endif
3425 
3426 	LIST_REMOVE(child, p_sibling);
3427 #if __PROC_INTERNAL_DEBUG
3428 	if (oldparent->p_childrencnt == 0) {
3429 		panic("process children count already 0");
3430 	}
3431 #endif
3432 	oldparent->p_childrencnt--;
3433 #if __PROC_INTERNAL_DEBUG
3434 	if (oldparent->p_childrencnt < 0) {
3435 		panic("process children count -ve");
3436 	}
3437 #endif
3438 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
3439 	parent->p_childrencnt++;
3440 	child->p_pptr = parent;
3441 	child->p_ppid = proc_getpid(parent);
3442 
3443 	proc_list_unlock();
3444 
3445 	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
3446 		psignal(initproc, SIGCHLD);
3447 	}
3448 	if (locked == 1) {
3449 		proc_list_lock();
3450 	}
3451 }
3452 
3453 /*
3454  * Exit: deallocate address space and other resources, change proc state
3455  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
3456  * status and rusage for wait().  Check for child processes and orphan them.
3457  */
3458 
3459 
3460 /*
3461  * munge_rusage
3462  *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
3463  *	process.  We munge the kernel version of rusage into the
3464  *	64 bit version.
3465  */
3466 __private_extern__  void
munge_user64_rusage(struct rusage * a_rusage_p,struct user64_rusage * a_user_rusage_p)3467 munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
3468 {
3469 	/* Zero-out struct so that padding is cleared */
3470 	bzero(a_user_rusage_p, sizeof(struct user64_rusage));
3471 
3472 	/* timeval changes size, so utime and stime need special handling */
3473 	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
3474 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3475 	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
3476 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3477 	/*
3478 	 * everything else can be a direct assign, since there is no loss
3479 	 * of precision implied boing 32->64.
3480 	 */
3481 	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
3482 	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
3483 	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
3484 	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
3485 	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
3486 	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
3487 	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
3488 	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
3489 	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
3490 	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
3491 	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
3492 	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
3493 	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
3494 	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
3495 }
3496 
3497 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */
3498 __private_extern__  void
munge_user32_rusage(struct rusage * a_rusage_p,struct user32_rusage * a_user_rusage_p)3499 munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
3500 {
3501 	bzero(a_user_rusage_p, sizeof(struct user32_rusage));
3502 
3503 	/* timeval changes size, so utime and stime need special handling */
3504 	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
3505 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3506 	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
3507 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3508 	/*
3509 	 * everything else can be a direct assign. We currently ignore
3510 	 * the loss of precision
3511 	 */
3512 	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
3513 	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
3514 	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
3515 	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
3516 	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
3517 	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
3518 	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
3519 	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
3520 	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
3521 	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
3522 	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
3523 	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
3524 	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
3525 	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
3526 }
3527 
3528 void
kdp_wait4_find_process(thread_t thread,__unused event64_t wait_event,thread_waitinfo_t * waitinfo)3529 kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
3530 {
3531 	assert(thread != NULL);
3532 	assert(waitinfo != NULL);
3533 
3534 	struct uthread *ut = get_bsdthread_info(thread);
3535 	waitinfo->context = 0;
3536 	// ensure wmesg is consistent with a thread waiting in wait4
3537 	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
3538 	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
3539 	// May not actually contain a pid; this is just the argument to wait4.
3540 	// See man wait4 for other valid wait4 arguments.
3541 	waitinfo->owner = args->pid;
3542 }
3543 
3544 static int
exit_with_exception_internal(struct proc * p,exception_info_t exception,uint32_t flags)3545 exit_with_exception_internal(
3546 	struct proc *p,
3547 	exception_info_t exception,
3548 	uint32_t flags)
3549 {
3550 	os_reason_t reason = OS_REASON_NULL;
3551 	struct uthread *ut = NULL;
3552 
3553 	if (p == PROC_NULL) {
3554 		panic("exception type %d without a valid proc",
3555 		    exception.os_reason);
3556 	}
3557 
3558 	if (!(flags & PX_DEBUG_NO_HONOR)
3559 	    && is_address_space_debugged(p)) {
3560 		return 0;
3561 	}
3562 
3563 	if ((flags & PX_KTRIAGE)) {
3564 		/* Leave a ktriage record */
3565 		ktriage_record(
3566 			thread_tid(current_thread()),
3567 			KDBG_TRIAGE_EVENTID(
3568 				exception.kt_info.kt_subsys,
3569 				KDBG_TRIAGE_RESERVED,
3570 				exception.kt_info.kt_error),
3571 			0);
3572 	}
3573 
3574 	if ((flags & PX_PSIGNAL)) {
3575 		int signal = (exception.signal > 0) ? exception.signal : SIGKILL;
3576 
3577 		printf("[%s%s] sending signal %d to process\n", proc_best_name(p),
3578 		    (signal == SIGKILL) ? ": killed" : "", signal);
3579 		psignal(p, signal);
3580 		return 0;
3581 	} else {
3582 		assert(exception.exception_type > 0);
3583 
3584 		reason = os_reason_create(
3585 			exception.os_reason,
3586 			(uint64_t)exception.mx_code);
3587 		assert(reason != OS_REASON_NULL);
3588 		reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
3589 
3590 		if (!(flags & PX_NO_EXCEPTION_UTHREAD)) {
3591 			ut = get_bsdthread_info(current_thread());
3592 			ut->uu_exception = exception.exception_type;
3593 			ut->uu_code = exception.mx_code;
3594 			ut->uu_subcode = exception.mx_subcode;
3595 		}
3596 
3597 		printf("[%s: killed] sending signal %d and force exiting process\n",
3598 		    proc_best_name(p), SIGKILL);
3599 		return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
3600 		           FALSE, FALSE, 0, reason);
3601 	}
3602 }
3603 
3604 /*
3605  * Use a separate function call for mach and exclave exceptions so that we
3606  * see the exception's origin show up clearly in the backtrace on dev kernels.
3607  */
3608 
3609 int
exit_with_mach_exception(struct proc * p,exception_info_t exception,uint32_t flags)3610 exit_with_mach_exception(
3611 	struct proc *p,
3612 	exception_info_t exception,
3613 	uint32_t flags)
3614 {
3615 	return exit_with_exception_internal(p, exception, flags);
3616 }
3617 
3618 
3619 #if CONFIG_EXCLAVES
3620 int
exit_with_exclave_exception(struct proc * p,exception_info_t exception,uint32_t flags)3621 exit_with_exclave_exception(
3622 	struct proc *p,
3623 	exception_info_t exception,
3624 	uint32_t flags)
3625 {
3626 	return exit_with_exception_internal(p, exception, flags);
3627 }
3628 #endif /* CONFIG_EXCLAVES */
3629 
3630 /**
3631  * Causes the current process to exit with a Mach exception.
3632  *
3633  * Compared to exit_with_mach_exception(), exit_with_mach_exception_using_ast()
3634  * can be called in a preemption-disabled context.  This function defers
3635  * updating the process state until an AST.
3636  *
3637  * @note Currently only the PX_KTRIAGE flag is implemented.
3638  *
3639  * @param exception information about the exception
3640  * @param flags a bitmask of PX_* flags describing how to deliver the exception
3641  */
3642 void
exit_with_mach_exception_using_ast(exception_info_t exception,uint32_t flags,bool fatal)3643 exit_with_mach_exception_using_ast(
3644 	exception_info_t exception,
3645 	uint32_t flags,
3646 	bool fatal)
3647 {
3648 	const uint32_t __assert_only supported_flags = PX_KTRIAGE;
3649 	assert((flags & ~supported_flags) == 0);
3650 
3651 	bool ktriage = flags & PX_KTRIAGE;
3652 	thread_ast_mach_exception(current_thread(), exception.os_reason, exception.exception_type,
3653 	    exception.mx_code, exception.mx_subcode, fatal, ktriage);
3654 }
3655