xref: /xnu-10002.41.9/bsd/kern/kern_exit.c (revision 699cd48037512bf4380799317ca44ca453c82f57)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113 
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116 
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120 
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134 
135 #include <vm/vm_protos.h>
136 #include <os/log.h>
137 #include <os/system_event_log.h>
138 
139 #include <pexpert/pexpert.h>
140 
141 #include <kdp/kdp_dyld.h>
142 
143 #if SYSV_SHM
144 #include <sys/shm_internal.h>   /* shmexit */
145 #endif /* SYSV_SHM */
146 #if CONFIG_PERSONAS
147 #include <sys/persona.h>
148 #endif /* CONFIG_PERSONAS */
149 #if CONFIG_MEMORYSTATUS
150 #include <sys/kern_memorystatus.h>
151 #endif /* CONFIG_MEMORYSTATUS */
152 #if CONFIG_DTRACE
153 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
154 void dtrace_proc_exit(proc_t p);
155 #include <sys/dtrace_ptss.h>
156 #endif /* CONFIG_DTRACE */
157 #if CONFIG_MACF
158 #include <security/mac_framework.h>
159 #include <security/mac_mach_internal.h>
160 #include <sys/syscall.h>
161 #endif /* CONFIG_MACF */
162 
163 #if CONFIG_MEMORYSTATUS
164 static void proc_memorystatus_remove(proc_t p);
165 #endif /* CONFIG_MEMORYSTATUS */
166 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
167 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
168     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
169     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
170 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
171 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
172 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
173 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
174 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
175     struct rusage_superset *rup, mach_exception_data_type_t code,
176     mach_exception_data_type_t subcode, uint64_t *udata_buffer,
177     int num_udata, os_reason_t reason, exception_type_t etype);
178 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
179 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
180 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
181 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
182 extern uint64_t get_task_phys_footprint_limit(task_t);
183 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
184 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
185 
186 extern unsigned int exception_log_max_pid;
187 
188 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
189 
190 /*
191  * Flags for `reap_child_locked`.
192  */
193 __options_decl(reap_flags_t, uint32_t, {
194 	/*
195 	 * Parent is exiting, so the kernel is responsible for reaping children.
196 	 */
197 	REAP_DEAD_PARENT = 0x01,
198 	/*
199 	 * Child process was re-parented to initproc.
200 	 */
201 	REAP_REPARENTED_TO_INIT = 0x02,
202 	/*
203 	 * `proc_list_lock` is held on entry.
204 	 */
205 	REAP_LOCKED = 0x04,
206 	/*
207 	 * Drop the `proc_list_lock` on return.  Note that the `proc_list_lock` will
208 	 * be dropped internally by the function regardless.
209 	 */
210 	REAP_DROP_LOCK = 0x08,
211 });
212 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
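/*
 * Illustrative only (not part of the original source): a caller that already
 * holds the proc_list_lock and wants it dropped on return would combine the
 * flags above, e.g.
 *
 *	reap_child_locked(parent, child, REAP_LOCKED | REAP_DROP_LOCK);
 *
 * while an exiting parent reaping a child re-parented to launchd might pass
 * REAP_DEAD_PARENT | REAP_REPARENTED_TO_INIT instead.
 */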
213 
214 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
215 
216 /*
217  * Things which should have prototypes in headers, but don't
218  */
219 void    proc_exit(proc_t p);
220 int     wait1continue(int result);
221 int     waitidcontinue(int result);
222 kern_return_t sys_perf_notify(thread_t thread, int pid);
223 kern_return_t task_exception_notify(exception_type_t exception,
224     mach_exception_data_type_t code, mach_exception_data_type_t subcode, bool fatal);
225 void    delay(int);
226 
227 #if __has_feature(ptrauth_calls)
228 int exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
229     mach_exception_subcode_t subcode);
230 #endif /* __has_feature(ptrauth_calls) */
231 
232 int exit_with_guard_exception(proc_t p, mach_exception_data_type_t code,
233     mach_exception_data_type_t subcode);
234 int exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
235     mach_exception_data_type_t subcode);
236 static int exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception,
237     mach_exception_code_t code, mach_exception_subcode_t subcode);
238 
239 #if DEVELOPMENT || DEBUG
240 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
241 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
242 static pid_t proc_exit_lpexit_spin_pid = -1;            /* wakeup point */
243 static int proc_exit_lpexit_spin_pos = -1;              /* point to block */
244 static int proc_exit_lpexit_spinning = 0;
245 enum {
246 	PELS_POS_START = 0,             /* beginning of proc_exit */
247 	PELS_POS_PRE_TASK_DETACH,       /* before task/proc detach */
248 	PELS_POS_POST_TASK_DETACH,      /* after task/proc detach */
249 	PELS_POS_END,                   /* end of proc_exit */
250 	PELS_NPOS                       /* # valid values */
251 };
252 
253 /* Panic if matching processes (delimited by ',') exit on error. */
254 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
255 
256 static int
257 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
258 {
259 #pragma unused(oidp, arg1, arg2)
260 	pid_t new_value;
261 	int changed;
262 	int error;
263 
264 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
265 		return ENOENT;
266 	}
267 
268 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
269 	    sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
270 	if (error == 0 && changed != 0) {
271 		if (new_value < -1) {
272 			return EINVAL;
273 		}
274 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
275 		proc_exit_lpexit_spin_pid = new_value;
276 		wakeup(&proc_exit_lpexit_spin_pid);
277 		proc_exit_lpexit_spinning = 0;
278 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
279 	}
280 	return error;
281 }
282 
283 static int
284 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
285 {
286 #pragma unused(oidp, arg1, arg2)
287 	int new_value;
288 	int changed;
289 	int error;
290 
291 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
292 		return ENOENT;
293 	}
294 
295 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
296 	    sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
297 	if (error == 0 && changed != 0) {
298 		if (new_value < -1 || new_value >= PELS_NPOS) {
299 			return EINVAL;
300 		}
301 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
302 		proc_exit_lpexit_spin_pos = new_value;
303 		wakeup(&proc_exit_lpexit_spin_pid);
304 		proc_exit_lpexit_spinning = 0;
305 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
306 	}
307 	return error;
308 }
309 
310 static int
311 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
312 {
313 #pragma unused(oidp, arg1, arg2)
314 	int new_value;
315 	int changed;
316 	int error;
317 
318 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
319 		return ENOENT;
320 	}
321 
322 	error = sysctl_io_number(req, proc_exit_lpexit_spinning,
323 	    sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
324 	if (error == 0 && changed != 0) {
325 		return EINVAL;
326 	}
327 	return error;
328 }
329 
330 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
331     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
332     NULL, sizeof(pid_t),
333     proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
334 
335 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
336     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
337     NULL, sizeof(int),
338     proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
339 
340 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
341     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
342     NULL, sizeof(int),
343     proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
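/*
 * Example usage (illustrative, not from the source): on a DEVELOPMENT or
 * DEBUG kernel booted with the enable_proc_exit_lpexit_spin boot-arg, a test
 * harness can park a chosen pid at a chosen point in proc_exit:
 *
 *	sysctl -w debug.proc_exit_lpexit_spin_pid=123
 *	sysctl -w debug.proc_exit_lpexit_spin_pos=1   # PELS_POS_PRE_TASK_DETACH
 *	sysctl debug.proc_exit_lpexit_spinning        # reads 1 once pid 123 parks
 *
 * Writing -1 (or any non-matching value) to either knob wakes the parked
 * thread and lets proc_exit continue.
 */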
344 
345 static inline void
346 proc_exit_lpexit_check(pid_t pid, int pos)
347 {
348 	if (proc_exit_lpexit_spin_pid == pid) {
349 		bool slept = false;
350 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
351 		while (proc_exit_lpexit_spin_pid == pid &&
352 		    proc_exit_lpexit_spin_pos == pos) {
353 			if (!slept) {
354 				os_log(OS_LOG_DEFAULT,
355 				    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
356 				slept = true;
357 			}
358 			proc_exit_lpexit_spinning = 1;
359 			msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
360 			    PWAIT, "proc_exit_lpexit_check", NULL);
361 			proc_exit_lpexit_spinning = 0;
362 		}
363 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
364 		if (slept) {
365 			os_log(OS_LOG_DEFAULT,
366 			    "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
367 		}
368 	}
369 }
370 #endif /* DEVELOPMENT || DEBUG */
371 
372 /*
373  * NOTE: Source and target may *NOT* overlap!
374  * XXX Should share code with bsd/dev/ppc/unix_signal.c
375  */
376 void
377 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
378 {
379 	out->si_signo   = in->si_signo;
380 	out->si_errno   = in->si_errno;
381 	out->si_code    = in->si_code;
382 	out->si_pid     = in->si_pid;
383 	out->si_uid     = in->si_uid;
384 	out->si_status  = in->si_status;
385 	out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
386 	/* following cast works for sival_int because of padding */
387 	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
388 	out->si_band    = (user32_long_t)in->si_band;                  /* range reduction */
389 }
390 
391 void
392 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
393 {
394 	out->si_signo   = in->si_signo;
395 	out->si_errno   = in->si_errno;
396 	out->si_code    = in->si_code;
397 	out->si_pid     = in->si_pid;
398 	out->si_uid     = in->si_uid;
399 	out->si_status  = in->si_status;
400 	out->si_addr    = in->si_addr;
401 	/* following cast works for sival_int because of padding */
402 	out->si_value.sival_ptr = in->si_value.sival_ptr;
403 	out->si_band    = in->si_band;                  /* range reduction */
404 }
405 
406 static int
407 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
408 {
409 	if (is64) {
410 		user64_siginfo_t sinfo64;
411 
412 		bzero(&sinfo64, sizeof(sinfo64));
413 		siginfo_user_to_user64(native, &sinfo64);
414 		return copyout(&sinfo64, uaddr, sizeof(sinfo64));
415 	} else {
416 		user32_siginfo_t sinfo32;
417 
418 		bzero(&sinfo32, sizeof(sinfo32));
419 		siginfo_user_to_user32(native, &sinfo32);
420 		return copyout(&sinfo32, uaddr, sizeof(sinfo32));
421 	}
422 }
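/*
 * Hypothetical caller (names illustrative): wait-family syscalls can hand a
 * native siginfo back to userspace in whichever layout matches the calling
 * process's ABI:
 *
 *	error = copyoutsiginfo(&native_info, IS_64BIT_PROCESS(p), infop);
 */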
423 
424 void
425 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
426     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
427     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
428 {
429 	struct rusage_superset rup;
430 
431 	gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
432 	rup.ri.ri_phys_footprint = 0;
433 	populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
434 	    udata_buffer, num_udata, reason, etype);
435 }
436 
437 static void
438 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
439 {
440 	mach_exception_data_type_t code_update = *code;
441 	mach_exception_data_type_t subcode_update = *subcode;
442 	if (p->p_exit_reason == OS_REASON_NULL) {
443 		return;
444 	}
445 
446 	switch (p->p_exit_reason->osr_namespace) {
447 	case OS_REASON_JETSAM:
448 		if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
449 			/* Update the code with EXC_RESOURCE code for high memory watermark */
450 			EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
451 			EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
452 			EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
453 			subcode_update = 0;
454 			break;
455 		}
456 
457 		break;
458 	default:
459 		break;
460 	}
461 
462 	*code = code_update;
463 	*subcode = subcode_update;
464 	return;
465 }
466 
467 mach_exception_data_type_t
468 proc_encode_exit_exception_code(proc_t p)
469 {
470 	uint64_t subcode = 0;
471 
472 	if (p->p_exit_reason == OS_REASON_NULL) {
473 		return 0;
474 	}
475 
476 	/* Embed first 32 bits of osr_namespace and osr_code in exception code */
477 	ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
478 	ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
479 	return (mach_exception_data_type_t)subcode;
480 }
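/*
 * Sketch of the inverse (an assumption, not code from this file): since the
 * two ENCODE_* macros above pack the low 32 bits of osr_namespace and
 * osr_code into one 64-bit value, a consumer of the resulting subcode can
 * recover both fields by splitting it back into 32-bit halves, per the macro
 * definitions in <sys/reason.h>.
 */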
481 
482 static void
483 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
484     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
485     uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
486 {
487 	mach_vm_address_t uaddr = 0;
488 	mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
489 	exc_codes[0] = code;
490 	exc_codes[1] = subcode;
491 	cpu_type_t cputype;
492 	struct proc_uniqidentifierinfo p_uniqidinfo;
493 	struct proc_workqueueinfo pwqinfo;
494 	int retval = 0;
495 	uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
496 	boolean_t is_corpse_fork;
497 	uint32_t csflags;
498 	unsigned int pflags = 0;
499 	uint64_t max_footprint_mb;
500 	uint64_t max_footprint;
501 
502 	uint64_t ledger_internal;
503 	uint64_t ledger_internal_compressed;
504 	uint64_t ledger_iokit_mapped;
505 	uint64_t ledger_alternate_accounting;
506 	uint64_t ledger_alternate_accounting_compressed;
507 	uint64_t ledger_purgeable_nonvolatile;
508 	uint64_t ledger_purgeable_nonvolatile_compressed;
509 	uint64_t ledger_page_table;
510 	uint64_t ledger_phys_footprint;
511 	uint64_t ledger_phys_footprint_lifetime_max;
512 	uint64_t ledger_network_nonvolatile;
513 	uint64_t ledger_network_nonvolatile_compressed;
514 	uint64_t ledger_wired_mem;
515 	uint64_t ledger_tagged_footprint;
516 	uint64_t ledger_tagged_footprint_compressed;
517 	uint64_t ledger_media_footprint;
518 	uint64_t ledger_media_footprint_compressed;
519 	uint64_t ledger_graphics_footprint;
520 	uint64_t ledger_graphics_footprint_compressed;
521 	uint64_t ledger_neural_footprint;
522 	uint64_t ledger_neural_footprint_compressed;
523 
524 	void *crash_info_ptr = task_get_corpseinfo(corpse_task);
525 
526 #if CONFIG_MEMORYSTATUS
527 	int memstat_dirty_flags = 0;
528 #endif
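	/*
	 * Every record below follows the same two-step kcdata idiom: reserve a
	 * typed slot in the corpse buffer, then copy the payload into the
	 * returned address.  A minimal sketch (SOME_TYPE_TAG and datum are
	 * illustrative):
	 *
	 *	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr,
	 *	    SOME_TYPE_TAG, sizeof(datum), &uaddr)) {
	 *		kcdata_memcpy(crash_info_ptr, uaddr, &datum, sizeof(datum));
	 *	}
	 *
	 * A failed reservation (e.g. out of buffer space) simply skips that
	 * record rather than failing the whole corpse.
	 */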
529 
530 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
531 		kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
532 	}
533 
534 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
535 		pid_t pid = proc_getpid(p);
536 		kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
537 	}
538 
539 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
540 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
541 	}
542 
543 	/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
544 	if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
545 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
546 			kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
547 		}
548 	}
549 
550 	static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
551 	if (KERN_SUCCESS ==
552 	    kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
553 		proc_piduniqidentifierinfo(p, &p_uniqidinfo);
554 		kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
555 	}
556 
557 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
558 		kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
559 	}
560 
561 	csflags = (uint32_t)proc_getcsflags(p);
562 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
563 		kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
564 	}
565 
566 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
567 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
568 	}
569 
570 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
571 		struct timeval64 t64;
572 		t64.tv_sec = (int64_t)p->p_start.tv_sec;
573 		t64.tv_usec = (int64_t)p->p_start.tv_usec;
574 		kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
575 	}
576 
577 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
578 		kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
579 	}
580 
581 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
582 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
583 	}
584 
585 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
586 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
587 	}
588 
589 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
590 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
591 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
592 		kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
593 		zfree(ZV_NAMEI, buf);
594 	}
595 
596 	pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
597 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
598 		kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
599 	}
600 
601 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
602 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
603 	}
604 
605 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
606 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
607 	}
608 
609 	cputype = cpu_type() & ~CPU_ARCH_MASK;
610 	if (IS_64BIT_PROCESS(p)) {
611 		cputype |= CPU_ARCH_ABI64;
612 	} else if (proc_is64bit_data(p)) {
613 		cputype |= CPU_ARCH_ABI64_32;
614 	}
615 
616 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
617 		kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
618 	}
619 
620 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
621 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
622 	}
623 
624 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
625 		max_footprint = get_task_phys_footprint_limit(proc_task(p));
626 		max_footprint_mb = max_footprint >> 20;
627 		kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
628 	}
629 
630 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
631 		ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
632 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
633 	}
634 
635 	// In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
636 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
637 		ledger_internal = get_task_internal(corpse_task);
638 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
639 	}
640 
641 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
642 		ledger_internal_compressed = get_task_internal_compressed(corpse_task);
643 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
644 	}
645 
646 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
647 		ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
648 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
649 	}
650 
651 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
652 		ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
653 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
654 	}
655 
656 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
657 		ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
658 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
659 	}
660 
661 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
662 		ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
663 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
664 	}
665 
666 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
667 		ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
668 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
669 	}
670 
671 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
672 		ledger_page_table = get_task_page_table(corpse_task);
673 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
674 	}
675 
676 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
677 		ledger_phys_footprint = get_task_phys_footprint(corpse_task);
678 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
679 	}
680 
681 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
682 		ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
683 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
684 	}
685 
686 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
687 		ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
688 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
689 	}
690 
691 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
692 		ledger_wired_mem = get_task_wired_mem(corpse_task);
693 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
694 	}
695 
696 	bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
697 	retval = fill_procworkqueue(p, &pwqinfo);
698 	if (retval == 0) {
699 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
700 			kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
701 		}
702 	}
703 
704 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
705 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
706 	}
707 
708 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
709 		uid_t persona_id = proc_persona_id(p);
710 		kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
711 	}
712 
713 #if CONFIG_COALITIONS
714 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
715 		uint64_t coalition_ids[COALITION_NUM_TYPES];
716 		task_coalition_ids(proc_task(p), coalition_ids);
717 		kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
718 	}
719 #endif /* CONFIG_COALITIONS */
720 
721 #if CONFIG_MEMORYSTATUS
722 	memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
723 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
724 		kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
725 	}
726 #endif
727 
728 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
729 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
730 	}
731 
732 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
733 		ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
734 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
735 	}
736 
737 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
738 		ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
739 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
740 	}
741 
742 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
743 		ledger_media_footprint = get_task_media_footprint(corpse_task);
744 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
745 	}
746 
747 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
748 		ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
749 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
750 	}
751 
752 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
753 		ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
754 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
755 	}
756 
757 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
758 		ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
759 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
760 	}
761 
762 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
763 		ledger_neural_footprint = get_task_neural_footprint(corpse_task);
764 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
765 	}
766 
767 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
768 		ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
769 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
770 	}
771 
772 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
773 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
774 	}
775 
776 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
777 		char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
778 		ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
779 		kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
780 	}
781 
782 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
783 		is_corpse_fork = is_corpsefork(corpse_task);
784 		kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
785 	}
786 
787 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
788 		kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
789 	}
790 
791 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
792 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
793 	}
794 
795 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
796 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
797 	}
798 
799 	char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
800 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
801 		const char * id = cs_identity_get(p);
802 		if (id) {
803 			strlcpy(signing_id, id, sizeof(signing_id));
804 		}
805 		kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
806 	}
807 	char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
808 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
809 		const char * id = csproc_get_teamid(p);
810 		if (id) {
811 			strlcpy(team_id, id, sizeof(team_id));
812 		}
813 		kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
814 	}
815 
816 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
817 		uint32_t category = 0;
818 		if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
819 			category = CS_VALIDATION_CATEGORY_INVALID;
820 		}
821 		kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
822 	}
823 
824 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
825 		uint32_t trust = 0;
826 		kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
827 		if (ret != KERN_SUCCESS) {
828 			trust = KCDATA_INVALID_CS_TRUST_LEVEL;
829 		}
830 		kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
831 	}
832 
833 
834 	if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
835 		reason = p->p_exit_reason;
836 	}
837 	if (reason != OS_REASON_NULL) {
838 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
839 			struct exit_reason_snapshot ers = {
840 				.ers_namespace = reason->osr_namespace,
841 				.ers_code = reason->osr_code,
842 				.ers_flags = reason->osr_flags
843 			};
844 
845 			kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
846 		}
847 
848 		if (reason->osr_kcd_buf != 0) {
849 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
850 			assert(reason_buf_size != 0);
851 
852 			if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
853 				kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
854 			}
855 		}
856 	}
857 
858 	if (num_udata > 0) {
859 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
860 		    sizeof(uint64_t), num_udata, &uaddr)) {
861 			kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
862 		}
863 	}
864 }
865 
866 exception_type_t
867 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
868 {
869 	kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
870 	    corpse_info->kcd_length);
871 	__assert_only uint32_t type = kcdata_iter_type(iter);
872 	assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
873 
874 	iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
875 	exception_type_t *etype = kcdata_iter_payload(iter);
876 	return *etype;
877 }
878 
879 /*
880  * Collect the information required to generate a lightweight corpse for the
881  * current task, which may be terminating.
882  */
883 kern_return_t
884 current_thread_collect_backtrace_info(
885 	kcdata_descriptor_t *new_desc,
886 	exception_type_t etype,
887 	mach_exception_data_t code,
888 	mach_msg_type_number_t codeCnt,
889 	void *reasonp)
890 {
891 	kcdata_descriptor_t kcdata;
892 	kern_return_t kr;
893 	int frame_count = 0, max_frames = 100;
894 	mach_vm_address_t uuid_info_addr = 0;
895 	uint32_t uuid_info_count         = 0;
896 	uint32_t btinfo_flag             = 0;
897 	mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
898 	natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
899 	mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
900 	thread_identifier_info_data_t th_info;
901 	char threadname[MAXTHREADNAMESIZE];
902 	void *btdata_kernel = NULL;
903 	typedef uintptr_t user_btframe_t __kernel_data_semantics;
904 	user_btframe_t *btframes = NULL;
905 	os_reason_t reason = (os_reason_t)reasonp;
906 	struct backtrace_user_info info = BTUINFO_INIT;
907 	struct rusage_superset rup;
908 	uint32_t platform;
909 
910 	task_t task = current_task();
911 	proc_t p = current_proc();
912 
913 	bool has_64bit_addr = task_get_64bit_addr(current_task());
914 	bool has_64bit_data = task_get_64bit_data(current_task());
915 
916 	if (new_desc == NULL) {
917 		return KERN_INVALID_ARGUMENT;
918 	}
919 
920 	/* First, collect backtrace frames */
921 	btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
922 	if (!btframes) {
923 		return KERN_RESOURCE_SHORTAGE;
924 	}
925 
926 	frame_count = backtrace_user(btframes, max_frames, NULL, &info);
927 	if (info.btui_error || frame_count == 0) {
928 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
929 		return KERN_FAILURE;
930 	}
931 
932 	if ((info.btui_info & BTI_TRUNCATED) != 0) {
933 		btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
934 	}
935 
936 	/* Captured in kcdata descriptor below */
937 	btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
938 	if (!btdata_kernel) {
939 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
940 		return KERN_RESOURCE_SHORTAGE;
941 	}
942 
943 	kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
944 	if (!kcdata) {
945 		kfree_data(btdata_kernel, alloc_size);
946 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
947 		return KERN_RESOURCE_SHORTAGE;
948 	}
949 
950 	/* First reserve space in kcdata blob for the btinfo flag fields */
951 	if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
952 	    sizeof(uint32_t), &btinfo_flag_addr)) {
953 		kfree_data(btdata_kernel, alloc_size);
954 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
955 		kcdata_memory_destroy(kcdata);
956 		return KERN_RESOURCE_SHORTAGE;
957 	}
958 
959 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
960 	    (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
961 	    sizeof(uintptr_t), frame_count, &kaddr)) {
962 		kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
963 	}
964 
965 #if __LP64__
966 	/* We only support async stacks on 64-bit kernels */
967 	frame_count = 0;
968 
969 	if (info.btui_async_frame_addr != 0) {
970 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
971 		    sizeof(uint32_t), &kaddr)) {
972 			uint32_t idx = info.btui_async_start_index;
973 			kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
974 		}
975 		struct backtrace_control ctl = {
976 			.btc_frame_addr = info.btui_async_frame_addr,
977 			.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
978 		};
979 
980 		info = BTUINFO_INIT;
981 		frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
982 		if (info.btui_error == 0 && frame_count > 0) {
983 			if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
984 			    TASK_BTINFO_ASYNC_BACKTRACE64,
985 			    sizeof(uintptr_t), frame_count, &kaddr)) {
986 				kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
987 			}
988 		}
989 
990 		if ((info.btui_info & BTI_TRUNCATED) != 0) {
991 			btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
992 		}
993 	}
994 #endif
995 
996 	/* Backtrace collection done, free the frames buffer */
997 	kfree_data(btframes, max_frames * sizeof(btframes[0]));
998 	btframes = NULL;
999 
1000 	thread_set_exec_promotion(current_thread());
1001 	/* Next, suspend the task briefly and collect image load infos */
1002 	task_suspend_internal(task);
1003 
1004 	/* all_image_info struct is ABI, in agreement with address width */
1005 	if (has_64bit_addr) {
1006 		struct user64_dyld_all_image_infos task_image_infos = {};
1007 		struct btinfo_sc_load_info64 sc_info;
1008 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1009 		    sizeof(struct user64_dyld_all_image_infos));
1010 		uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1011 		uuid_info_addr = task_image_infos.uuidArray;
1012 
1013 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1014 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1015 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1016 		    sizeof(task_image_infos.sharedCacheUUID));
1017 
1018 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1019 		    TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1020 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1021 		}
1022 	} else {
1023 		struct user32_dyld_all_image_infos task_image_infos = {};
1024 		struct btinfo_sc_load_info sc_info;
1025 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1026 		    sizeof(struct user32_dyld_all_image_infos));
1027 		uuid_info_count = task_image_infos.uuidArrayCount;
1028 		uuid_info_addr = task_image_infos.uuidArray;
1029 
1030 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1031 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1032 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1033 		    sizeof(task_image_infos.sharedCacheUUID));
1034 
1035 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1036 		    TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1037 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1038 		}
1039 	}
1040 
1041 	if (!uuid_info_addr) {
1042 		/*
1043 		 * Can happen when we catch dyld in the middle of updating
1044 		 * this data structure, or copyin of all_image_info struct failed.
1045 		 */
1046 		task_resume_internal(task);
1047 		thread_clear_exec_promotion(current_thread());
1048 		kfree_data(btdata_kernel, alloc_size);
1049 		kcdata_memory_destroy(kcdata);
1050 		return KERN_MEMORY_ERROR;
1051 	}
1052 
1053 	if (uuid_info_count > 0) {
1054 		uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1055 		    sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1056 
1057 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1058 		    (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1059 		    uuid_info_size, uuid_info_count, &kaddr)) {
1060 			if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1061 				task_resume_internal(task);
1062 				thread_clear_exec_promotion(current_thread());
1063 				kfree_data(btdata_kernel, alloc_size);
1064 				kcdata_memory_destroy(kcdata);
1065 				return KERN_MEMORY_ERROR;
1066 			}
1067 		}
1068 	}
1069 
1070 	task_resume_internal(task);
1071 	thread_clear_exec_promotion(current_thread());
1072 
1073 	/* Next, collect all other information */
1074 	thread_flavor_t tsflavor;
1075 	mach_msg_type_number_t tscount;
1076 
1077 #if defined(__x86_64__) || defined(__i386__)
1078 	tsflavor = x86_THREAD_STATE;      /* unified */
1079 	tscount  = x86_THREAD_STATE_COUNT;
1080 #else
1081 	tsflavor = ARM_THREAD_STATE;      /* unified */
1082 	tscount  = ARM_UNIFIED_THREAD_STATE_COUNT;
1083 #endif
1084 
1085 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1086 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1087 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1088 		bt_thread_state->flavor = tsflavor;
1089 		bt_thread_state->count = tscount;
1090 		/* variable-sized tstate array follows */
1091 
1092 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1093 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1094 		if (kr != KERN_SUCCESS) {
1095 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1096 			if (kr == KERN_TERMINATED) {
1097 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1098 			}
1099 		}
1100 	}
1101 
1102 #if defined(__x86_64__) || defined(__i386__)
1103 	tsflavor = x86_EXCEPTION_STATE;       /* unified */
1104 	tscount  = x86_EXCEPTION_STATE_COUNT;
1105 #else
1106 #if defined(__arm64__)
1107 	if (has_64bit_data) {
1108 		tsflavor = ARM_EXCEPTION_STATE64;
1109 		tscount  = ARM_EXCEPTION_STATE64_COUNT;
1110 	} else
1111 #endif /* defined(__arm64__) */
1112 	{
1113 		tsflavor = ARM_EXCEPTION_STATE;
1114 		tscount  = ARM_EXCEPTION_STATE_COUNT;
1115 	}
1116 #endif /* defined(__x86_64__) || defined(__i386__) */
1117 
1118 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1119 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1120 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1121 		bt_thread_state->flavor = tsflavor;
1122 		bt_thread_state->count = tscount;
1123 		/* variable-sized tstate array follows */
1124 
1125 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1126 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1127 		if (kr != KERN_SUCCESS) {
1128 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1129 			if (kr == KERN_TERMINATED) {
1130 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1131 			}
1132 		}
1133 	}
1134 
1135 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1136 		pid_t pid = proc_getpid(p);
1137 		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1138 	}
1139 
1140 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1141 		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1142 	}
1143 
1144 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1145 		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1146 	}
1147 
1148 #if CONFIG_COALITIONS
1149 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1150 		uint64_t coalition_ids[COALITION_NUM_TYPES];
1151 		task_coalition_ids(proc_task(p), coalition_ids);
1152 		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1153 	}
1154 #endif /* CONFIG_COALITIONS */
1155 
1156 	/* V0 is sufficient for ReportCrash */
1157 	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1158 	rup.ri.ri_phys_footprint = 0;
1159 	/* Soft crash, proc did not exit */
1160 	rup.ri.ri_proc_exit_abstime = 0;
1161 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1162 		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1163 	}
1164 
1165 	platform = proc_platform(current_proc());
1166 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1167 		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1168 	}
1169 
1170 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1171 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1172 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1173 		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1174 		zfree(ZV_NAMEI, buf);
1175 	}
1176 
1177 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1178 		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1179 	}
1180 
1181 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1182 		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1183 	}
1184 
1185 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1186 		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1187 		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1188 	}
1189 
1190 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1191 		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1192 		if (has_64bit_addr) {
1193 			cputype |= CPU_ARCH_ABI64;
1194 		} else if (has_64bit_data) {
1195 			cputype |= CPU_ARCH_ABI64_32;
1196 		}
1197 		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1198 	}
1199 
1200 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1201 		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1202 	}
1203 
1204 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1205 		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1206 	}
1207 
1208 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1209 		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1210 	}
1211 
1212 	assert(codeCnt <= EXCEPTION_CODE_MAX);
1213 
1214 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1215 	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1216 		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1217 	}
1218 
1219 	if (reason != OS_REASON_NULL) {
1220 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1221 			struct exit_reason_snapshot ers = {
1222 				.ers_namespace = reason->osr_namespace,
1223 				.ers_code = reason->osr_code,
1224 				.ers_flags = reason->osr_flags
1225 			};
1226 
1227 			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1228 		}
1229 
1230 		if (reason->osr_kcd_buf != 0) {
1231 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1232 			assert(reason_buf_size != 0);
1233 
1234 			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1235 				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1236 			}
1237 		}
1238 	}
1239 
1240 	threadname[0] = '\0';
1241 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1242 	    sizeof(threadname), &kaddr)) {
1243 		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1244 		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1245 	}
1246 
1247 	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1248 	if (kr == KERN_TERMINATED) {
1249 		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1250 	}
1251 
1252 
1253 	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1254 	    sizeof(uint64_t), &kaddr);
1255 
1256 	/*
1257 	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to the
1258 	 * exception handler (ReportCrash) that lightweight corpse collection ran
1259 	 * out of space and the result is incomplete.
1260 	 */
1261 	if (last_kr != KERN_SUCCESS) {
1262 		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1263 	}
1264 
1265 	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1266 		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1267 	}
1268 
1269 	/* Lastly, copy the flags to the address we reserved at the beginning. */
1270 	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1271 
1272 	*new_desc = kcdata;
1273 
1274 	return KERN_SUCCESS;
1275 }
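
/*
 * Consumer-side sketch (illustrative, not part of this file): a reader of the
 * copied-out blob can walk it with the kcdata iterator API. Here `buf` and
 * `size` are placeholders for the received buffer and its length:
 *
 *	kcdata_iter_t it = kcdata_iter(buf, size);
 *	for (; kcdata_iter_valid(it); it = kcdata_iter_next(it)) {
 *		if (kcdata_iter_type(it) == TASK_BTINFO_PROC_PATH) {
 *			printf("%s\n", (char *)kcdata_iter_payload(it));
 *		}
 *	}
 */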
1276 
1277 /*
1278  * We only parse exit reason kcdata blobs for critical processes before they
1279  * die and we're about to panic, or for opt-in, limited diagnostic tools.
1280  *
1281  * Meant to be called immediately before panicking, or in limited diagnostic
1282  * scenarios.
1283  */
1284 char *
1285 exit_reason_get_string_desc(os_reason_t exit_reason)
1286 {
1287 	kcdata_iter_t iter;
1288 
1289 	if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1290 	    exit_reason->osr_bufsize == 0) {
1291 		return NULL;
1292 	}
1293 
1294 	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1295 	if (!kcdata_iter_valid(iter)) {
1296 #if DEBUG || DEVELOPMENT
1297 		printf("exit reason has invalid exit reason buffer\n");
1298 #endif
1299 		return NULL;
1300 	}
1301 
1302 	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1303 #if DEBUG || DEVELOPMENT
1304 		printf("exit reason buffer type mismatch, expected %d got %d\n",
1305 		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1306 #endif
1307 		return NULL;
1308 	}
1309 
1310 	iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1311 	if (!kcdata_iter_valid(iter)) {
1312 		return NULL;
1313 	}
1314 
1315 	return (char *)kcdata_iter_payload(iter);
1316 }
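
/*
 * Usage sketch (illustrative): callers on the panic path consume the returned
 * pointer in place, since the process is about to die anyway, e.g.:
 *
 *	char *desc = exit_reason_get_string_desc(p->p_exit_reason);
 *	panic("process exited: %s", desc ? desc : "none");
 */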
1317 
1318 static int initproc_spawned = 0;
1319 
1320 static int
1321 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1322 {
1323 	if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1324 		// Can only ever be set by launchd, and only once at boot
1325 		return EPERM;
1326 	}
1327 	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1328 }
1329 
1330 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1331     CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1332     sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
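
/*
 * Usage sketch (hypothetical userspace code): launchd sets this exactly once
 * when it reaches main(); any later write, or a write from another pid, gets
 * EPERM from the handler above:
 *
 *	int spawned = 1;
 *	sysctlbyname("kern.initproc_spawned", NULL, NULL, &spawned, sizeof(spawned));
 */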
1333 
1334 #if DEVELOPMENT || DEBUG
1335 
1336 /* disable user faults */
1337 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1338 #endif /* DEVELOPMENT || DEBUG */
1339 
1340 #define OS_REASON_IFLAG_USER_FAULT 0x1
1341 
1342 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC  5
1343 
1344 static int
1345 abort_with_payload_internal(proc_t p,
1346     uint32_t reason_namespace, uint64_t reason_code,
1347     user_addr_t payload, uint32_t payload_size,
1348     user_addr_t reason_string, uint64_t reason_flags,
1349     uint32_t internal_flags)
1350 {
1351 	os_reason_t exit_reason = OS_REASON_NULL;
1352 	kern_return_t kr = KERN_SUCCESS;
1353 
1354 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1355 		uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1356 		    memory_order_relaxed);
1357 
1358 #if DEVELOPMENT || DEBUG
1359 		if (bootarg_disable_user_faults) {
1360 			return EQFULL;
1361 		}
1362 #endif /* DEVELOPMENT || DEBUG */
1363 
1364 		for (;;) {
1365 			if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1366 				return EQFULL;
1367 			}
1368 			// this reloads the value in old_value
1369 			if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1370 			    &old_value, old_value + 1, memory_order_relaxed,
1371 			    memory_order_relaxed)) {
1372 				break;
1373 			}
1374 		}
1375 	}
1376 
1377 	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1378 	    proc_getpid(p), reason_namespace,
1379 	    reason_code, 0, 0);
1380 
1381 	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1382 	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1383 
1384 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1385 		mach_exception_code_t code = 0;
1386 
1387 		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1388 		EXC_GUARD_ENCODE_FLAVOR(code, 0);
1389 		EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1390 
1391 		if (exit_reason == OS_REASON_NULL) {
1392 			kr = KERN_RESOURCE_SHORTAGE;
1393 		} else {
1394 			kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1395 		}
1396 		os_reason_free(exit_reason);
1397 	} else {
1398 		/*
1399 		 * We use SIGABRT (rather than calling exit directly from here) so that
1400 		 * the debugger can catch abort_with_{reason,payload} calls.
1401 		 */
1402 		psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1403 	}
1404 
1405 	switch (kr) {
1406 	case KERN_SUCCESS:
1407 		return 0;
1408 	case KERN_NOT_SUPPORTED:
1409 		return ENOTSUP;
1410 	case KERN_INVALID_ARGUMENT:
1411 		return EINVAL;
1412 	case KERN_RESOURCE_SHORTAGE:
1413 	default:
1414 		return EBUSY;
1415 	}
1416 }
1417 
1418 int
1419 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1420     __unused void *retval)
1421 {
1422 	abort_with_payload_internal(cur_proc, args->reason_namespace,
1423 	    args->reason_code, args->payload, args->payload_size,
1424 	    args->reason_string, args->reason_flags, 0);
1425 
1426 	return 0;
1427 }
1428 
1429 int
1430 os_fault_with_payload(struct proc *cur_proc,
1431     struct os_fault_with_payload_args *args, __unused int *retval)
1432 {
1433 	return abort_with_payload_internal(cur_proc, args->reason_namespace,
1434 	           args->reason_code, args->payload, args->payload_size,
1435 	           args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1436 }
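
/*
 * Usage sketch (hypothetical; the userspace wrappers are declared in private
 * SDK headers, so exact availability is an assumption):
 *
 *	abort_with_payload(OS_REASON_TEST, 1, NULL, 0,
 *	    "illustrative abort", 0);	// delivers SIGABRT; does not normally return
 *	os_fault_with_payload(OS_REASON_TEST, 2, NULL, 0,
 *	    "illustrative fault", 0);	// simulated EXC_GUARD; returns, rate-limited
 */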
1437 
1438 
1439 /*
1440  * exit --
1441  *	Death of process.
1442  */
1443 __attribute__((noreturn))
1444 void
1445 exit(proc_t p, struct exit_args *uap, int *retval)
1446 {
1447 	p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1448 	exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1449 
1450 	thread_exception_return();
1451 	/* NOTREACHED */
1452 	while (TRUE) {
1453 		thread_block(THREAD_CONTINUE_NULL);
1454 	}
1455 	/* NOTREACHED */
1456 }
1457 
1458 /*
1459  * Exit: deallocate address space and other resources, change proc state
1460  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
1461  * status and rusage for wait().  Check for child processes and orphan them.
1462  */
1463 int
1464 exit1(proc_t p, int rv, int *retval)
1465 {
1466 	return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1467 }
1468 
1469 int
1470 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1471     int jetsam_flags)
1472 {
1473 	return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1474 }
1475 
1476 /*
1477  * NOTE: exit_with_reason drops a reference on the passed exit_reason
1478  */
1479 int
1480 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1481     int jetsam_flags, struct os_reason *exit_reason)
1482 {
1483 	thread_t self = current_thread();
1484 	struct task *task = proc_task(p);
1485 	struct uthread *ut;
1486 	int error = 0;
1487 	bool proc_exiting = false;
1488 
1489 #if DEVELOPMENT || DEBUG
1490 	/*
1491 	 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1492 	 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1493 	 */
1494 	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1495 		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1496 		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1497 	}
1498 #endif
1499 
1500 	/*
1501 	 * If a thread in this task has already
1502 	 * called exit(), then halt any others
1503 	 * right here.
1504 	 */
1505 
1506 	ut = get_bsdthread_info(self);
1507 	(void)retval;
1508 
1509 	/*
1510 	 * The parameter list of audit_syscall_exit() was augmented to
1511 	 * take the Darwin syscall number as the first parameter,
1512 	 * which is currently required by mac_audit_postselect().
1513 	 */
1514 
1515 	/*
1516 	 * The BSM token contains two components: an exit status as passed
1517 	 * to exit(), and a return value to indicate what sort of exit it
1518 	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
1519 	 * what the return value is.
1520 	 */
1521 	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1522 	/*
1523 	 * TODO: what to audit here when jetsam calls exit and the uthread 'ut'
1524 	 * does not belong to the proc 'p'.
1525 	 */
1526 	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1527 
1528 	DTRACE_PROC1(exit, int, CLD_EXITED);
1529 
1530 	/* mark that the process is going to exit and pull it out of DBG/disk throttle */
1531 	/* TODO: This should be done after becoming exit thread */
1532 	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1533 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1534 
1535 	proc_lock(p);
1536 	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1537 	if (error == EDEADLK) {
1538 		/*
1539 		 * If proc_transstart() returns EDEADLK, then another thread
1540 		 * is either exec'ing or exiting. Return an error and allow
1541 		 * the other thread to continue.
1542 		 */
1543 		proc_unlock(p);
1544 		os_reason_free(exit_reason);
1545 		if (current_proc() == p) {
1546 			if (p->exit_thread == self) {
1547 				panic("exit_thread failed to exit");
1548 			}
1549 
1550 			if (thread_can_terminate) {
1551 				thread_exception_return();
1552 			}
1553 		}
1554 
1555 		return error;
1556 	}
1557 
1558 	proc_exiting = !!(p->p_lflag & P_LEXIT);
1559 
1560 	while (proc_exiting || p->exit_thread != self) {
1561 		if (proc_exiting || sig_try_locked(p) <= 0) {
1562 			proc_transend(p, 1);
1563 			os_reason_free(exit_reason);
1564 
1565 			if (get_threadtask(self) != task) {
1566 				proc_unlock(p);
1567 				return 0;
1568 			}
1569 			proc_unlock(p);
1570 
1571 			thread_terminate(self);
1572 			if (!thread_can_terminate) {
1573 				return 0;
1574 			}
1575 
1576 			thread_exception_return();
1577 			/* NOTREACHED */
1578 		}
1579 		sig_lock_to_exit(p);
1580 	}
1581 
1582 	if (exit_reason != OS_REASON_NULL) {
1583 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1584 		    proc_getpid(p), exit_reason->osr_namespace,
1585 		    exit_reason->osr_code, 0, 0);
1586 	}
1587 
1588 	assert(p->p_exit_reason == OS_REASON_NULL);
1589 	p->p_exit_reason = exit_reason;
1590 
1591 	p->p_lflag |= P_LEXIT;
1592 	p->p_xstat = rv;
1593 	p->p_lflag |= jetsam_flags;
1594 
1595 	proc_transend(p, 1);
1596 	proc_unlock(p);
1597 
1598 	proc_prepareexit(p, rv, perf_notify);
1599 
1600 	/* Last thread to terminate will call proc_exit() */
1601 	task_terminate_internal(task);
1602 
1603 	return 0;
1604 }
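
/*
 * Kernel-side usage sketch (illustrative; `kill_cause` and `flags` are
 * placeholders): a jetsam-style kill builds a reason and hands it off, and
 * exit_with_reason() drops the reference on every path:
 *
 *	os_reason_t r = os_reason_create(OS_REASON_JETSAM, kill_cause);
 *	exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL, FALSE, FALSE, flags, r);
 */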
1605 
1606 #if CONFIG_MEMORYSTATUS
1607 /*
1608  * Remove this process from jetsam bands for freezing or exiting. Note this will block if the process
1609  * is currently being frozen.
1610  * The proc_list_lock is held by the caller.
1611  * NB: If the process should be ineligible for future freezing or jetsam kills, the caller should first set
1612  * the p_refcount P_REF_DEAD bit.
1613  */
1614 static void
1615 proc_memorystatus_remove(proc_t p)
1616 {
1617 	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1618 	while (memorystatus_remove(p) == EAGAIN) {
1619 		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1620 		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1621 	}
1622 }
1623 #endif
1624 
1625 #if DEVELOPMENT
1626 boolean_t crash_behavior_test_mode = FALSE;
1627 boolean_t crash_behavior_test_would_panic = FALSE;
1628 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1629 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1630 #endif /* DEVELOPMENT */
1631 
1632 static bool
1633 _proc_is_crashing_signal(int sig)
1634 {
1635 	bool result = false;
1636 	switch (sig) {
1637 	case SIGILL:
1638 	case SIGABRT:
1639 	case SIGFPE:
1640 	case SIGBUS:
1641 	case SIGSEGV:
1642 	case SIGSYS:
1643 	/*
1644 	 * If SIGTRAP is the terminating signal, then we can safely assume the
1645 	 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1646 	 * a process calls __builtin_trap(), which will abort.)
1647 	 */
1648 	case SIGTRAP:
1649 		result = true;
1650 	}
1651 
1652 	return result;
1653 }
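
/*
 * Worked example (illustrative):
 *
 *	_proc_is_crashing_signal(SIGSEGV);	// true
 *	_proc_is_crashing_signal(SIGTRAP);	// true, per the comment above
 *	_proc_is_crashing_signal(SIGTERM);	// false; special-cased below
 */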
1654 
1655 static bool
1656 _proc_is_fatal_reason(os_reason_t reason)
1657 {
1658 	if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1659 		/* Abort is always fatal even if there is no crash report generated */
1660 		return true;
1661 	}
1662 	if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1663 		/*
1664 		 * No crash report means this reason shouldn't be considered fatal
1665 		 * unless we are in test mode
1666 		 */
1667 #if DEVELOPMENT
1668 		if (crash_behavior_test_mode) {
1669 			return true;
1670 		}
1671 #endif /* DEVELOPMENT */
1672 		return false;
1673 	}
1674 	// By default all OS_REASON are fatal
1675 	return true;
1676 }
1677 
1678 static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1679 
1680 static bool
1681 proc_should_trigger_panic(proc_t p, int rv)
1682 {
1683 	if (p == initproc) {
1684 		/* Always panic for launchd */
1685 		return true;
1686 	}
1687 
1688 	if (panic_on_crash_disabled) {
1689 		printf("panic-on-crash disabled via boot-arg\n");
1690 		return false;
1691 	}
1692 
1693 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1694 		return true;
1695 	}
1696 
1697 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1698 		return true;
1699 	}
1700 
1701 	if (p->p_posix_spawn_failed) {
1702 		/* posix_spawn failures normally don't qualify for panics */
1703 		return false;
1704 	}
1705 
1706 	bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1707 	if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1708 		return false;
1709 	}
1710 
1711 	if (WIFEXITED(rv)) {
1712 		int code = WEXITSTATUS(rv);
1713 
1714 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1715 			if (code == 0) {
1716 				/* No panic if we exit 0 */
1717 				return false;
1718 			} else {
1719 				/* Panic on non-zero exit */
1720 				return true;
1721 			}
1722 		} else {
1723 			/* No panic on normal exit if the process doesn't have the non-zero flag set */
1724 			return false;
1725 		}
1726 	} else if (WIFSIGNALED(rv)) {
1727 		int signal = WTERMSIG(rv);
1728 		/* This is a crash (non-normal exit) */
1729 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1730 			os_reason_t reason = p->p_exit_reason;
1731 			if (reason != OS_REASON_NULL) {
1732 				if (!_proc_is_fatal_reason(reason)) {
1733 					// Skip non-fatal terminate_with_reason
1734 					return false;
1735 				}
1736 				if (reason->osr_namespace == OS_REASON_SIGNAL) {
1737 					/*
1738 					 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
1739 					 * in osr_code, so we should check that signal here
1740 					 */
1741 					return _proc_is_crashing_signal((int)reason->osr_code);
1742 				} else {
1743 					/*
1744 				 * This branch covers the case of terminate_with_reason, which
1745 				 * delivers a SIGTERM; this is still considered a crash even
1746 				 * though the signal is not a crashing signal.
1747 					 */
1748 					return true;
1749 				}
1750 			}
1751 			return _proc_is_crashing_signal(signal);
1752 		} else {
1753 			return false;
1754 		}
1755 	} else {
1756 		/*
1757 		 * This branch implies that we didn't exit normally nor did we receive
1758 		 * a signal. This should be unreachable.
1759 		 */
1760 		return true;
1761 	}
1762 }
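
/*
 * Worked example (illustrative): for a process spawned with only
 * POSIX_SPAWN_PANIC_ON_CRASH set, no os_reason attached, and no deadline:
 *
 *	proc_should_trigger_panic(p, W_EXITCODE(0, SIGSEGV));	// true: crashing signal
 *	proc_should_trigger_panic(p, W_EXITCODE(1, 0));		// false: needs ..._ON_NON_ZERO_EXIT
 */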
1763 
1764 static void
1765 proc_crash_coredump(proc_t p)
1766 {
1767 	(void)p;
1768 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1769 	/*
1770 	 * For debugging purposes, generate a core file of initproc before
1771 	 * panicking. Leave at least 300 MB free on the root volume, and ignore
1772 	 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1773 	 * before the panic hits.
1774 	 */
1775 
1776 	int             err;
1777 	uint64_t        coredump_start = mach_absolute_time();
1778 	uint64_t        coredump_end;
1779 	clock_sec_t     tv_sec;
1780 	clock_usec_t    tv_usec;
1781 	uint32_t        tv_msec;
1782 
1783 
1784 	err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1785 
1786 	coredump_end = mach_absolute_time();
1787 
1788 	absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1789 
1790 	tv_msec = tv_usec / 1000;
1791 
1792 	if (err != 0) {
1793 		printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1794 		    proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1795 	} else {
1796 		printf("Generated core file for pid: %d in %d.%03d seconds\n",
1797 		    proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1798 	}
1799 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1800 }
1801 
1802 static void
1803 proc_handle_critical_exit(proc_t p, int rv)
1804 {
1805 	if (!proc_should_trigger_panic(p, rv)) {
1806 		// No panic, bail out
1807 		return;
1808 	}
1809 
1810 #if DEVELOPMENT
1811 	if (crash_behavior_test_mode) {
1812 		crash_behavior_test_would_panic = TRUE;
1813 		// Force test mode off after hitting a panic
1814 		crash_behavior_test_mode = FALSE;
1815 		return;
1816 	}
1817 #endif /* DEVELOPMENT */
1818 
1819 	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1820 
1821 	if (p->p_exit_reason == OS_REASON_NULL) {
1822 		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1823 		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1824 	} else {
1825 		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1826 		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1827 		    exit_reason_desc : "none");
1828 	}
1829 
1830 	const char *prefix_str;
1831 	char prefix_str_buf[128];
1832 
1833 	if (p == initproc) {
1834 		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1835 			prefix_str = "LTE preinit process exited";
1836 		} else if (initproc_spawned) {
1837 			prefix_str = "initproc exited";
1838 		} else {
1839 			prefix_str = "initproc failed to start";
1840 		}
1841 	} else {
1842 		/* For processes that aren't launchd, just use the process name and pid */
1843 		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1844 		prefix_str = prefix_str_buf;
1845 	}
1846 
1847 	proc_crash_coredump(p);
1848 
1849 	sync(p, (void *)NULL, (int *)NULL);
1850 
1851 	if (p->p_exit_reason == OS_REASON_NULL) {
1852 		panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1853 		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1854 	} else {
1855 		panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1856 		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1857 		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1858 		    exit_reason_desc ? exit_reason_desc : "none");
1859 	}
1860 }
1861 
1862 void
1863 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1864 {
1865 	mach_exception_data_type_t code = 0, subcode = 0;
1866 	exception_type_t etype;
1867 
1868 	struct uthread *ut;
1869 	thread_t self = current_thread();
1870 	ut = get_bsdthread_info(self);
1871 	struct rusage_superset *rup;
1872 	int kr = 0;
1873 	int create_corpse = FALSE;
1874 	bool corpse_source = false;
1875 	task_t task = proc_task(p);
1876 
1877 
1878 	if (p->p_crash_behavior != 0 || p == initproc) {
1879 		proc_handle_critical_exit(p, rv);
1880 	}
1881 
1882 	if (task) {
1883 		corpse_source = vm_map_is_corpse_source(get_task_map(task));
1884 	}
1885 
1886 	/*
1887 	 * Generate a corefile/crashlog if:
1888 	 *      The process doesn't have an exit reason that indicates no crash report should be created
1889 	 *      AND any of the following are true:
1890 	 *	- The process was terminated due to a fatal signal that generates a core
1891 	 *	- The process was killed due to a code signing violation
1892 	 *	- The process has an exit reason that indicates we should generate a crash report
1893 	 *
1894 	 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1895 	 * (which normally triggers a core) but may indicate that no crash report should be created.
1896 	 */
1897 	if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1898 	    (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1899 	    (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1900 	    OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1901 		/*
1902 		 * Workaround for processes checking up on PT_DENY_ATTACH:
1903 		 * should be backed out post-Leopard (details in 5431025).
1904 		 */
1905 		if ((SIGSEGV == WTERMSIG(rv)) &&
1906 		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
1907 			goto skipcheck;
1908 		}
1909 
1910 		/*
1911 		 * Crash Reporter looks for the signal value, original exception
1912 		 * type, and low 20 bits of the original code in code[0]
1913 		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1914 		 */
1915 		code = ((WTERMSIG(rv) & 0xff) << 24) |
1916 		    ((ut->uu_exception & 0x0f) << 20) |
1917 		    ((int)ut->uu_code & 0xfffff);
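		/*
		 * Worked example (illustrative): SIGSEGV (11) from EXC_BAD_ACCESS (1)
		 * with code KERN_INVALID_ADDRESS (1) packs as
		 * (11 << 24) | (1 << 20) | 1 == 0x0b100001.
		 */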
1918 		subcode = ut->uu_subcode;
1919 		etype = ut->uu_exception;
1920 
1921 		/* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1922 		if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1923 			etype = EXC_CRASH;
1924 		}
1925 
1926 #if (DEVELOPMENT || DEBUG)
1927 		if (p->p_pid <= exception_log_max_pid) {
1928 			char *proc_name = proc_best_name(p);
1929 			if (PROC_HAS_EXITREASON(p)) {
1930 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1931 				    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1932 				    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1933 				    exit_reason_get_string_desc(p->p_exit_reason));
1934 			} else {
1935 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1936 				    "pid: %d -- process name: %s -- exit status %d",
1937 				    proc_getpid(p), proc_name, WEXITSTATUS(rv));
1938 			}
1939 		}
1940 #endif
1941 		const bool fatal = false;
1942 		kr = task_exception_notify(EXC_CRASH, code, subcode, fatal);
1943 		/* Nobody handled EXC_CRASH? Remember to make a corpse. */
1944 		if ((kr != 0 || corpse_source) && p == current_proc()) {
1945 			/*
1946 			 * Do not create corpse when exit is called from jetsam thread.
1947 			 * Corpse creation code requires that proc_prepareexit is
1948 			 * called by the exiting proc and not the kernel_proc.
1949 			 */
1950 			create_corpse = TRUE;
1951 		}
1952 
1953 		/*
1954 		 * Revalidate the code signing of the text pages around current PC.
1955 		 * This is an attempt to detect and repair faults due to memory
1956 		 * corruption of text pages.
1957 		 *
1958 		 * The goal here is to fixup infrequent memory corruptions due to
1959 		 * things like aging RAM bit flips. So the approach is to only expect
1960 		 * to have to fixup one thing per crash. This also limits the amount
1961 		 * of extra work we cause in case this is a development kernel with an
1962 		 * active memory stomp happening.
1963 		 */
1964 		uintptr_t bt[2];
1965 		struct backtrace_user_info btinfo = BTUINFO_INIT;
1966 		unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
1967 		int bt_err = btinfo.btui_error;
1968 		if (bt_err == 0 && frame_count >= 1) {
1969 			/*
1970 			 * First check at the page containing the current PC.
1971 			 * This passes if the page code signs -or- if we can't figure out
1972 			 * what is at that address. The latter action is so we continue checking
1973 			 * previous pages which may be corrupt and caused a wild branch.
1974 			 */
1975 			kr = revalidate_text_page(task, bt[0]);
1976 
1977 			/* No corruption found, check the previous sequential page */
1978 			if (kr == KERN_SUCCESS) {
1979 				kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
1980 			}
1981 
1982 			/* Still no corruption found, check the current function's caller */
1983 			if (kr == KERN_SUCCESS) {
1984 				if (frame_count > 1 &&
1985 				    atop(bt[0]) != atop(bt[1]) &&           /* don't recheck PC page */
1986 				    atop(bt[0]) - 1 != atop(bt[1])) {       /* don't recheck page before */
1987 					kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
1988 				}
1989 			}
1990 
1991 			/*
1992 			 * Log that we found a corruption.
1993 			 */
1994 			if (kr != KERN_SUCCESS) {
1995 				os_log(OS_LOG_DEFAULT,
1996 				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
1997 			}
1998 		}
1999 	}
2000 
2001 skipcheck:
2002 	if (task_is_driver(task) && PROC_HAS_EXITREASON(p)) {
2003 		IOUserServerRecordExitReason(task, p->p_exit_reason);
2004 	}
2005 
2006 	/* Notify the perf server? */
2007 	if (perf_notify) {
2008 		(void)sys_perf_notify(self, proc_getpid(p));
2009 	}
2010 
2011 
2012 	/* stash the usage into corpse data if create_corpse == TRUE */
2013 	if (create_corpse == TRUE) {
2014 		kr = task_mark_corpse(task);
2015 		if (kr != KERN_SUCCESS) {
2016 			if (kr == KERN_NO_SPACE) {
2017 				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
2018 			} else if (kr == KERN_NOT_SUPPORTED) {
2019 				printf("Process[%d] was destined to be a corpse, but corpses are disabled by config.\n", proc_getpid(p));
2020 			} else if (kr == KERN_TERMINATED) {
2021 				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
2022 			} else {
2023 				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
2024 			}
2025 			create_corpse = FALSE;
2026 		}
2027 	}
2028 
2029 	if (corpse_source && !create_corpse) {
2030 		/* vm_map was marked for corpse, but we decided not to create one; unmark the vm_map */
2031 		vm_map_unset_corpse_source(get_task_map(task));
2032 	}
2033 
2034 	if (!proc_is_shadow(p)) {
2035 		/*
2036 		 * Before this process becomes a zombie, stash resource usage
2037 		 * stats in the proc for external observers to query
2038 		 * via proc_pid_rusage().
2039 		 *
2040 		 * If the zombie allocation fails, just punt the stats.
2041 		 */
2042 		rup = zalloc(zombie_zone);
2043 		gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2044 		rup->ri.ri_phys_footprint = 0;
2045 		rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2046 		/*
2047 		 * Make the rusage_info visible to external observers
2048 		 * only after it has been completely filled in.
2049 		 */
2050 		p->p_ru = rup;
2051 	}
2052 
2053 	if (create_corpse) {
2054 		int est_knotes = 0, num_knotes = 0;
2055 		uint64_t *buffer = NULL;
2056 		uint32_t buf_size = 0;
2057 
2058 		/* Get all the udata pointers from kqueue */
2059 		est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2060 		if (est_knotes > 0) {
2061 			buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2062 			buffer = kalloc_data(buf_size, Z_WAITOK);
2063 			if (buffer) {
2064 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2065 				if (num_knotes > est_knotes + 32) {
2066 					num_knotes = est_knotes + 32;
2067 				}
2068 			}
2069 		}
2070 
2071 		/* Update the code, subcode based on exit reason */
2072 		proc_update_corpse_exception_codes(p, &code, &subcode);
2073 		populate_corpse_crashinfo(p, task, rup,
2074 		    code, subcode, buffer, num_knotes, NULL, etype);
2075 		kfree_data(buffer, buf_size);
2076 	}
2077 	/*
2078 	 * Remove proc from allproc queue and from pidhash chain.
2079 	 * Need to do this before we do anything that can block.
2080 	 * Not doing so causes things like mount() to find this proc on allproc
2081 	 * in a partially cleaned state.
2082 	 */
2083 
2084 	proc_list_lock();
2085 
2086 #if CONFIG_MEMORYSTATUS
2087 	proc_memorystatus_remove(p);
2088 #endif
2089 
2090 	LIST_REMOVE(p, p_list);
2091 	LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2092 	/* will not be visible via proc_find */
2093 	os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2094 
2095 	proc_list_unlock();
2096 
2097 	/*
2098 	 * If parent is waiting for us to exit or exec,
2099 	 * P_LPPWAIT is set; we will wakeup the parent below.
2100 	 */
2101 	proc_lock(p);
2102 	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2103 	p->p_sigignore = ~(sigcantmask);
2104 
2105 	/*
2106 	 * If a thread is already waiting for us in proc_exit,
2107 	 * P_LTERM is set, wakeup the thread.
2108 	 */
2109 	if (p->p_lflag & P_LTERM) {
2110 		wakeup(&p->exit_thread);
2111 	} else {
2112 		p->p_lflag |= P_LTERM;
2113 	}
2114 
2115 	/* If current proc is exiting, ignore signals on the exit thread */
2116 	if (p == current_proc()) {
2117 		ut->uu_siglist = 0;
2118 	}
2119 	proc_unlock(p);
2120 }
2121 
2122 void
2123 proc_exit(proc_t p)
2124 {
2125 	proc_t q;
2126 	proc_t pp;
2127 	struct task *task = proc_task(p);
2128 	vnode_t tvp = NULLVP;
2129 	struct pgrp * pg;
2130 	struct session *sessp;
2131 	struct uthread * uth;
2132 	pid_t pid;
2133 	int exitval;
2134 	int knote_hint;
2135 
2136 	uth = current_uthread();
2137 
2138 	proc_lock(p);
2139 	proc_transstart(p, 1, 0);
2140 	if (!(p->p_lflag & P_LEXIT)) {
2141 		/*
2142 		 * This can happen if a thread_terminate() occurs
2143 		 * in a single-threaded process.
2144 		 */
2145 		p->p_lflag |= P_LEXIT;
2146 		proc_transend(p, 1);
2147 		proc_unlock(p);
2148 		proc_prepareexit(p, 0, TRUE);
2149 		(void) task_terminate_internal(task);
2150 		proc_lock(p);
2151 	} else if (!(p->p_lflag & P_LTERM)) {
2152 		proc_transend(p, 1);
2153 		/* Jetsam is in middle of calling proc_prepareexit, wait for it */
2154 		p->p_lflag |= P_LTERM;
2155 		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2156 	} else {
2157 		proc_transend(p, 1);
2158 	}
2159 
2160 	p->p_lflag |= P_LPEXIT;
2161 
2162 	/*
2163 	 * Other kernel threads may be in the middle of signalling this process.
2164 	 * Wait for those threads to wrap it up before making the process
2165 	 * disappear on them.
2166 	 */
2167 	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2168 		p->p_sigwaitcnt++;
2169 		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2170 			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2171 		}
2172 		p->p_sigwaitcnt--;
2173 	}
2174 
2175 	proc_unlock(p);
2176 	pid = proc_getpid(p);
2177 	exitval = p->p_xstat;
2178 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2179 	    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2180 	    pid, exitval, 0, 0, 0);
2181 
2182 #if DEVELOPMENT || DEBUG
2183 	proc_exit_lpexit_check(pid, PELS_POS_START);
2184 #endif
2185 
2186 #if CONFIG_DTRACE
2187 	dtrace_proc_exit(p);
2188 #endif
2189 
2190 	proc_refdrain(p);
2191 	/* We now have unique ref to the proc */
2192 
2193 	/* if any pending cpu limits action, clear it */
2194 	task_clear_cpuusage(proc_task(p), TRUE);
2195 
2196 	workq_mark_exiting(p);
2197 
2198 	/*
2199 	 * need to cancel async IO requests that can be cancelled and wait for those
2200 	 * already active.  MAY BLOCK!
2201 	 */
2202 	_aio_exit( p );
2203 
2204 	/*
2205 	 * Close open files and release open-file table.
2206 	 * This may block!
2207 	 */
2208 	fdt_invalidate(p);
2209 
2210 	/*
2211 	 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2212 	 * workqueue.
2213 	 */
2214 	workq_exit(p);
2215 
2216 	if (uth->uu_lowpri_window) {
2217 		/*
2218 		 * task is marked as a low priority I/O type
2219 		 * and the I/O we issued while flushing files on close
2220 		 * collided with normal I/O operations...
2221 		 * no need to throttle this thread since it's going away,
2222 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2223 		 */
2224 		throttle_lowpri_io(0);
2225 	}
2226 
2227 	if (p->p_lflag & P_LNSPACE_RESOLVER) {
2228 		/*
2229 		 * The namespace resolver is exiting; there may be
2230 		 * outstanding materialization requests to clean up.
2231 		 */
2232 		nspace_resolver_exited(p);
2233 	}
2234 
2235 #if SYSV_SHM
2236 	/* Close ref SYSV Shared memory*/
2237 	if (p->vm_shm) {
2238 		shmexit(p);
2239 	}
2240 #endif
2241 #if SYSV_SEM
2242 	/* Release SYSV semaphores */
2243 	semexit(p);
2244 #endif
2245 
2246 #if PSYNCH
2247 	pth_proc_hashdelete(p);
2248 #endif /* PSYNCH */
2249 
2250 	pg = proc_pgrp(p, &sessp);
2251 	if (SESS_LEADER(p, sessp)) {
2252 		if (sessp->s_ttyvp != NULLVP) {
2253 			struct vnode *ttyvp;
2254 			int ttyvid;
2255 			int cttyflag = 0;
2256 			struct vfs_context context;
2257 			struct tty *tp;
2258 			struct pgrp *tpgrp = PGRP_NULL;
2259 
2260 			/*
2261 			 * Controlling process.
2262 			 * Signal foreground pgrp,
2263 			 * drain controlling terminal
2264 			 * and revoke access to controlling terminal.
2265 			 */
2266 
2267 			proc_list_lock(); /* prevent any t_pgrp from changing */
2268 			session_lock(sessp);
2269 			if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2270 				tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2271 			}
2272 			proc_list_unlock();
2273 
2274 			if (tpgrp != PGRP_NULL) {
2275 				session_unlock(sessp);
2276 				pgsignal(tpgrp, SIGHUP, 1);
2277 				pgrp_rele(tpgrp);
2278 				session_lock(sessp);
2279 			}
2280 
2281 			cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2282 			    S_CTTYREF, relaxed) & S_CTTYREF);
2283 			ttyvp = sessp->s_ttyvp;
2284 			ttyvid = sessp->s_ttyvid;
2285 			tp = session_clear_tty_locked(sessp);
2286 			if (ttyvp) {
2287 				vnode_hold(ttyvp);
2288 			}
2289 			session_unlock(sessp);
2290 
2291 			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2292 				if (tp != TTY_NULL) {
2293 					tty_lock(tp);
2294 					(void) ttywait(tp);
2295 					tty_unlock(tp);
2296 				}
2297 
2298 				context.vc_thread = NULL;
2299 				context.vc_ucred = kauth_cred_proc_ref(p);
2300 				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2301 				if (cttyflag) {
2302 					/*
2303 					 * Release the extra usecount taken in cttyopen.
2304 					 * usecount should be released after VNOP_REVOKE is called.
2305 					 * This usecount was taken to ensure that
2306 					 * the VNOP_REVOKE results in a close to
2307 					 * the tty since cttyclose is a no-op.
2308 					 */
2309 					vnode_rele(ttyvp);
2310 				}
2311 				vnode_put(ttyvp);
2312 				kauth_cred_unref(&context.vc_ucred);
2313 				vnode_drop(ttyvp);
2314 				ttyvp = NULLVP;
2315 			}
2316 			if (ttyvp) {
2317 				vnode_drop(ttyvp);
2318 			}
2319 			if (tp) {
2320 				ttyfree(tp);
2321 			}
2322 		}
2323 		session_lock(sessp);
2324 		sessp->s_leader = NULL;
2325 		session_unlock(sessp);
2326 	}
2327 
2328 	if (!proc_is_shadow(p)) {
2329 		fixjobc(p, pg, 0);
2330 	}
2331 	pgrp_rele(pg);
2332 
2333 	/*
2334 	 * Change RLIMIT_FSIZE for accounting/debugging.
2335 	 */
2336 	proc_limitsetcur_fsize(p, RLIM_INFINITY);
2337 
2338 	(void)acct_process(p);
2339 
2340 	proc_list_lock();
2341 
2342 	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2343 		p->p_listflag &= ~P_LIST_EXITCOUNT;
2344 		proc_shutdown_exitcount--;
2345 		if (proc_shutdown_exitcount == 0) {
2346 			wakeup(&proc_shutdown_exitcount);
2347 		}
2348 	}
2349 
2350 	/* wait till parentrefs are dropped and grant no more */
2351 	proc_childdrainstart(p);
2352 	while ((q = p->p_children.lh_first) != NULL) {
2353 		if (q->p_stat == SZOMB) {
2354 			if (p != q->p_pptr) {
2355 				panic("parent child linkage broken");
2356 			}
2357 			/* check for sysctl zomb lookup */
2358 			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2359 				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2360 			}
2361 			q->p_listflag |= P_LIST_WAITING;
2362 			/*
2363 			 * This is a named reference and it is not granted
2364 			 * if the reap is already in progress. So we get
2365 			 * the reference here exclusively and there can be
2366 			 * no waiters. So there is no need for a wakeup
2367 			 * after we are done.  Also the reap frees the structure
2368 			 * and the proc struct cannot be used for wakeups either.
2369 			 * It is safe to use q here as this is a system reap.
2370 			 */
2371 			reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2372 			    REAP_REPARENTED_TO_INIT : 0;
2373 			reap_child_locked(p, q,
2374 			    REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2375 		} else {
2376 			/*
2377 			 * Traced processes are killed
2378 			 * since their existence means someone is messing up.
2379 			 */
2380 			if (q->p_lflag & P_LTRACED) {
2381 				struct proc *opp;
2382 
2383 				/*
2384 				 * Take a reference on the child process to
2385 				 * ensure it doesn't exit and disappear between
2386 				 * the time we drop the list_lock and attempt
2387 				 * to acquire its proc_lock.
2388 				 */
2389 				if (proc_ref(q, true) != q) {
2390 					continue;
2391 				}
2392 
2393 				proc_list_unlock();
2394 
2395 				opp = proc_find(q->p_oppid);
2396 				if (opp != PROC_NULL) {
2397 					proc_list_lock();
2398 					q->p_oppid = 0;
2399 					proc_list_unlock();
2400 					proc_reparentlocked(q, opp, 0, 0);
2401 					proc_rele(opp);
2402 				} else {
2403 					/* original parent exited while traced */
2404 					proc_list_lock();
2405 					q->p_listflag |= P_LIST_DEADPARENT;
2406 					q->p_oppid = 0;
2407 					proc_list_unlock();
2408 					proc_reparentlocked(q, initproc, 0, 0);
2409 				}
2410 
2411 				proc_lock(q);
2412 				q->p_lflag &= ~P_LTRACED;
2413 
2414 				if (q->sigwait_thread) {
2415 					thread_t thread = q->sigwait_thread;
2416 
2417 					proc_unlock(q);
2418 					/*
2419 					 * The sigwait_thread could be stopped at a
2420 					 * breakpoint. Wake it up to kill.
2421 					 * Need to do this as it could be a thread which is not
2422 					 * the first thread in the task. So any attempts to kill
2423 					 * the process would result into a deadlock on q->sigwait.
2424 					 */
2425 					thread_resume(thread);
2426 					clear_wait(thread, THREAD_INTERRUPTED);
2427 					threadsignal(thread, SIGKILL, 0, TRUE);
2428 				} else {
2429 					proc_unlock(q);
2430 				}
2431 
2432 				psignal(q, SIGKILL);
2433 				proc_list_lock();
2434 				proc_rele(q);
2435 			} else {
2436 				q->p_listflag |= P_LIST_DEADPARENT;
2437 				proc_reparentlocked(q, initproc, 0, 1);
2438 			}
2439 		}
2440 	}
2441 
2442 	proc_childdrainend(p);
2443 	proc_list_unlock();
2444 
2445 #if CONFIG_MACF
2446 	if (!proc_is_shadow(p)) {
2447 		/*
2448 		 * Notify MAC policies that proc is dead.
2449 		 * This should be replaced with proper label management
2450 		 * (rdar://problem/32126399).
2451 		 */
2452 		mac_proc_notify_exit(p);
2453 	}
2454 #endif
2455 
2456 	/*
2457 	 * Release reference to text vnode
2458 	 */
2459 	tvp = p->p_textvp;
2460 	p->p_textvp = NULL;
2461 	if (tvp != NULLVP) {
2462 		vnode_rele(tvp);
2463 	}
2464 
2465 	/*
2466 	 * Save exit status and final rusage info, adding in child rusage
2467 	 * info and self times.  If we were unable to allocate a zombie
2468 	 * structure, this information is lost.
2469 	 */
2470 	if (p->p_ru != NULL) {
2471 		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2472 		p->p_ru->ru = p->p_stats->p_ru;
2473 
2474 		ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2475 	}
2476 
2477 	/*
2478 	 * Free up profiling buffers.
2479 	 */
2480 	{
2481 		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2482 
2483 		p1 = p0->pr_next;
2484 		p0->pr_next = NULL;
2485 		p0->pr_scale = 0;
2486 
2487 		for (; p1 != NULL; p1 = pn) {
2488 			pn = p1->pr_next;
2489 			kfree_type(struct uprof, p1);
2490 		}
2491 	}
2492 
2493 	proc_free_realitimer(p);
2494 
2495 	/*
2496 	 * Other substructures are freed from wait().
2497 	 */
2498 	zfree(proc_stats_zone, p->p_stats);
2499 	p->p_stats = NULL;
2500 
2501 	if (p->p_subsystem_root_path) {
2502 		zfree(ZV_NAMEI, p->p_subsystem_root_path);
2503 		p->p_subsystem_root_path = NULL;
2504 	}
2505 
2506 	proc_limitdrop(p);
2507 
2508 #if DEVELOPMENT || DEBUG
2509 	proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2510 #endif
2511 
2512 	/*
2513 	 * Finish up by terminating the task
2514 	 * and halt this thread (only if a
2515 	 * member of the task exiting).
2516 	 */
2517 	proc_set_task(p, TASK_NULL);
2518 	set_bsdtask_info(task, NULL);
2519 	clear_thread_ro_proc(get_machthread(uth));
2520 
2521 #if DEVELOPMENT || DEBUG
2522 	proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2523 #endif
2524 
2525 	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2526 	proc_knote(p, knote_hint);
2527 
2528 	/* mark the thread as the one that is doing proc_exit;
2529 	 * no need to hold the proc lock in uthread_free
2530 	 */
2531 	uth->uu_flag |= UT_PROCEXIT;
2532 	/*
2533 	 * Notify parent that we're gone.
2534 	 */
2535 	pp = proc_parent(p);
2536 	if (proc_is_shadow(p)) {
2537 		/* kernel can reap this one, no need to move it to launchd */
2538 		proc_list_lock();
2539 		p->p_listflag |= P_LIST_DEADPARENT;
2540 		proc_list_unlock();
2541 	} else if (pp->p_flag & P_NOCLDWAIT) {
2542 		if (p->p_ru != NULL) {
2543 			proc_lock(pp);
2544 #if 3839178
2545 			/*
2546 			 * If the parent is ignoring SIGCHLD, then POSIX requires
2547 			 * us to not add the resource usage to the parent process -
2548 			 * we are only going to hand it off to init to get reaped.
2549 			 * We should contest the standard in this case on the basis
2550 			 * of RLIMIT_CPU.
2551 			 */
2552 #else   /* !3839178 */
2553 			/*
2554 			 * Add child resource usage to parent before giving
2555 			 * zombie to init.  If we were unable to allocate a
2556 			 * zombie structure, this information is lost.
2557 			 */
2558 			ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2559 #endif  /* !3839178 */
2560 			update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2561 			proc_unlock(pp);
2562 		}
2563 
2564 		/* kernel can reap this one, no need to move it to launchd */
2565 		proc_list_lock();
2566 		p->p_listflag |= P_LIST_DEADPARENT;
2567 		proc_list_unlock();
2568 	}
2569 	if (!proc_is_shadow(p) &&
2570 	    ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2571 		if (pp != initproc) {
2572 			proc_lock(pp);
2573 			pp->si_pid = proc_getpid(p);
2574 			pp->p_xhighbits = p->p_xhighbits;
2575 			p->p_xhighbits = 0;
2576 			pp->si_status = p->p_xstat;
2577 			pp->si_code = CLD_EXITED;
2578 			/*
2579 			 * p_ucred usage is safe as it is an exiting process
2580 			 * and reference is dropped in reap
2581 			 */
2582 			pp->si_uid = kauth_cred_getruid(proc_ucred_unsafe(p));
2583 			proc_unlock(pp);
2584 		}
2585 		/* mark as a zombie */
2586 		/* No need to take proc lock as all refs are drained and
2587 		 * no one except the parent (reaping) can look at this.
2588 		 * The write is to an int and is coherent. Also the parent is
2589 		 * keyed off of the list lock for reaping.
2590 		 */
2591 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2592 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2593 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2594 		    pid, exitval, 0, 0, 0);
2595 		p->p_stat = SZOMB;
2596 		/*
2597 		 * The current process can be reaped so, no one
2598 		 * can depend on this
2599 		 */
2600 
2601 		psignal(pp, SIGCHLD);
2602 
2603 		/* and now wakeup the parent */
2604 		proc_list_lock();
2605 		wakeup((caddr_t)pp);
2606 		proc_list_unlock();
2607 	} else {
2608 		/* should be fine as parent proc would be initproc */
2609 		/* mark as a zombie */
2610 		/* No need to take proc lock as all refs are drained and
2611 		 * no one except the parent (reaping) can look at this.
2612 		 * The write is to an int and is coherent. Also the parent is
2613 		 * keyed off of the list lock for reaping.
2614 		 */
2615 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2616 		proc_list_lock();
2617 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2618 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2619 		    pid, exitval, 0, 0, 0);
2620 		/* check for sysctl zomb lookup */
2621 		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2622 			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2623 		}
2624 		/* safe to use p as this is a system reap */
2625 		p->p_stat = SZOMB;
2626 		p->p_listflag |= P_LIST_WAITING;
2627 
2628 		/*
2629 		 * This is a named reference and it is not granted
2630 		 * if the reap is already in progress. So we get
2631 		 * the reference here exclusively and there can be
2632 		 * no waiters. So there is no need for a wakeup
2633 		 * after we are done. Also the reap frees the structure
2634 		 * and the proc struct cannot be used for wakeups either.
2635 		 * It is safe to use p here as this is a system reap.
2636 		 */
2637 		reap_child_locked(pp, p,
2638 		    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2639 	}
2640 	if (uth->uu_lowpri_window) {
2641 		/*
2642 		 * task is marked as a low priority I/O type and we've
2643 		 * somehow picked up another throttle during exit processing...
2644 		 * no need to throttle this thread since it's going away,
2645 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2646 		 */
2647 		throttle_lowpri_io(0);
2648 	}
2649 
2650 	proc_rele(pp);
2651 #if DEVELOPMENT || DEBUG
2652 	proc_exit_lpexit_check(pid, PELS_POS_END);
2653 #endif
2654 }
2655 
2656 
2657 /*
2658  * reap_child_locked
2659  *
2660  * Finalize a child exit once its status has been saved.
2661  *
2662  * If ptrace has attached, detach it and return it to its real parent.  Free any
2663  * remaining resources.
2664  *
2665  * Parameters:
2666  * - proc_t parent      Parent of process being reaped
2667  * - proc_t child       Process to reap
2668  * - reap_flags_t flags Control locking and re-parenting behavior
2669  */
2670 static void
2671 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2672 {
2673 	struct pgrp *pg;
2674 	boolean_t shadow_proc = proc_is_shadow(child);
2675 
2676 	if (flags & REAP_LOCKED) {
2677 		proc_list_unlock();
2678 	}
2679 
2680 	/*
2681 	 * Under ptrace, the child should now be re-parented back to its original
2682 	 * parent, unless that parent was initproc or it didn't come to initproc
2683 	 * through re-parenting.
2684 	 */
2685 	bool child_ptraced = child->p_oppid != 0;
2686 	if (!shadow_proc && child_ptraced) {
2687 		int knote_hint;
2688 		pid_t orig_ppid = 0;
2689 		proc_t orig_parent = PROC_NULL;
2690 
2691 		proc_lock(child);
2692 		orig_ppid = child->p_oppid;
2693 		child->p_oppid = 0;
2694 		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2695 		proc_unlock(child);
2696 
2697 		orig_parent = proc_find(orig_ppid);
2698 		if (orig_parent) {
2699 			/*
2700 			 * Only re-parent the process if its original parent was not
2701 			 * initproc and it did not come to initproc from re-parenting.
2702 			 */
2703 			bool reparenting = orig_parent != initproc ||
2704 			    (flags & REAP_REPARENTED_TO_INIT) == 0;
2705 			if (reparenting) {
2706 				if (orig_parent != initproc) {
2707 					/*
2708 					 * Internal fields should be safe to access here because the
2709 					 * child is exited and not reaped or re-parented yet.
2710 					 */
2711 					proc_lock(orig_parent);
2712 					orig_parent->si_pid = proc_getpid(child);
2713 					orig_parent->si_status = child->p_xstat;
2714 					orig_parent->si_code = CLD_CONTINUED;
2715 					orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2716 					proc_unlock(orig_parent);
2717 				}
2718 				proc_reparentlocked(child, orig_parent, 1, 0);
2719 
2720 				/*
2721 				 * After re-parenting, re-send the child's NOTE_EXIT to the
2722 				 * original parent.
2723 				 */
2724 				proc_knote(child, knote_hint);
2725 				psignal(orig_parent, SIGCHLD);
2726 
2727 				proc_list_lock();
2728 				wakeup((caddr_t)orig_parent);
2729 				child->p_listflag &= ~P_LIST_WAITING;
2730 				wakeup(&child->p_stat);
2731 				proc_list_unlock();
2732 
2733 				proc_rele(orig_parent);
2734 				if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2735 					proc_list_lock();
2736 				}
2737 				return;
2738 			} else {
2739 				/*
2740 				 * Satisfy the knote lifecycle because ptraced processes don't
2741 				 * broadcast NOTE_EXIT during initial child termination.
2742 				 */
2743 				proc_knote(child, knote_hint);
2744 				proc_rele(orig_parent);
2745 			}
2746 		}
2747 	}
2748 
2749 #pragma clang diagnostic push
2750 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2751 	proc_knote(child, NOTE_REAP);
2752 #pragma clang diagnostic pop
2753 
2754 	proc_knote_drain(child);
2755 
2756 	child->p_xstat = 0;
2757 	if (!shadow_proc && child->p_ru) {
2758 		/*
2759 		 * Roll up the rusage statistics to the parent, unless the parent is
2760 		 * ignoring SIGCHLD.  POSIX requires the children's resources of such a
2761 		 * parent to not be included in the parent's usage (seems odd given
2762 		 * RLIMIT_CPU, though).
2763 		 */
2764 		proc_lock(parent);
2765 		bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2766 		if (rollup_child) {
2767 			ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2768 		}
2769 		update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2770 		proc_unlock(parent);
2771 		zfree(zombie_zone, child->p_ru);
2772 		child->p_ru = NULL;
2773 	} else if (!shadow_proc) {
2774 		printf("Warning: lost p_ru for %s\n", child->p_comm);
2775 	} else {
2776 		assert(child->p_ru == NULL);
2777 	}
2778 
2779 	AUDIT_SESSION_PROCEXIT(child);
2780 
2781 #if CONFIG_PERSONAS
2782 	persona_proc_drop(child);
2783 #endif /* CONFIG_PERSONAS */
2784 	/* proc_ucred_unsafe is safe, because child is not running */
2785 	(void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2786 
2787 	os_reason_free(child->p_exit_reason);
2788 
2789 	proc_list_lock();
2790 
2791 	pg = pgrp_leave_locked(child);
2792 	LIST_REMOVE(child, p_list);
2793 	parent->p_childrencnt--;
2794 	LIST_REMOVE(child, p_sibling);
2795 	bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2796 	    LIST_EMPTY(&parent->p_children);
2797 	if (no_more_children) {
2798 		wakeup((caddr_t)parent);
2799 	}
2800 	child->p_listflag &= ~P_LIST_WAITING;
2801 	wakeup(&child->p_stat);
2802 
2803 	/* Take it out of process hash */
2804 	if (!shadow_proc) {
2805 		phash_remove_locked(child);
2806 	}
2807 	proc_checkdeadrefs(child);
2808 	nprocs--;
2809 	if (flags & REAP_DEAD_PARENT) {
2810 		child->p_listflag |= P_LIST_DEADPARENT;
2811 	}
2812 
2813 	proc_list_unlock();
2814 
2815 	pgrp_rele(pg);
2816 	fdt_destroy(child);
2817 	lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2818 	lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2819 #if CONFIG_AUDIT
2820 	lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
2821 #endif /* CONFIG_AUDIT */
2822 #if CONFIG_DTRACE
2823 	lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2824 #endif
2825 	lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2826 	proc_wait_release(child);
2827 
2828 	if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2829 		proc_list_lock();
2830 	}
2831 }
2832 
2833 int
2834 wait1continue(int result)
2835 {
2836 	proc_t p;
2837 	thread_t thread;
2838 	uthread_t uth;
2839 	struct _wait4_data *wait4_data;
2840 	struct wait4_nocancel_args *uap;
2841 	int *retval;
2842 
2843 	if (result) {
2844 		return result;
2845 	}
2846 
2847 	p = current_proc();
2848 	thread = current_thread();
2849 	uth = (struct uthread *)get_bsdthread_info(thread);
2850 
2851 	wait4_data = &uth->uu_save.uus_wait4_data;
2852 	uap = wait4_data->args;
2853 	retval = wait4_data->retval;
2854 	return wait4_nocancel(p, uap, retval);
2855 }
2856 
2857 int
2858 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2859 {
2860 	__pthread_testcancel(1);
2861 	return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2862 }
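
/*
 * Userspace usage sketch (illustrative):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WUNTRACED, &ru);
 *	if (pid > 0 && WIFSTOPPED(status)) {
 *		kill(pid, SIGCONT);
 *	}
 */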
2863 
2864 int
2865 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2866 {
2867 	int nfound;
2868 	int sibling_count;
2869 	proc_t p;
2870 	int status, error;
2871 	uthread_t uth;
2872 	struct _wait4_data *wait4_data;
2873 
2874 	AUDIT_ARG(pid, uap->pid);
2875 
2876 	if (uap->pid == 0) {
2877 		uap->pid = -q->p_pgrpid;
2878 	}
2879 
2880 	if (uap->pid == INT_MIN) {
2881 		return EINVAL;
2882 	}
2883 
2884 loop:
2885 	proc_list_lock();
2886 loop1:
2887 	nfound = 0;
2888 	sibling_count = 0;
2889 
2890 	PCHILDREN_FOREACH(q, p) {
2891 		if (p->p_sibling.le_next != 0) {
2892 			sibling_count++;
2893 		}
2894 		if (uap->pid != WAIT_ANY &&
2895 		    proc_getpid(p) != uap->pid &&
2896 		    p->p_pgrpid != -(uap->pid)) {
2897 			continue;
2898 		}
2899 
2900 		if (proc_is_shadow(p)) {
2901 			continue;
2902 		}
2903 
2904 		nfound++;
2905 
2906 		/* XXX This is racy because we don't get the lock!!!! */
2907 
2908 		if (p->p_listflag & P_LIST_WAITING) {
2909 			/* we're not using a continuation here but we still need to stash
2910 			 * the args for stackshot. */
2911 			uth = current_uthread();
2912 			wait4_data = &uth->uu_save.uus_wait4_data;
2913 			wait4_data->args = uap;
2914 			thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2915 
2916 			(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2917 			goto loop1;
2918 		}
2919 		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */
2920 
2921 
2922 		if (p->p_stat == SZOMB) {
2923 			reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2924 			    REAP_REPARENTED_TO_INIT : 0;
2925 
2926 			proc_list_unlock();
2927 #if CONFIG_MACF
2928 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2929 				goto out;
2930 			}
2931 #endif
2932 			retval[0] = proc_getpid(p);
2933 			if (uap->status) {
2934 				/* Legacy apps expect only 8 bits of status */
2935 				status = 0xffff & p->p_xstat;   /* convert to int */
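				/*
				 * Worked example (illustrative): a child that called
				 * exit(3) stored W_EXITCODE(3, 0) == 0x0300 here; one
				 * killed by SIGKILL stored 0x0009.
				 */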
2936 				error = copyout((caddr_t)&status,
2937 				    uap->status,
2938 				    sizeof(status));
2939 				if (error) {
2940 					goto out;
2941 				}
2942 			}
2943 			if (uap->rusage) {
2944 				if (p->p_ru == NULL) {
2945 					error = ENOMEM;
2946 				} else {
2947 					if (IS_64BIT_PROCESS(q)) {
2948 						struct user64_rusage    my_rusage = {};
2949 						munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2950 						error = copyout((caddr_t)&my_rusage,
2951 						    uap->rusage,
2952 						    sizeof(my_rusage));
2953 					} else {
2954 						struct user32_rusage    my_rusage = {};
2955 						munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2956 						error = copyout((caddr_t)&my_rusage,
2957 						    uap->rusage,
2958 						    sizeof(my_rusage));
2959 					}
2960 				}
2961 				/* information unavailable? */
2962 				if (error) {
2963 					goto out;
2964 				}
2965 			}
2966 
2967 			/* Conformance change for 6577252.
2968 			 * When SIGCHLD is blocked and wait() returns because the status
2969 			 * of a child process is available and there are no other
2970 			 * child processes, then any pending SIGCHLD signal is cleared.
2971 			 */
2972 			if (sibling_count == 0) {
2973 				int mask = sigmask(SIGCHLD);
2974 				uth = current_uthread();
2975 
2976 				if ((uth->uu_sigmask & mask) != 0) {
2977 					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
2978 					 * This locking looks funny but it is protecting access to the
2979 					 * thread via p_uthlist.
2980 					 */
2981 					proc_lock(q);
2982 					uth->uu_siglist &= ~mask;       /* clear pending signal */
2983 					proc_unlock(q);
2984 				}
2985 			}
2986 
2987 			/* Clean up */
2988 			(void)reap_child_locked(q, p, reap_flags);
2989 
2990 			return 0;
2991 		}
2992 		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
2993 		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
2994 			proc_list_unlock();
2995 #if CONFIG_MACF
2996 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2997 				goto out;
2998 			}
2999 #endif
3000 			proc_lock(p);
3001 			p->p_lflag |= P_LWAITED;
3002 			proc_unlock(p);
3003 			retval[0] = proc_getpid(p);
3004 			if (uap->status) {
3005 				status = W_STOPCODE(p->p_xstat);
3006 				error = copyout((caddr_t)&status,
3007 				    uap->status,
3008 				    sizeof(status));
3009 			} else {
3010 				error = 0;
3011 			}
3012 			goto out;
3013 		}
3014 		/*
3015 		 * If we are waiting for continued processes, and this
3016 		 * process was continued
3017 		 */
3018 		if ((uap->options & WCONTINUED) &&
3019 		    (p->p_flag & P_CONTINUED)) {
3020 			proc_list_unlock();
3021 #if CONFIG_MACF
3022 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3023 				goto out;
3024 			}
3025 #endif
3026 
3027 			/* Prevent other processes from waiting for this event */
3028 			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3029 			retval[0] = proc_getpid(p);
3030 			if (uap->status) {
3031 				status = W_STOPCODE(SIGCONT);
3032 				error = copyout((caddr_t)&status,
3033 				    uap->status,
3034 				    sizeof(status));
3035 			} else {
3036 				error = 0;
3037 			}
3038 			goto out;
3039 		}
3040 		p->p_listflag &= ~P_LIST_WAITING;
3041 		wakeup(&p->p_stat);
3042 	}
3043 	/* list lock is held when we get here any which way */
3044 	if (nfound == 0) {
3045 		proc_list_unlock();
3046 		return ECHILD;
3047 	}
3048 
3049 	if (uap->options & WNOHANG) {
3050 		retval[0] = 0;
3051 		proc_list_unlock();
3052 		return 0;
3053 	}
3054 
3055 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3056 	uth = current_uthread();
3057 	wait4_data = &uth->uu_save.uus_wait4_data;
3058 	wait4_data->args = uap;
3059 	wait4_data->retval = retval;
3060 
3061 	thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
3062 	if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3063 		return error;
3064 	}
3065 
3066 	goto loop;
3067 out:
3068 	proc_list_lock();
3069 	p->p_listflag &= ~P_LIST_WAITING;
3070 	wakeup(&p->p_stat);
3071 	proc_list_unlock();
3072 	return error;
3073 }
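
/*
 * Illustrative userspace sketch (not kernel code): the WNOHANG path above
 * returns 0 with retval[0] == 0 when children exist but none is ready, so
 * a non-blocking poll distinguishes three outcomes:
 *
 *	pid_t r = wait4(-1, &status, WNOHANG, NULL);
 *	if (r > 0)  { ... }   // a child changed state; status is valid
 *	if (r == 0) { ... }   // children exist, but none reportable yet
 *	if (r < 0)  { ... }   // errno == ECHILD: no waitable children
 */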
3074 
3075 #if DEBUG
3076 #define ASSERT_LCK_MTX_OWNED(lock)      \
3077 	                        lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3078 #else
3079 #define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */
3080 #endif
3081 
3082 int
3083 waitidcontinue(int result)
3084 {
3085 	proc_t p;
3086 	thread_t thread;
3087 	uthread_t uth;
3088 	struct _waitid_data *waitid_data;
3089 	struct waitid_nocancel_args *uap;
3090 	int *retval;
3091 
3092 	if (result) {
3093 		return result;
3094 	}
3095 
3096 	p = current_proc();
3097 	thread = current_thread();
3098 	uth = (struct uthread *)get_bsdthread_info(thread);
3099 
3100 	waitid_data = &uth->uu_save.uus_waitid_data;
3101 	uap = waitid_data->args;
3102 	retval = waitid_data->retval;
3103 	return waitid_nocancel(p, uap, retval);
3104 }
3105 
3106 /*
3107  * Description:	Suspend the calling thread until one child of the process
3108  *		containing the calling thread changes state.
3109  *
3110  * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
3111  *		uap->id			pid_t or gid_t or ignored
3112  *		uap->infop		Address of siginfo_t struct in
3113  *					user space into which to return status
3114  *		uap->options		flag values
3115  *
3116  * Returns:	0			Success
3117  *		!0			Error returning status to user space
3118  */
3119 int
3120 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3121 {
3122 	__pthread_testcancel(1);
3123 	return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3124 }
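
/*
 * Illustrative userspace sketch (not kernel code): the siginfo_t that
 * waitid_nocancel() fills in below. si_code distinguishes normal exit
 * from death by signal, matching the SZOMB case in that function:
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		pid_t pid = fork();
 *		if (pid == 0) {
 *			_exit(3);                        // child exits with 3
 *		}
 *		siginfo_t info;
 *		if (waitid(P_PID, (id_t)pid, &info, WEXITED) == 0) {
 *			// si_code is CLD_EXITED, CLD_KILLED, or CLD_DUMPED;
 *			// si_status carries the exit status or signal number
 *			printf("pid %d code %d status %d\n",
 *			    (int)info.si_pid, info.si_code, info.si_status);
 *		}
 *		return 0;
 *	}
 */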
3125 
3126 int
3127 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3128     __unused int32_t *retval)
3129 {
3130 	user_siginfo_t  siginfo;        /* siginfo data to return to caller */
3131 	boolean_t caller64 = IS_64BIT_PROCESS(q);
3132 	int nfound;
3133 	proc_t p;
3134 	int error;
3135 	uthread_t uth;
3136 	struct _waitid_data *waitid_data;
3137 
3138 	if (uap->options == 0 ||
3139 	    (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3140 		return EINVAL;        /* bits set that aren't recognized */
3141 	}
3142 	switch (uap->idtype) {
3143 	case P_PID:     /* child with process ID equal to... */
3144 	case P_PGID:    /* child with process group ID equal to... */
3145 		if (((int)uap->id) < 0) {
3146 			return EINVAL;
3147 		}
3148 		break;
3149 	case P_ALL:     /* any child */
3150 		break;
3151 	}
3152 
3153 loop:
3154 	proc_list_lock();
3155 loop1:
3156 	nfound = 0;
3157 
3158 	PCHILDREN_FOREACH(q, p) {
3159 		switch (uap->idtype) {
3160 		case P_PID:     /* child with process ID equal to... */
3161 			if (proc_getpid(p) != (pid_t)uap->id) {
3162 				continue;
3163 			}
3164 			break;
3165 		case P_PGID:    /* child with process group ID equal to... */
3166 			if (p->p_pgrpid != (pid_t)uap->id) {
3167 				continue;
3168 			}
3169 			break;
3170 		case P_ALL:     /* any child */
3171 			break;
3172 		}
3173 
3174 		if (proc_is_shadow(p)) {
3175 			continue;
3176 		}
3177 		/* XXX This is racy because we don't get the lock!!!! */
3178 
3179 		/*
3180 		 * Wait collision; go to sleep and restart; used to maintain
3181 		 * the single-return-per-waited-process guarantee.
3182 		 */
3183 		if (p->p_listflag & P_LIST_WAITING) {
3184 			(void) msleep(&p->p_stat, &proc_list_mlock,
3185 			    PWAIT, "waitidcoll", 0);
3186 			goto loop1;
3187 		}
3188 		p->p_listflag |= P_LIST_WAITING;                /* mark busy */
3189 
3190 		nfound++;
3191 
3192 		bzero(&siginfo, sizeof(siginfo));
3193 
3194 		switch (p->p_stat) {
3195 		case SZOMB:             /* Exited */
3196 			if (!(uap->options & WEXITED)) {
3197 				break;
3198 			}
3199 			proc_list_unlock();
3200 #if CONFIG_MACF
3201 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3202 				goto out;
3203 			}
3204 #endif
3205 			siginfo.si_signo = SIGCHLD;
3206 			siginfo.si_pid = proc_getpid(p);
3207 
3208 			/* If the child terminated abnormally due to a signal, the signum
3209 			 * needs to be preserved in the exit status.
3210 			 */
3211 			if (WIFSIGNALED(p->p_xstat)) {
3212 				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
3213 				    CLD_DUMPED : CLD_KILLED;
3214 				siginfo.si_status = WTERMSIG(p->p_xstat);
3215 			} else {
3216 				siginfo.si_code = CLD_EXITED;
3217 				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
3218 			}
3219 			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
3220 			p->p_xhighbits = 0;
3221 
3222 			if ((error = copyoutsiginfo(&siginfo,
3223 			    caller64, uap->infop)) != 0) {
3224 				goto out;
3225 			}
3226 
3227 			/* Prevent other processes from waiting for this event? */
3228 			if (!(uap->options & WNOWAIT)) {
3229 				reap_child_locked(q, p, 0);
3230 				return 0;
3231 			}
3232 			goto out;
3233 
3234 		case SSTOP:             /* Stopped */
3235 			/*
3236 			 * If we are not interested in stopped processes, then
3237 			 * ignore this one.
3238 			 */
3239 			if (!(uap->options & WSTOPPED)) {
3240 				break;
3241 			}
3242 
3243 			/*
3244 			 * If someone has already waited it, we lost a race
3245 			 * to be the one to return status.
3246 			 */
3247 			if ((p->p_lflag & P_LWAITED) != 0) {
3248 				break;
3249 			}
3250 			proc_list_unlock();
3251 #if CONFIG_MACF
3252 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3253 				goto out;
3254 			}
3255 #endif
3256 			siginfo.si_signo = SIGCHLD;
3257 			siginfo.si_pid = proc_getpid(p);
3258 			siginfo.si_status = p->p_xstat; /* signal number */
3259 			siginfo.si_code = CLD_STOPPED;
3260 
3261 			if ((error = copyoutsiginfo(&siginfo,
3262 			    caller64, uap->infop)) != 0) {
3263 				goto out;
3264 			}
3265 
3266 			/* Prevent other processes from waiting for this event? */
3267 			if (!(uap->options & WNOWAIT)) {
3268 				proc_lock(p);
3269 				p->p_lflag |= P_LWAITED;
3270 				proc_unlock(p);
3271 			}
3272 			goto out;
3273 
3274 		default:                /* All other states => Continued */
3275 			if (!(uap->options & WCONTINUED)) {
3276 				break;
3277 			}
3278 
3279 			/*
3280 			 * If the flag isn't set, then this process has not
3281 			 * been stopped and continued, or the status has
3282 			 * already been reaped by another caller of waitid().
3283 			 */
3284 			if ((p->p_flag & P_CONTINUED) == 0) {
3285 				break;
3286 			}
3287 			proc_list_unlock();
3288 #if CONFIG_MACF
3289 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3290 				goto out;
3291 			}
3292 #endif
3293 			siginfo.si_signo = SIGCHLD;
3294 			siginfo.si_code = CLD_CONTINUED;
3295 			proc_lock(p);
3296 			siginfo.si_pid = p->p_contproc;
3297 			siginfo.si_status = p->p_xstat;
3298 			proc_unlock(p);
3299 
3300 			if ((error = copyoutsiginfo(&siginfo,
3301 			    caller64, uap->infop)) != 0) {
3302 				goto out;
3303 			}
3304 
3305 			/* Prevent other processes from waiting for this event? */
3306 			if (!(uap->options & WNOWAIT)) {
3307 				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
3308 				    &p->p_flag);
3309 			}
3310 			goto out;
3311 		}
3312 		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3313 
3314 		/* Not a process we are interested in; go on to next child */
3315 
3316 		p->p_listflag &= ~P_LIST_WAITING;
3317 		wakeup(&p->p_stat);
3318 	}
3319 	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3320 
3321 	/* No child processes that could possibly satisfy the request? */
3322 
3323 	if (nfound == 0) {
3324 		proc_list_unlock();
3325 		return ECHILD;
3326 	}
3327 
3328 	if (uap->options & WNOHANG) {
3329 		proc_list_unlock();
3330 #if CONFIG_MACF
3331 		if ((error = mac_proc_check_wait(q, p)) != 0) {
3332 			return error;
3333 		}
3334 #endif
3335 		/*
3336 		 * The state of the siginfo structure in this case
3337 		 * is undefined.  Some implementations bzero it, some
3338 		 * (like here) leave it untouched for efficiency.
3339 		 *
3340 		 * Thus the most portable check for "no matching pid with
3341 		 * WNOHANG" is to store a zero into si_pid before
3342 		 * invocation, then check for a non-zero value afterwards.
3343 		 */
3344 		return 0;
3345 	}
3346 
3347 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3348 	uth = current_uthread();
3349 	waitid_data = &uth->uu_save.uus_waitid_data;
3350 	waitid_data->args = uap;
3351 	waitid_data->retval = retval;
3352 
3353 	if ((error = msleep0(q, &proc_list_mlock,
3354 	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
3355 		return error;
3356 	}
3357 
3358 	goto loop;
3359 out:
3360 	proc_list_lock();
3361 	p->p_listflag &= ~P_LIST_WAITING;
3362 	wakeup(&p->p_stat);
3363 	proc_list_unlock();
3364 	return error;
3365 }
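
/*
 * Illustrative userspace sketch (not kernel code): the portable WNOHANG
 * check described in the comment above, needed because this implementation
 * leaves the siginfo untouched when no matching child was reportable:
 *
 *	siginfo_t info;
 *	info.si_pid = 0;                // sentinel: cleared before the call
 *	if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0 &&
 *	    info.si_pid != 0) {
 *		// a child was actually reported; info is valid
 *	}
 */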
3366 
3367 /*
3368  * make process 'parent' the new parent of process 'child'.
3369  */
3370 void
3371 proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
3372 {
3373 	proc_t oldparent = PROC_NULL;
3374 
3375 	if (child->p_pptr == parent) {
3376 		return;
3377 	}
3378 
3379 	if (locked == 0) {
3380 		proc_list_lock();
3381 	}
3382 
3383 	oldparent = child->p_pptr;
3384 #if __PROC_INTERNAL_DEBUG
3385 	if (oldparent == PROC_NULL) {
3386 		panic("proc_reparent: process %p does not have a parent", child);
3387 	}
3388 #endif
3389 
3390 	LIST_REMOVE(child, p_sibling);
3391 #if __PROC_INTERNAL_DEBUG
3392 	if (oldparent->p_childrencnt == 0) {
3393 		panic("process children count already 0");
3394 	}
3395 #endif
3396 	oldparent->p_childrencnt--;
3397 #if __PROC_INTERNAL_DEBUG
3398 	if (oldparent->p_childrencnt < 0) {
3399 		panic("process children count -ve");
3400 	}
3401 #endif
3402 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
3403 	parent->p_childrencnt++;
3404 	child->p_pptr = parent;
3405 	child->p_ppid = proc_getpid(parent);
3406 
3407 	proc_list_unlock();
3408 
3409 	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
3410 		psignal(initproc, SIGCHLD);
3411 	}
3412 	if (locked == 1) {
3413 		proc_list_lock();
3414 	}
3415 }
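
/*
 * Illustrative sketch (not part of this file): the <sys/queue.h> LIST
 * macros used above keep each child on exactly one parent's p_children
 * list via its p_sibling linkage. A minimal standalone analogue:
 *
 *	#include <sys/queue.h>
 *
 *	struct child {
 *		LIST_ENTRY(child) sibling;           // like p_sibling
 *	};
 *	LIST_HEAD(childlist, child);                 // like p_children
 *
 *	void
 *	reparent(struct childlist *newparent, struct child *c)
 *	{
 *		LIST_REMOVE(c, sibling);             // unlink from old parent
 *		LIST_INSERT_HEAD(newparent, c, sibling); // link to new one
 *	}
 */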
3416 
3417 /*
3418  * Exit: deallocate address space and other resources, change proc state
3419  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
3420  * status and rusage for wait().  Check for child processes and orphan them.
3421  */
3422 
3423 
3424 /*
3425  * munge_rusage
3426  *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
3427  *	process.  We munge the kernel version of rusage into the
3428  *	64 bit version.
3429  */
3430 __private_extern__  void
3431 munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
3432 {
3433 	/* Zero-out struct so that padding is cleared */
3434 	bzero(a_user_rusage_p, sizeof(struct user64_rusage));
3435 
3436 	/* timeval changes size, so utime and stime need special handling */
3437 	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
3438 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3439 	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
3440 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3441 	/*
3442 	 * everything else can be a direct assign, since there is no loss
3443 	 * of precision implied going 32->64.
3444 	 */
3445 	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
3446 	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
3447 	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
3448 	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
3449 	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
3450 	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
3451 	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
3452 	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
3453 	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
3454 	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
3455 	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
3456 	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
3457 	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
3458 	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
3459 }
3460 
3461 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */
3462 __private_extern__  void
3463 munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
3464 {
3465 	bzero(a_user_rusage_p, sizeof(struct user32_rusage));
3466 
3467 	/* timeval changes size, so utime and stime need special handling */
3468 	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
3469 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3470 	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
3471 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3472 	/*
3473 	 * everything else can be a direct assign. We currently ignore
3474 	 * the loss of precision
3475 	 */
3476 	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
3477 	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
3478 	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
3479 	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
3480 	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
3481 	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
3482 	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
3483 	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
3484 	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
3485 	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
3486 	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
3487 	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
3488 	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
3489 	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
3490 }
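
/*
 * Illustrative sketch (not part of this file): why munging is needed at
 * all. Under LP64, "long" and the timeval fields widen, so struct rusage
 * has different layouts for 32-bit and 64-bit user processes and cannot be
 * copied out byte-for-byte. A toy analogue of the pattern above:
 *
 *	#include <stdint.h>
 *
 *	struct kern_val   { int64_t v; };    // kernel-native width
 *	struct user32_val { int32_t v; };    // 32-bit user ABI layout
 *
 *	static void
 *	munge_user32_val(const struct kern_val *k, struct user32_val *u)
 *	{
 *		u->v = (int32_t)k->v;        // explicit narrowing, as above
 *	}
 */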
3491 
3492 void
3493 kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
3494 {
3495 	assert(thread != NULL);
3496 	assert(waitinfo != NULL);
3497 
3498 	struct uthread *ut = get_bsdthread_info(thread);
3499 	waitinfo->context = 0;
3500 	// ensure wmesg is consistent with a thread waiting in wait4
3501 	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
3502 	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
3503 	// May not actually contain a pid; this is just the argument to wait4.
3504 	// See man wait4 for other valid wait4 arguments.
3505 	waitinfo->owner = args->pid;
3506 }
3507 
3508 int
3509 exit_with_guard_exception(
3510 	proc_t p,
3511 	mach_exception_data_type_t code,
3512 	mach_exception_data_type_t subcode)
3513 {
3514 	os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)code);
3515 	assert(reason != OS_REASON_NULL);
3516 
3517 	return exit_with_mach_exception(p, reason, EXC_GUARD, code, subcode);
3518 }
3519 
3520 #if __has_feature(ptrauth_calls)
3521 int
3522 exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
3523     mach_exception_subcode_t subcode)
3524 {
3525 	os_reason_t reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code);
3526 	assert(reason != OS_REASON_NULL);
3527 
3528 	return exit_with_mach_exception(p, reason, exception, code, subcode);
3529 }
3530 #endif /* __has_feature(ptrauth_calls) */
3531 
3532 int
3533 exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
3534     mach_exception_data_type_t subcode)
3535 {
3536 	os_reason_t reason = os_reason_create(OS_REASON_PORT_SPACE, (uint64_t)code);
3537 	assert(reason != OS_REASON_NULL);
3538 
3539 	return exit_with_mach_exception(p, reason, EXC_RESOURCE, code, subcode);
3540 }
3541 
3542 static int
3543 exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception, mach_exception_code_t code,
3544     mach_exception_subcode_t subcode)
3545 {
3546 	thread_t self = current_thread();
3547 	struct uthread *ut = get_bsdthread_info(self);
3548 
3549 	ut->uu_exception = exception;
3550 	ut->uu_code = code;
3551 	ut->uu_subcode = subcode;
3552 
3553 	reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
3554 	return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
3555 	           TRUE, FALSE, 0, reason);
3556 }
3557