xref: /xnu-8796.101.5/bsd/kern/kern_exit.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113 
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116 
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120 
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134 
135 #include <vm/vm_protos.h>
136 #include <os/log.h>
137 #include <os/system_event_log.h>
138 
139 #include <pexpert/pexpert.h>
140 
141 #include <kdp/kdp_dyld.h>
142 
143 #if SYSV_SHM
144 #include <sys/shm_internal.h>   /* shmexit */
145 #endif /* SYSV_SHM */
146 #if CONFIG_PERSONAS
147 #include <sys/persona.h>
148 #endif /* CONFIG_PERSONAS */
149 #if CONFIG_MEMORYSTATUS
150 #include <sys/kern_memorystatus.h>
151 #endif /* CONFIG_MEMORYSTATUS */
152 #if CONFIG_DTRACE
153 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
154 void dtrace_proc_exit(proc_t p);
155 #include <sys/dtrace_ptss.h>
156 #endif /* CONFIG_DTRACE */
157 #if CONFIG_MACF
158 #include <security/mac_framework.h>
159 #include <security/mac_mach_internal.h>
160 #include <sys/syscall.h>
161 #endif /* CONFIG_MACF */
162 
163 #if CONFIG_MEMORYSTATUS
164 static void proc_memorystatus_remove(proc_t p);
165 #endif /* CONFIG_MEMORYSTATUS */
166 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
167 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
168     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
169     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
170 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
171 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
172 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
173 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
174 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
175     struct rusage_superset *rup, mach_exception_data_type_t code,
176     mach_exception_data_type_t subcode, uint64_t *udata_buffer,
177     int num_udata, os_reason_t reason, exception_type_t etype);
178 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
179 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
180 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
181 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
182 extern uint64_t get_task_phys_footprint_limit(task_t);
183 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
184 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
185 
186 extern unsigned int exception_log_max_pid;
187 
188 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
189 
190 /*
191  * Flags for `reap_child_locked`.
192  */
193 __options_decl(reap_flags_t, uint32_t, {
194 	/*
195 	 * Parent is exiting, so the kernel is responsible for reaping children.
196 	 */
197 	REAP_DEAD_PARENT = 0x01,
198 	/*
199 	 * Child process was re-parented to initproc.
200 	 */
201 	REAP_REPARENTED_TO_INIT = 0x02,
202 	/*
203 	 * `proc_list_lock` is held on entry.
204 	 */
205 	REAP_LOCKED = 0x04,
206 	/*
207 	 * Drop the `proc_list_lock` on return.  Note that the `proc_list_lock` will
208 	 * be dropped internally by the function regardless.
209 	 */
210 	REAP_DROP_LOCK = 0x08,
211 });
212 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
213 
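/*
 * Zone backing the struct rusage_superset snapshot that is kept for a zombie
 * process until its parent reaps it.
 */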
214 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
215 
216 /*
217  * Things which should have prototypes in headers, but don't
218  */
219 void    proc_exit(proc_t p);
220 int     wait1continue(int result);
221 int     waitidcontinue(int result);
222 kern_return_t sys_perf_notify(thread_t thread, int pid);
223 kern_return_t task_exception_notify(exception_type_t exception,
224     mach_exception_data_type_t code, mach_exception_data_type_t subcode);
225 void    delay(int);
226 
227 #if __has_feature(ptrauth_calls)
228 int exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
229     mach_exception_subcode_t subcode);
230 #endif /* __has_feature(ptrauth_calls) */
231 
232 int exit_with_guard_exception(proc_t p, mach_exception_data_type_t code,
233     mach_exception_data_type_t subcode);
234 int exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
235     mach_exception_data_type_t subcode);
236 static int exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception,
237     mach_exception_code_t code, mach_exception_subcode_t subcode);
238 
239 #if DEVELOPMENT || DEBUG
240 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
241 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
242 static pid_t proc_exit_lpexit_spin_pid = -1;            /* wakeup point */
243 static int proc_exit_lpexit_spin_pos = -1;              /* point to block */
244 static int proc_exit_lpexit_spinning = 0;
245 enum {
246 	PELS_POS_START = 0,             /* beginning of proc_exit */
247 	PELS_POS_PRE_TASK_DETACH,       /* before task/proc detach */
248 	PELS_POS_POST_TASK_DETACH,      /* after task/proc detach */
249 	PELS_POS_END,                   /* end of proc_exit */
250 	PELS_NPOS                       /* # valid values */
251 };
252 
253 /* Panic if matching processes (delimited by ',') exit on error. */
254 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
255 
256 static int
257 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
258 {
259 #pragma unused(oidp, arg1, arg2)
260 	pid_t new_value;
261 	int changed;
262 	int error;
263 
264 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
265 		return ENOENT;
266 	}
267 
268 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
269 	    sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
270 	if (error == 0 && changed != 0) {
271 		if (new_value < -1) {
272 			return EINVAL;
273 		}
274 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
275 		proc_exit_lpexit_spin_pid = new_value;
276 		wakeup(&proc_exit_lpexit_spin_pid);
277 		proc_exit_lpexit_spinning = 0;
278 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
279 	}
280 	return error;
281 }
282 
283 static int
284 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
285 {
286 #pragma unused(oidp, arg1, arg2)
287 	int new_value;
288 	int changed;
289 	int error;
290 
291 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
292 		return ENOENT;
293 	}
294 
295 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
296 	    sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
297 	if (error == 0 && changed != 0) {
298 		if (new_value < -1 || new_value >= PELS_NPOS) {
299 			return EINVAL;
300 		}
301 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
302 		proc_exit_lpexit_spin_pos = new_value;
303 		wakeup(&proc_exit_lpexit_spin_pid);
304 		proc_exit_lpexit_spinning = 0;
305 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
306 	}
307 	return error;
308 }
309 
310 static int
311 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
312 {
313 #pragma unused(oidp, arg1, arg2)
314 	int new_value;
315 	int changed;
316 	int error;
317 
318 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
319 		return ENOENT;
320 	}
321 
322 	error = sysctl_io_number(req, proc_exit_lpexit_spinning,
323 	    sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
324 	if (error == 0 && changed != 0) {
325 		return EINVAL;
326 	}
327 	return error;
328 }
329 
330 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
331     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
332     NULL, sizeof(pid_t),
333     proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
334 
335 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
336     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
337     NULL, sizeof(int),
338     proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
339 
340 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
341     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
342     NULL, sizeof(int),
343     proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
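/*
 * A hypothetical usage sketch (assuming the standard sysctl(8) CLI): on a
 * DEVELOPMENT/DEBUG kernel booted with the enable_proc_exit_lpexit_spin
 * boot-arg, a test harness could hold a specific process at a chosen point in
 * proc_exit and poll for it, e.g.:
 *
 *   sysctl debug.proc_exit_lpexit_spin_pid=1234   # PID to hold
 *   sysctl debug.proc_exit_lpexit_spin_pos=1      # PELS_POS_PRE_TASK_DETACH
 *   sysctl debug.proc_exit_lpexit_spinning        # reads 1 once the exiting
 *                                                 # thread is parked
 *   sysctl debug.proc_exit_lpexit_spin_pid=-1     # release the held thread
 */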
344 
345 static inline void
346 proc_exit_lpexit_check(pid_t pid, int pos)
347 {
348 	if (proc_exit_lpexit_spin_pid == pid) {
349 		bool slept = false;
350 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
351 		while (proc_exit_lpexit_spin_pid == pid &&
352 		    proc_exit_lpexit_spin_pos == pos) {
353 			if (!slept) {
354 				os_log(OS_LOG_DEFAULT,
355 				    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
356 				slept = true;
357 			}
358 			proc_exit_lpexit_spinning = 1;
359 			msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
360 			    PWAIT, "proc_exit_lpexit_check", NULL);
361 			proc_exit_lpexit_spinning = 0;
362 		}
363 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
364 		if (slept) {
365 			os_log(OS_LOG_DEFAULT,
366 			    "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
367 		}
368 	}
369 }
370 #endif /* DEVELOPMENT || DEBUG */
371 
372 /*
373  * NOTE: Source and target may *NOT* overlap!
374  * XXX Should share code with bsd/dev/ppc/unix_signal.c
375  */
376 void
377 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
378 {
379 	out->si_signo   = in->si_signo;
380 	out->si_errno   = in->si_errno;
381 	out->si_code    = in->si_code;
382 	out->si_pid     = in->si_pid;
383 	out->si_uid     = in->si_uid;
384 	out->si_status  = in->si_status;
385 	out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
386 	/* following cast works for sival_int because of padding */
387 	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
388 	out->si_band    = (user32_long_t)in->si_band;                  /* range reduction */
389 }
390 
391 void
392 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
393 {
394 	out->si_signo   = in->si_signo;
395 	out->si_errno   = in->si_errno;
396 	out->si_code    = in->si_code;
397 	out->si_pid     = in->si_pid;
398 	out->si_uid     = in->si_uid;
399 	out->si_status  = in->si_status;
400 	out->si_addr    = in->si_addr;
401 	/* following cast works for sival_int because of padding */
402 	out->si_value.sival_ptr = in->si_value.sival_ptr;
403 	out->si_band    = in->si_band;                  /* range reduction */
404 }
405 
406 static int
407 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
408 {
409 	if (is64) {
410 		user64_siginfo_t sinfo64;
411 
412 		bzero(&sinfo64, sizeof(sinfo64));
413 		siginfo_user_to_user64(native, &sinfo64);
414 		return copyout(&sinfo64, uaddr, sizeof(sinfo64));
415 	} else {
416 		user32_siginfo_t sinfo32;
417 
418 		bzero(&sinfo32, sizeof(sinfo32));
419 		siginfo_user_to_user32(native, &sinfo32);
420 		return copyout(&sinfo32, uaddr, sizeof(sinfo32));
421 	}
422 }
423 
424 void
425 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
426     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
427     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
428 {
429 	struct rusage_superset rup;
430 
431 	gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
432 	rup.ri.ri_phys_footprint = 0;
433 	populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
434 	    udata_buffer, num_udata, reason, etype);
435 }
436 
437 static void
438 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
439 {
440 	mach_exception_data_type_t code_update = *code;
441 	mach_exception_data_type_t subcode_update = *subcode;
442 	if (p->p_exit_reason == OS_REASON_NULL) {
443 		return;
444 	}
445 
446 	switch (p->p_exit_reason->osr_namespace) {
447 	case OS_REASON_JETSAM:
448 		if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
449 			/* Update the code with EXC_RESOURCE code for high memory watermark */
450 			EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
451 			EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
452 			EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
453 			subcode_update = 0;
454 			break;
455 		}
456 
457 		break;
458 	default:
459 		break;
460 	}
461 
462 	*code = code_update;
463 	*subcode = subcode_update;
464 	return;
465 }
466 
467 mach_exception_data_type_t
468 proc_encode_exit_exception_code(proc_t p)
469 {
470 	uint64_t subcode = 0;
471 
472 	if (p->p_exit_reason == OS_REASON_NULL) {
473 		return 0;
474 	}
475 
476 	/* Embed first 32 bits of osr_namespace and osr_code in exception code */
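	/*
	 * A sketch of the expected packing, assuming the ENCODE_* macros place
	 * the namespace in the upper 32 bits and the code in the lower 32 bits:
	 *
	 *   subcode = ((uint64_t)(osr_namespace & UINT32_MAX) << 32) |
	 *             (osr_code & UINT32_MAX);
	 */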
477 	ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
478 	ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
479 	return (mach_exception_data_type_t)subcode;
480 }
481 
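/*
 * Every record below follows the same kcdata pattern: reserve space in the
 * corpse's crash-info buffer with kcdata_get_memory_addr() (or the _for_array
 * variant) and, if the reservation succeeds, copy the value in with
 * kcdata_memcpy().  A failed reservation simply drops that field.
 */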
482 static void
483 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
484     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
485     uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
486 {
487 	mach_vm_address_t uaddr = 0;
488 	mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
489 	exc_codes[0] = code;
490 	exc_codes[1] = subcode;
491 	cpu_type_t cputype;
492 	struct proc_uniqidentifierinfo p_uniqidinfo;
493 	struct proc_workqueueinfo pwqinfo;
494 	int retval = 0;
495 	uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
496 	boolean_t is_corpse_fork;
497 	uint32_t csflags;
498 	unsigned int pflags = 0;
499 	uint64_t max_footprint_mb;
500 	uint64_t max_footprint;
501 
502 	uint64_t ledger_internal;
503 	uint64_t ledger_internal_compressed;
504 	uint64_t ledger_iokit_mapped;
505 	uint64_t ledger_alternate_accounting;
506 	uint64_t ledger_alternate_accounting_compressed;
507 	uint64_t ledger_purgeable_nonvolatile;
508 	uint64_t ledger_purgeable_nonvolatile_compressed;
509 	uint64_t ledger_page_table;
510 	uint64_t ledger_phys_footprint;
511 	uint64_t ledger_phys_footprint_lifetime_max;
512 	uint64_t ledger_network_nonvolatile;
513 	uint64_t ledger_network_nonvolatile_compressed;
514 	uint64_t ledger_wired_mem;
515 	uint64_t ledger_tagged_footprint;
516 	uint64_t ledger_tagged_footprint_compressed;
517 	uint64_t ledger_media_footprint;
518 	uint64_t ledger_media_footprint_compressed;
519 	uint64_t ledger_graphics_footprint;
520 	uint64_t ledger_graphics_footprint_compressed;
521 	uint64_t ledger_neural_footprint;
522 	uint64_t ledger_neural_footprint_compressed;
523 
524 	void *crash_info_ptr = task_get_corpseinfo(corpse_task);
525 
526 #if CONFIG_MEMORYSTATUS
527 	int memstat_dirty_flags = 0;
528 #endif
529 
530 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
531 		kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
532 	}
533 
534 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
535 		pid_t pid = proc_getpid(p);
536 		kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
537 	}
538 
539 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
540 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
541 	}
542 
543 	/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
544 	if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
545 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
546 			kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
547 		}
548 	}
549 
550 	static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
551 	if (KERN_SUCCESS ==
552 	    kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
553 		proc_piduniqidentifierinfo(p, &p_uniqidinfo);
554 		kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
555 	}
556 
557 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
558 		kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
559 	}
560 
561 	csflags = (uint32_t)proc_getcsflags(p);
562 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
563 		kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
564 	}
565 
566 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
567 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
568 	}
569 
570 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
571 		struct timeval64 t64;
572 		t64.tv_sec = (int64_t)p->p_start.tv_sec;
573 		t64.tv_usec = (int64_t)p->p_start.tv_usec;
574 		kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
575 	}
576 
577 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
578 		kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
579 	}
580 
581 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
582 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
583 	}
584 
585 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
586 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
587 	}
588 
589 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
590 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
591 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
592 		kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
593 		zfree(ZV_NAMEI, buf);
594 	}
595 
596 	pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
597 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
598 		kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
599 	}
600 
601 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
602 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
603 	}
604 
605 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
606 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
607 	}
608 
609 	cputype = cpu_type() & ~CPU_ARCH_MASK;
610 	if (IS_64BIT_PROCESS(p)) {
611 		cputype |= CPU_ARCH_ABI64;
612 	} else if (proc_is64bit_data(p)) {
613 		cputype |= CPU_ARCH_ABI64_32;
614 	}
615 
616 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
617 		kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
618 	}
619 
620 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
621 		max_footprint = get_task_phys_footprint_limit(proc_task(p));
622 		max_footprint_mb = max_footprint >> 20;
623 		kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
624 	}
625 
626 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
627 		ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
628 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
629 	}
630 
631 	// In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
632 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
633 		ledger_internal = get_task_internal(corpse_task);
634 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
635 	}
636 
637 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
638 		ledger_internal_compressed = get_task_internal_compressed(corpse_task);
639 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
640 	}
641 
642 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
643 		ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
644 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
645 	}
646 
647 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
648 		ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
649 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
650 	}
651 
652 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
653 		ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
654 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
655 	}
656 
657 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
658 		ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
659 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
660 	}
661 
662 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
663 		ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
664 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
665 	}
666 
667 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
668 		ledger_page_table = get_task_page_table(corpse_task);
669 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
670 	}
671 
672 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
673 		ledger_phys_footprint = get_task_phys_footprint(corpse_task);
674 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
675 	}
676 
677 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
678 		ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
679 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
680 	}
681 
682 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
683 		ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
684 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
685 	}
686 
687 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
688 		ledger_wired_mem = get_task_wired_mem(corpse_task);
689 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
690 	}
691 
692 	bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
693 	retval = fill_procworkqueue(p, &pwqinfo);
694 	if (retval == 0) {
695 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
696 			kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
697 		}
698 	}
699 
700 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
701 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
702 	}
703 
704 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
705 		uid_t persona_id = proc_persona_id(p);
706 		kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
707 	}
708 
709 #if CONFIG_COALITIONS
710 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
711 		uint64_t coalition_ids[COALITION_NUM_TYPES];
712 		task_coalition_ids(proc_task(p), coalition_ids);
713 		kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
714 	}
715 #endif /* CONFIG_COALITIONS */
716 
717 #if CONFIG_MEMORYSTATUS
718 	memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
719 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
720 		kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
721 	}
722 #endif
723 
724 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
725 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
726 	}
727 
728 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
729 		ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
730 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
731 	}
732 
733 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
734 		ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
735 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
736 	}
737 
738 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
739 		ledger_media_footprint = get_task_media_footprint(corpse_task);
740 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
741 	}
742 
743 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
744 		ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
745 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
746 	}
747 
748 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
749 		ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
750 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
751 	}
752 
753 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
754 		ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
755 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
756 	}
757 
758 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
759 		ledger_neural_footprint = get_task_neural_footprint(corpse_task);
760 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
761 	}
762 
763 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
764 		ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
765 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
766 	}
767 
768 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
769 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
770 	}
771 
772 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
773 		char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
774 		ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
775 		kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
776 	}
777 
778 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
779 		is_corpse_fork = is_corpsefork(corpse_task);
780 		kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
781 	}
782 
783 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
784 		kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
785 	}
786 
787 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
788 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
789 	}
790 
791 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
792 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
793 	}
794 
795 	char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
796 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
797 		const char * id = cs_identity_get(p);
798 		if (id) {
799 			strlcpy(signing_id, id, sizeof(signing_id));
800 		}
801 		kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
802 	}
803 	char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
804 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
805 		const char * id = csproc_get_teamid(p);
806 		if (id) {
807 			strlcpy(team_id, id, sizeof(team_id));
808 		}
809 		kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
810 	}
811 
812 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
813 		uint32_t category = 0;
814 		if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
815 			category = CS_VALIDATION_CATEGORY_INVALID;
816 		}
817 		kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
818 	}
819 
820 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
821 		uint32_t trust = 0; //Filling this in is a future action item: rdar://101973010
822 		kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
823 	}
824 
825 
826 	if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
827 		reason = p->p_exit_reason;
828 	}
829 	if (reason != OS_REASON_NULL) {
830 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
831 			struct exit_reason_snapshot ers = {
832 				.ers_namespace = reason->osr_namespace,
833 				.ers_code = reason->osr_code,
834 				.ers_flags = reason->osr_flags
835 			};
836 
837 			kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
838 		}
839 
840 		if (reason->osr_kcd_buf != 0) {
841 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
842 			assert(reason_buf_size != 0);
843 
844 			if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
845 				kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
846 			}
847 		}
848 	}
849 
850 	if (num_udata > 0) {
851 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
852 		    sizeof(uint64_t), num_udata, &uaddr)) {
853 			kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
854 		}
855 	}
856 }
857 
858 exception_type_t
859 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
860 {
861 	kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
862 	    corpse_info->kcd_length);
863 	__assert_only uint32_t type = kcdata_iter_type(iter);
864 	assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
865 
866 	iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
867 	exception_type_t *etype = kcdata_iter_payload(iter);
868 	return *etype;
869 }
870 
871 /*
872  * Collect information required for generating a lightweight corpse for the
873  * current task, which may be terminating.
874  */
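/*
 * Rough flow of the collection below: (1) capture the user backtrace (and, on
 * LP64, any async stack) into a temporary frame buffer; (2) briefly suspend
 * the task to snapshot dyld's all_image_infos and shared-cache load info;
 * (3) record thread state, exception state, exit-reason data and assorted
 * proc/task fields into a TASK_BTINFO kcdata blob; (4) finally stamp the
 * TASK_BTINFO_FLAGS word that was reserved up front.
 */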
875 kern_return_t
876 current_thread_collect_backtrace_info(
877 	kcdata_descriptor_t *new_desc,
878 	exception_type_t etype,
879 	mach_exception_data_t code,
880 	mach_msg_type_number_t codeCnt,
881 	void *reasonp)
882 {
883 	kcdata_descriptor_t kcdata;
884 	kern_return_t kr;
885 	int frame_count = 0, max_frames = 100;
886 	mach_vm_address_t uuid_info_addr = 0;
887 	uint32_t uuid_info_count         = 0;
888 	uint32_t btinfo_flag             = 0;
889 	mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
890 	natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
891 	mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
892 	thread_identifier_info_data_t th_info;
893 	char threadname[MAXTHREADNAMESIZE];
894 	void *btdata_kernel = NULL;
895 	typedef uintptr_t user_btframe_t __kernel_data_semantics;
896 	user_btframe_t *btframes = NULL;
897 	os_reason_t reason = (os_reason_t)reasonp;
898 	struct backtrace_user_info info = BTUINFO_INIT;
899 	struct rusage_superset rup;
900 	uint32_t platform;
901 
902 	task_t task = current_task();
903 	proc_t p = current_proc();
904 
905 	bool has_64bit_addr = task_get_64bit_addr(current_task());
906 	bool has_64bit_data = task_get_64bit_data(current_task());
907 
908 	if (new_desc == NULL) {
909 		return KERN_INVALID_ARGUMENT;
910 	}
911 
912 	/* First, collect backtrace frames */
913 	btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
914 	if (!btframes) {
915 		return KERN_RESOURCE_SHORTAGE;
916 	}
917 
918 	frame_count = backtrace_user(btframes, max_frames, NULL, &info);
919 	if (info.btui_error || frame_count == 0) {
920 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
921 		return KERN_FAILURE;
922 	}
923 
924 	if ((info.btui_info & BTI_TRUNCATED) != 0) {
925 		btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
926 	}
927 
928 	/* Captured in kcdata descriptor below */
929 	btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
930 	if (!btdata_kernel) {
931 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
932 		return KERN_RESOURCE_SHORTAGE;
933 	}
934 
935 	kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
936 	if (!kcdata) {
937 		kfree_data(btdata_kernel, alloc_size);
938 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
939 		return KERN_RESOURCE_SHORTAGE;
940 	}
941 
942 	/* First reserve space in kcdata blob for the btinfo flag fields */
943 	if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
944 	    sizeof(uint32_t), &btinfo_flag_addr)) {
945 		kfree_data(btdata_kernel, alloc_size);
946 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
947 		kcdata_memory_destroy(kcdata);
948 		return KERN_RESOURCE_SHORTAGE;
949 	}
950 
951 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
952 	    (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
953 	    sizeof(uintptr_t), frame_count, &kaddr)) {
954 		kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
955 	}
956 
957 #if __LP64__
958 	/* We only support async stacks on 64-bit kernels */
959 	frame_count = 0;
960 
961 	if (info.btui_async_frame_addr != 0) {
962 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
963 		    sizeof(uint32_t), &kaddr)) {
964 			uint32_t idx = info.btui_async_start_index;
965 			kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
966 		}
967 		struct backtrace_control ctl = {
968 			.btc_frame_addr = info.btui_async_frame_addr,
969 			.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
970 		};
971 
972 		info = BTUINFO_INIT;
973 		frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
974 		if (info.btui_error == 0 && frame_count > 0) {
975 			if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
976 			    TASK_BTINFO_ASYNC_BACKTRACE64,
977 			    sizeof(uintptr_t), frame_count, &kaddr)) {
978 				kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
979 			}
980 		}
981 
982 		if ((info.btui_info & BTI_TRUNCATED) != 0) {
983 			btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
984 		}
985 	}
986 #endif
987 
988 	/* Backtrace collection done, free the frames buffer */
989 	kfree_data(btframes, max_frames * sizeof(btframes[0]));
990 	btframes = NULL;
991 
992 	/* Next, suspend the task briefly and collect image load infos */
993 	task_suspend_internal(task);
994 
995 	/* all_image_info struct is ABI, in agreement with address width */
996 	if (has_64bit_addr) {
997 		struct user64_dyld_all_image_infos task_image_infos = {};
998 		struct btinfo_sc_load_info64 sc_info;
999 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1000 		    sizeof(struct user64_dyld_all_image_infos));
1001 		uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1002 		uuid_info_addr = task_image_infos.uuidArray;
1003 
1004 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1005 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1006 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1007 		    sizeof(task_image_infos.sharedCacheUUID));
1008 
1009 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1010 		    TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1011 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1012 		}
1013 	} else {
1014 		struct user32_dyld_all_image_infos task_image_infos = {};
1015 		struct btinfo_sc_load_info sc_info;
1016 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1017 		    sizeof(struct user32_dyld_all_image_infos));
1018 		uuid_info_count = task_image_infos.uuidArrayCount;
1019 		uuid_info_addr = task_image_infos.uuidArray;
1020 
1021 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1022 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1023 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1024 		    sizeof(task_image_infos.sharedCacheUUID));
1025 
1026 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1027 		    TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1028 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1029 		}
1030 	}
1031 
1032 	if (!uuid_info_addr) {
1033 		/*
1034 		 * Can happen when we catch dyld in the middle of updating
1035 		 * this data structure, or copyin of all_image_info struct failed.
1036 		 */
1037 		task_resume_internal(task);
1038 		kfree_data(btdata_kernel, alloc_size);
1039 		kcdata_memory_destroy(kcdata);
1040 		return KERN_MEMORY_ERROR;
1041 	}
1042 
1043 	if (uuid_info_count > 0) {
1044 		uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1045 		    sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1046 
1047 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1048 		    (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1049 		    uuid_info_size, uuid_info_count, &kaddr)) {
1050 			if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1051 				task_resume_internal(task);
1052 				kfree_data(btdata_kernel, alloc_size);
1053 				kcdata_memory_destroy(kcdata);
1054 				return KERN_MEMORY_ERROR;
1055 			}
1056 		}
1057 	}
1058 
1059 	task_resume_internal(task);
1060 
1061 	/* Next, collect all other information */
1062 	thread_flavor_t tsflavor;
1063 	mach_msg_type_number_t tscount;
1064 
1065 #if defined(__x86_64__) || defined(__i386__)
1066 	tsflavor = x86_THREAD_STATE;      /* unified */
1067 	tscount  = x86_THREAD_STATE_COUNT;
1068 #else
1069 	tsflavor = ARM_THREAD_STATE;      /* unified */
1070 	tscount  = ARM_UNIFIED_THREAD_STATE_COUNT;
1071 #endif
1072 
1073 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1074 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1075 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1076 		bt_thread_state->flavor = tsflavor;
1077 		bt_thread_state->count = tscount;
1078 		/* variable-sized tstate array follows */
1079 
1080 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1081 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1082 		if (kr != KERN_SUCCESS) {
1083 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1084 			if (kr == KERN_TERMINATED) {
1085 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1086 			}
1087 		}
1088 	}
1089 
1090 #if defined(__x86_64__) || defined(__i386__)
1091 	tsflavor = x86_EXCEPTION_STATE;       /* unified */
1092 	tscount  = x86_EXCEPTION_STATE_COUNT;
1093 #else
1094 #if defined(__arm64__)
1095 	if (has_64bit_data) {
1096 		tsflavor = ARM_EXCEPTION_STATE64;
1097 		tscount  = ARM_EXCEPTION_STATE64_COUNT;
1098 	} else
1099 #endif /* defined(__arm64__) */
1100 	{
1101 		tsflavor = ARM_EXCEPTION_STATE;
1102 		tscount  = ARM_EXCEPTION_STATE_COUNT;
1103 	}
1104 #endif /* defined(__x86_64__) || defined(__i386__) */
1105 
1106 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1107 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1108 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1109 		bt_thread_state->flavor = tsflavor;
1110 		bt_thread_state->count = tscount;
1111 		/* variable-sized tstate array follows */
1112 
1113 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1114 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1115 		if (kr != KERN_SUCCESS) {
1116 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1117 			if (kr == KERN_TERMINATED) {
1118 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1119 			}
1120 		}
1121 	}
1122 
1123 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1124 		pid_t pid = proc_getpid(p);
1125 		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1126 	}
1127 
1128 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1129 		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1130 	}
1131 
1132 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1133 		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1134 	}
1135 
1136 #if CONFIG_COALITIONS
1137 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1138 		uint64_t coalition_ids[COALITION_NUM_TYPES];
1139 		task_coalition_ids(proc_task(p), coalition_ids);
1140 		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1141 	}
1142 #endif /* CONFIG_COALITIONS */
1143 
1144 	/* V0 is sufficient for ReportCrash */
1145 	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1146 	rup.ri.ri_phys_footprint = 0;
1147 	/* Soft crash, proc did not exit */
1148 	rup.ri.ri_proc_exit_abstime = 0;
1149 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1150 		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1151 	}
1152 
1153 	platform = proc_platform(current_proc());
1154 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1155 		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1156 	}
1157 
1158 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1159 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1160 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1161 		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1162 		zfree(ZV_NAMEI, buf);
1163 	}
1164 
1165 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1166 		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1167 	}
1168 
1169 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1170 		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1171 	}
1172 
1173 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1174 		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1175 		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1176 	}
1177 
1178 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1179 		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1180 		if (has_64bit_addr) {
1181 			cputype |= CPU_ARCH_ABI64;
1182 		} else if (has_64bit_data) {
1183 			cputype |= CPU_ARCH_ABI64_32;
1184 		}
1185 		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1186 	}
1187 
1188 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1189 		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1190 	}
1191 
1192 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1193 		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1194 	}
1195 
1196 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1197 		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1198 	}
1199 
1200 	assert(codeCnt <= EXCEPTION_CODE_MAX);
1201 
1202 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1203 	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1204 		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1205 	}
1206 
1207 	if (reason != OS_REASON_NULL) {
1208 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1209 			struct exit_reason_snapshot ers = {
1210 				.ers_namespace = reason->osr_namespace,
1211 				.ers_code = reason->osr_code,
1212 				.ers_flags = reason->osr_flags
1213 			};
1214 
1215 			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1216 		}
1217 
1218 		if (reason->osr_kcd_buf != 0) {
1219 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1220 			assert(reason_buf_size != 0);
1221 
1222 			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1223 				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1224 			}
1225 		}
1226 	}
1227 
1228 	threadname[0] = '\0';
1229 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1230 	    sizeof(threadname), &kaddr)) {
1231 		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1232 		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1233 	}
1234 
1235 	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1236 	if (kr == KERN_TERMINATED) {
1237 		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1238 	}
1239 
1240 
1241 	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1242 	    sizeof(uint64_t), &kaddr);
1243 
1244 	/*
1245 	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to exception
1246 	 * handler (ReportCrash) that lw corpse collection ran out of space and the
1247 	 * result is incomplete.
1248 	 */
1249 	if (last_kr != KERN_SUCCESS) {
1250 		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1251 	}
1252 
1253 	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1254 		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1255 	}
1256 
1257 	/* Lastly, copy the flags to the address we reserved at the beginning. */
1258 	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1259 
1260 	*new_desc = kcdata;
1261 
1262 	return KERN_SUCCESS;
1263 }
1264 
1265 /*
1266  * We only parse exit reason kcdata blobs for critical processes before they die
1267  * (when we're about to panic), or for opt-in, limited diagnostic tools.
1268  *
1269  * Meant to be called immediately before panicking, or in limited diagnostic
1270  * scenarios.
1271  */
1272 char *
1273 exit_reason_get_string_desc(os_reason_t exit_reason)
1274 {
1275 	kcdata_iter_t iter;
1276 
1277 	if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1278 	    exit_reason->osr_bufsize == 0) {
1279 		return NULL;
1280 	}
1281 
1282 	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1283 	if (!kcdata_iter_valid(iter)) {
1284 #if DEBUG || DEVELOPMENT
1285 		printf("exit reason has invalid exit reason buffer\n");
1286 #endif
1287 		return NULL;
1288 	}
1289 
1290 	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1291 #if DEBUG || DEVELOPMENT
1292 		printf("exit reason buffer type mismatch, expected %d got %d\n",
1293 		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1294 #endif
1295 		return NULL;
1296 	}
1297 
1298 	iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1299 	if (!kcdata_iter_valid(iter)) {
1300 		return NULL;
1301 	}
1302 
1303 	return (char *)kcdata_iter_payload(iter);
1304 }
1305 
1306 static int initproc_spawned = 0;
1307 
1308 static int
1309 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1310 {
1311 	if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1312 		// Can only ever be set by launchd, and only once at boot
1313 		return EPERM;
1314 	}
1315 	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1316 }
1317 
1318 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1319     CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1320     sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
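/*
 * Usage note (illustrative): launchd (pid 1) writes kern.initproc_spawned
 * exactly once when it reaches main; the handler above rejects any other
 * writer and any second write. The value can be read by anyone, e.g. with
 * `sysctl kern.initproc_spawned` from a shell.
 */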
1321 
1322 #if DEVELOPMENT || DEBUG
1323 
1324 /* disable user faults */
1325 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1326 #endif /* DEVELOPMENT || DEBUG */
1327 
1328 #define OS_REASON_IFLAG_USER_FAULT 0x1
1329 
1330 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC  5
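/*
 * Each process may raise at most OS_REASON_TOTAL_USER_FAULTS_PER_PROC
 * simulated user-fault guard exceptions. abort_with_payload_internal() below
 * enforces the cap with a relaxed compare-and-swap loop on p->p_user_faults
 * and returns EQFULL once the budget is exhausted.
 */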
1331 
1332 static int
1333 abort_with_payload_internal(proc_t p,
1334     uint32_t reason_namespace, uint64_t reason_code,
1335     user_addr_t payload, uint32_t payload_size,
1336     user_addr_t reason_string, uint64_t reason_flags,
1337     uint32_t internal_flags)
1338 {
1339 	os_reason_t exit_reason = OS_REASON_NULL;
1340 	kern_return_t kr = KERN_SUCCESS;
1341 
1342 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1343 		uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1344 		    memory_order_relaxed);
1345 
1346 #if DEVELOPMENT || DEBUG
1347 		if (bootarg_disable_user_faults) {
1348 			return EQFULL;
1349 		}
1350 #endif /* DEVELOPMENT || DEBUG */
1351 
1352 		for (;;) {
1353 			if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1354 				return EQFULL;
1355 			}
1356 			// on CAS failure, this reloads the current value into old_value
1357 			if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1358 			    &old_value, old_value + 1, memory_order_relaxed,
1359 			    memory_order_relaxed)) {
1360 				break;
1361 			}
1362 		}
1363 	}
1364 
1365 	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1366 	    proc_getpid(p), reason_namespace,
1367 	    reason_code, 0, 0);
1368 
1369 	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1370 	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1371 
1372 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1373 		mach_exception_code_t code = 0;
1374 
1375 		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1376 		EXC_GUARD_ENCODE_FLAVOR(code, 0);
1377 		EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1378 
1379 		if (exit_reason == OS_REASON_NULL) {
1380 			kr = KERN_RESOURCE_SHORTAGE;
1381 		} else {
1382 			kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1383 		}
1384 		os_reason_free(exit_reason);
1385 	} else {
1386 		/*
1387 		 * We use SIGABRT (rather than calling exit directly from here) so that
1388 		 * the debugger can catch abort_with_{reason,payload} calls.
1389 		 */
1390 		psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1391 	}
1392 
1393 	switch (kr) {
1394 	case KERN_SUCCESS:
1395 		return 0;
1396 	case KERN_NOT_SUPPORTED:
1397 		return ENOTSUP;
1398 	case KERN_INVALID_ARGUMENT:
1399 		return EINVAL;
1400 	case KERN_RESOURCE_SHORTAGE:
1401 	default:
1402 		return EBUSY;
1403 	}
1404 }
1405 
1406 int
1407 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1408     __unused void *retval)
1409 {
1410 	abort_with_payload_internal(cur_proc, args->reason_namespace,
1411 	    args->reason_code, args->payload, args->payload_size,
1412 	    args->reason_string, args->reason_flags, 0);
1413 
1414 	return 0;
1415 }
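/*
 * Reached from userspace through Libsyscall wrappers such as
 * abort_with_reason()/abort_with_payload(); a call looks roughly like
 * (illustrative sketch only):
 *
 *     abort_with_payload(OS_REASON_LIBSYSTEM, 42, &info, sizeof(info),
 *         "example failure", 0);
 *
 * Both wrappers funnel into abort_with_payload_internal() above, which
 * delivers SIGABRT with the constructed exit reason attached.
 */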
1416 
1417 int
1418 os_fault_with_payload(struct proc *cur_proc,
1419     struct os_fault_with_payload_args *args, __unused int *retval)
1420 {
1421 	return abort_with_payload_internal(cur_proc, args->reason_namespace,
1422 	           args->reason_code, args->payload, args->payload_size,
1423 	           args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1424 }
1425 
1426 
1427 /*
1428  * exit --
1429  *	Death of process.
1430  */
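/*
 * Status encoding note (a sketch, assuming the classic BSD <sys/wait.h>
 * macros where W_EXITCODE(ret, sig) == ((ret) << 8) | (sig)): only the low
 * 8 bits of the exit value survive the trip through wait4(), e.g. exit(3)
 * becomes status 0x0300 and WEXITSTATUS() == 3. The top byte of the 32-bit
 * rval is stashed in p_xhighbits below and handed to the parent later in
 * proc_exit().
 */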
1431 __attribute__((noreturn))
1432 void
1433 exit(proc_t p, struct exit_args *uap, int *retval)
1434 {
1435 	p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1436 	exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1437 
1438 	thread_exception_return();
1439 	/* NOTREACHED */
1440 	while (TRUE) {
1441 		thread_block(THREAD_CONTINUE_NULL);
1442 	}
1443 	/* NOTREACHED */
1444 }
1445 
1446 /*
1447  * Exit: deallocate address space and other resources, change proc state
1448  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
1449  * status and rusage for wait().  Check for child processes and orphan them.
1450  */
1451 int
1452 exit1(proc_t p, int rv, int *retval)
1453 {
1454 	return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1455 }
1456 
1457 int
1458 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1459     int jetsam_flags)
1460 {
1461 	return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1462 }
1463 
1464 /*
1465  * NOTE: exit_with_reason drops a reference on the passed exit_reason
1466  */
1467 int
1468 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1469     int jetsam_flags, struct os_reason *exit_reason)
1470 {
1471 	thread_t self = current_thread();
1472 	struct task *task = proc_task(p);
1473 	struct uthread *ut;
1474 	int error = 0;
1475 	bool proc_exiting = false;
1476 
1477 #if DEVELOPMENT || DEBUG
1478 	/*
1479 	 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1480 	 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1481 	 */
1482 	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1483 		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1484 		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1485 	}
1486 #endif
1487 
1488 	/*
1489 	 * If a thread in this task has already
1490 	 * called exit(), then halt any others
1491 	 * right here.
1492 	 */
1493 
1494 	ut = get_bsdthread_info(self);
1495 	(void)retval;
1496 
1497 	/*
1498 	 * The parameter list of audit_syscall_exit() was augmented to
1499 	 * take the Darwin syscall number as the first parameter,
1500 	 * which is currently required by mac_audit_postselect().
1501 	 */
1502 
1503 	/*
1504 	 * The BSM token contains two components: an exit status as passed
1505 	 * to exit(), and a return value to indicate what sort of exit it
1506 	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
1507 	 * what the return value is.
1508 	 */
1509 	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1510 	/*
1511 	 * TODO: what to audit here when jetsam calls exit and the uthread 'ut'
1512 	 * does not belong to the proc 'p'.
1513 	 */
1514 	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1515 
1516 	DTRACE_PROC1(exit, int, CLD_EXITED);
1517 
1518 	/* mark that the process is going to exit and pull it out of DBG/disk throttle */
1519 	/* TODO: This should be done after becoming exit thread */
1520 	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1521 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1522 
1523 	proc_lock(p);
1524 	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1525 	if (error == EDEADLK) {
1526 		/*
1527 		 * If proc_transstart() returns EDEADLK, then another thread
1528 		 * is either exec'ing or exiting. Return an error and allow
1529 		 * the other thread to continue.
1530 		 */
1531 		proc_unlock(p);
1532 		os_reason_free(exit_reason);
1533 		if (current_proc() == p) {
1534 			if (p->exit_thread == self) {
1535 				panic("exit_thread failed to exit");
1536 			}
1537 
1538 			if (thread_can_terminate) {
1539 				thread_exception_return();
1540 			}
1541 		}
1542 
1543 		return error;
1544 	}
1545 
1546 	proc_exiting = !!(p->p_lflag & P_LEXIT);
1547 
1548 	while (proc_exiting || p->exit_thread != self) {
1549 		if (proc_exiting || sig_try_locked(p) <= 0) {
1550 			proc_transend(p, 1);
1551 			os_reason_free(exit_reason);
1552 
1553 			if (get_threadtask(self) != task) {
1554 				proc_unlock(p);
1555 				return 0;
1556 			}
1557 			proc_unlock(p);
1558 
1559 			thread_terminate(self);
1560 			if (!thread_can_terminate) {
1561 				return 0;
1562 			}
1563 
1564 			thread_exception_return();
1565 			/* NOTREACHED */
1566 		}
1567 		sig_lock_to_exit(p);
1568 	}
1569 
1570 	if (exit_reason != OS_REASON_NULL) {
1571 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1572 		    proc_getpid(p), exit_reason->osr_namespace,
1573 		    exit_reason->osr_code, 0, 0);
1574 	}
1575 
1576 	assert(p->p_exit_reason == OS_REASON_NULL);
1577 	p->p_exit_reason = exit_reason;
1578 
1579 	p->p_lflag |= P_LEXIT;
1580 	p->p_xstat = rv;
1581 	p->p_lflag |= jetsam_flags;
1582 
1583 	proc_transend(p, 1);
1584 	proc_unlock(p);
1585 
1586 	proc_prepareexit(p, rv, perf_notify);
1587 
1588 	/* Last thread to terminate will call proc_exit() */
1589 	task_terminate_internal(task);
1590 
1591 	return 0;
1592 }
1593 
1594 #if CONFIG_MEMORYSTATUS
1595 /*
1596  * Remove this process from jetsam bands for freezing or exiting. Note this will block if the process
1597  * is currently being frozen.
1598  * The proc_list_lock is held by the caller.
1599  * NB: If the process should be ineligible for future freezing or jetsam, the caller should first set
1600  * the p_refcount P_REF_DEAD bit.
1601  */
1602 static void
1603 proc_memorystatus_remove(proc_t p)
1604 {
1605 	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1606 	while (memorystatus_remove(p) == EAGAIN) {
1607 		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1608 		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1609 	}
1610 }
1611 #endif
1612 
1613 #if DEVELOPMENT
1614 boolean_t crash_behavior_test_mode = FALSE;
1615 boolean_t crash_behavior_test_would_panic = FALSE;
1616 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1617 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1618 #endif /* DEVELOPMENT */
1619 
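/*
 * On DEVELOPMENT kernels, the two sysctls above allow a dry run of the panic
 * policy: with kern.crash_behavior_test_mode set, proc_handle_critical_exit()
 * records the decision in crash_behavior_test_would_panic instead of actually
 * calling panic_with_options().
 */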
1620 static bool
1621 _proc_is_crashing_signal(int sig)
1622 {
1623 	bool result = false;
1624 	switch (sig) {
1625 	case SIGILL:
1626 	case SIGABRT:
1627 	case SIGFPE:
1628 	case SIGBUS:
1629 	case SIGSEGV:
1630 	case SIGSYS:
1631 	/*
1632 	 * If SIGTRAP is the terminating signal, then we can safely assume the
1633 	 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1634 	 * a process calls __builtin_trap(), which will abort.)
1635 	 */
1636 	case SIGTRAP:
1637 		result = true;
1638 	}
1639 
1640 	return result;
1641 }
1642 
1643 static bool
1644 _proc_is_fatal_reason(os_reason_t reason)
1645 {
1646 	if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1647 		/* Abort is always fatal even if there is no crash report generated */
1648 		return true;
1649 	}
1650 	if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1651 		/*
1652 		 * No crash report means this reason shouldn't be considered fatal
1653 		 * unless we are in test mode
1654 		 */
1655 #if DEVELOPMENT
1656 		if (crash_behavior_test_mode) {
1657 			return true;
1658 		}
1659 #endif /* DEVELOPMENT */
1660 		return false;
1661 	}
1662 	// By default all OS_REASON are fatal
1663 	return true;
1664 }
1665 
1666 static bool
1667 proc_should_trigger_panic(proc_t p, int rv)
1668 {
1669 	if (p == initproc || (p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1670 		/* Always panic for launchd or equivalents */
1671 		return true;
1672 	}
1673 
1674 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1675 		return true;
1676 	}
1677 
1678 	if (p->p_posix_spawn_failed) {
1679 		/* posix_spawn failures normally don't qualify for panics */
1680 		return false;
1681 	}
1682 
1683 	bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1684 	if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1685 		return false;
1686 	}
1687 
1688 	if (WIFEXITED(rv)) {
1689 		int code = WEXITSTATUS(rv);
1690 
1691 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1692 			if (code == 0) {
1693 				/* No panic if we exit 0 */
1694 				return false;
1695 			} else {
1696 				/* Panic on non-zero exit */
1697 				return true;
1698 			}
1699 		} else {
1700 			/* No panic on normal exit if the process doesn't have the non-zero flag set */
1701 			return false;
1702 		}
1703 	} else if (WIFSIGNALED(rv)) {
1704 		int signal = WTERMSIG(rv);
1705 		/* This is a crash (non-normal exit) */
1706 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1707 			os_reason_t reason = p->p_exit_reason;
1708 			if (reason != OS_REASON_NULL) {
1709 				if (!_proc_is_fatal_reason(reason)) {
1710 					// Skip non-fatal terminate_with_reason
1711 					return false;
1712 				}
1713 				if (reason->osr_namespace == OS_REASON_SIGNAL) {
1714 					return _proc_is_crashing_signal(signal);
1715 				} else {
1716 					/*
1717 					 * This branch covers the case of terminate_with_reason, which
1718 					 * delivers a SIGTERM that is still considered a crash even
1719 					 * though the signal itself is not a crashing signal
1720 					 */
1721 					return true;
1722 				}
1723 			}
1724 			return _proc_is_crashing_signal(signal);
1725 		} else {
1726 			return false;
1727 		}
1728 	} else {
1729 		/*
1730 		 * This branch implies that we didn't exit normally nor did we receive
1731 		 * a signal. This should be unreachable.
1732 		 */
1733 		return true;
1734 	}
1735 }
1736 
1737 static void
1738 proc_crash_coredump(proc_t p)
1739 {
1740 	(void)p;
1741 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1742 	/*
1743 	 * For debugging purposes, generate a core file of initproc before
1744 	 * panicking. Leave at least 300 MB free on the root volume, and ignore
1745 	 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1746 	 * before the panic hits.
1747 	 */
1748 
1749 	int             err;
1750 	uint64_t        coredump_start = mach_absolute_time();
1751 	uint64_t        coredump_end;
1752 	clock_sec_t     tv_sec;
1753 	clock_usec_t    tv_usec;
1754 	uint32_t        tv_msec;
1755 
1756 
1757 	err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1758 
1759 	coredump_end = mach_absolute_time();
1760 
1761 	absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1762 
1763 	tv_msec = tv_usec / 1000;
1764 
1765 	if (err != 0) {
1766 		printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1767 		    proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1768 	} else {
1769 		printf("Generated core file for pid: %d in %d.%03d seconds\n",
1770 		    proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1771 	}
1772 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1773 }
1774 
1775 static void
1776 proc_handle_critical_exit(proc_t p, int rv)
1777 {
1778 	if (!proc_should_trigger_panic(p, rv)) {
1779 		// No panic, bail out
1780 		return;
1781 	}
1782 
1783 #if DEVELOPMENT
1784 	if (crash_behavior_test_mode) {
1785 		crash_behavior_test_would_panic = TRUE;
1786 		// Force test mode off after hitting a panic
1787 		crash_behavior_test_mode = FALSE;
1788 		return;
1789 	}
1790 #endif /* DEVELOPMENT */
1791 
1792 	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1793 
1794 	if (p->p_exit_reason == OS_REASON_NULL) {
1795 		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1796 		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1797 	} else {
1798 		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1799 		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1800 		    exit_reason_desc : "none");
1801 	}
1802 
1803 	const char *prefix_str;
1804 	char prefix_str_buf[128];
1805 
1806 	if (p == initproc) {
1807 		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1808 			prefix_str = "LTE preinit process exited";
1809 		} else if (initproc_spawned) {
1810 			prefix_str = "initproc exited";
1811 		} else {
1812 			prefix_str = "initproc failed to start";
1813 		}
1814 	} else {
1815 		/* For processes that aren't launchd, just use the process name and pid */
1816 		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1817 		prefix_str = prefix_str_buf;
1818 	}
1819 
1820 	proc_crash_coredump(p);
1821 
1822 	sync(p, (void *)NULL, (int *)NULL);
1823 
1824 	if (p->p_exit_reason == OS_REASON_NULL) {
1825 		panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1826 		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1827 	} else {
1828 		panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1829 		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1830 		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1831 		    exit_reason_desc ? exit_reason_desc : "none");
1832 	}
1833 }
1834 
1835 void
1836 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1837 {
1838 	mach_exception_data_type_t code = 0, subcode = 0;
1839 	exception_type_t etype;
1840 
1841 	struct uthread *ut;
1842 	thread_t self = current_thread();
1843 	ut = get_bsdthread_info(self);
1844 	struct rusage_superset *rup;
1845 	int kr = 0;
1846 	int create_corpse = FALSE;
1847 
1848 	if (p->p_crash_behavior != 0 || p == initproc) {
1849 		proc_handle_critical_exit(p, rv);
1850 	}
1851 
1852 	/*
1853 	 * Generate a corefile/crashlog if:
1854 	 *      The process doesn't have an exit reason that indicates no crash report should be created
1855 	 *      AND any of the following are true:
1856 	 *	- The process was terminated due to a fatal signal that generates a core
1857 	 *	- The process was killed due to a code signing violation
1858 	 *	- The process has an exit reason that indicates we should generate a crash report
1859 	 *
1860 	 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1861 	 * (which normally triggers a core) but may indicate that no crash report should be created.
1862 	 */
1863 	if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1864 	    (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1865 	    (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1866 	    OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1867 		/*
1868 		 * Workaround for processes checking up on PT_DENY_ATTACH:
1869 		 * should be backed out post-Leopard (details in 5431025).
1870 		 */
1871 		if ((SIGSEGV == WTERMSIG(rv)) &&
1872 		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
1873 			goto skipcheck;
1874 		}
1875 
1876 		/*
1877 		 * Crash Reporter looks for the signal value, original exception
1878 		 * type, and low 20 bits of the original code in code[0]
1879 		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1880 		 */
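		/*
		 * For illustration (hypothetical values): a SIGSEGV (11) raised by
		 * EXC_BAD_ACCESS (1) with code KERN_INVALID_ADDRESS (1) packs as
		 * code[0] = (11 << 24) | (1 << 20) | 1 == 0x0B100001.
		 */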
1881 		code = ((WTERMSIG(rv) & 0xff) << 24) |
1882 		    ((ut->uu_exception & 0x0f) << 20) |
1883 		    ((int)ut->uu_code & 0xfffff);
1884 		subcode = ut->uu_subcode;
1885 		etype = ut->uu_exception;
1886 
1887 		/* Default to EXC_CRASH if the exception is neither EXC_RESOURCE nor EXC_GUARD */
1888 		if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1889 			etype = EXC_CRASH;
1890 		}
1891 
1892 #if (DEVELOPMENT || DEBUG)
1893 		if (p->p_pid <= exception_log_max_pid) {
1894 			char *proc_name = proc_best_name(p);
1895 			if (PROC_HAS_EXITREASON(p)) {
1896 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1897 				    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1898 				    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1899 				    exit_reason_get_string_desc(p->p_exit_reason));
1900 			} else {
1901 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1902 				    "pid: %d -- process name: %s -- exit status %d",
1903 				    proc_getpid(p), proc_name, WEXITSTATUS(rv));
1904 			}
1905 		}
1906 #endif
1907 
1908 		kr = task_exception_notify(EXC_CRASH, code, subcode);
1909 
1910 		/* Nobody handled EXC_CRASH?? remember to make corpse */
1911 		if (kr != 0 && p == current_proc()) {
1912 			/*
1913 			 * Do not create corpse when exit is called from jetsam thread.
1914 			 * Corpse creation code requires that proc_prepareexit is
1915 			 * called by the exiting proc and not the kernel_proc.
1916 			 */
1917 			create_corpse = TRUE;
1918 		}
1919 
1920 		/*
1921 		 * Revalidate the code signing of the text pages around current PC.
1922 		 * This is an attempt to detect and repair faults due to memory
1923 		 * corruption of text pages.
1924 		 *
1925 		 * The goal here is to fixup infrequent memory corruptions due to
1926 		 * things like aging RAM bit flips. So the approach is to only expect
1927 		 * to have to fixup one thing per crash. This also limits the amount
1928 		 * of extra work we cause in case this is a development kernel with an
1929 		 * active memory stomp happening.
1930 		 */
1931 		task_t task = proc_task(p);
1932 		uintptr_t bt[2];
1933 		struct backtrace_user_info btinfo = BTUINFO_INIT;
1934 		unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
1935 		int bt_err = btinfo.btui_error;
1936 		if (bt_err == 0 && frame_count >= 1) {
1937 			/*
1938 			 * First check at the page containing the current PC.
1939 			 * This passes if the page code signs -or- if we can't figure out
1940 			 * what is at that address. The latter action is so we continue checking
1941 			 * previous pages which may be corrupt and caused a wild branch.
1942 			 */
1943 			kr = revalidate_text_page(task, bt[0]);
1944 
1945 			/* No corruption found, check the previous sequential page */
1946 			if (kr == KERN_SUCCESS) {
1947 				kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
1948 			}
1949 
1950 			/* Still no corruption found, check the current function's caller */
1951 			if (kr == KERN_SUCCESS) {
1952 				if (frame_count > 1 &&
1953 				    atop(bt[0]) != atop(bt[1]) &&           /* don't recheck PC page */
1954 				    atop(bt[0]) - 1 != atop(bt[1])) {       /* don't recheck page before */
1955 					kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
1956 				}
1957 			}
1958 
1959 			/*
1960 			 * Log that we found a corruption.
1961 			 */
1962 			if (kr != KERN_SUCCESS) {
1963 				os_log(OS_LOG_DEFAULT,
1964 				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
1965 			}
1966 		}
1967 	}
1968 
1969 skipcheck:
1970 	if (task_is_driver(proc_task(p)) && PROC_HAS_EXITREASON(p)) {
1971 		IOUserServerRecordExitReason(proc_task(p), p->p_exit_reason);
1972 	}
1973 
1974 	/* Notify the perf server? */
1975 	if (perf_notify) {
1976 		(void)sys_perf_notify(self, proc_getpid(p));
1977 	}
1978 
1979 
1980 	/* stash the usage into corpse data if create_corpse == TRUE */
1981 	if (create_corpse == TRUE) {
1982 		kr = task_mark_corpse(proc_task(p));
1983 		if (kr != KERN_SUCCESS) {
1984 			if (kr == KERN_NO_SPACE) {
1985 				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
1986 			} else if (kr == KERN_NOT_SUPPORTED) {
1987 				printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n", proc_getpid(p));
1988 			} else if (kr == KERN_TERMINATED) {
1989 				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
1990 			} else {
1991 				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
1992 			}
1993 			create_corpse = FALSE;
1994 		}
1995 	}
1996 
1997 	if (!proc_is_shadow(p)) {
1998 		/*
1999 		 * Before this process becomes a zombie, stash resource usage
2000 		 * stats in the proc for external observers to query
2001 		 * via proc_pid_rusage().
2002 		 *
2003 		 * If the zombie allocation fails, just punt the stats.
2004 		 */
2005 		rup = zalloc(zombie_zone);
2006 		gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2007 		rup->ri.ri_phys_footprint = 0;
2008 		rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2009 		/*
2010 		 * Make the rusage_info visible to external observers
2011 		 * only after it has been completely filled in.
2012 		 */
2013 		p->p_ru = rup;
2014 	}
2015 
2016 	if (create_corpse) {
2017 		int est_knotes = 0, num_knotes = 0;
2018 		uint64_t *buffer = NULL;
2019 		uint32_t buf_size = 0;
2020 
2021 		/* Get all the udata pointers from kqueue */
2022 		est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2023 		if (est_knotes > 0) {
2024 			buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2025 			buffer = kalloc_data(buf_size, Z_WAITOK);
2026 			if (buffer) {
2027 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2028 				if (num_knotes > est_knotes + 32) {
2029 					num_knotes = est_knotes + 32;
2030 				}
2031 			}
2032 		}
2033 
2034 		/* Update the code, subcode based on exit reason */
2035 		proc_update_corpse_exception_codes(p, &code, &subcode);
2036 		populate_corpse_crashinfo(p, proc_task(p), rup,
2037 		    code, subcode, buffer, num_knotes, NULL, etype);
2038 		kfree_data(buffer, buf_size);
2039 	}
2040 	/*
2041 	 * Remove proc from allproc queue and from pidhash chain.
2042 	 * Need to do this before we do anything that can block.
2043 	 * Not doing so causes things like mount() to find this proc on allproc
2044 	 * in a partially cleaned state.
2045 	 */
2046 
2047 	proc_list_lock();
2048 
2049 #if CONFIG_MEMORYSTATUS
2050 	proc_memorystatus_remove(p);
2051 #endif
2052 
2053 	LIST_REMOVE(p, p_list);
2054 	LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2055 	/* will not be visible via proc_find */
2056 	os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2057 
2058 	proc_list_unlock();
2059 
2060 	/*
2061 	 * If parent is waiting for us to exit or exec,
2062 	 * P_LPPWAIT is set; we will wakeup the parent below.
2063 	 */
2064 	proc_lock(p);
2065 	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2066 	p->p_sigignore = ~(sigcantmask);
2067 
2068 	/*
2069 	 * If a thread is already waiting for us in proc_exit,
2070 	 * P_LTERM is set, wakeup the thread.
2071 	 */
2072 	if (p->p_lflag & P_LTERM) {
2073 		wakeup(&p->exit_thread);
2074 	} else {
2075 		p->p_lflag |= P_LTERM;
2076 	}
2077 
2078 	/* If current proc is exiting, ignore signals on the exit thread */
2079 	if (p == current_proc()) {
2080 		ut->uu_siglist = 0;
2081 	}
2082 	proc_unlock(p);
2083 }
2084 
2085 void
2086 proc_exit(proc_t p)
2087 {
2088 	proc_t q;
2089 	proc_t pp;
2090 	struct task *task = proc_task(p);
2091 	vnode_t tvp = NULLVP;
2092 	struct pgrp * pg;
2093 	struct session *sessp;
2094 	struct uthread * uth;
2095 	pid_t pid;
2096 	int exitval;
2097 	int knote_hint;
2098 
2099 	uth = current_uthread();
2100 
2101 	proc_lock(p);
2102 	proc_transstart(p, 1, 0);
2103 	if (!(p->p_lflag & P_LEXIT)) {
2104 		/*
2105 		 * This can happen if a thread_terminate() occurs
2106 		 * in a single-threaded process.
2107 		 */
2108 		p->p_lflag |= P_LEXIT;
2109 		proc_transend(p, 1);
2110 		proc_unlock(p);
2111 		proc_prepareexit(p, 0, TRUE);
2112 		(void) task_terminate_internal(task);
2113 		proc_lock(p);
2114 	} else if (!(p->p_lflag & P_LTERM)) {
2115 		proc_transend(p, 1);
2116 		/* Jetsam is in the middle of calling proc_prepareexit, wait for it */
2117 		p->p_lflag |= P_LTERM;
2118 		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2119 	} else {
2120 		proc_transend(p, 1);
2121 	}
2122 
2123 	p->p_lflag |= P_LPEXIT;
2124 
2125 	/*
2126 	 * Other kernel threads may be in the middle of signalling this process.
2127 	 * Wait for those threads to wrap it up before making the process
2128 	 * disappear on them.
2129 	 */
2130 	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2131 		p->p_sigwaitcnt++;
2132 		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2133 			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2134 		}
2135 		p->p_sigwaitcnt--;
2136 	}
2137 
2138 	proc_unlock(p);
2139 	pid = proc_getpid(p);
2140 	exitval = p->p_xstat;
2141 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2142 	    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2143 	    pid, exitval, 0, 0, 0);
2144 
2145 #if DEVELOPMENT || DEBUG
2146 	proc_exit_lpexit_check(pid, PELS_POS_START);
2147 #endif
2148 
2149 #if CONFIG_DTRACE
2150 	dtrace_proc_exit(p);
2151 #endif
2152 
2153 	/*
2154 	 * need to cancel async IO requests that can be cancelled and wait for those
2155 	 * already active.  MAY BLOCK!
2156 	 */
2157 
2158 	proc_refdrain(p);
2159 
2160 	/* if any pending cpu limits action, clear it */
2161 	task_clear_cpuusage(proc_task(p), TRUE);
2162 
2163 	workq_mark_exiting(p);
2164 
2165 	_aio_exit( p );
2166 
2167 	/*
2168 	 * Close open files and release open-file table.
2169 	 * This may block!
2170 	 */
2171 	fdt_invalidate(p);
2172 
2173 	/*
2174 	 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2175 	 * workqueue.
2176 	 */
2177 	workq_exit(p);
2178 
2179 	if (uth->uu_lowpri_window) {
2180 		/*
2181 		 * task is marked as a low priority I/O type
2182 		 * and the I/O we issued while flushing files on close
2183 		 * collided with normal I/O operations...
2184 		 * no need to throttle this thread since it's going away,
2185 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2186 		 */
2187 		throttle_lowpri_io(0);
2188 	}
2189 
2190 	if (p->p_lflag & P_LNSPACE_RESOLVER) {
2191 		/*
2192 		 * The namespace resolver is exiting; there may be
2193 		 * outstanding materialization requests to clean up.
2194 		 */
2195 		nspace_resolver_exited(p);
2196 	}
2197 
2198 #if SYSV_SHM
2199 	/* Close ref on SYSV shared memory */
2200 	if (p->vm_shm) {
2201 		shmexit(p);
2202 	}
2203 #endif
2204 #if SYSV_SEM
2205 	/* Release SYSV semaphores */
2206 	semexit(p);
2207 #endif
2208 
2209 #if PSYNCH
2210 	pth_proc_hashdelete(p);
2211 #endif /* PSYNCH */
2212 
2213 	pg = proc_pgrp(p, &sessp);
2214 	if (SESS_LEADER(p, sessp)) {
2215 		if (sessp->s_ttyvp != NULLVP) {
2216 			struct vnode *ttyvp;
2217 			int ttyvid;
2218 			int cttyflag = 0;
2219 			struct vfs_context context;
2220 			struct tty *tp;
2221 			struct pgrp *tpgrp = PGRP_NULL;
2222 
2223 			/*
2224 			 * Controlling process.
2225 			 * Signal foreground pgrp,
2226 			 * drain controlling terminal
2227 			 * and revoke access to controlling terminal.
2228 			 */
2229 
2230 			proc_list_lock(); /* prevent any t_pgrp from changing */
2231 			session_lock(sessp);
2232 			if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2233 				tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2234 			}
2235 			proc_list_unlock();
2236 
2237 			if (tpgrp != PGRP_NULL) {
2238 				session_unlock(sessp);
2239 				pgsignal(tpgrp, SIGHUP, 1);
2240 				pgrp_rele(tpgrp);
2241 				session_lock(sessp);
2242 			}
2243 
2244 			cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2245 			    S_CTTYREF, relaxed) & S_CTTYREF);
2246 			ttyvp = sessp->s_ttyvp;
2247 			ttyvid = sessp->s_ttyvid;
2248 			tp = session_clear_tty_locked(sessp);
2249 			if (ttyvp) {
2250 				vnode_hold(ttyvp);
2251 			}
2252 			session_unlock(sessp);
2253 
2254 			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2255 				if (tp != TTY_NULL) {
2256 					tty_lock(tp);
2257 					(void) ttywait(tp);
2258 					tty_unlock(tp);
2259 				}
2260 
2261 				context.vc_thread = NULL;
2262 				context.vc_ucred = kauth_cred_proc_ref(p);
2263 				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2264 				if (cttyflag) {
2265 					/*
2266 					 * Release the extra usecount taken in cttyopen.
2267 					 * usecount should be released after VNOP_REVOKE is called.
2268 					 * This usecount was taken to ensure that
2269 					 * the VNOP_REVOKE results in a close to
2270 					 * the tty since cttyclose is a no-op.
2271 					 */
2272 					vnode_rele(ttyvp);
2273 				}
2274 				vnode_put(ttyvp);
2275 				kauth_cred_unref(&context.vc_ucred);
2276 				vnode_drop(ttyvp);
2277 				ttyvp = NULLVP;
2278 			}
2279 			if (ttyvp) {
2280 				vnode_drop(ttyvp);
2281 			}
2282 			if (tp) {
2283 				ttyfree(tp);
2284 			}
2285 		}
2286 		session_lock(sessp);
2287 		sessp->s_leader = NULL;
2288 		session_unlock(sessp);
2289 	}
2290 
2291 	if (!proc_is_shadow(p)) {
2292 		fixjobc(p, pg, 0);
2293 	}
2294 	pgrp_rele(pg);
2295 
2296 	/*
2297 	 * Change RLIMIT_FSIZE for accounting/debugging.
2298 	 */
2299 	proc_limitsetcur_fsize(p, RLIM_INFINITY);
2300 
2301 	(void)acct_process(p);
2302 
2303 	proc_list_lock();
2304 
2305 	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2306 		p->p_listflag &= ~P_LIST_EXITCOUNT;
2307 		proc_shutdown_exitcount--;
2308 		if (proc_shutdown_exitcount == 0) {
2309 			wakeup(&proc_shutdown_exitcount);
2310 		}
2311 	}
2312 
2313 	/* wait till parentrefs are dropped and grant no more */
2314 	proc_childdrainstart(p);
2315 	while ((q = p->p_children.lh_first) != NULL) {
2316 		if (q->p_stat == SZOMB) {
2317 			if (p != q->p_pptr) {
2318 				panic("parent child linkage broken");
2319 			}
2320 			/* check for sysctl zomb lookup */
2321 			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2322 				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2323 			}
2324 			q->p_listflag |= P_LIST_WAITING;
2325 			/*
2326 			 * This is a named reference and it is not granted
2327 			 * if the reap is already in progress. So we get
2328 			 * the reference here exclusively and there can be
2329 			 * no waiters. So there is no need for a wakeup
2330 			 * after we are done. Also, the reap frees the structure,
2331 			 * so the proc struct cannot be used for wakeups either.
2332 			 * It is safe to use q here as this is system reap
2333 			 */
2334 			reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2335 			    REAP_REPARENTED_TO_INIT : 0;
2336 			reap_child_locked(p, q,
2337 			    REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2338 		} else {
2339 			/*
2340 			 * Traced processes are killed
2341 			 * since their existence means someone is messing up.
2342 			 */
2343 			if (q->p_lflag & P_LTRACED) {
2344 				struct proc *opp;
2345 
2346 				/*
2347 				 * Take a reference on the child process to
2348 				 * ensure it doesn't exit and disappear between
2349 				 * the time we drop the list_lock and attempt
2350 				 * to acquire its proc_lock.
2351 				 */
2352 				if (proc_ref(q, true) != q) {
2353 					continue;
2354 				}
2355 
2356 				proc_list_unlock();
2357 
2358 				opp = proc_find(q->p_oppid);
2359 				if (opp != PROC_NULL) {
2360 					proc_list_lock();
2361 					q->p_oppid = 0;
2362 					proc_list_unlock();
2363 					proc_reparentlocked(q, opp, 0, 0);
2364 					proc_rele(opp);
2365 				} else {
2366 					/* original parent exited while traced */
2367 					proc_list_lock();
2368 					q->p_listflag |= P_LIST_DEADPARENT;
2369 					q->p_oppid = 0;
2370 					proc_list_unlock();
2371 					proc_reparentlocked(q, initproc, 0, 0);
2372 				}
2373 
2374 				proc_lock(q);
2375 				q->p_lflag &= ~P_LTRACED;
2376 
2377 				if (q->sigwait_thread) {
2378 					thread_t thread = q->sigwait_thread;
2379 
2380 					proc_unlock(q);
2381 					/*
2382 					 * The sigwait_thread could be stopped at a
2383 					 * breakpoint. Wake it up to kill.
2384 					 * Need to do this as it could be a thread which is not
2385 					 * the first thread in the task. So any attempts to kill
2386 					 * the process would result in a deadlock on q->sigwait.
2387 					 */
2388 					thread_resume(thread);
2389 					clear_wait(thread, THREAD_INTERRUPTED);
2390 					threadsignal(thread, SIGKILL, 0, TRUE);
2391 				} else {
2392 					proc_unlock(q);
2393 				}
2394 
2395 				psignal(q, SIGKILL);
2396 				proc_list_lock();
2397 				proc_rele(q);
2398 			} else {
2399 				q->p_listflag |= P_LIST_DEADPARENT;
2400 				proc_reparentlocked(q, initproc, 0, 1);
2401 			}
2402 		}
2403 	}
2404 
2405 	proc_childdrainend(p);
2406 	proc_list_unlock();
2407 
2408 #if CONFIG_MACF
2409 	if (!proc_is_shadow(p)) {
2410 		/*
2411 		 * Notify MAC policies that proc is dead.
2412 		 * This should be replaced with proper label management
2413 		 * (rdar://problem/32126399).
2414 		 */
2415 		mac_proc_notify_exit(p);
2416 	}
2417 #endif
2418 
2419 	/*
2420 	 * Release reference to text vnode
2421 	 */
2422 	tvp = p->p_textvp;
2423 	p->p_textvp = NULL;
2424 	if (tvp != NULLVP) {
2425 		vnode_rele(tvp);
2426 	}
2427 
2428 	/*
2429 	 * Save exit status and final rusage info, adding in child rusage
2430 	 * info and self times.  If we were unable to allocate a zombie
2431 	 * structure, this information is lost.
2432 	 */
2433 	if (p->p_ru != NULL) {
2434 		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2435 		p->p_ru->ru = p->p_stats->p_ru;
2436 
2437 		ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2438 	}
2439 
2440 	/*
2441 	 * Free up profiling buffers.
2442 	 */
2443 	{
2444 		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2445 
2446 		p1 = p0->pr_next;
2447 		p0->pr_next = NULL;
2448 		p0->pr_scale = 0;
2449 
2450 		for (; p1 != NULL; p1 = pn) {
2451 			pn = p1->pr_next;
2452 			kfree_type(struct uprof, p1);
2453 		}
2454 	}
2455 
2456 	proc_free_realitimer(p);
2457 
2458 	/*
2459 	 * Other substructures are freed from wait().
2460 	 */
2461 	zfree(proc_stats_zone, p->p_stats);
2462 	p->p_stats = NULL;
2463 
2464 	proc_limitdrop(p);
2465 
2466 #if DEVELOPMENT || DEBUG
2467 	proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2468 #endif
2469 
2470 	/*
2471 	 * Finish up by terminating the task
2472 	 * and halting this thread (only if it is a
2473 	 * member of the exiting task).
2474 	 */
2475 	proc_set_task(p, TASK_NULL);
2476 	set_bsdtask_info(task, NULL);
2477 	clear_thread_ro_proc(get_machthread(uth));
2478 
2479 #if DEVELOPMENT || DEBUG
2480 	proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2481 #endif
2482 
2483 	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2484 	proc_knote(p, knote_hint);
2485 
2486 	/* mark the thread as the one that is doing proc_exit
2487 	 * no need to hold proc lock in uthread_free
2488 	 */
2489 	uth->uu_flag |= UT_PROCEXIT;
2490 	/*
2491 	 * Notify parent that we're gone.
2492 	 */
2493 	pp = proc_parent(p);
2494 	if (proc_is_shadow(p)) {
2495 		/* kernel can reap this one, no need to move it to launchd */
2496 		proc_list_lock();
2497 		p->p_listflag |= P_LIST_DEADPARENT;
2498 		proc_list_unlock();
2499 	} else if (pp->p_flag & P_NOCLDWAIT) {
2500 		if (p->p_ru != NULL) {
2501 			proc_lock(pp);
2502 #if 3839178
2503 			/*
2504 			 * If the parent is ignoring SIGCHLD, then POSIX requires
2505 			 * us to not add the resource usage to the parent process -
2506 			 * we are only going to hand it off to init to get reaped.
2507 			 * We should contest the standard in this case on the basis
2508 			 * of RLIMIT_CPU.
2509 			 */
2510 #else   /* !3839178 */
2511 			/*
2512 			 * Add child resource usage to parent before giving
2513 			 * zombie to init.  If we were unable to allocate a
2514 			 * zombie structure, this information is lost.
2515 			 */
2516 			ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2517 #endif  /* !3839178 */
2518 			update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2519 			proc_unlock(pp);
2520 		}
2521 
2522 		/* kernel can reap this one, no need to move it to launchd */
2523 		proc_list_lock();
2524 		p->p_listflag |= P_LIST_DEADPARENT;
2525 		proc_list_unlock();
2526 	}
2527 	if (!proc_is_shadow(p) &&
2528 	    ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2529 		if (pp != initproc) {
2530 			proc_lock(pp);
2531 			pp->si_pid = proc_getpid(p);
2532 			pp->p_xhighbits = p->p_xhighbits;
2533 			p->p_xhighbits = 0;
2534 			pp->si_status = p->p_xstat;
2535 			pp->si_code = CLD_EXITED;
2536 			/*
2537 			 * p_ucred usage is safe as it is an exiting process
2538 			 * and reference is dropped in reap
2539 			 */
2540 			pp->si_uid = kauth_cred_getruid(proc_ucred(p));
2541 			proc_unlock(pp);
2542 		}
2543 		/* mark as a zombie */
2544 		/* No need to take proc lock as all refs are drained and
2545 		 * no one except the parent (reaping) can look at this.
2546 		 * The write is to an int and is coherent. Also, the parent is
2547 		 * keyed off the list lock for reaping.
2548 		 */
2549 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2550 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2551 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2552 		    pid, exitval, 0, 0, 0);
2553 		p->p_stat = SZOMB;
2554 		/*
2555 		 * The current process can be reaped, so no one
2556 		 * can depend on this
2557 		 */
2558 
2559 		psignal(pp, SIGCHLD);
2560 
2561 		/* and now wakeup the parent */
2562 		proc_list_lock();
2563 		wakeup((caddr_t)pp);
2564 		proc_list_unlock();
2565 	} else {
2566 		/* should be fine as parent proc would be initproc */
2567 		/* mark as a zombie */
2568 		/* No need to take proc lock as all refs are drained and
2569 		 * no one except the parent (reaping) can look at this.
2570 		 * The write is to an int and is coherent. Also, the parent is
2571 		 * keyed off the list lock for reaping.
2572 		 */
2573 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2574 		proc_list_lock();
2575 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2576 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2577 		    pid, exitval, 0, 0, 0);
2578 		/* check for sysctl zomb lookup */
2579 		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2580 			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2581 		}
2582 		/* safe to use p as this is a system reap */
2583 		p->p_stat = SZOMB;
2584 		p->p_listflag |= P_LIST_WAITING;
2585 
2586 		/*
2587 		 * This is a named reference and it is not granted
2588 		 * if the reap is already in progress. So we get
2589 		 * the reference here exclusively and there can be
2590 		 * no waiters. So there is no need for a wakeup
2591 		 * after we are done. Also, the reap frees the structure,
2592 		 * so the proc struct cannot be used for wakeups either.
2593 		 * It is safe to use p here as this is system reap
2594 		 */
2595 		reap_child_locked(pp, p,
2596 		    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2597 	}
2598 	if (uth->uu_lowpri_window) {
2599 		/*
2600 		 * task is marked as a low priority I/O type and we've
2601 		 * somehow picked up another throttle during exit processing...
2602 		 * no need to throttle this thread since it's going away,
2603 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2604 		 */
2605 		throttle_lowpri_io(0);
2606 	}
2607 
2608 	proc_rele(pp);
2609 #if DEVELOPMENT || DEBUG
2610 	proc_exit_lpexit_check(pid, PELS_POS_END);
2611 #endif
2612 }
2613 
2614 
2615 /*
2616  * reap_child_locked
2617  *
2618  * Finalize a child exit once its status has been saved.
2619  *
2620  * If ptrace has attached, detach it and return it to its real parent.  Free any
2621  * remaining resources.
2622  *
2623  * Parameters:
2624  * - proc_t parent      Parent of process being reaped
2625  * - proc_t child       Process to reap
2626  * - reap_flags_t flags Control locking and re-parenting behavior
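 *
 * The flags are, roughly: REAP_LOCKED (proc_list_lock is held on entry and is
 * re-taken before returning unless REAP_DROP_LOCK is also set),
 * REAP_DEAD_PARENT (the reaping parent is itself exiting), and
 * REAP_REPARENTED_TO_INIT (the child had previously been re-parented to
 * initproc).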
2627  */
2628 static void
2629 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2630 {
2631 	struct pgrp *pg;
2632 	kauth_cred_t cred;
2633 	boolean_t shadow_proc = proc_is_shadow(child);
2634 
2635 	if (flags & REAP_LOCKED) {
2636 		proc_list_unlock();
2637 	}
2638 
2639 	/*
2640 	 * Under ptrace, the child should now be re-parented back to its original
2641 	 * parent, unless that parent is initproc and the child only reached
2642 	 * initproc through re-parenting.
2643 	 */
2644 	bool child_ptraced = child->p_oppid != 0;
2645 	if (!shadow_proc && child_ptraced) {
2646 		int knote_hint;
2647 		pid_t orig_ppid = 0;
2648 		proc_t orig_parent = PROC_NULL;
2649 
2650 		proc_lock(child);
2651 		orig_ppid = child->p_oppid;
2652 		child->p_oppid = 0;
2653 		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2654 		proc_unlock(child);
2655 
2656 		orig_parent = proc_find(orig_ppid);
2657 		if (orig_parent) {
2658 			/*
2659 			 * Only re-parent the process if its original parent was not
2660 			 * initproc, or if it did not come to initproc through re-parenting.
2661 			 */
2662 			bool reparenting = orig_parent != initproc ||
2663 			    (flags & REAP_REPARENTED_TO_INIT) == 0;
2664 			if (reparenting) {
2665 				if (orig_parent != initproc) {
2666 					/*
2667 					 * Internal fields should be safe to access here because the
2668 					 * child has exited and is not reaped or re-parented yet.
2669 					 */
2670 					proc_lock(orig_parent);
2671 					orig_parent->si_pid = proc_getpid(child);
2672 					orig_parent->si_status = child->p_xstat;
2673 					orig_parent->si_code = CLD_CONTINUED;
2674 					orig_parent->si_uid = kauth_cred_getruid(proc_ucred(child));
2675 					proc_unlock(orig_parent);
2676 				}
2677 				proc_reparentlocked(child, orig_parent, 1, 0);
2678 
2679 				/*
2680 				 * After re-parenting, re-send the child's NOTE_EXIT to the
2681 				 * original parent.
2682 				 */
2683 				proc_knote(child, knote_hint);
2684 				psignal(orig_parent, SIGCHLD);
2685 
2686 				proc_list_lock();
2687 				wakeup((caddr_t)orig_parent);
2688 				child->p_listflag &= ~P_LIST_WAITING;
2689 				wakeup(&child->p_stat);
2690 				proc_list_unlock();
2691 
2692 				proc_rele(orig_parent);
2693 				if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2694 					proc_list_lock();
2695 				}
2696 				return;
2697 			} else {
2698 				/*
2699 				 * Satisfy the knote lifecycle because ptraced processes don't
2700 				 * broadcast NOTE_EXIT during initial child termination.
2701 				 */
2702 				proc_knote(child, knote_hint);
2703 				proc_rele(orig_parent);
2704 			}
2705 		}
2706 	}
2707 
2708 #pragma clang diagnostic push
2709 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2710 	proc_knote(child, NOTE_REAP);
2711 #pragma clang diagnostic pop
2712 
2713 	proc_knote_drain(child);
2714 
2715 	child->p_xstat = 0;
2716 	if (!shadow_proc && child->p_ru) {
2717 		/*
2718 		 * Roll up the rusage statistics to the parent, unless the parent is
2719 		 * ignoring SIGCHLD.  POSIX requires the children's resources of such a
2720 		 * parent to not be included in the parent's usage (seems odd given
2721 		 * RLIMIT_CPU, though).
2722 		 */
2723 		proc_lock(parent);
2724 		bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2725 		if (rollup_child) {
2726 			ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2727 		}
2728 		update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2729 		proc_unlock(parent);
2730 		zfree(zombie_zone, child->p_ru);
2731 		child->p_ru = NULL;
2732 	} else if (!shadow_proc) {
2733 		printf("Warning : lost p_ru for %s\n", child->p_comm);
2734 	} else {
2735 		assert(child->p_ru == NULL);
2736 	}
2737 
2738 	AUDIT_SESSION_PROCEXIT(child);
2739 
2740 #if CONFIG_PERSONAS
2741 	persona_proc_drop(child);
2742 #endif /* CONFIG_PERSONAS */
2743 	(void)chgproccnt(kauth_cred_getruid(proc_ucred(child)), -1);
2744 
2745 	os_reason_free(child->p_exit_reason);
2746 
2747 	proc_list_lock();
2748 
2749 	pg = pgrp_leave_locked(child);
2750 	LIST_REMOVE(child, p_list);
2751 	parent->p_childrencnt--;
2752 	LIST_REMOVE(child, p_sibling);
2753 	bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2754 	    LIST_EMPTY(&parent->p_children);
2755 	if (no_more_children) {
2756 		wakeup((caddr_t)parent);
2757 	}
2758 	child->p_listflag &= ~P_LIST_WAITING;
2759 	wakeup(&child->p_stat);
2760 
2761 	/* Take it out of process hash */
2762 	if (!shadow_proc) {
2763 		phash_remove_locked(child);
2764 	}
2765 	proc_checkdeadrefs(child);
2766 	nprocs--;
2767 	if (flags & REAP_DEAD_PARENT) {
2768 		child->p_listflag |= P_LIST_DEADPARENT;
2769 	}
2770 	cred = proc_ucred(child);
2771 	child->p_proc_ro = proc_ro_release_proc(child->p_proc_ro);
2772 
2773 	proc_list_unlock();
2774 
2775 	pgrp_rele(pg);
2776 	if (child->p_proc_ro != NULL) {
2777 		proc_ro_free(child->p_proc_ro);
2778 		child->p_proc_ro = NULL;
2779 	}
2780 	kauth_cred_set(&cred, NOCRED);
2781 	fdt_destroy(child);
2782 	lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2783 	lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2784 #if CONFIG_DTRACE
2785 	lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2786 #endif
2787 	lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2788 	proc_wait_release(child);
2789 
2790 	if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2791 		proc_list_lock();
2792 	}
2793 }
2794 
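/*
 * Continuation for wait4(): when wait4_nocancel() blocks via msleep0() with
 * wait1continue as its continuation, the blocked thread's stack is not
 * preserved, so on wakeup this function re-fetches the saved uap/retval from
 * the uthread's uus_wait4_data and simply re-enters wait4_nocancel().
 */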
2795 int
2796 wait1continue(int result)
2797 {
2798 	proc_t p;
2799 	thread_t thread;
2800 	uthread_t uth;
2801 	struct _wait4_data *wait4_data;
2802 	struct wait4_nocancel_args *uap;
2803 	int *retval;
2804 
2805 	if (result) {
2806 		return result;
2807 	}
2808 
2809 	p = current_proc();
2810 	thread = current_thread();
2811 	uth = (struct uthread *)get_bsdthread_info(thread);
2812 
2813 	wait4_data = &uth->uu_save.uus_wait4_data;
2814 	uap = wait4_data->args;
2815 	retval = wait4_data->retval;
2816 	return wait4_nocancel(p, uap, retval);
2817 }
2818 
2819 int
2820 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2821 {
2822 	__pthread_testcancel(1);
2823 	return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2824 }
2825 
2826 int
2827 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2828 {
2829 	int nfound;
2830 	int sibling_count;
2831 	proc_t p;
2832 	int status, error;
2833 	uthread_t uth;
2834 	struct _wait4_data *wait4_data;
2835 
2836 	AUDIT_ARG(pid, uap->pid);
2837 
2838 	if (uap->pid == 0) {
2839 		uap->pid = -q->p_pgrpid;
2840 	}
2841 
2842 	if (uap->pid == INT_MIN) {
2843 		return EINVAL;
2844 	}
2845 
2846 loop:
2847 	proc_list_lock();
2848 loop1:
2849 	nfound = 0;
2850 	sibling_count = 0;
2851 
2852 	PCHILDREN_FOREACH(q, p) {
2853 		if (p->p_sibling.le_next != 0) {
2854 			sibling_count++;
2855 		}
2856 		if (uap->pid != WAIT_ANY &&
2857 		    proc_getpid(p) != uap->pid &&
2858 		    p->p_pgrpid != -(uap->pid)) {
2859 			continue;
2860 		}
2861 
2862 		if (proc_is_shadow(p)) {
2863 			continue;
2864 		}
2865 
2866 		nfound++;
2867 
2868 		/* XXX This is racy because we don't get the lock!!!! */
2869 
2870 		if (p->p_listflag & P_LIST_WAITING) {
2871 			/* we're not using a continuation here but we still need to stash
2872 			 * the args for stackshot. */
2873 			uth = current_uthread();
2874 			wait4_data = &uth->uu_save.uus_wait4_data;
2875 			wait4_data->args = uap;
2876 			thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2877 
2878 			(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2879 			goto loop1;
2880 		}
2881 		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */
2882 
2883 
2884 		if (p->p_stat == SZOMB) {
2885 			reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2886 			    REAP_REPARENTED_TO_INIT : 0;
2887 
2888 			proc_list_unlock();
2889 #if CONFIG_MACF
2890 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2891 				goto out;
2892 			}
2893 #endif
2894 			retval[0] = proc_getpid(p);
2895 			if (uap->status) {
2896 				/* Legacy apps expect only 8 bits of status */
2897 				status = 0xffff & p->p_xstat;   /* convert to int */
2898 				error = copyout((caddr_t)&status,
2899 				    uap->status,
2900 				    sizeof(status));
2901 				if (error) {
2902 					goto out;
2903 				}
2904 			}
2905 			if (uap->rusage) {
2906 				if (p->p_ru == NULL) {
2907 					error = ENOMEM;
2908 				} else {
2909 					if (IS_64BIT_PROCESS(q)) {
2910 						struct user64_rusage    my_rusage = {};
2911 						munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2912 						error = copyout((caddr_t)&my_rusage,
2913 						    uap->rusage,
2914 						    sizeof(my_rusage));
2915 					} else {
2916 						struct user32_rusage    my_rusage = {};
2917 						munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2918 						error = copyout((caddr_t)&my_rusage,
2919 						    uap->rusage,
2920 						    sizeof(my_rusage));
2921 					}
2922 				}
2923 				/* information unavailable? */
2924 				if (error) {
2925 					goto out;
2926 				}
2927 			}
2928 
2929 			/* Conformance change for 6577252.
2930 			 * When SIGCHLD is blocked and wait() returns because the status
2931 			 * of a child process is available and there are no other
2932 			 * child processes, then any pending SIGCHLD signal is cleared.
2933 			 */
2934 			if (sibling_count == 0) {
2935 				int mask = sigmask(SIGCHLD);
2936 				uth = current_uthread();
2937 
2938 				if ((uth->uu_sigmask & mask) != 0) {
2939 					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
2940 					 * This locking looks funny but it is protecting access to the
2941 					 * thread via p_uthlist.
2942 					 */
2943 					proc_lock(q);
2944 					uth->uu_siglist &= ~mask;       /* clear pending signal */
2945 					proc_unlock(q);
2946 				}
2947 			}
2948 
2949 			/* Clean up */
2950 			(void)reap_child_locked(q, p, reap_flags);
2951 
2952 			return 0;
2953 		}
2954 		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
2955 		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
2956 			proc_list_unlock();
2957 #if CONFIG_MACF
2958 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2959 				goto out;
2960 			}
2961 #endif
2962 			proc_lock(p);
2963 			p->p_lflag |= P_LWAITED;
2964 			proc_unlock(p);
2965 			retval[0] = proc_getpid(p);
2966 			if (uap->status) {
2967 				status = W_STOPCODE(p->p_xstat);
2968 				error = copyout((caddr_t)&status,
2969 				    uap->status,
2970 				    sizeof(status));
2971 			} else {
2972 				error = 0;
2973 			}
2974 			goto out;
2975 		}
2976 		/*
2977 		 * If we are waiting for continued processes, and this
2978 		 * process was continued
2979 		 */
2980 		if ((uap->options & WCONTINUED) &&
2981 		    (p->p_flag & P_CONTINUED)) {
2982 			proc_list_unlock();
2983 #if CONFIG_MACF
2984 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2985 				goto out;
2986 			}
2987 #endif
2988 
2989 			/* Prevent other processes from waiting for this event */
2990 			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2991 			retval[0] = proc_getpid(p);
2992 			if (uap->status) {
2993 				status = W_STOPCODE(SIGCONT);
2994 				error = copyout((caddr_t)&status,
2995 				    uap->status,
2996 				    sizeof(status));
2997 			} else {
2998 				error = 0;
2999 			}
3000 			goto out;
3001 		}
3002 		p->p_listflag &= ~P_LIST_WAITING;
3003 		wakeup(&p->p_stat);
3004 	}
3005 	/* list lock is held when we get here any which way */
3006 	if (nfound == 0) {
3007 		proc_list_unlock();
3008 		return ECHILD;
3009 	}
3010 
3011 	if (uap->options & WNOHANG) {
3012 		retval[0] = 0;
3013 		proc_list_unlock();
3014 		return 0;
3015 	}
3016 
3017 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3018 	uth = current_uthread();
3019 	wait4_data = &uth->uu_save.uus_wait4_data;
3020 	wait4_data->args = uap;
3021 	wait4_data->retval = retval;
3022 
3023 	thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
3024 	if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3025 		return error;
3026 	}
3027 
3028 	goto loop;
3029 out:
3030 	proc_list_lock();
3031 	p->p_listflag &= ~P_LIST_WAITING;
3032 	wakeup(&p->p_stat);
3033 	proc_list_unlock();
3034 	return error;
3035 }
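/*
 * Illustrative sketch (not part of the original sources): how a userspace
 * caller typically consumes the semantics implemented above -- the 16-bit
 * wait status word, stop reporting under WUNTRACED, and the rusage copyout.
 * The child pid and the option mix are assumptions made purely for the
 * example.
 *
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	static void
 *	reap_one_child(pid_t child)
 *	{
 *		int status = 0;
 *		struct rusage ru;
 *
 *		// Block until `child` changes state; WUNTRACED also reports job-control stops.
 *		pid_t waited = wait4(child, &status, WUNTRACED, &ru);
 *		if (waited <= 0) {
 *			perror("wait4");
 *		} else if (WIFEXITED(status)) {
 *			printf("%d exited with %d\n", waited, WEXITSTATUS(status));
 *		} else if (WIFSIGNALED(status)) {
 *			printf("%d terminated by signal %d\n", waited, WTERMSIG(status));
 *		} else if (WIFSTOPPED(status)) {
 *			printf("%d stopped by signal %d\n", waited, WSTOPSIG(status));
 *		}
 *	}
 */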
3036 
3037 #if DEBUG
3038 #define ASSERT_LCK_MTX_OWNED(lock)      \
3039 	                        lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3040 #else
3041 #define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */
3042 #endif
3043 
3044 int
3045 waitidcontinue(int result)
3046 {
3047 	proc_t p;
3048 	thread_t thread;
3049 	uthread_t uth;
3050 	struct _waitid_data *waitid_data;
3051 	struct waitid_nocancel_args *uap;
3052 	int *retval;
3053 
3054 	if (result) {
3055 		return result;
3056 	}
3057 
3058 	p = current_proc();
3059 	thread = current_thread();
3060 	uth = (struct uthread *)get_bsdthread_info(thread);
3061 
3062 	waitid_data = &uth->uu_save.uus_waitid_data;
3063 	uap = waitid_data->args;
3064 	retval = waitid_data->retval;
3065 	return waitid_nocancel(p, uap, retval);
3066 }
3067 
3068 /*
3069  * Description:	Suspend the calling thread until one child of the process
3070  *		containing the calling thread changes state.
3071  *
3072  * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
3073  *		uap->id			pid_t or gid_t or ignored
3074  *		uap->infop		Address of siginfo_t struct in
3075  *					user space into which to return status
3076  *		uap->options		flag values
3077  *
3078  * Returns:	0			Success
3079  *		!0			Error returning status to user space
3080  */
3081 int
3082 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3083 {
3084 	__pthread_testcancel(1);
3085 	return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3086 }
3087 
3088 int
3089 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3090     __unused int32_t *retval)
3091 {
3092 	user_siginfo_t  siginfo;        /* siginfo data to return to caller */
3093 	boolean_t caller64 = IS_64BIT_PROCESS(q);
3094 	int nfound;
3095 	proc_t p;
3096 	int error;
3097 	uthread_t uth;
3098 	struct _waitid_data *waitid_data;
3099 
3100 	if (uap->options == 0 ||
3101 	    (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3102 		return EINVAL;        /* bits set that aren't recognized */
3103 	}
3104 	switch (uap->idtype) {
3105 	case P_PID:     /* child with process ID equal to... */
3106 	case P_PGID:    /* child with process group ID equal to... */
3107 		if (((int)uap->id) < 0) {
3108 			return EINVAL;
3109 		}
3110 		break;
3111 	case P_ALL:     /* any child */
3112 		break;
3113 	}
3114 
3115 loop:
3116 	proc_list_lock();
3117 loop1:
3118 	nfound = 0;
3119 
3120 	PCHILDREN_FOREACH(q, p) {
3121 		switch (uap->idtype) {
3122 		case P_PID:     /* child with process ID equal to... */
3123 			if (proc_getpid(p) != (pid_t)uap->id) {
3124 				continue;
3125 			}
3126 			break;
3127 		case P_PGID:    /* child with process group ID equal to... */
3128 			if (p->p_pgrpid != (pid_t)uap->id) {
3129 				continue;
3130 			}
3131 			break;
3132 		case P_ALL:     /* any child */
3133 			break;
3134 		}
3135 
3136 		if (proc_is_shadow(p)) {
3137 			continue;
3138 		}
3139 		/* XXX This is racy because we don't get the lock!!!! */
3140 
3141 		/*
3142 		 * Wait collision; go to sleep and restart; used to maintain
3143 		 * the guarantee of a single return per waited process.
3144 		 */
3145 		if (p->p_listflag & P_LIST_WAITING) {
3146 			(void) msleep(&p->p_stat, &proc_list_mlock,
3147 			    PWAIT, "waitidcoll", 0);
3148 			goto loop1;
3149 		}
3150 		p->p_listflag |= P_LIST_WAITING;                /* mark busy */
3151 
3152 		nfound++;
3153 
3154 		bzero(&siginfo, sizeof(siginfo));
3155 
3156 		switch (p->p_stat) {
3157 		case SZOMB:             /* Exited */
3158 			if (!(uap->options & WEXITED)) {
3159 				break;
3160 			}
3161 			proc_list_unlock();
3162 #if CONFIG_MACF
3163 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3164 				goto out;
3165 			}
3166 #endif
3167 			siginfo.si_signo = SIGCHLD;
3168 			siginfo.si_pid = proc_getpid(p);
3169 
3170 			/* If the child terminated abnormally due to a signal, the signum
3171 			 * needs to be preserved in the exit status.
3172 			 */
3173 			if (WIFSIGNALED(p->p_xstat)) {
3174 				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
3175 				    CLD_DUMPED : CLD_KILLED;
3176 				siginfo.si_status = WTERMSIG(p->p_xstat);
3177 			} else {
3178 				siginfo.si_code = CLD_EXITED;
3179 				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
3180 			}
3181 			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
3182 			p->p_xhighbits = 0;
3183 
3184 			if ((error = copyoutsiginfo(&siginfo,
3185 			    caller64, uap->infop)) != 0) {
3186 				goto out;
3187 			}
3188 
3189 			/* Prevent other processes from waiting for this event? */
3190 			if (!(uap->options & WNOWAIT)) {
3191 				reap_child_locked(q, p, 0);
3192 				return 0;
3193 			}
3194 			goto out;
3195 
3196 		case SSTOP:             /* Stopped */
3197 			/*
3198 			 * If we are not interested in stopped processes, then
3199 			 * ignore this one.
3200 			 */
3201 			if (!(uap->options & WSTOPPED)) {
3202 				break;
3203 			}
3204 
3205 			/*
3206 			 * If someone has already waited for it, we lost a race
3207 			 * to be the one to return status.
3208 			 */
3209 			if ((p->p_lflag & P_LWAITED) != 0) {
3210 				break;
3211 			}
3212 			proc_list_unlock();
3213 #if CONFIG_MACF
3214 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3215 				goto out;
3216 			}
3217 #endif
3218 			siginfo.si_signo = SIGCHLD;
3219 			siginfo.si_pid = proc_getpid(p);
3220 			siginfo.si_status = p->p_xstat; /* signal number */
3221 			siginfo.si_code = CLD_STOPPED;
3222 
3223 			if ((error = copyoutsiginfo(&siginfo,
3224 			    caller64, uap->infop)) != 0) {
3225 				goto out;
3226 			}
3227 
3228 			/* Prevent other processes from waiting for this event? */
3229 			if (!(uap->options & WNOWAIT)) {
3230 				proc_lock(p);
3231 				p->p_lflag |= P_LWAITED;
3232 				proc_unlock(p);
3233 			}
3234 			goto out;
3235 
3236 		default:                /* All other states => Continued */
3237 			if (!(uap->options & WCONTINUED)) {
3238 				break;
3239 			}
3240 
3241 			/*
3242 			 * If the flag isn't set, then this process has not
3243 			 * been stopped and continued, or the status has
3244 			 * already been reaped by another caller of waitid().
3245 			 */
3246 			if ((p->p_flag & P_CONTINUED) == 0) {
3247 				break;
3248 			}
3249 			proc_list_unlock();
3250 #if CONFIG_MACF
3251 			if ((error = mac_proc_check_wait(q, p)) != 0) {
3252 				goto out;
3253 			}
3254 #endif
3255 			siginfo.si_signo = SIGCHLD;
3256 			siginfo.si_code = CLD_CONTINUED;
3257 			proc_lock(p);
3258 			siginfo.si_pid = p->p_contproc;
3259 			siginfo.si_status = p->p_xstat;
3260 			proc_unlock(p);
3261 
3262 			if ((error = copyoutsiginfo(&siginfo,
3263 			    caller64, uap->infop)) != 0) {
3264 				goto out;
3265 			}
3266 
3267 			/* Prevent other processes from waiting for this event? */
3268 			if (!(uap->options & WNOWAIT)) {
3269 				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
3270 				    &p->p_flag);
3271 			}
3272 			goto out;
3273 		}
3274 		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3275 
3276 		/* Not a process we are interested in; go on to next child */
3277 
3278 		p->p_listflag &= ~P_LIST_WAITING;
3279 		wakeup(&p->p_stat);
3280 	}
3281 	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3282 
3283 	/* No child processes that could possibly satisfy the request? */
3284 
3285 	if (nfound == 0) {
3286 		proc_list_unlock();
3287 		return ECHILD;
3288 	}
3289 
3290 	if (uap->options & WNOHANG) {
3291 		proc_list_unlock();
3292 #if CONFIG_MACF
3293 		if ((error = mac_proc_check_wait(q, p)) != 0) {
3294 			return error;
3295 		}
3296 #endif
3297 		/*
3298 		 * The state of the siginfo structure in this case
3299 		 * is undefined.  Some implementations bzero it, some
3300 		 * (like here) leave it untouched for efficiency.
3301 		 *
3302 		 * Thus the most portable check for "no matching pid with
3303 		 * WNOHANG" is to store a zero into si_pid before
3304 		 * invocation, then check for a non-zero value afterwards.
3305 		 */
3306 		return 0;
3307 	}
3308 
3309 	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3310 	uth = current_uthread();
3311 	waitid_data = &uth->uu_save.uus_waitid_data;
3312 	waitid_data->args = uap;
3313 	waitid_data->retval = retval;
3314 
3315 	if ((error = msleep0(q, &proc_list_mlock,
3316 	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
3317 		return error;
3318 	}
3319 
3320 	goto loop;
3321 out:
3322 	proc_list_lock();
3323 	p->p_listflag &= ~P_LIST_WAITING;
3324 	wakeup(&p->p_stat);
3325 	proc_list_unlock();
3326 	return error;
3327 }
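/*
 * Illustrative sketch (not part of the original sources): the WNOHANG comment
 * above notes that the siginfo contents are left untouched when no matching
 * child has changed state, so a portable caller zeroes si_pid before the call
 * and tests it afterwards.  A minimal userspace version of that pattern:
 *
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static int
 *	poll_child(pid_t child)
 *	{
 *		siginfo_t info;
 *
 *		memset(&info, 0, sizeof(info));		// si_pid == 0 before invocation
 *		if (waitid(P_PID, (id_t)child, &info, WEXITED | WNOHANG) != 0) {
 *			return -1;			// real error from waitid()
 *		}
 *		if (info.si_pid == 0) {
 *			return 0;			// WNOHANG: no state change yet
 *		}
 *		return 1;				// info describes the child's state change
 *	}
 */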
3328 
3329 /*
3330  * make process 'parent' the new parent of process 'child'.
3331  */
3332 void
3333 proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
3334 {
3335 	proc_t oldparent = PROC_NULL;
3336 
3337 	if (child->p_pptr == parent) {
3338 		return;
3339 	}
3340 
3341 	if (locked == 0) {
3342 		proc_list_lock();
3343 	}
3344 
3345 	oldparent = child->p_pptr;
3346 #if __PROC_INTERNAL_DEBUG
3347 	if (oldparent == PROC_NULL) {
3348 		panic("proc_reparent: process %p does not have a parent", child);
3349 	}
3350 #endif
3351 
3352 	LIST_REMOVE(child, p_sibling);
3353 #if __PROC_INTERNAL_DEBUG
3354 	if (oldparent->p_childrencnt == 0) {
3355 		panic("process children count already 0");
3356 	}
3357 #endif
3358 	oldparent->p_childrencnt--;
3359 #if __PROC_INTERNAL_DEBUG
3360 	if (oldparent->p_childrencnt < 0) {
3361 		panic("process children count -ve");
3362 	}
3363 #endif
3364 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
3365 	parent->p_childrencnt++;
3366 	child->p_pptr = parent;
3367 	child->p_ppid = proc_getpid(parent);
3368 
3369 	proc_list_unlock();
3370 
3371 	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
3372 		psignal(initproc, SIGCHLD);
3373 	}
3374 	if (locked == 1) {
3375 		proc_list_lock();
3376 	}
3377 }
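/*
 * Illustrative sketch (not part of the original sources): the reparenting
 * above is plain <sys/queue.h> LIST manipulation -- unlink the child from its
 * old parent's sibling list, then push it onto the new parent's list.  The
 * same move with a hypothetical stand-in type:
 *
 *	#include <sys/queue.h>
 *
 *	struct node {
 *		LIST_ENTRY(node) link;
 *	};
 *	LIST_HEAD(nodelist, node);
 *
 *	static void
 *	move_node(struct nodelist *new_list, struct node *n)
 *	{
 *		LIST_REMOVE(n, link);			// works without knowing the old list head
 *		LIST_INSERT_HEAD(new_list, n, link);	// now linked under the new head
 *	}
 */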
3378 
3379 /*
3380  * Exit: deallocate address space and other resources, change proc state
3381  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
3382  * status and rusage for wait().  Check for child processes and orphan them.
3383  */
3384 
3385 
3386 /*
3387  * munge_rusage
3388  *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
3389  *	process.  We munge the kernel version of rusage into the
3390  *	64 bit version.
3391  */
3392 __private_extern__  void
3393 munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
3394 {
3395 	/* Zero-out struct so that padding is cleared */
3396 	bzero(a_user_rusage_p, sizeof(struct user64_rusage));
3397 
3398 	/* timeval changes size, so utime and stime need special handling */
3399 	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
3400 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3401 	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
3402 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3403 	/*
3404 	 * everything else can be a direct assign, since there is no loss
3405 	 * of precision implied going from 32 to 64 bits.
3406 	 */
3407 	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
3408 	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
3409 	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
3410 	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
3411 	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
3412 	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
3413 	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
3414 	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
3415 	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
3416 	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
3417 	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
3418 	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
3419 	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
3420 	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
3421 }
3422 
3423 /* For a 64-bit kernel and 32-bit userspace, munging may be needed */
3424 __private_extern__  void
3425 munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
3426 {
3427 	bzero(a_user_rusage_p, sizeof(struct user32_rusage));
3428 
3429 	/* timeval changes size, so utime and stime need special handling */
3430 	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
3431 	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
3432 	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
3433 	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
3434 	/*
3435 	 * everything else can be a direct assign. We currently ignore
3436 	 * the loss of precision
3437 	 */
3438 	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
3439 	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
3440 	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
3441 	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
3442 	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
3443 	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
3444 	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
3445 	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
3446 	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
3447 	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
3448 	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
3449 	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
3450 	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
3451 	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
3452 }
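/*
 * Illustrative sketch (not part of the original sources): both munge routines
 * above exist only because the user-visible layouts differ in field width.
 * The narrowing direction is the interesting one -- copying kernel-native
 * 64-bit fields into a 32-bit user layout takes explicit casts and accepts
 * the loss of precision.  A minimal stand-alone version with hypothetical
 * stand-in structs:
 *
 *	#include <stdint.h>
 *
 *	struct kern_counters   { int64_t sec; int64_t faults; };	// kernel-native widths
 *	struct user32_counters { int32_t sec; int32_t faults; };	// 32-bit user layout
 *
 *	static void
 *	munge_counters(const struct kern_counters *in, struct user32_counters *out)
 *	{
 *		out->sec    = (int32_t)in->sec;		// explicit narrowing, like user32_time_t above
 *		out->faults = (int32_t)in->faults;
 *	}
 */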
3453 
3454 void
3455 kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
3456 {
3457 	assert(thread != NULL);
3458 	assert(waitinfo != NULL);
3459 
3460 	struct uthread *ut = get_bsdthread_info(thread);
3461 	waitinfo->context = 0;
3462 	// ensure wmesg is consistent with a thread waiting in wait4
3463 	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
3464 	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
3465 	// May not actually contain a pid; this is just the argument to wait4.
3466 	// See man wait4 for other valid wait4 arguments.
3467 	waitinfo->owner = args->pid;
3468 }
3469 
3470 int
3471 exit_with_guard_exception(
3472 	proc_t p,
3473 	mach_exception_data_type_t code,
3474 	mach_exception_data_type_t subcode)
3475 {
3476 	os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)code);
3477 	assert(reason != OS_REASON_NULL);
3478 
3479 	return exit_with_mach_exception(p, reason, EXC_GUARD, code, subcode);
3480 }
3481 
3482 #if __has_feature(ptrauth_calls)
3483 int
3484 exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
3485     mach_exception_subcode_t subcode)
3486 {
3487 	os_reason_t reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code);
3488 	assert(reason != OS_REASON_NULL);
3489 
3490 	return exit_with_mach_exception(p, reason, exception, code, subcode);
3491 }
3492 #endif /* __has_feature(ptrauth_calls) */
3493 
3494 int
3495 exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
3496     mach_exception_data_type_t subcode)
3497 {
3498 	os_reason_t reason = os_reason_create(OS_REASON_PORT_SPACE, (uint64_t)code);
3499 	assert(reason != OS_REASON_NULL);
3500 
3501 	return exit_with_mach_exception(p, reason, EXC_RESOURCE, code, subcode);
3502 }
3503 
3504 static int
3505 exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception, mach_exception_code_t code,
3506     mach_exception_subcode_t subcode)
3507 {
3508 	thread_t self = current_thread();
3509 	struct uthread *ut = get_bsdthread_info(self);
3510 
3511 	ut->uu_exception = exception;
3512 	ut->uu_code = code;
3513 	ut->uu_subcode = subcode;
3514 
3515 	reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
3516 	return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
3517 	           TRUE, FALSE, 0, reason);
3518 }
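/*
 * Illustrative sketch (not part of the original sources): the wait status
 * handed to exit_with_reason() above is built with W_EXITCODE(0, SIGKILL),
 * so a waiting parent decodes it as "terminated by SIGKILL" rather than as a
 * normal exit.  A tiny check with the <sys/wait.h> macros:
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <assert.h>
 *
 *	static void
 *	check_exit_encoding(void)
 *	{
 *		int st = W_EXITCODE(0, SIGKILL);	// high byte: exit code, low byte: signal
 *		assert(WIFSIGNALED(st));
 *		assert(WTERMSIG(st) == SIGKILL);
 *		assert(!WIFEXITED(st));
 *	}
 */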
3519