1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134
135 #include <vm/vm_protos.h>
136 #include <os/log.h>
137 #include <os/system_event_log.h>
138
139 #include <pexpert/pexpert.h>
140
141 #include <kdp/kdp_dyld.h>
142
143 #if SYSV_SHM
144 #include <sys/shm_internal.h> /* shmexit */
145 #endif /* SYSV_SHM */
146 #if CONFIG_PERSONAS
147 #include <sys/persona.h>
148 #endif /* CONFIG_PERSONAS */
149 #if CONFIG_MEMORYSTATUS
150 #include <sys/kern_memorystatus.h>
151 #endif /* CONFIG_MEMORYSTATUS */
152 #if CONFIG_DTRACE
153 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
154 void dtrace_proc_exit(proc_t p);
155 #include <sys/dtrace_ptss.h>
156 #endif /* CONFIG_DTRACE */
157 #if CONFIG_MACF
158 #include <security/mac_framework.h>
159 #include <security/mac_mach_internal.h>
160 #include <sys/syscall.h>
161 #endif /* CONFIG_MACF */
162
163 #if CONFIG_MEMORYSTATUS
164 static void proc_memorystatus_remove(proc_t p);
165 #endif /* CONFIG_MEMORYSTATUS */
166 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
167 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
168 mach_exception_data_type_t code, mach_exception_data_type_t subcode,
169 uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
170 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
171 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
172 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
173 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
174 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
175 struct rusage_superset *rup, mach_exception_data_type_t code,
176 mach_exception_data_type_t subcode, uint64_t *udata_buffer,
177 int num_udata, os_reason_t reason, exception_type_t etype);
178 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
179 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
180 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
181 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
182 extern uint64_t get_task_phys_footprint_limit(task_t);
183 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
184 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
185
186 extern unsigned int exception_log_max_pid;
187
188 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
189
190 /*
191 * Flags for `reap_child_locked`.
192 */
193 __options_decl(reap_flags_t, uint32_t, {
194 /*
195 * Parent is exiting, so the kernel is responsible for reaping children.
196 */
197 REAP_DEAD_PARENT = 0x01,
198 /*
199 * Child process was re-parented to initproc.
200 */
201 REAP_REPARENTED_TO_INIT = 0x02,
202 /*
203 * `proc_list_lock` is held on entry.
204 */
205 REAP_LOCKED = 0x04,
206 /*
207 * Drop the `proc_list_lock` on return. Note that the `proc_list_lock` will
208 * be dropped internally by the function regardless.
209 */
210 REAP_DROP_LOCK = 0x08,
211 });
212 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
213
214 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
215
216 /*
217 * Things which should have prototypes in headers, but don't
218 */
219 void proc_exit(proc_t p);
220 int wait1continue(int result);
221 int waitidcontinue(int result);
222 kern_return_t sys_perf_notify(thread_t thread, int pid);
223 kern_return_t task_exception_notify(exception_type_t exception,
224 mach_exception_data_type_t code, mach_exception_data_type_t subcode);
225 void delay(int);
226
227 #if __has_feature(ptrauth_calls)
228 int exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
229 mach_exception_subcode_t subcode);
230 #endif /* __has_feature(ptrauth_calls) */
231
232 int exit_with_guard_exception(proc_t p, mach_exception_data_type_t code,
233 mach_exception_data_type_t subcode);
234 int exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
235 mach_exception_data_type_t subcode);
236 static int exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception,
237 mach_exception_code_t code, mach_exception_subcode_t subcode);
238
239 #if DEVELOPMENT || DEBUG
240 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
241 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
242 static pid_t proc_exit_lpexit_spin_pid = -1; /* wakeup point */
243 static int proc_exit_lpexit_spin_pos = -1; /* point to block */
244 static int proc_exit_lpexit_spinning = 0;
245 enum {
246 PELS_POS_START = 0, /* beginning of proc_exit */
247 PELS_POS_PRE_TASK_DETACH, /* before task/proc detach */
248 PELS_POS_POST_TASK_DETACH, /* after task/proc detach */
249 PELS_POS_END, /* end of proc_exit */
250 PELS_NPOS /* # valid values */
251 };
252
253 /* Panic if matching processes (delimited by ',') exit on error. */
254 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
255
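/*
 * Debug sysctl handler for debug.proc_exit_lpexit_spin_pid: selects the PID
 * that should be held in proc_exit(). Gated on the
 * "enable_proc_exit_lpexit_spin" boot-arg; writing a new value wakes any
 * thread currently held so it can re-evaluate.
 */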
256 static int
257 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
258 {
259 #pragma unused(oidp, arg1, arg2)
260 pid_t new_value;
261 int changed;
262 int error;
263
264 if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
265 return ENOENT;
266 }
267
268 error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
269 sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
270 if (error == 0 && changed != 0) {
271 if (new_value < -1) {
272 return EINVAL;
273 }
274 lck_mtx_lock(&proc_exit_lpexit_spin_lock);
275 proc_exit_lpexit_spin_pid = new_value;
276 wakeup(&proc_exit_lpexit_spin_pid);
277 proc_exit_lpexit_spinning = 0;
278 lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
279 }
280 return error;
281 }
282
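/*
 * Debug sysctl handler for debug.proc_exit_lpexit_spin_pos: selects which
 * PELS_POS_* point in proc_exit() the chosen PID should block at. Also gated
 * on the "enable_proc_exit_lpexit_spin" boot-arg.
 */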
283 static int
284 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
285 {
286 #pragma unused(oidp, arg1, arg2)
287 int new_value;
288 int changed;
289 int error;
290
291 if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
292 return ENOENT;
293 }
294
295 error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
296 sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
297 if (error == 0 && changed != 0) {
298 if (new_value < -1 || new_value >= PELS_NPOS) {
299 return EINVAL;
300 }
301 lck_mtx_lock(&proc_exit_lpexit_spin_lock);
302 proc_exit_lpexit_spin_pos = new_value;
303 wakeup(&proc_exit_lpexit_spin_pid);
304 proc_exit_lpexit_spinning = 0;
305 lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
306 }
307 return error;
308 }
309
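/*
 * Debug sysctl handler for debug.proc_exit_lpexit_spinning: reports whether a
 * thread is currently held at the requested pid/position. Attempts to set it
 * are rejected with EINVAL.
 */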
310 static int
311 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
312 {
313 #pragma unused(oidp, arg1, arg2)
314 int new_value;
315 int changed;
316 int error;
317
318 if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
319 return ENOENT;
320 }
321
322 error = sysctl_io_number(req, proc_exit_lpexit_spinning,
323 sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
324 if (error == 0 && changed != 0) {
325 return EINVAL;
326 }
327 return error;
328 }
329
330 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
331 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
332 NULL, sizeof(pid_t),
333 proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
334
335 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
336 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
337 NULL, sizeof(int),
338 proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
339
340 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
341 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
342 NULL, sizeof(int),
343 proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
344
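/*
 * Called at the PELS_POS_* checkpoints in proc_exit(): if this PID and
 * position match the debug sysctls above, sleep on the spin lock until the
 * sysctls are changed, logging when the process blocks and when it resumes.
 */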
345 static inline void
346 proc_exit_lpexit_check(pid_t pid, int pos)
347 {
348 if (proc_exit_lpexit_spin_pid == pid) {
349 bool slept = false;
350 lck_mtx_lock(&proc_exit_lpexit_spin_lock);
351 while (proc_exit_lpexit_spin_pid == pid &&
352 proc_exit_lpexit_spin_pos == pos) {
353 if (!slept) {
354 os_log(OS_LOG_DEFAULT,
355 "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
356 slept = true;
357 }
358 proc_exit_lpexit_spinning = 1;
359 msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
360 PWAIT, "proc_exit_lpexit_check", NULL);
361 proc_exit_lpexit_spinning = 0;
362 }
363 lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
364 if (slept) {
365 os_log(OS_LOG_DEFAULT,
366 "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
367 }
368 }
369 }
370 #endif /* DEVELOPMENT || DEBUG */
371
372 /*
373 * NOTE: Source and target may *NOT* overlap!
374 * XXX Should share code with bsd/dev/ppc/unix_signal.c
375 */
376 void
377 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
378 {
379 out->si_signo = in->si_signo;
380 out->si_errno = in->si_errno;
381 out->si_code = in->si_code;
382 out->si_pid = in->si_pid;
383 out->si_uid = in->si_uid;
384 out->si_status = in->si_status;
385 out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
386 /* following cast works for sival_int because of padding */
387 out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
388 out->si_band = (user32_long_t)in->si_band; /* range reduction */
389 }
390
391 void
392 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
393 {
394 out->si_signo = in->si_signo;
395 out->si_errno = in->si_errno;
396 out->si_code = in->si_code;
397 out->si_pid = in->si_pid;
398 out->si_uid = in->si_uid;
399 out->si_status = in->si_status;
400 out->si_addr = in->si_addr;
401 /* following cast works for sival_int because of padding */
402 out->si_value.sival_ptr = in->si_value.sival_ptr;
403 out->si_band = in->si_band; /* no range reduction needed for 64-bit */
404 }
405
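/*
 * Copy a kernel siginfo out to user space at `uaddr`, converting it to the
 * 64-bit or 32-bit user layout as indicated by `is64`.
 */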
406 static int
407 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
408 {
409 if (is64) {
410 user64_siginfo_t sinfo64;
411
412 bzero(&sinfo64, sizeof(sinfo64));
413 siginfo_user_to_user64(native, &sinfo64);
414 return copyout(&sinfo64, uaddr, sizeof(sinfo64));
415 } else {
416 user32_siginfo_t sinfo32;
417
418 bzero(&sinfo32, sizeof(sinfo32));
419 siginfo_user_to_user32(native, &sinfo32);
420 return copyout(&sinfo32, uaddr, sizeof(sinfo32));
421 }
422 }
423
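/*
 * Snapshot the current resource usage for `p` (with the physical footprint
 * cleared) and feed it, together with the exception codes, user data
 * pointers, and exit reason, to populate_corpse_crashinfo().
 */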
424 void
425 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
426 mach_exception_data_type_t code, mach_exception_data_type_t subcode,
427 uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
428 {
429 struct rusage_superset rup;
430
431 gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
432 rup.ri.ri_phys_footprint = 0;
433 populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
434 udata_buffer, num_udata, reason, etype);
435 }
436
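/*
 * For a jetsam per-process-limit kill, rewrite the exception code as an
 * EXC_RESOURCE memory high-watermark code (embedding the footprint limit in
 * MB) and clear the subcode; other exit reasons leave the codes untouched.
 */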
437 static void
438 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
439 {
440 mach_exception_data_type_t code_update = *code;
441 mach_exception_data_type_t subcode_update = *subcode;
442 if (p->p_exit_reason == OS_REASON_NULL) {
443 return;
444 }
445
446 switch (p->p_exit_reason->osr_namespace) {
447 case OS_REASON_JETSAM:
448 if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
449 /* Update the code with EXC_RESOURCE code for high memory watermark */
450 EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
451 EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
452 EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
453 subcode_update = 0;
454 break;
455 }
456
457 break;
458 default:
459 break;
460 }
461
462 *code = code_update;
463 *subcode = subcode_update;
464 return;
465 }
466
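/*
 * Pack the low 32 bits of the exit reason's namespace and code into a single
 * Mach exception data value; returns 0 if the process has no exit reason.
 */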
467 mach_exception_data_type_t
468 proc_encode_exit_exception_code(proc_t p)
469 {
470 uint64_t subcode = 0;
471
472 if (p->p_exit_reason == OS_REASON_NULL) {
473 return 0;
474 }
475
476 /* Embed first 32 bits of osr_namespace and osr_code in exception code */
477 ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
478 ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
479 return (mach_exception_data_type_t)subcode;
480 }
481
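/*
 * Fill the corpse's crash info kcdata with the exception codes and type,
 * process identity and start time, resource usage, ledger balances, code
 * signing identity, and an exit reason snapshot (nesting the reason's own
 * kcdata buffer when present).
 */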
482 static void
483 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
484 mach_exception_data_type_t code, mach_exception_data_type_t subcode,
485 uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
486 {
487 mach_vm_address_t uaddr = 0;
488 mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
489 exc_codes[0] = code;
490 exc_codes[1] = subcode;
491 cpu_type_t cputype;
492 struct proc_uniqidentifierinfo p_uniqidinfo;
493 struct proc_workqueueinfo pwqinfo;
494 int retval = 0;
495 uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
496 boolean_t is_corpse_fork;
497 uint32_t csflags;
498 unsigned int pflags = 0;
499 uint64_t max_footprint_mb;
500 uint64_t max_footprint;
501
502 uint64_t ledger_internal;
503 uint64_t ledger_internal_compressed;
504 uint64_t ledger_iokit_mapped;
505 uint64_t ledger_alternate_accounting;
506 uint64_t ledger_alternate_accounting_compressed;
507 uint64_t ledger_purgeable_nonvolatile;
508 uint64_t ledger_purgeable_nonvolatile_compressed;
509 uint64_t ledger_page_table;
510 uint64_t ledger_phys_footprint;
511 uint64_t ledger_phys_footprint_lifetime_max;
512 uint64_t ledger_network_nonvolatile;
513 uint64_t ledger_network_nonvolatile_compressed;
514 uint64_t ledger_wired_mem;
515 uint64_t ledger_tagged_footprint;
516 uint64_t ledger_tagged_footprint_compressed;
517 uint64_t ledger_media_footprint;
518 uint64_t ledger_media_footprint_compressed;
519 uint64_t ledger_graphics_footprint;
520 uint64_t ledger_graphics_footprint_compressed;
521 uint64_t ledger_neural_footprint;
522 uint64_t ledger_neural_footprint_compressed;
523
524 void *crash_info_ptr = task_get_corpseinfo(corpse_task);
525
526 #if CONFIG_MEMORYSTATUS
527 int memstat_dirty_flags = 0;
528 #endif
529
530 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
531 kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
532 }
533
534 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
535 pid_t pid = proc_getpid(p);
536 kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
537 }
538
539 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
540 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
541 }
542
543 /* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
544 if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
545 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
546 kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
547 }
548 }
549
550 static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
551 if (KERN_SUCCESS ==
552 kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
553 proc_piduniqidentifierinfo(p, &p_uniqidinfo);
554 kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
555 }
556
557 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
558 kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
559 }
560
561 csflags = (uint32_t)proc_getcsflags(p);
562 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
563 kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
564 }
565
566 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
567 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
568 }
569
570 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
571 struct timeval64 t64;
572 t64.tv_sec = (int64_t)p->p_start.tv_sec;
573 t64.tv_usec = (int64_t)p->p_start.tv_usec;
574 kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
575 }
576
577 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
578 kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
579 }
580
581 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
582 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
583 }
584
585 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
586 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
587 }
588
589 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
590 char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
591 proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
592 kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
593 zfree(ZV_NAMEI, buf);
594 }
595
596 pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
597 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
598 kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
599 }
600
601 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
602 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
603 }
604
605 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
606 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
607 }
608
609 cputype = cpu_type() & ~CPU_ARCH_MASK;
610 if (IS_64BIT_PROCESS(p)) {
611 cputype |= CPU_ARCH_ABI64;
612 } else if (proc_is64bit_data(p)) {
613 cputype |= CPU_ARCH_ABI64_32;
614 }
615
616 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
617 kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
618 }
619
620 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
621 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
622 }
623
624 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
625 max_footprint = get_task_phys_footprint_limit(proc_task(p));
626 max_footprint_mb = max_footprint >> 20;
627 kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
628 }
629
630 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
631 ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
632 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
633 }
634
635 // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
636 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
637 ledger_internal = get_task_internal(corpse_task);
638 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
639 }
640
641 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
642 ledger_internal_compressed = get_task_internal_compressed(corpse_task);
643 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
644 }
645
646 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
647 ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
648 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
649 }
650
651 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
652 ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
653 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
654 }
655
656 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
657 ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
658 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
659 }
660
661 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
662 ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
663 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
664 }
665
666 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
667 ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
668 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
669 }
670
671 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
672 ledger_page_table = get_task_page_table(corpse_task);
673 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
674 }
675
676 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
677 ledger_phys_footprint = get_task_phys_footprint(corpse_task);
678 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
679 }
680
681 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
682 ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
683 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
684 }
685
686 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
687 ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
688 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
689 }
690
691 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
692 ledger_wired_mem = get_task_wired_mem(corpse_task);
693 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
694 }
695
696 bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
697 retval = fill_procworkqueue(p, &pwqinfo);
698 if (retval == 0) {
699 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
700 kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
701 }
702 }
703
704 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
705 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
706 }
707
708 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
709 uid_t persona_id = proc_persona_id(p);
710 kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
711 }
712
713 #if CONFIG_COALITIONS
714 if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
715 uint64_t coalition_ids[COALITION_NUM_TYPES];
716 task_coalition_ids(proc_task(p), coalition_ids);
717 kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
718 }
719 #endif /* CONFIG_COALITIONS */
720
721 #if CONFIG_MEMORYSTATUS
722 memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
723 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
724 kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
725 }
726 #endif
727
728 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
729 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
730 }
731
732 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
733 ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
734 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
735 }
736
737 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
738 ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
739 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
740 }
741
742 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
743 ledger_media_footprint = get_task_media_footprint(corpse_task);
744 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
745 }
746
747 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
748 ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
749 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
750 }
751
752 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
753 ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
754 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
755 }
756
757 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
758 ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
759 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
760 }
761
762 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
763 ledger_neural_footprint = get_task_neural_footprint(corpse_task);
764 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
765 }
766
767 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
768 ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
769 kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
770 }
771
772 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
773 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
774 }
775
776 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
777 char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
778 ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
779 kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
780 }
781
782 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
783 is_corpse_fork = is_corpsefork(corpse_task);
784 kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
785 }
786
787 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
788 kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
789 }
790
791 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
792 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
793 }
794
795 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
796 kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
797 }
798
799 char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
800 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
801 const char * id = cs_identity_get(p);
802 if (id) {
803 strlcpy(signing_id, id, sizeof(signing_id));
804 }
805 kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
806 }
807 char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
808 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
809 const char * id = csproc_get_teamid(p);
810 if (id) {
811 strlcpy(team_id, id, sizeof(team_id));
812 }
813 kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
814 }
815
816 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
817 uint32_t category = 0;
818 if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
819 category = CS_VALIDATION_CATEGORY_INVALID;
820 }
821 kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
822 }
823
824 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
825 uint32_t trust = 0;
826 kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
827 if (ret != KERN_SUCCESS) {
828 trust = KCDATA_INVALID_CS_TRUST_LEVEL;
829 }
830 kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
831 }
832
833
834 if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
835 reason = p->p_exit_reason;
836 }
837 if (reason != OS_REASON_NULL) {
838 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
839 struct exit_reason_snapshot ers = {
840 .ers_namespace = reason->osr_namespace,
841 .ers_code = reason->osr_code,
842 .ers_flags = reason->osr_flags
843 };
844
845 kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
846 }
847
848 if (reason->osr_kcd_buf != 0) {
849 uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
850 assert(reason_buf_size != 0);
851
852 if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
853 kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
854 }
855 }
856 }
857
858 if (num_udata > 0) {
859 if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
860 sizeof(uint64_t), num_udata, &uaddr)) {
861 kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
862 }
863 }
864 }
865
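/*
 * Walk a corpse crash info kcdata buffer and return the exception type that
 * was recorded under TASK_CRASHINFO_EXCEPTION_TYPE.
 */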
866 exception_type_t
867 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
868 {
869 kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
870 corpse_info->kcd_length);
871 __assert_only uint32_t type = kcdata_iter_type(iter);
872 assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
873
874 iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
875 exception_type_t *etype = kcdata_iter_payload(iter);
876 return *etype;
877 }
878
879 /*
880 * Collect information required for generating a lightweight corpse for the
881 * current task, which may be terminating.
882 */
883 kern_return_t
884 current_thread_collect_backtrace_info(
885 kcdata_descriptor_t *new_desc,
886 exception_type_t etype,
887 mach_exception_data_t code,
888 mach_msg_type_number_t codeCnt,
889 void *reasonp)
890 {
891 kcdata_descriptor_t kcdata;
892 kern_return_t kr;
893 int frame_count = 0, max_frames = 100;
894 mach_vm_address_t uuid_info_addr = 0;
895 uint32_t uuid_info_count = 0;
896 uint32_t btinfo_flag = 0;
897 mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
898 natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
899 mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
900 thread_identifier_info_data_t th_info;
901 char threadname[MAXTHREADNAMESIZE];
902 void *btdata_kernel = NULL;
903 typedef uintptr_t user_btframe_t __kernel_data_semantics;
904 user_btframe_t *btframes = NULL;
905 os_reason_t reason = (os_reason_t)reasonp;
906 struct backtrace_user_info info = BTUINFO_INIT;
907 struct rusage_superset rup;
908 uint32_t platform;
909
910 task_t task = current_task();
911 proc_t p = current_proc();
912
913 bool has_64bit_addr = task_get_64bit_addr(current_task());
914 bool has_64bit_data = task_get_64bit_data(current_task());
915
916 if (new_desc == NULL) {
917 return KERN_INVALID_ARGUMENT;
918 }
919
920 /* First, collect backtrace frames */
921 btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
922 if (!btframes) {
923 return KERN_RESOURCE_SHORTAGE;
924 }
925
926 frame_count = backtrace_user(btframes, max_frames, NULL, &info);
927 if (info.btui_error || frame_count == 0) {
928 kfree_data(btframes, max_frames * sizeof(btframes[0]));
929 return KERN_FAILURE;
930 }
931
932 if ((info.btui_info & BTI_TRUNCATED) != 0) {
933 btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
934 }
935
936 /* Captured in kcdata descriptor below */
937 btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
938 if (!btdata_kernel) {
939 kfree_data(btframes, max_frames * sizeof(btframes[0]));
940 return KERN_RESOURCE_SHORTAGE;
941 }
942
943 kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
944 if (!kcdata) {
945 kfree_data(btdata_kernel, alloc_size);
946 kfree_data(btframes, max_frames * sizeof(btframes[0]));
947 return KERN_RESOURCE_SHORTAGE;
948 }
949
950 /* First reserve space in kcdata blob for the btinfo flag fields */
951 if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
952 sizeof(uint32_t), &btinfo_flag_addr)) {
953 kfree_data(btdata_kernel, alloc_size);
954 kfree_data(btframes, max_frames * sizeof(btframes[0]));
955 kcdata_memory_destroy(kcdata);
956 return KERN_RESOURCE_SHORTAGE;
957 }
958
959 if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
960 (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
961 sizeof(uintptr_t), frame_count, &kaddr)) {
962 kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
963 }
964
965 #if __LP64__
966 /* We only support async stacks on 64-bit kernels */
967 frame_count = 0;
968
969 if (info.btui_async_frame_addr != 0) {
970 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
971 sizeof(uint32_t), &kaddr)) {
972 uint32_t idx = info.btui_async_start_index;
973 kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
974 }
975 struct backtrace_control ctl = {
976 .btc_frame_addr = info.btui_async_frame_addr,
977 .btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
978 };
979
980 info = BTUINFO_INIT;
981 frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
982 if (info.btui_error == 0 && frame_count > 0) {
983 if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
984 TASK_BTINFO_ASYNC_BACKTRACE64,
985 sizeof(uintptr_t), frame_count, &kaddr)) {
986 kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
987 }
988 }
989
990 if ((info.btui_info & BTI_TRUNCATED) != 0) {
991 btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
992 }
993 }
994 #endif
995
996 /* Backtrace collection done, free the frames buffer */
997 kfree_data(btframes, max_frames * sizeof(btframes[0]));
998 btframes = NULL;
999
1000 thread_set_exec_promotion(current_thread());
1001 /* Next, suspend the task briefly and collect image load infos */
1002 task_suspend_internal(task);
1003
1004 /* all_image_info struct is ABI, in agreement with address width */
1005 if (has_64bit_addr) {
1006 struct user64_dyld_all_image_infos task_image_infos = {};
1007 struct btinfo_sc_load_info64 sc_info;
1008 (void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1009 sizeof(struct user64_dyld_all_image_infos));
1010 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1011 uuid_info_addr = task_image_infos.uuidArray;
1012
1013 sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1014 sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1015 memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1016 sizeof(task_image_infos.sharedCacheUUID));
1017
1018 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1019 TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1020 kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1021 }
1022 } else {
1023 struct user32_dyld_all_image_infos task_image_infos = {};
1024 struct btinfo_sc_load_info sc_info;
1025 (void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1026 sizeof(struct user32_dyld_all_image_infos));
1027 uuid_info_count = task_image_infos.uuidArrayCount;
1028 uuid_info_addr = task_image_infos.uuidArray;
1029
1030 sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1031 sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1032 memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1033 sizeof(task_image_infos.sharedCacheUUID));
1034
1035 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1036 TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1037 kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1038 }
1039 }
1040
1041 if (!uuid_info_addr) {
1042 /*
1043 * Can happen when we catch dyld in the middle of updating
1044 * this data structure, or copyin of all_image_info struct failed.
1045 */
1046 task_resume_internal(task);
1047 thread_clear_exec_promotion(current_thread());
1048 kfree_data(btdata_kernel, alloc_size);
1049 kcdata_memory_destroy(kcdata);
1050 return KERN_MEMORY_ERROR;
1051 }
1052
1053 if (uuid_info_count > 0) {
1054 uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1055 sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1056
1057 if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1058 (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1059 uuid_info_size, uuid_info_count, &kaddr)) {
1060 if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1061 task_resume_internal(task);
1062 thread_clear_exec_promotion(current_thread());
1063 kfree_data(btdata_kernel, alloc_size);
1064 kcdata_memory_destroy(kcdata);
1065 return KERN_MEMORY_ERROR;
1066 }
1067 }
1068 }
1069
1070 task_resume_internal(task);
1071 thread_clear_exec_promotion(current_thread());
1072
1073 /* Next, collect all other information */
1074 thread_flavor_t tsflavor;
1075 mach_msg_type_number_t tscount;
1076
1077 #if defined(__x86_64__) || defined(__i386__)
1078 tsflavor = x86_THREAD_STATE; /* unified */
1079 tscount = x86_THREAD_STATE_COUNT;
1080 #else
1081 tsflavor = ARM_THREAD_STATE; /* unified */
1082 tscount = ARM_UNIFIED_THREAD_STATE_COUNT;
1083 #endif
1084
1085 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1086 sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1087 struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1088 bt_thread_state->flavor = tsflavor;
1089 bt_thread_state->count = tscount;
1090 /* variable-sized tstate array follows */
1091
1092 kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1093 (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1094 if (kr != KERN_SUCCESS) {
1095 bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1096 if (kr == KERN_TERMINATED) {
1097 btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1098 }
1099 }
1100 }
1101
1102 #if defined(__x86_64__) || defined(__i386__)
1103 tsflavor = x86_EXCEPTION_STATE; /* unified */
1104 tscount = x86_EXCEPTION_STATE_COUNT;
1105 #else
1106 #if defined(__arm64__)
1107 if (has_64bit_data) {
1108 tsflavor = ARM_EXCEPTION_STATE64;
1109 tscount = ARM_EXCEPTION_STATE64_COUNT;
1110 } else
1111 #endif /* defined(__arm64__) */
1112 {
1113 tsflavor = ARM_EXCEPTION_STATE;
1114 tscount = ARM_EXCEPTION_STATE_COUNT;
1115 }
1116 #endif /* defined(__x86_64__) || defined(__i386__) */
1117
1118 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1119 sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1120 struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1121 bt_thread_state->flavor = tsflavor;
1122 bt_thread_state->count = tscount;
1123 /* variable-sized tstate array follows */
1124
1125 kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1126 (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1127 if (kr != KERN_SUCCESS) {
1128 bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1129 if (kr == KERN_TERMINATED) {
1130 btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1131 }
1132 }
1133 }
1134
1135 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1136 pid_t pid = proc_getpid(p);
1137 kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1138 }
1139
1140 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1141 kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1142 }
1143
1144 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1145 kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1146 }
1147
1148 #if CONFIG_COALITIONS
1149 if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1150 uint64_t coalition_ids[COALITION_NUM_TYPES];
1151 task_coalition_ids(proc_task(p), coalition_ids);
1152 kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1153 }
1154 #endif /* CONFIG_COALITIONS */
1155
1156 /* V0 is sufficient for ReportCrash */
1157 gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1158 rup.ri.ri_phys_footprint = 0;
1159 /* Soft crash, proc did not exit */
1160 rup.ri.ri_proc_exit_abstime = 0;
1161 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1162 kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1163 }
1164
1165 platform = proc_platform(current_proc());
1166 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1167 kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1168 }
1169
1170 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1171 char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1172 proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1173 kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1174 zfree(ZV_NAMEI, buf);
1175 }
1176
1177 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1178 kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1179 }
1180
1181 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1182 kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1183 }
1184
1185 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1186 unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1187 kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1188 }
1189
1190 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1191 cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1192 if (has_64bit_addr) {
1193 cputype |= CPU_ARCH_ABI64;
1194 } else if (has_64bit_data) {
1195 cputype |= CPU_ARCH_ABI64_32;
1196 }
1197 kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1198 }
1199
1200 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1201 kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1202 }
1203
1204 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1205 kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1206 }
1207
1208 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1209 kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1210 }
1211
1212 assert(codeCnt <= EXCEPTION_CODE_MAX);
1213
1214 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1215 sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1216 kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1217 }
1218
1219 if (reason != OS_REASON_NULL) {
1220 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1221 struct exit_reason_snapshot ers = {
1222 .ers_namespace = reason->osr_namespace,
1223 .ers_code = reason->osr_code,
1224 .ers_flags = reason->osr_flags
1225 };
1226
1227 kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1228 }
1229
1230 if (reason->osr_kcd_buf != 0) {
1231 uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1232 assert(reason_buf_size != 0);
1233
1234 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1235 kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1236 }
1237 }
1238 }
1239
1240 threadname[0] = '\0';
1241 if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1242 sizeof(threadname), &kaddr)) {
1243 bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1244 kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1245 }
1246
1247 kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1248 if (kr == KERN_TERMINATED) {
1249 btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1250 }
1251
1252
1253 kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1254 sizeof(uint64_t), &kaddr);
1255
1256 /*
1257 * If the last kcdata_get_memory_addr() failed (unlikely), signal to exception
1258 * handler (ReportCrash) that lightweight corpse collection ran out of space and the
1259 * result is incomplete.
1260 */
1261 if (last_kr != KERN_SUCCESS) {
1262 btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1263 }
1264
1265 if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1266 kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1267 }
1268
1269 /* Lastly, copy the flags to the address we reserved at the beginning. */
1270 kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1271
1272 *new_desc = kcdata;
1273
1274 return KERN_SUCCESS;
1275 }
1276
1277 /*
1278 * We only parse exit reason kcdata blobs for critical process before they die
1279 * and we're going to panic or for opt-in, limited diagnostic tools.
1280 *
1281 * Meant to be called immediately before panicking or limited diagnostic
1282 * scenarios.
1283 */
1284 char *
1285 exit_reason_get_string_desc(os_reason_t exit_reason)
1286 {
1287 kcdata_iter_t iter;
1288
1289 if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1290 exit_reason->osr_bufsize == 0) {
1291 return NULL;
1292 }
1293
1294 iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1295 if (!kcdata_iter_valid(iter)) {
1296 #if DEBUG || DEVELOPMENT
1297 printf("exit reason has invalid exit reason buffer\n");
1298 #endif
1299 return NULL;
1300 }
1301
1302 if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1303 #if DEBUG || DEVELOPMENT
1304 printf("exit reason buffer type mismatch, expected %d got %d\n",
1305 KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1306 #endif
1307 return NULL;
1308 }
1309
1310 iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1311 if (!kcdata_iter_valid(iter)) {
1312 return NULL;
1313 }
1314
1315 return (char *)kcdata_iter_payload(iter);
1316 }
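/*
 * Illustrative caller sketch (an assumption, mirroring proc_handle_critical_exit()
 * below): the returned pointer aliases the reason's kcdata payload, so it must not
 * be freed and must not outlive the os_reason_t it came from.
 *
 *	char *desc = exit_reason_get_string_desc(p->p_exit_reason);
 *	printf("exit reason description: %s\n", desc ? desc : "none");
 */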
1317
1318 static int initproc_spawned = 0;
1319
1320 static int
1321 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1322 {
1323 if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1324 // Can only ever be set by launchd, and only once at boot
1325 return EPERM;
1326 }
1327 return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1328 }
1329
1330 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1331 CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1332 sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
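/*
 * Minimal userspace sketch (assumed call site, not part of this file): only
 * launchd (pid 1) may set this, and only once, so after reaching main() it
 * could mark itself spawned with something like:
 *
 *	int one = 1;
 *	sysctlbyname("kern.initproc_spawned", NULL, NULL, &one, sizeof(one));
 */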
1333
1334 #if DEVELOPMENT || DEBUG
1335
1336 /* disable user faults */
1337 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1338 #endif /* DEVELOPMENT || DEBUG */
1339
1340 #define OS_REASON_IFLAG_USER_FAULT 0x1
1341
1342 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC 5
1343
1344 static int
1345 abort_with_payload_internal(proc_t p,
1346 uint32_t reason_namespace, uint64_t reason_code,
1347 user_addr_t payload, uint32_t payload_size,
1348 user_addr_t reason_string, uint64_t reason_flags,
1349 uint32_t internal_flags)
1350 {
1351 os_reason_t exit_reason = OS_REASON_NULL;
1352 kern_return_t kr = KERN_SUCCESS;
1353
1354 if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1355 uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1356 memory_order_relaxed);
1357
1358 #if DEVELOPMENT || DEBUG
1359 if (bootarg_disable_user_faults) {
1360 return EQFULL;
1361 }
1362 #endif /* DEVELOPMENT || DEBUG */
1363
1364 for (;;) {
1365 if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1366 return EQFULL;
1367 }
1368 // this reloads the value in old_value
1369 if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1370 &old_value, old_value + 1, memory_order_relaxed,
1371 memory_order_relaxed)) {
1372 break;
1373 }
1374 }
1375 }
1376
1377 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1378 proc_getpid(p), reason_namespace,
1379 reason_code, 0, 0);
1380
1381 exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1382 payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1383
1384 if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1385 mach_exception_code_t code = 0;
1386
1387 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1388 EXC_GUARD_ENCODE_FLAVOR(code, 0);
1389 EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1390
1391 if (exit_reason == OS_REASON_NULL) {
1392 kr = KERN_RESOURCE_SHORTAGE;
1393 } else {
1394 kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1395 }
1396 os_reason_free(exit_reason);
1397 } else {
1398 /*
1399 * We use SIGABRT (rather than calling exit directly from here) so that
1400 * the debugger can catch abort_with_{reason,payload} calls.
1401 */
1402 psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1403 }
1404
1405 switch (kr) {
1406 case KERN_SUCCESS:
1407 return 0;
1408 case KERN_NOT_SUPPORTED:
1409 return ENOTSUP;
1410 case KERN_INVALID_ARGUMENT:
1411 return EINVAL;
1412 case KERN_RESOURCE_SHORTAGE:
1413 default:
1414 return EBUSY;
1415 }
1416 }
1417
1418 int
1419 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1420 __unused void *retval)
1421 {
1422 abort_with_payload_internal(cur_proc, args->reason_namespace,
1423 args->reason_code, args->payload, args->payload_size,
1424 args->reason_string, args->reason_flags, 0);
1425
1426 return 0;
1427 }
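/*
 * Hypothetical userspace sketch: this syscall is normally reached through the
 * abort_with_payload()/abort_with_reason() wrappers in libsyscall, e.g.
 *
 *	abort_with_reason(OS_REASON_TEST, 42, "example failure",
 *	    OS_REASON_FLAG_NO_CRASH_REPORT);
 *
 * The kernel ORs in OS_REASON_FLAG_ABORT and delivers SIGABRT, as implemented
 * by abort_with_payload_internal() above.
 */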
1428
1429 int
1430 os_fault_with_payload(struct proc *cur_proc,
1431 struct os_fault_with_payload_args *args, __unused int *retval)
1432 {
1433 return abort_with_payload_internal(cur_proc, args->reason_namespace,
1434 args->reason_code, args->payload, args->payload_size,
1435 args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1436 }
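/*
 * Hypothetical userspace sketch: the wrapper takes an explicit payload buffer,
 * e.g.
 *
 *	char payload[] = "non-fatal diagnostic";
 *	os_fault_with_payload(OS_REASON_TEST, 1, payload, sizeof(payload),
 *	    "example user fault", 0);
 *
 * Unlike abort_with_payload(), the OS_REASON_IFLAG_USER_FAULT path above raises
 * a simulated EXC_GUARD instead of SIGABRT and is limited to
 * OS_REASON_TOTAL_USER_FAULTS_PER_PROC faults per process (EQFULL afterwards).
 */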
1437
1438
1439 /*
1440 * exit --
1441 * Death of process.
1442 */
1443 __attribute__((noreturn))
1444 void
1445 exit(proc_t p, struct exit_args *uap, int *retval)
1446 {
1447 p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1448 exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1449
1450 thread_exception_return();
1451 /* NOTREACHED */
1452 while (TRUE) {
1453 thread_block(THREAD_CONTINUE_NULL);
1454 }
1455 /* NOTREACHED */
1456 }
1457
1458 /*
1459 * Exit: deallocate address space and other resources, change proc state
1460 * to zombie, and unlink proc from allproc and parent's lists. Save exit
1461 * status and rusage for wait(). Check for child processes and orphan them.
1462 */
1463 int
1464 exit1(proc_t p, int rv, int *retval)
1465 {
1466 return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1467 }
1468
1469 int
1470 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1471 int jetsam_flags)
1472 {
1473 return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1474 }
1475
1476 /*
1477 * NOTE: exit_with_reason drops a reference on the passed exit_reason
1478 */
1479 int
1480 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1481 int jetsam_flags, struct os_reason *exit_reason)
1482 {
1483 thread_t self = current_thread();
1484 struct task *task = proc_task(p);
1485 struct uthread *ut;
1486 int error = 0;
1487 bool proc_exiting = false;
1488
1489 #if DEVELOPMENT || DEBUG
1490 /*
1491 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1492 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1493 */
1494 if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1495 panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1496 __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1497 }
1498 #endif
1499
1500 /*
1501 * If a thread in this task has already
1502 * called exit(), then halt any others
1503 * right here.
1504 */
1505
1506 ut = get_bsdthread_info(self);
1507 (void)retval;
1508
1509 /*
1510 * The parameter list of audit_syscall_exit() was augmented to
1511 * take the Darwin syscall number as the first parameter,
1512 * which is currently required by mac_audit_postselect().
1513 */
1514
1515 /*
1516 * The BSM token contains two components: an exit status as passed
1517 * to exit(), and a return value to indicate what sort of exit it
1518 * was. The exit status is WEXITSTATUS(rv), but it's not clear
1519 * what the return value is.
1520 */
1521 AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1522 /*
1523 * TODO: what to audit here when jetsam calls exit and the uthread,
1524 * 'ut' does not belong to the proc, 'p'.
1525 */
1526 AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1527
1528 DTRACE_PROC1(exit, int, CLD_EXITED);
1529
1530 /* mark that the process is going to exit and pull it out of DBG/disk throttle */
1531 /* TODO: This should be done after becoming exit thread */
1532 proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1533 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1534
1535 proc_lock(p);
1536 error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1537 if (error == EDEADLK) {
1538 /*
1539 * If proc_transstart() returns EDEADLK, then another thread
1540 * is either exec'ing or exiting. Return an error and allow
1541 * the other thread to continue.
1542 */
1543 proc_unlock(p);
1544 os_reason_free(exit_reason);
1545 if (current_proc() == p) {
1546 if (p->exit_thread == self) {
1547 panic("exit_thread failed to exit");
1548 }
1549
1550 if (thread_can_terminate) {
1551 thread_exception_return();
1552 }
1553 }
1554
1555 return error;
1556 }
1557
1558 proc_exiting = !!(p->p_lflag & P_LEXIT);
1559
1560 while (proc_exiting || p->exit_thread != self) {
1561 if (proc_exiting || sig_try_locked(p) <= 0) {
1562 proc_transend(p, 1);
1563 os_reason_free(exit_reason);
1564
1565 if (get_threadtask(self) != task) {
1566 proc_unlock(p);
1567 return 0;
1568 }
1569 proc_unlock(p);
1570
1571 thread_terminate(self);
1572 if (!thread_can_terminate) {
1573 return 0;
1574 }
1575
1576 thread_exception_return();
1577 /* NOTREACHED */
1578 }
1579 sig_lock_to_exit(p);
1580 }
1581
1582 if (exit_reason != OS_REASON_NULL) {
1583 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1584 proc_getpid(p), exit_reason->osr_namespace,
1585 exit_reason->osr_code, 0, 0);
1586 }
1587
1588 assert(p->p_exit_reason == OS_REASON_NULL);
1589 p->p_exit_reason = exit_reason;
1590
1591 p->p_lflag |= P_LEXIT;
1592 p->p_xstat = rv;
1593 p->p_lflag |= jetsam_flags;
1594
1595 proc_transend(p, 1);
1596 proc_unlock(p);
1597
1598 proc_prepareexit(p, rv, perf_notify);
1599
1600 /* Last thread to terminate will call proc_exit() */
1601 task_terminate_internal(task);
1602
1603 return 0;
1604 }
1605
1606 #if CONFIG_MEMORYSTATUS
1607 /*
1608 * Remove this process from jetsam bands for freezing or exiting. Note this will block if the process
1609 * is currently being frozen.
1610 * The proc_list_lock is held by the caller.
1611 * NB: If the process should be ineligible for future freezing or jetsam, the caller should first set
1612 * the p_refcount P_REF_DEAD bit.
1613 */
1614 static void
1615 proc_memorystatus_remove(proc_t p)
1616 {
1617 LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1618 while (memorystatus_remove(p) == EAGAIN) {
1619 os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1620 msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1621 }
1622 }
1623 #endif
1624
1625 #if DEVELOPMENT
1626 boolean_t crash_behavior_test_mode = FALSE;
1627 boolean_t crash_behavior_test_would_panic = FALSE;
1628 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1629 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1630 #endif /* DEVELOPMENT */
1631
1632 static bool
1633 _proc_is_crashing_signal(int sig)
1634 {
1635 bool result = false;
1636 switch (sig) {
1637 case SIGILL:
1638 case SIGABRT:
1639 case SIGFPE:
1640 case SIGBUS:
1641 case SIGSEGV:
1642 case SIGSYS:
1643 /*
1644 * If SIGTRAP is the terminating signal, then we can safely assume the
1645 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1646 * a process calls __builtin_trap(), which will abort.)
1647 */
1648 case SIGTRAP:
1649 result = true;
1650 }
1651
1652 return result;
1653 }
1654
1655 static bool
1656 _proc_is_fatal_reason(os_reason_t reason)
1657 {
1658 if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1659 /* Abort is always fatal even if there is no crash report generated */
1660 return true;
1661 }
1662 if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1663 /*
1664 * No crash report means this reason shouldn't be considered fatal
1665 * unless we are in test mode
1666 */
1667 #if DEVELOPMENT
1668 if (crash_behavior_test_mode) {
1669 return true;
1670 }
1671 #endif /* DEVELOPMENT */
1672 return false;
1673 }
1674 // By default all OS_REASON are fatal
1675 return true;
1676 }
1677
1678 static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1679
1680 static bool
1681 proc_should_trigger_panic(proc_t p, int rv)
1682 {
1683 if (p == initproc) {
1684 /* Always panic for launchd */
1685 return true;
1686 }
1687
1688 if (panic_on_crash_disabled) {
1689 printf("panic-on-crash disabled via boot-arg\n");
1690 return false;
1691 }
1692
1693 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1694 return true;
1695 }
1696
1697 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1698 return true;
1699 }
1700
1701 if (p->p_posix_spawn_failed) {
1702 /* posix_spawn failures normally don't qualify for panics */
1703 return false;
1704 }
1705
1706 bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1707 if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1708 return false;
1709 }
1710
1711 if (WIFEXITED(rv)) {
1712 int code = WEXITSTATUS(rv);
1713
1714 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1715 if (code == 0) {
1716 /* No panic if we exit 0 */
1717 return false;
1718 } else {
1719 /* Panic on non-zero exit */
1720 return true;
1721 }
1722 } else {
1723 /* No panic on normal exit if the process doesn't have the non-zero flag set */
1724 return false;
1725 }
1726 } else if (WIFSIGNALED(rv)) {
1727 int signal = WTERMSIG(rv);
1728 /* This is a crash (non-normal exit) */
1729 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1730 os_reason_t reason = p->p_exit_reason;
1731 if (reason != OS_REASON_NULL) {
1732 if (!_proc_is_fatal_reason(reason)) {
1733 // Skip non-fatal terminate_with_reason
1734 return false;
1735 }
1736 if (reason->osr_namespace == OS_REASON_SIGNAL) {
1737 /*
1738 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
1739 * in osr_code, so we should check that signal here
1740 */
1741 return _proc_is_crashing_signal((int)reason->osr_code);
1742 } else {
1743 /*
1744 * This branch covers the case of terminate_with_reason(), which
1745 * delivers a SIGTERM that is still considered a crash even
1746 * though the signal itself is not considered a crashing signal.
1747 */
1748 return true;
1749 }
1750 }
1751 return _proc_is_crashing_signal(signal);
1752 } else {
1753 return false;
1754 }
1755 } else {
1756 /*
1757 * This branch implies that we didn't exit normally nor did we receive
1758 * a signal. This should be unreachable.
1759 */
1760 return true;
1761 }
1762 }
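/*
 * Worked example (illustrative, assuming the spawn attributes were set): a
 * process spawned with POSIX_SPAWN_PANIC_ON_CRASH that dies with SIGSEGV takes
 * the WIFSIGNALED() branch above and panics, since SIGSEGV is a crashing
 * signal; the same process exiting normally with status 0 does not panic
 * unless POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT is also set, and even then only
 * for a non-zero exit status.
 */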
1763
1764 static void
1765 proc_crash_coredump(proc_t p)
1766 {
1767 (void)p;
1768 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1769 /*
1770 * For debugging purposes, generate a core file of initproc before
1771 * panicking. Leave at least 300 MB free on the root volume, and ignore
1772 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1773 * before the panic hits.
1774 */
1775
1776 int err;
1777 uint64_t coredump_start = mach_absolute_time();
1778 uint64_t coredump_end;
1779 clock_sec_t tv_sec;
1780 clock_usec_t tv_usec;
1781 uint32_t tv_msec;
1782
1783
1784 err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1785
1786 coredump_end = mach_absolute_time();
1787
1788 absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1789
1790 tv_msec = tv_usec / 1000;
1791
1792 if (err != 0) {
1793 printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1794 proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1795 } else {
1796 printf("Generated core file for pid: %d in %d.%03d seconds\n",
1797 proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1798 }
1799 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1800 }
1801
1802 static void
1803 proc_handle_critical_exit(proc_t p, int rv)
1804 {
1805 if (!proc_should_trigger_panic(p, rv)) {
1806 // No panic, bail out
1807 return;
1808 }
1809
1810 #if DEVELOPMENT
1811 if (crash_behavior_test_mode) {
1812 crash_behavior_test_would_panic = TRUE;
1813 // Force test mode off after hitting a panic
1814 crash_behavior_test_mode = FALSE;
1815 return;
1816 }
1817 #endif /* DEVELOPMENT */
1818
1819 char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1820
1821 if (p->p_exit_reason == OS_REASON_NULL) {
1822 printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1823 proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1824 } else {
1825 printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1826 p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1827 exit_reason_desc : "none");
1828 }
1829
1830 const char *prefix_str;
1831 char prefix_str_buf[128];
1832
1833 if (p == initproc) {
1834 if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1835 prefix_str = "LTE preinit process exited";
1836 } else if (initproc_spawned) {
1837 prefix_str = "initproc exited";
1838 } else {
1839 prefix_str = "initproc failed to start";
1840 }
1841 } else {
1842 /* For processes that aren't launchd, just use the process name and pid */
1843 snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1844 prefix_str = prefix_str_buf;
1845 }
1846
1847 proc_crash_coredump(p);
1848
1849 sync(p, (void *)NULL, (int *)NULL);
1850
1851 if (p->p_exit_reason == OS_REASON_NULL) {
1852 panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1853 prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1854 } else {
1855 panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1856 ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1857 prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1858 exit_reason_desc ? exit_reason_desc : "none");
1859 }
1860 }
1861
1862 void
1863 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1864 {
1865 mach_exception_data_type_t code = 0, subcode = 0;
1866 exception_type_t etype;
1867
1868 struct uthread *ut;
1869 thread_t self = current_thread();
1870 ut = get_bsdthread_info(self);
1871 struct rusage_superset *rup;
1872 int kr = 0;
1873 int create_corpse = FALSE;
1874
1875 if (p->p_crash_behavior != 0 || p == initproc) {
1876 proc_handle_critical_exit(p, rv);
1877 }
1878
1879 /*
1880 * Generate a corefile/crashlog if:
1881 * The process doesn't have an exit reason that indicates no crash report should be created
1882 * AND any of the following are true:
1883 * - The process was terminated due to a fatal signal that generates a core
1884 * - The process was killed due to a code signing violation
1885 * - The process has an exit reason that indicates we should generate a crash report
1886 *
1887 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1888 * (which normally triggers a core) but may indicate that no crash report should be created.
1889 */
1890 if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1891 (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1892 (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1893 OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1894 /*
1895 * Workaround for processes checking up on PT_DENY_ATTACH:
1896 * should be backed out post-Leopard (details in 5431025).
1897 */
1898 if ((SIGSEGV == WTERMSIG(rv)) &&
1899 (p->p_pptr->p_lflag & P_LNOATTACH)) {
1900 goto skipcheck;
1901 }
1902
1903 /*
1904 * Crash Reporter looks for the signal value, original exception
1905 * type, and low 20 bits of the original code in code[0]
1906 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1907 */
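/*
 * Worked example with hypothetical values: SIGSEGV (11) raised from
 * EXC_BAD_ACCESS (1) with code KERN_INVALID_ADDRESS (1) packs as
 * (0x0b << 24) | (0x1 << 20) | 0x00001 == 0x0b100001.
 */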
1908 code = ((WTERMSIG(rv) & 0xff) << 24) |
1909 ((ut->uu_exception & 0x0f) << 20) |
1910 ((int)ut->uu_code & 0xfffff);
1911 subcode = ut->uu_subcode;
1912 etype = ut->uu_exception;
1913
1914 /* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1915 if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1916 etype = EXC_CRASH;
1917 }
1918
1919 #if (DEVELOPMENT || DEBUG)
1920 if (p->p_pid <= exception_log_max_pid) {
1921 char *proc_name = proc_best_name(p);
1922 if (PROC_HAS_EXITREASON(p)) {
1923 record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1924 "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1925 proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1926 exit_reason_get_string_desc(p->p_exit_reason));
1927 } else {
1928 record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1929 "pid: %d -- process name: %s -- exit status %d",
1930 proc_getpid(p), proc_name, WEXITSTATUS(rv));
1931 }
1932 }
1933 #endif
1934
1935 kr = task_exception_notify(EXC_CRASH, code, subcode);
1936
1937 /* Nobody handled EXC_CRASH?? remember to make corpse */
1938 if (kr != 0 && p == current_proc()) {
1939 /*
1940 * Do not create corpse when exit is called from jetsam thread.
1941 * Corpse creation code requires that proc_prepareexit is
1942 * called by the exiting proc and not the kernel_proc.
1943 */
1944 create_corpse = TRUE;
1945 }
1946
1947 /*
1948 * Revalidate the code signing of the text pages around current PC.
1949 * This is an attempt to detect and repair faults due to memory
1950 * corruption of text pages.
1951 *
1952 * The goal here is to fix up infrequent memory corruption due to
1953 * things like aging RAM bit flips. So the approach is to only expect
1954 * to have to fix up one thing per crash. This also limits the amount
1955 * of extra work we cause in case this is a development kernel with an
1956 * active memory stomp happening.
1957 */
1958 task_t task = proc_task(p);
1959 uintptr_t bt[2];
1960 struct backtrace_user_info btinfo = BTUINFO_INIT;
1961 unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
1962 int bt_err = btinfo.btui_error;
1963 if (bt_err == 0 && frame_count >= 1) {
1964 /*
1965 * First check the page containing the current PC.
1966 * This passes if the page code signs -or- if we can't figure out
1967 * what is at that address. The latter case is so we continue checking
1968 * previous pages which may be corrupt and have caused a wild branch.
1969 */
1970 kr = revalidate_text_page(task, bt[0]);
1971
1972 /* No corruption found, check the previous sequential page */
1973 if (kr == KERN_SUCCESS) {
1974 kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
1975 }
1976
1977 /* Still no corruption found, check the current function's caller */
1978 if (kr == KERN_SUCCESS) {
1979 if (frame_count > 1 &&
1980 atop(bt[0]) != atop(bt[1]) && /* don't recheck PC page */
1981 atop(bt[0]) - 1 != atop(bt[1])) { /* don't recheck page before */
1982 kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
1983 }
1984 }
1985
1986 /*
1987 * Log that we found a corruption.
1988 */
1989 if (kr != KERN_SUCCESS) {
1990 os_log(OS_LOG_DEFAULT,
1991 "Text page corruption detected in dying process %d\n", proc_getpid(p));
1992 }
1993 }
1994 }
1995
1996 skipcheck:
1997 if (task_is_driver(proc_task(p)) && PROC_HAS_EXITREASON(p)) {
1998 IOUserServerRecordExitReason(proc_task(p), p->p_exit_reason);
1999 }
2000
2001 /* Notify the perf server? */
2002 if (perf_notify) {
2003 (void)sys_perf_notify(self, proc_getpid(p));
2004 }
2005
2006
2007 /* stash the usage into corpse data if create_corpse == TRUE */
2008 if (create_corpse == TRUE) {
2009 kr = task_mark_corpse(proc_task(p));
2010 if (kr != KERN_SUCCESS) {
2011 if (kr == KERN_NO_SPACE) {
2012 printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
2013 } else if (kr == KERN_NOT_SUPPORTED) {
2014 printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n", proc_getpid(p));
2015 } else if (kr == KERN_TERMINATED) {
2016 printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
2017 } else {
2018 printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
2019 }
2020 create_corpse = FALSE;
2021 }
2022 }
2023
2024 if (!proc_is_shadow(p)) {
2025 /*
2026 * Before this process becomes a zombie, stash resource usage
2027 * stats in the proc for external observers to query
2028 * via proc_pid_rusage().
2029 *
2030 * If the zombie allocation fails, just punt the stats.
2031 */
2032 rup = zalloc(zombie_zone);
2033 gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2034 rup->ri.ri_phys_footprint = 0;
2035 rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2036 /*
2037 * Make the rusage_info visible to external observers
2038 * only after it has been completely filled in.
2039 */
2040 p->p_ru = rup;
2041 }
2042
2043 if (create_corpse) {
2044 int est_knotes = 0, num_knotes = 0;
2045 uint64_t *buffer = NULL;
2046 uint32_t buf_size = 0;
2047
2048 /* Get all the udata pointers from kqueue */
2049 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2050 if (est_knotes > 0) {
2051 buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2052 buffer = kalloc_data(buf_size, Z_WAITOK);
2053 if (buffer) {
2054 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2055 if (num_knotes > est_knotes + 32) {
2056 num_knotes = est_knotes + 32;
2057 }
2058 }
2059 }
2060
2061 /* Update the code, subcode based on exit reason */
2062 proc_update_corpse_exception_codes(p, &code, &subcode);
2063 populate_corpse_crashinfo(p, proc_task(p), rup,
2064 code, subcode, buffer, num_knotes, NULL, etype);
2065 kfree_data(buffer, buf_size);
2066 }
2067 /*
2068 * Remove proc from allproc queue and from pidhash chain.
2069 * Need to do this before we do anything that can block.
2070 * Not doing so causes things like mount() to find this on allproc
2071 * in a partially cleaned state.
2072 */
2073
2074 proc_list_lock();
2075
2076 #if CONFIG_MEMORYSTATUS
2077 proc_memorystatus_remove(p);
2078 #endif
2079
2080 LIST_REMOVE(p, p_list);
2081 LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2082 /* will not be visible via proc_find */
2083 os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2084
2085 proc_list_unlock();
2086
2087 /*
2088 * If parent is waiting for us to exit or exec,
2089 * P_LPPWAIT is set; we will wakeup the parent below.
2090 */
2091 proc_lock(p);
2092 p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2093 p->p_sigignore = ~(sigcantmask);
2094
2095 /*
2096 * If a thread is already waiting for us in proc_exit,
2097 * P_LTERM is set, wakeup the thread.
2098 */
2099 if (p->p_lflag & P_LTERM) {
2100 wakeup(&p->exit_thread);
2101 } else {
2102 p->p_lflag |= P_LTERM;
2103 }
2104
2105 /* If current proc is exiting, ignore signals on the exit thread */
2106 if (p == current_proc()) {
2107 ut->uu_siglist = 0;
2108 }
2109 proc_unlock(p);
2110 }
2111
2112 void
2113 proc_exit(proc_t p)
2114 {
2115 proc_t q;
2116 proc_t pp;
2117 struct task *task = proc_task(p);
2118 vnode_t tvp = NULLVP;
2119 struct pgrp * pg;
2120 struct session *sessp;
2121 struct uthread * uth;
2122 pid_t pid;
2123 int exitval;
2124 int knote_hint;
2125
2126 uth = current_uthread();
2127
2128 proc_lock(p);
2129 proc_transstart(p, 1, 0);
2130 if (!(p->p_lflag & P_LEXIT)) {
2131 /*
2132 * This can happen if a thread_terminate() occurs
2133 * in a single-threaded process.
2134 */
2135 p->p_lflag |= P_LEXIT;
2136 proc_transend(p, 1);
2137 proc_unlock(p);
2138 proc_prepareexit(p, 0, TRUE);
2139 (void) task_terminate_internal(task);
2140 proc_lock(p);
2141 } else if (!(p->p_lflag & P_LTERM)) {
2142 proc_transend(p, 1);
2143 /* Jetsam is in the middle of calling proc_prepareexit, wait for it */
2144 p->p_lflag |= P_LTERM;
2145 msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2146 } else {
2147 proc_transend(p, 1);
2148 }
2149
2150 p->p_lflag |= P_LPEXIT;
2151
2152 /*
2153 * Other kernel threads may be in the middle of signalling this process.
2154 * Wait for those threads to wrap it up before making the process
2155 * disappear on them.
2156 */
2157 if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2158 p->p_sigwaitcnt++;
2159 while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2160 msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2161 }
2162 p->p_sigwaitcnt--;
2163 }
2164
2165 proc_unlock(p);
2166 pid = proc_getpid(p);
2167 exitval = p->p_xstat;
2168 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2169 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2170 pid, exitval, 0, 0, 0);
2171
2172 #if DEVELOPMENT || DEBUG
2173 proc_exit_lpexit_check(pid, PELS_POS_START);
2174 #endif
2175
2176 #if CONFIG_DTRACE
2177 dtrace_proc_exit(p);
2178 #endif
2179
2180 proc_refdrain(p);
2181 /* We now have unique ref to the proc */
2182
2183 /* if any pending cpu limits action, clear it */
2184 task_clear_cpuusage(proc_task(p), TRUE);
2185
2186 workq_mark_exiting(p);
2187
2188 /*
2189 * need to cancel async IO requests that can be cancelled and wait for those
2190 * already active. MAY BLOCK!
2191 */
2192 _aio_exit( p );
2193
2194 /*
2195 * Close open files and release open-file table.
2196 * This may block!
2197 */
2198 fdt_invalidate(p);
2199
2200 /*
2201 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2202 * workqueue.
2203 */
2204 workq_exit(p);
2205
2206 if (uth->uu_lowpri_window) {
2207 /*
2208 * task is marked as a low priority I/O type
2209 * and the I/O we issued while flushing files on close
2210 * collided with normal I/O operations...
2211 * no need to throttle this thread since it's going away,
2212 * but we do need to update our bookkeeping w.r.t. throttled threads
2213 */
2214 throttle_lowpri_io(0);
2215 }
2216
2217 if (p->p_lflag & P_LNSPACE_RESOLVER) {
2218 /*
2219 * The namespace resolver is exiting; there may be
2220 * outstanding materialization requests to clean up.
2221 */
2222 nspace_resolver_exited(p);
2223 }
2224
2225 #if SYSV_SHM
2226 /* Close ref SYSV Shared memory */
2227 if (p->vm_shm) {
2228 shmexit(p);
2229 }
2230 #endif
2231 #if SYSV_SEM
2232 /* Release SYSV semaphores */
2233 semexit(p);
2234 #endif
2235
2236 #if PSYNCH
2237 pth_proc_hashdelete(p);
2238 #endif /* PSYNCH */
2239
2240 pg = proc_pgrp(p, &sessp);
2241 if (SESS_LEADER(p, sessp)) {
2242 if (sessp->s_ttyvp != NULLVP) {
2243 struct vnode *ttyvp;
2244 int ttyvid;
2245 int cttyflag = 0;
2246 struct vfs_context context;
2247 struct tty *tp;
2248 struct pgrp *tpgrp = PGRP_NULL;
2249
2250 /*
2251 * Controlling process.
2252 * Signal foreground pgrp,
2253 * drain controlling terminal
2254 * and revoke access to controlling terminal.
2255 */
2256
2257 proc_list_lock(); /* prevent any t_pgrp from changing */
2258 session_lock(sessp);
2259 if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2260 tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2261 }
2262 proc_list_unlock();
2263
2264 if (tpgrp != PGRP_NULL) {
2265 session_unlock(sessp);
2266 pgsignal(tpgrp, SIGHUP, 1);
2267 pgrp_rele(tpgrp);
2268 session_lock(sessp);
2269 }
2270
2271 cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2272 S_CTTYREF, relaxed) & S_CTTYREF);
2273 ttyvp = sessp->s_ttyvp;
2274 ttyvid = sessp->s_ttyvid;
2275 tp = session_clear_tty_locked(sessp);
2276 if (ttyvp) {
2277 vnode_hold(ttyvp);
2278 }
2279 session_unlock(sessp);
2280
2281 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2282 if (tp != TTY_NULL) {
2283 tty_lock(tp);
2284 (void) ttywait(tp);
2285 tty_unlock(tp);
2286 }
2287
2288 context.vc_thread = NULL;
2289 context.vc_ucred = kauth_cred_proc_ref(p);
2290 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2291 if (cttyflag) {
2292 /*
2293 * Release the extra usecount taken in cttyopen.
2294 * usecount should be released after VNOP_REVOKE is called.
2295 * This usecount was taken to ensure that
2296 * the VNOP_REVOKE results in a close to
2297 * the tty since cttyclose is a no-op.
2298 */
2299 vnode_rele(ttyvp);
2300 }
2301 vnode_put(ttyvp);
2302 kauth_cred_unref(&context.vc_ucred);
2303 vnode_drop(ttyvp);
2304 ttyvp = NULLVP;
2305 }
2306 if (ttyvp) {
2307 vnode_drop(ttyvp);
2308 }
2309 if (tp) {
2310 ttyfree(tp);
2311 }
2312 }
2313 session_lock(sessp);
2314 sessp->s_leader = NULL;
2315 session_unlock(sessp);
2316 }
2317
2318 if (!proc_is_shadow(p)) {
2319 fixjobc(p, pg, 0);
2320 }
2321 pgrp_rele(pg);
2322
2323 /*
2324 * Change RLIMIT_FSIZE for accounting/debugging.
2325 */
2326 proc_limitsetcur_fsize(p, RLIM_INFINITY);
2327
2328 (void)acct_process(p);
2329
2330 proc_list_lock();
2331
2332 if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2333 p->p_listflag &= ~P_LIST_EXITCOUNT;
2334 proc_shutdown_exitcount--;
2335 if (proc_shutdown_exitcount == 0) {
2336 wakeup(&proc_shutdown_exitcount);
2337 }
2338 }
2339
2340 /* wait till parentrefs are dropped and grant no more */
2341 proc_childdrainstart(p);
2342 while ((q = p->p_children.lh_first) != NULL) {
2343 if (q->p_stat == SZOMB) {
2344 if (p != q->p_pptr) {
2345 panic("parent child linkage broken");
2346 }
2347 /* check for sysctl zomb lookup */
2348 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2349 msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2350 }
2351 q->p_listflag |= P_LIST_WAITING;
2352 /*
2353 * This is a named reference and it is not granted
2354 * if the reap is already in progress. So we get
2355 * the reference here exclusively and there can be
2356 * no waiters. So there is no need for a wakeup
2357 * after we are done. Also the reap frees the structure
2358 * and the proc struct cannot be used for wakeups as well.
2359 * It is safe to use q here as this is a system reap
2360 */
2361 reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2362 REAP_REPARENTED_TO_INIT : 0;
2363 reap_child_locked(p, q,
2364 REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2365 } else {
2366 /*
2367 * Traced processes are killed
2368 * since their existence means someone is messing up.
2369 */
2370 if (q->p_lflag & P_LTRACED) {
2371 struct proc *opp;
2372
2373 /*
2374 * Take a reference on the child process to
2375 * ensure it doesn't exit and disappear between
2376 * the time we drop the list_lock and attempt
2377 * to acquire its proc_lock.
2378 */
2379 if (proc_ref(q, true) != q) {
2380 continue;
2381 }
2382
2383 proc_list_unlock();
2384
2385 opp = proc_find(q->p_oppid);
2386 if (opp != PROC_NULL) {
2387 proc_list_lock();
2388 q->p_oppid = 0;
2389 proc_list_unlock();
2390 proc_reparentlocked(q, opp, 0, 0);
2391 proc_rele(opp);
2392 } else {
2393 /* original parent exited while traced */
2394 proc_list_lock();
2395 q->p_listflag |= P_LIST_DEADPARENT;
2396 q->p_oppid = 0;
2397 proc_list_unlock();
2398 proc_reparentlocked(q, initproc, 0, 0);
2399 }
2400
2401 proc_lock(q);
2402 q->p_lflag &= ~P_LTRACED;
2403
2404 if (q->sigwait_thread) {
2405 thread_t thread = q->sigwait_thread;
2406
2407 proc_unlock(q);
2408 /*
2409 * The sigwait_thread could be stopped at a
2410 * breakpoint. Wake it up to kill.
2411 * Need to do this as it could be a thread which is not
2412 * the first thread in the task. So any attempt to kill
2413 * the process would result in a deadlock on q->sigwait.
2414 */
2415 thread_resume(thread);
2416 clear_wait(thread, THREAD_INTERRUPTED);
2417 threadsignal(thread, SIGKILL, 0, TRUE);
2418 } else {
2419 proc_unlock(q);
2420 }
2421
2422 psignal(q, SIGKILL);
2423 proc_list_lock();
2424 proc_rele(q);
2425 } else {
2426 q->p_listflag |= P_LIST_DEADPARENT;
2427 proc_reparentlocked(q, initproc, 0, 1);
2428 }
2429 }
2430 }
2431
2432 proc_childdrainend(p);
2433 proc_list_unlock();
2434
2435 #if CONFIG_MACF
2436 if (!proc_is_shadow(p)) {
2437 /*
2438 * Notify MAC policies that proc is dead.
2439 * This should be replaced with proper label management
2440 * (rdar://problem/32126399).
2441 */
2442 mac_proc_notify_exit(p);
2443 }
2444 #endif
2445
2446 /*
2447 * Release reference to text vnode
2448 */
2449 tvp = p->p_textvp;
2450 p->p_textvp = NULL;
2451 if (tvp != NULLVP) {
2452 vnode_rele(tvp);
2453 }
2454
2455 /*
2456 * Save exit status and final rusage info, adding in child rusage
2457 * info and self times. If we were unable to allocate a zombie
2458 * structure, this information is lost.
2459 */
2460 if (p->p_ru != NULL) {
2461 calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2462 p->p_ru->ru = p->p_stats->p_ru;
2463
2464 ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2465 }
2466
2467 /*
2468 * Free up profiling buffers.
2469 */
2470 {
2471 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2472
2473 p1 = p0->pr_next;
2474 p0->pr_next = NULL;
2475 p0->pr_scale = 0;
2476
2477 for (; p1 != NULL; p1 = pn) {
2478 pn = p1->pr_next;
2479 kfree_type(struct uprof, p1);
2480 }
2481 }
2482
2483 proc_free_realitimer(p);
2484
2485 /*
2486 * Other substructures are freed from wait().
2487 */
2488 zfree(proc_stats_zone, p->p_stats);
2489 p->p_stats = NULL;
2490
2491 if (p->p_subsystem_root_path) {
2492 zfree(ZV_NAMEI, p->p_subsystem_root_path);
2493 p->p_subsystem_root_path = NULL;
2494 }
2495
2496 proc_limitdrop(p);
2497
2498 #if DEVELOPMENT || DEBUG
2499 proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2500 #endif
2501
2502 /*
2503 * Finish up by terminating the task
2504 * and halting this thread (only if it is a
2505 * member of the exiting task).
2506 */
2507 proc_set_task(p, TASK_NULL);
2508 set_bsdtask_info(task, NULL);
2509 clear_thread_ro_proc(get_machthread(uth));
2510
2511 #if DEVELOPMENT || DEBUG
2512 proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2513 #endif
2514
2515 knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2516 proc_knote(p, knote_hint);
2517
2518 /* mark the thread as the one that is doing proc_exit
2519 * no need to hold proc lock in uthread_free
2520 */
2521 uth->uu_flag |= UT_PROCEXIT;
2522 /*
2523 * Notify parent that we're gone.
2524 */
2525 pp = proc_parent(p);
2526 if (proc_is_shadow(p)) {
2527 /* kernel can reap this one, no need to move it to launchd */
2528 proc_list_lock();
2529 p->p_listflag |= P_LIST_DEADPARENT;
2530 proc_list_unlock();
2531 } else if (pp->p_flag & P_NOCLDWAIT) {
2532 if (p->p_ru != NULL) {
2533 proc_lock(pp);
2534 #if 3839178
2535 /*
2536 * If the parent is ignoring SIGCHLD, then POSIX requires
2537 * us to not add the resource usage to the parent process -
2538 * we are only going to hand it off to init to get reaped.
2539 * We should contest the standard in this case on the basis
2540 * of RLIMIT_CPU.
2541 */
2542 #else /* !3839178 */
2543 /*
2544 * Add child resource usage to parent before giving
2545 * zombie to init. If we were unable to allocate a
2546 * zombie structure, this information is lost.
2547 */
2548 ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2549 #endif /* !3839178 */
2550 update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2551 proc_unlock(pp);
2552 }
2553
2554 /* kernel can reap this one, no need to move it to launchd */
2555 proc_list_lock();
2556 p->p_listflag |= P_LIST_DEADPARENT;
2557 proc_list_unlock();
2558 }
2559 if (!proc_is_shadow(p) &&
2560 ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2561 if (pp != initproc) {
2562 proc_lock(pp);
2563 pp->si_pid = proc_getpid(p);
2564 pp->p_xhighbits = p->p_xhighbits;
2565 p->p_xhighbits = 0;
2566 pp->si_status = p->p_xstat;
2567 pp->si_code = CLD_EXITED;
2568 /*
2569 * p_ucred usage is safe as it is an exiting process
2570 * and reference is dropped in reap
2571 */
2572 pp->si_uid = kauth_cred_getruid(proc_ucred_unsafe(p));
2573 proc_unlock(pp);
2574 }
2575 /* mark as a zombie */
2576 /* No need to take proc lock as all refs are drained and
2577 * no one except parent (reaping) can look at this.
2578 * The write is to an int and is coherent. Also parent is
2579 * keyed off of list lock for reaping
2580 */
2581 DTRACE_PROC2(exited, proc_t, p, int, exitval);
2582 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2583 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2584 pid, exitval, 0, 0, 0);
2585 p->p_stat = SZOMB;
2586 /*
2587 * The current process can be reaped, so no one
2588 * can depend on this
2589 */
2590
2591 psignal(pp, SIGCHLD);
2592
2593 /* and now wakeup the parent */
2594 proc_list_lock();
2595 wakeup((caddr_t)pp);
2596 proc_list_unlock();
2597 } else {
2598 /* should be fine as parent proc would be initproc */
2599 /* mark as a zombie */
2600 /* No need to take proc lock as all refs are drained and
2601 * no one except parent (reaping) can look at this.
2602 * The write is to an int and is coherent. Also parent is
2603 * keyed off of list lock for reaping
2604 */
2605 DTRACE_PROC2(exited, proc_t, p, int, exitval);
2606 proc_list_lock();
2607 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2608 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2609 pid, exitval, 0, 0, 0);
2610 /* check for sysctl zomb lookup */
2611 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2612 msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2613 }
2614 /* safe to use p as this is a system reap */
2615 p->p_stat = SZOMB;
2616 p->p_listflag |= P_LIST_WAITING;
2617
2618 /*
2619 * This is a named reference and it is not granted
2620 * if the reap is already in progress. So we get
2621 * the reference here exclusively and there can be
2622 * no waiters. So there is no need for a wakeup
2623 * after we are done. Also the reap frees the structure
2624 * and the proc struct cannot be used for wakeups as well.
2625 * It is safe to use p here as this is a system reap
2626 */
2627 reap_child_locked(pp, p,
2628 REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2629 }
2630 if (uth->uu_lowpri_window) {
2631 /*
2632 * task is marked as a low priority I/O type and we've
2633 * somehow picked up another throttle during exit processing...
2634 * no need to throttle this thread since it's going away,
2635 * but we do need to update our bookkeeping w.r.t. throttled threads
2636 */
2637 throttle_lowpri_io(0);
2638 }
2639
2640 proc_rele(pp);
2641 #if DEVELOPMENT || DEBUG
2642 proc_exit_lpexit_check(pid, PELS_POS_END);
2643 #endif
2644 }
2645
2646
2647 /*
2648 * reap_child_locked
2649 *
2650 * Finalize a child exit once its status has been saved.
2651 *
2652 * If ptrace has attached, detach it and return it to its real parent. Free any
2653 * remaining resources.
2654 *
2655 * Parameters:
2656 * - proc_t parent Parent of process being reaped
2657 * - proc_t child Process to reap
2658 * - reap_flags_t flags Control locking and re-parenting behavior
2659 */
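/*
 * Flag semantics, as inferred from the call sites in this file (illustrative,
 * not authoritative): REAP_LOCKED means the caller holds proc_list_lock on
 * entry; REAP_DROP_LOCK asks us not to re-take it before returning;
 * REAP_REPARENTED_TO_INIT records that the child had already been re-parented
 * to initproc; REAP_DEAD_PARENT is used for the reaps driven from proc_exit().
 */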
2660 static void
2661 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2662 {
2663 struct pgrp *pg;
2664 boolean_t shadow_proc = proc_is_shadow(child);
2665
2666 if (flags & REAP_LOCKED) {
2667 proc_list_unlock();
2668 }
2669
2670 /*
2671 * Under ptrace, the child should now be re-parented back to its original
2672 * parent, unless that parent was initproc or it didn't come to initproc
2673 * through re-parenting.
2674 */
2675 bool child_ptraced = child->p_oppid != 0;
2676 if (!shadow_proc && child_ptraced) {
2677 int knote_hint;
2678 pid_t orig_ppid = 0;
2679 proc_t orig_parent = PROC_NULL;
2680
2681 proc_lock(child);
2682 orig_ppid = child->p_oppid;
2683 child->p_oppid = 0;
2684 knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2685 proc_unlock(child);
2686
2687 orig_parent = proc_find(orig_ppid);
2688 if (orig_parent) {
2689 /*
2690 * Only re-parent the process if its original parent was not
2691 * initproc and it did not come to initproc from re-parenting.
2692 */
2693 bool reparenting = orig_parent != initproc ||
2694 (flags & REAP_REPARENTED_TO_INIT) == 0;
2695 if (reparenting) {
2696 if (orig_parent != initproc) {
2697 /*
2698 * Internal fields should be safe to access here because the
2699 * child is exited and not reaped or re-parented yet.
2700 */
2701 proc_lock(orig_parent);
2702 orig_parent->si_pid = proc_getpid(child);
2703 orig_parent->si_status = child->p_xstat;
2704 orig_parent->si_code = CLD_CONTINUED;
2705 orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2706 proc_unlock(orig_parent);
2707 }
2708 proc_reparentlocked(child, orig_parent, 1, 0);
2709
2710 /*
2711 * After re-parenting, re-send the child's NOTE_EXIT to the
2712 * original parent.
2713 */
2714 proc_knote(child, knote_hint);
2715 psignal(orig_parent, SIGCHLD);
2716
2717 proc_list_lock();
2718 wakeup((caddr_t)orig_parent);
2719 child->p_listflag &= ~P_LIST_WAITING;
2720 wakeup(&child->p_stat);
2721 proc_list_unlock();
2722
2723 proc_rele(orig_parent);
2724 if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2725 proc_list_lock();
2726 }
2727 return;
2728 } else {
2729 /*
2730 * Satisfy the knote lifecycle because ptraced processes don't
2731 * broadcast NOTE_EXIT during initial child termination.
2732 */
2733 proc_knote(child, knote_hint);
2734 proc_rele(orig_parent);
2735 }
2736 }
2737 }
2738
2739 #pragma clang diagnostic push
2740 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2741 proc_knote(child, NOTE_REAP);
2742 #pragma clang diagnostic pop
2743
2744 proc_knote_drain(child);
2745
2746 child->p_xstat = 0;
2747 if (!shadow_proc && child->p_ru) {
2748 /*
2749 * Roll up the rusage statistics to the parent, unless the parent is
2750 * ignoring SIGCHLD. POSIX requires the children's resources of such a
2751 * parent to not be included in the parent's usage (seems odd given
2752 * RLIMIT_CPU, though).
2753 */
2754 proc_lock(parent);
2755 bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2756 if (rollup_child) {
2757 ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2758 }
2759 update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2760 proc_unlock(parent);
2761 zfree(zombie_zone, child->p_ru);
2762 child->p_ru = NULL;
2763 } else if (!shadow_proc) {
2764 printf("Warning : lost p_ru for %s\n", child->p_comm);
2765 } else {
2766 assert(child->p_ru == NULL);
2767 }
2768
2769 AUDIT_SESSION_PROCEXIT(child);
2770
2771 #if CONFIG_PERSONAS
2772 persona_proc_drop(child);
2773 #endif /* CONFIG_PERSONAS */
2774 /* proc_ucred_unsafe is safe, because child is not running */
2775 (void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2776
2777 os_reason_free(child->p_exit_reason);
2778
2779 proc_list_lock();
2780
2781 pg = pgrp_leave_locked(child);
2782 LIST_REMOVE(child, p_list);
2783 parent->p_childrencnt--;
2784 LIST_REMOVE(child, p_sibling);
2785 bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2786 LIST_EMPTY(&parent->p_children);
2787 if (no_more_children) {
2788 wakeup((caddr_t)parent);
2789 }
2790 child->p_listflag &= ~P_LIST_WAITING;
2791 wakeup(&child->p_stat);
2792
2793 /* Take it out of process hash */
2794 if (!shadow_proc) {
2795 phash_remove_locked(child);
2796 }
2797 proc_checkdeadrefs(child);
2798 nprocs--;
2799 if (flags & REAP_DEAD_PARENT) {
2800 child->p_listflag |= P_LIST_DEADPARENT;
2801 }
2802
2803 proc_list_unlock();
2804
2805 pgrp_rele(pg);
2806 fdt_destroy(child);
2807 lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2808 lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2809 #if CONFIG_AUDIT
2810 lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
2811 #endif /* CONFIG_AUDIT */
2812 #if CONFIG_DTRACE
2813 lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2814 #endif
2815 lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2816 proc_wait_release(child);
2817
2818 if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2819 proc_list_lock();
2820 }
2821 }
2822
2823 int
2824 wait1continue(int result)
2825 {
2826 proc_t p;
2827 thread_t thread;
2828 uthread_t uth;
2829 struct _wait4_data *wait4_data;
2830 struct wait4_nocancel_args *uap;
2831 int *retval;
2832
2833 if (result) {
2834 return result;
2835 }
2836
2837 p = current_proc();
2838 thread = current_thread();
2839 uth = (struct uthread *)get_bsdthread_info(thread);
2840
2841 wait4_data = &uth->uu_save.uus_wait4_data;
2842 uap = wait4_data->args;
2843 retval = wait4_data->retval;
2844 return wait4_nocancel(p, uap, retval);
2845 }
2846
2847 int
2848 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2849 {
2850 __pthread_testcancel(1);
2851 return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2852 }
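/*
 * Minimal userspace sketch (illustrative): the waitpid()/wait4() family lands
 * here, e.g.
 *
 *	int status;
 *	pid_t pid = waitpid(-1, &status, WUNTRACED);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("child %d exited with %d\n", pid, WEXITSTATUS(status));
 */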
2853
2854 int
2855 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2856 {
2857 int nfound;
2858 int sibling_count;
2859 proc_t p;
2860 int status, error;
2861 uthread_t uth;
2862 struct _wait4_data *wait4_data;
2863
2864 AUDIT_ARG(pid, uap->pid);
2865
2866 if (uap->pid == 0) {
2867 uap->pid = -q->p_pgrpid;
2868 }
2869
2870 if (uap->pid == INT_MIN) {
2871 return EINVAL;
2872 }
2873
2874 loop:
2875 proc_list_lock();
2876 loop1:
2877 nfound = 0;
2878 sibling_count = 0;
2879
2880 PCHILDREN_FOREACH(q, p) {
2881 if (p->p_sibling.le_next != 0) {
2882 sibling_count++;
2883 }
2884 if (uap->pid != WAIT_ANY &&
2885 proc_getpid(p) != uap->pid &&
2886 p->p_pgrpid != -(uap->pid)) {
2887 continue;
2888 }
2889
2890 if (proc_is_shadow(p)) {
2891 continue;
2892 }
2893
2894 nfound++;
2895
2896 /* XXX This is racy because we don't get the lock!!!! */
2897
2898 if (p->p_listflag & P_LIST_WAITING) {
2899 /* we're not using a continuation here but we still need to stash
2900 * the args for stackshot. */
2901 uth = current_uthread();
2902 wait4_data = &uth->uu_save.uus_wait4_data;
2903 wait4_data->args = uap;
2904 thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2905
2906 (void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2907 goto loop1;
2908 }
2909 p->p_listflag |= P_LIST_WAITING; /* only allow single thread to wait() */
2910
2911
2912 if (p->p_stat == SZOMB) {
2913 reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2914 REAP_REPARENTED_TO_INIT : 0;
2915
2916 proc_list_unlock();
2917 #if CONFIG_MACF
2918 if ((error = mac_proc_check_wait(q, p)) != 0) {
2919 goto out;
2920 }
2921 #endif
2922 retval[0] = proc_getpid(p);
2923 if (uap->status) {
2924 /* Legacy apps expect only 8 bits of status */
2925 status = 0xffff & p->p_xstat; /* convert to int */
2926 error = copyout((caddr_t)&status,
2927 uap->status,
2928 sizeof(status));
2929 if (error) {
2930 goto out;
2931 }
2932 }
2933 if (uap->rusage) {
2934 if (p->p_ru == NULL) {
2935 error = ENOMEM;
2936 } else {
2937 if (IS_64BIT_PROCESS(q)) {
2938 struct user64_rusage my_rusage = {};
2939 munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2940 error = copyout((caddr_t)&my_rusage,
2941 uap->rusage,
2942 sizeof(my_rusage));
2943 } else {
2944 struct user32_rusage my_rusage = {};
2945 munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2946 error = copyout((caddr_t)&my_rusage,
2947 uap->rusage,
2948 sizeof(my_rusage));
2949 }
2950 }
2951 /* information unavailable? */
2952 if (error) {
2953 goto out;
2954 }
2955 }
2956
2957 /* Conformance change for 6577252.
2958 * When SIGCHLD is blocked and wait() returns because the status
2959 * of a child process is available and there are no other
2960 * child processes, then any pending SIGCHLD signal is cleared.
2961 */
2962 if (sibling_count == 0) {
2963 int mask = sigmask(SIGCHLD);
2964 uth = current_uthread();
2965
2966 if ((uth->uu_sigmask & mask) != 0) {
2967 /* we are blocking SIGCHLD signals. clear any pending SIGCHLD.
2968 * This locking looks funny but it is protecting access to the
2969 * thread via p_uthlist.
2970 */
2971 proc_lock(q);
2972 uth->uu_siglist &= ~mask; /* clear pending signal */
2973 proc_unlock(q);
2974 }
2975 }
2976
2977 /* Clean up */
2978 (void)reap_child_locked(q, p, reap_flags);
2979
2980 return 0;
2981 }
2982 if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
2983 (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
2984 proc_list_unlock();
2985 #if CONFIG_MACF
2986 if ((error = mac_proc_check_wait(q, p)) != 0) {
2987 goto out;
2988 }
2989 #endif
2990 proc_lock(p);
2991 p->p_lflag |= P_LWAITED;
2992 proc_unlock(p);
2993 retval[0] = proc_getpid(p);
2994 if (uap->status) {
2995 status = W_STOPCODE(p->p_xstat);
2996 error = copyout((caddr_t)&status,
2997 uap->status,
2998 sizeof(status));
2999 } else {
3000 error = 0;
3001 }
3002 goto out;
3003 }
3004 /*
3005 * If we are waiting for continued processes, and this
3006 * process was continued
3007 */
3008 if ((uap->options & WCONTINUED) &&
3009 (p->p_flag & P_CONTINUED)) {
3010 proc_list_unlock();
3011 #if CONFIG_MACF
3012 if ((error = mac_proc_check_wait(q, p)) != 0) {
3013 goto out;
3014 }
3015 #endif
3016
3017 /* Prevent other processes from waiting for this event */
3018 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
3019 retval[0] = proc_getpid(p);
3020 if (uap->status) {
3021 status = W_STOPCODE(SIGCONT);
3022 error = copyout((caddr_t)&status,
3023 uap->status,
3024 sizeof(status));
3025 } else {
3026 error = 0;
3027 }
3028 goto out;
3029 }
3030 p->p_listflag &= ~P_LIST_WAITING;
3031 wakeup(&p->p_stat);
3032 }
3033 /* list lock is held when we get here any which way */
3034 if (nfound == 0) {
3035 proc_list_unlock();
3036 return ECHILD;
3037 }
3038
3039 if (uap->options & WNOHANG) {
3040 retval[0] = 0;
3041 proc_list_unlock();
3042 return 0;
3043 }
3044
3045 /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
3046 uth = current_uthread();
3047 wait4_data = &uth->uu_save.uus_wait4_data;
3048 wait4_data->args = uap;
3049 wait4_data->retval = retval;
3050
3051 thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
3052 if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
3053 return error;
3054 }
3055
3056 goto loop;
3057 out:
3058 proc_list_lock();
3059 p->p_listflag &= ~P_LIST_WAITING;
3060 wakeup(&p->p_stat);
3061 proc_list_unlock();
3062 return error;
3063 }
3064
3065 #if DEBUG
3066 #define ASSERT_LCK_MTX_OWNED(lock) \
3067 lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3068 #else
3069 #define ASSERT_LCK_MTX_OWNED(lock) /* nothing */
3070 #endif
3071
3072 int
3073 waitidcontinue(int result)
3074 {
3075 proc_t p;
3076 thread_t thread;
3077 uthread_t uth;
3078 struct _waitid_data *waitid_data;
3079 struct waitid_nocancel_args *uap;
3080 int *retval;
3081
3082 if (result) {
3083 return result;
3084 }
3085
3086 p = current_proc();
3087 thread = current_thread();
3088 uth = (struct uthread *)get_bsdthread_info(thread);
3089
3090 waitid_data = &uth->uu_save.uus_waitid_data;
3091 uap = waitid_data->args;
3092 retval = waitid_data->retval;
3093 return waitid_nocancel(p, uap, retval);
3094 }
3095
3096 /*
3097 * Description: Suspend the calling thread until one child of the process
3098 * containing the calling thread changes state.
3099 *
3100 * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL
3101 * uap->id pid_t or gid_t or ignored
3102 * uap->infop Address of siginfo_t struct in
3103 * user space into which to return status
3104 * uap->options flag values
3105 *
3106 * Returns: 0 Success
3107 * !0 Error returning status to user space
3108 */
3109 int
3110 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3111 {
3112 __pthread_testcancel(1);
3113 return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3114 }
3115
3116 int
3117 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3118 __unused int32_t *retval)
3119 {
3120 user_siginfo_t siginfo; /* siginfo data to return to caller */
3121 boolean_t caller64 = IS_64BIT_PROCESS(q);
3122 int nfound;
3123 proc_t p;
3124 int error;
3125 uthread_t uth;
3126 struct _waitid_data *waitid_data;
3127
3128 if (uap->options == 0 ||
3129 (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3130 return EINVAL; /* bits set that aren't recognized */
3131 }
3132 switch (uap->idtype) {
3133 case P_PID: /* child with process ID equal to... */
3134 case P_PGID: /* child with process group ID equal to... */
3135 if (((int)uap->id) < 0) {
3136 return EINVAL;
3137 }
3138 break;
3139 case P_ALL: /* any child */
3140 break;
3141 }
3142
3143 loop:
3144 proc_list_lock();
3145 loop1:
3146 nfound = 0;
3147
3148 PCHILDREN_FOREACH(q, p) {
3149 switch (uap->idtype) {
3150 case P_PID: /* child with process ID equal to... */
3151 if (proc_getpid(p) != (pid_t)uap->id) {
3152 continue;
3153 }
3154 break;
3155 case P_PGID: /* child with process group ID equal to... */
3156 if (p->p_pgrpid != (pid_t)uap->id) {
3157 continue;
3158 }
3159 break;
3160 case P_ALL: /* any child */
3161 break;
3162 }
3163
3164 if (proc_is_shadow(p)) {
3165 continue;
3166 }
3167 /* XXX This is racy because we don't get the lock!!!! */
3168
		/*
		 * Wait collision: another thread is already waiting on this
		 * process, so sleep and restart the scan; this preserves the
		 * guarantee that status for a waited process is returned once.
		 */
		if (p->p_listflag & P_LIST_WAITING) {
			(void) msleep(&p->p_stat, &proc_list_mlock,
			    PWAIT, "waitidcoll", 0);
			goto loop1;
		}
		p->p_listflag |= P_LIST_WAITING;	/* mark busy */

		nfound++;

		bzero(&siginfo, sizeof(siginfo));

		switch (p->p_stat) {
		case SZOMB:	/* Exited */
			if (!(uap->options & WEXITED)) {
				break;
			}
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			siginfo.si_signo = SIGCHLD;
			siginfo.si_pid = proc_getpid(p);

			/* If the child terminated abnormally due to a signal, the signum
			 * needs to be preserved in the exit status.
			 */
			if (WIFSIGNALED(p->p_xstat)) {
				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
				    CLD_DUMPED : CLD_KILLED;
				siginfo.si_status = WTERMSIG(p->p_xstat);
			} else {
				siginfo.si_code = CLD_EXITED;
				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
			}
			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
			p->p_xhighbits = 0;

			if ((error = copyoutsiginfo(&siginfo,
			    caller64, uap->infop)) != 0) {
				goto out;
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				reap_child_locked(q, p, 0);
				return 0;
			}
			goto out;

		case SSTOP:	/* Stopped */
			/*
			 * If we are not interested in stopped processes, then
			 * ignore this one.
			 */
			if (!(uap->options & WSTOPPED)) {
				break;
			}

			/*
			 * If someone has already waited for it, we lost a race
			 * to be the one to return status.
			 */
			if ((p->p_lflag & P_LWAITED) != 0) {
				break;
			}
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			siginfo.si_signo = SIGCHLD;
			siginfo.si_pid = proc_getpid(p);
			siginfo.si_status = p->p_xstat;	/* signal number */
			siginfo.si_code = CLD_STOPPED;

			if ((error = copyoutsiginfo(&siginfo,
			    caller64, uap->infop)) != 0) {
				goto out;
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				proc_lock(p);
				p->p_lflag |= P_LWAITED;
				proc_unlock(p);
			}
			goto out;

		default:	/* All other states => Continued */
			if (!(uap->options & WCONTINUED)) {
				break;
			}

			/*
			 * If the flag isn't set, then this process has not
			 * been stopped and continued, or the status has
			 * already been reaped by another caller of waitid().
			 */
			if ((p->p_flag & P_CONTINUED) == 0) {
				break;
			}
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			siginfo.si_signo = SIGCHLD;
			siginfo.si_code = CLD_CONTINUED;
			proc_lock(p);
			siginfo.si_pid = p->p_contproc;
			siginfo.si_status = p->p_xstat;
			proc_unlock(p);

			if ((error = copyoutsiginfo(&siginfo,
			    caller64, uap->infop)) != 0) {
				goto out;
			}

			/* Prevent other processes from waiting for this event? */
			if (!(uap->options & WNOWAIT)) {
				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
				    &p->p_flag);
			}
			goto out;
		}
		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);

		/* Not a process we are interested in; go on to next child */

		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);

	/* No child processes that could possibly satisfy the request? */

	if (nfound == 0) {
		proc_list_unlock();
		return ECHILD;
	}

	if (uap->options & WNOHANG) {
		proc_list_unlock();
#if CONFIG_MACF
		if ((error = mac_proc_check_wait(q, p)) != 0) {
			return error;
		}
#endif
		/*
		 * The state of the siginfo structure in this case
		 * is undefined.  Some implementations bzero it, some
		 * (like here) leave it untouched for efficiency.
		 *
		 * Thus the most portable check for "no matching pid with
		 * WNOHANG" is to store a zero into si_pid before
		 * invocation, then check for a non-zero value afterwards.
		 */
		return 0;
	}

	/* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
	uth = current_uthread();
	waitid_data = &uth->uu_save.uus_waitid_data;
	waitid_data->args = uap;
	waitid_data->retval = retval;

	if ((error = msleep0(q, &proc_list_mlock,
	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
		return error;
	}

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return error;
}
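
/*
 * Illustrative userspace sketch (not part of this kernel source) of the
 * portable WNOHANG check described in the comment above: because the
 * siginfo contents are undefined when no child was ready, zero si_pid
 * before the call and test it afterwards.  `child_pid` is a hypothetical
 * child of the caller.
 *
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	int
 *	child_has_exited(pid_t child_pid)
 *	{
 *		siginfo_t info;
 *
 *		memset(&info, 0, sizeof(info));		// ensures si_pid == 0
 *		if (waitid(P_PID, (id_t)child_pid, &info,
 *		    WEXITED | WNOHANG | WNOWAIT) == -1) {
 *			return -1;			// real error (e.g. ECHILD)
 *		}
 *		return info.si_pid != 0;		// non-zero => status available
 *	}
 */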

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
{
	proc_t oldparent = PROC_NULL;

	if (child->p_pptr == parent) {
		return;
	}

	if (locked == 0) {
		proc_list_lock();
	}

	oldparent = child->p_pptr;
#if __PROC_INTERNAL_DEBUG
	if (oldparent == PROC_NULL) {
		panic("proc_reparent: process %p does not have a parent", child);
	}
#endif

	LIST_REMOVE(child, p_sibling);
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt == 0) {
		panic("process children count already 0");
	}
#endif
	oldparent->p_childrencnt--;
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt < 0) {
		panic("process children count -ve");
	}
#endif
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	parent->p_childrencnt++;
	child->p_pptr = parent;
	child->p_ppid = proc_getpid(parent);

	proc_list_unlock();

	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
		psignal(initproc, SIGCHLD);
	}
	if (locked == 1) {
		proc_list_lock();
	}
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */


/*
 * munge_rusage
 *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
 *	process.  We munge the kernel version of rusage into the
 *	64 bit version.
 */
__private_extern__ void
munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
{
	/* Zero-out struct so that padding is cleared */
	bzero(a_user_rusage_p, sizeof(struct user64_rusage));

	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign, since there is no loss
	 * of precision implied going 32->64.
	 */
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}
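
/*
 * Illustrative userspace sketch (not part of this kernel source): regardless
 * of whether the caller is 32-bit or 64-bit, it always supplies its native
 * struct rusage; the kernel munges its internal representation into that
 * layout (munge_user64_rusage above, munge_user32_rusage below).
 * `child_pid` is a hypothetical child of the caller.
 *
 *	#include <sys/resource.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *
 *	void
 *	print_child_cpu_time(pid_t child_pid)
 *	{
 *		int status;
 *		struct rusage ru;	// caller's native rusage layout
 *
 *		if (wait4(child_pid, &status, 0, &ru) == child_pid) {
 *			printf("user %ld.%06d s, sys %ld.%06d s\n",
 *			    (long)ru.ru_utime.tv_sec, (int)ru.ru_utime.tv_usec,
 *			    (long)ru.ru_stime.tv_sec, (int)ru.ru_stime.tv_usec);
 *		}
 *	}
 */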

/* For a 64-bit kernel and 32-bit userspace, munging may be needed */
__private_extern__ void
munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
{
	bzero(a_user_rusage_p, sizeof(struct user32_rusage));

	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign.  We currently ignore
	 * the loss of precision
	 */
	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
}

void
kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
{
	assert(thread != NULL);
	assert(waitinfo != NULL);

	struct uthread *ut = get_bsdthread_info(thread);
	waitinfo->context = 0;
	// ensure wmesg is consistent with a thread waiting in wait4
	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
	// May not actually contain a pid; this is just the argument to wait4.
	// See man wait4 for other valid wait4 arguments.
	waitinfo->owner = args->pid;
}
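
/*
 * Note for readers of the waitinfo recorded above: the "owner" field holds
 * the raw wait4() pid argument, which encodes more than a single process ID
 * (see wait4(2)).  A hedged userspace sketch, assuming a hypothetical
 * process group `pgrp`:
 *
 *	#include <sys/wait.h>
 *	#include <stddef.h>
 *
 *	// Reap one child from process group `pgrp` without blocking.
 *	// Passing -1 instead would match any child, 0 any child in the
 *	// caller's own process group, and a positive value one specific child.
 *	pid_t
 *	reap_one_from_pgrp(pid_t pgrp, int *status)
 *	{
 *		return wait4(-pgrp, status, WNOHANG, NULL);
 *	}
 */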

int
exit_with_guard_exception(
	proc_t p,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode)
{
	os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)code);
	assert(reason != OS_REASON_NULL);

	return exit_with_mach_exception(p, reason, EXC_GUARD, code, subcode);
}

#if __has_feature(ptrauth_calls)
int
exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
    mach_exception_subcode_t subcode)
{
	os_reason_t reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code);
	assert(reason != OS_REASON_NULL);

	return exit_with_mach_exception(p, reason, exception, code, subcode);
}
#endif /* __has_feature(ptrauth_calls) */

int
exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode)
{
	os_reason_t reason = os_reason_create(OS_REASON_PORT_SPACE, (uint64_t)code);
	assert(reason != OS_REASON_NULL);

	return exit_with_mach_exception(p, reason, EXC_RESOURCE, code, subcode);
}

static int
exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception, mach_exception_code_t code,
    mach_exception_subcode_t subcode)
{
	thread_t self = current_thread();
	struct uthread *ut = get_bsdthread_info(self);

	ut->uu_exception = exception;
	ut->uu_code = code;
	ut->uu_subcode = subcode;

	reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
	return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
	    TRUE, FALSE, 0, reason);
}
