/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *    The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <machine/reg.h>
#include <machine/psl.h>
#include <stdatomic.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/proc_internal.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/tty.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/proc_info.h>
#include <sys/reason.h>
#include <sys/_types/_timeval64.h>
#include <sys/user.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/kdebug_triage.h>
#include <sys/acct.h>           /* acct_process */
#include <sys/codesign.h>
#include <sys/event.h>          /* kevent_proc_copy_uptrs */
#include <sys/sdt.h>
#include <sys/bsdtask_info.h>   /* bsd_getthreadname */
#include <sys/spawn.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/exc_resource.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <corpses/task_corpse.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <kern/sched_prim.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/policy_internal.h>
#include <kern/exc_guard.h>
#include <kern/backtrace.h>

#include <vm/vm_protos.h>
#include <os/log.h>
#include <os/system_event_log.h>

#include <pexpert/pexpert.h>

#include <kdp/kdp_dyld.h>

#if SYSV_SHM
#include <sys/shm_internal.h>   /* shmexit */
#endif /* SYSV_SHM */
#if CONFIG_PERSONAS
#include <sys/persona.h>
#endif /* CONFIG_PERSONAS */
#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif /* CONFIG_MEMORYSTATUS */
#if CONFIG_DTRACE
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
void dtrace_proc_exit(proc_t p);
#include <sys/dtrace_ptss.h>
#endif /* CONFIG_DTRACE */
#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#include <sys/syscall.h>
#endif /* CONFIG_MACF */

#if CONFIG_MEMORYSTATUS
static void proc_memorystatus_remove(proc_t p);
#endif /* CONFIG_MEMORYSTATUS */
void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
    struct rusage_superset *rup, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode, uint64_t *udata_buffer,
    int num_udata, os_reason_t reason, exception_type_t etype);
static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
extern uint64_t get_task_phys_footprint_limit(task_t);
int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);

extern unsigned int exception_log_max_pid;

extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);

/*
 * Flags for `reap_child_locked`.
 */
__options_decl(reap_flags_t, uint32_t, {
    /*
     * Parent is exiting, so the kernel is responsible for reaping children.
     */
    REAP_DEAD_PARENT = 0x01,
    /*
     * Child process was re-parented to initproc.
     */
    REAP_REPARENTED_TO_INIT = 0x02,
    /*
     * `proc_list_lock` is held on entry.
     */
    REAP_LOCKED = 0x04,
    /*
     * Drop the `proc_list_lock` on return. Note that the `proc_list_lock`
     * may be dropped (and re-acquired) internally by the function regardless.
     */
    REAP_DROP_LOCK = 0x08,
});
static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);

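/* Zone backing the rusage snapshot that each zombie keeps until it is reaped. */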
static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);

/*
 * Things which should have prototypes in headers, but don't
 */
void proc_exit(proc_t p);
int wait1continue(int result);
int waitidcontinue(int result);
kern_return_t sys_perf_notify(thread_t thread, int pid);
kern_return_t task_exception_notify(exception_type_t exception,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode);
void delay(int);

#if __has_feature(ptrauth_calls)
int exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
    mach_exception_subcode_t subcode);
#endif /* __has_feature(ptrauth_calls) */

int exit_with_guard_exception(proc_t p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode);
int exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode);
static int exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception,
    mach_exception_code_t code, mach_exception_subcode_t subcode);

#if DEVELOPMENT || DEBUG
static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
static pid_t proc_exit_lpexit_spin_pid = -1;    /* wakeup point */
static int proc_exit_lpexit_spin_pos = -1;      /* point to block */
static int proc_exit_lpexit_spinning = 0;
enum {
    PELS_POS_START = 0,          /* beginning of proc_exit */
    PELS_POS_PRE_TASK_DETACH,    /* before task/proc detach */
    PELS_POS_POST_TASK_DETACH,   /* after task/proc detach */
    PELS_POS_END,                /* end of proc_exit */
    PELS_NPOS                    /* # valid values */
};

/* Panic if matching processes (delimited by ',') exit on error. */
static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");

static int
proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    pid_t new_value;
    int changed;
    int error;

    if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
        return ENOENT;
    }

    error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
        sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
    if (error == 0 && changed != 0) {
        if (new_value < -1) {
            return EINVAL;
        }
        lck_mtx_lock(&proc_exit_lpexit_spin_lock);
        proc_exit_lpexit_spin_pid = new_value;
        wakeup(&proc_exit_lpexit_spin_pid);
        proc_exit_lpexit_spinning = 0;
        lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
    }
    return error;
}

static int
proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int new_value;
    int changed;
    int error;

    if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
        return ENOENT;
    }

    error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
        sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
    if (error == 0 && changed != 0) {
        if (new_value < -1 || new_value >= PELS_NPOS) {
            return EINVAL;
        }
        lck_mtx_lock(&proc_exit_lpexit_spin_lock);
        proc_exit_lpexit_spin_pos = new_value;
        wakeup(&proc_exit_lpexit_spin_pid);
        proc_exit_lpexit_spinning = 0;
        lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
    }
    return error;
}

static int
proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int new_value;
    int changed;
    int error;

    if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
        return ENOENT;
    }

    error = sysctl_io_number(req, proc_exit_lpexit_spinning,
        sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
    if (error == 0 && changed != 0) {
        return EINVAL;
    }
    return error;
}

SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    NULL, sizeof(pid_t),
    proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");

SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    NULL, sizeof(int),
    proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");

SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, sizeof(int),
    proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
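
/*
 * Sketch of the intended flow on a DEVELOPMENT/DEBUG kernel booted with the
 * enable_proc_exit_lpexit_spin boot-arg (values shown are illustrative, not
 * a stable interface):
 *
 *     sysctl debug.proc_exit_lpexit_spin_pos=1      # PELS_POS_PRE_TASK_DETACH
 *     sysctl debug.proc_exit_lpexit_spin_pid=<pid>  # process to hold
 *     sysctl debug.proc_exit_lpexit_spinning        # 1 once the exiting thread is parked
 *     sysctl debug.proc_exit_lpexit_spin_pid=-1     # release the waiter
 */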

static inline void
proc_exit_lpexit_check(pid_t pid, int pos)
{
    if (proc_exit_lpexit_spin_pid == pid) {
        bool slept = false;
        lck_mtx_lock(&proc_exit_lpexit_spin_lock);
        while (proc_exit_lpexit_spin_pid == pid &&
            proc_exit_lpexit_spin_pos == pos) {
            if (!slept) {
                os_log(OS_LOG_DEFAULT,
                    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
                slept = true;
            }
            proc_exit_lpexit_spinning = 1;
            msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
                PWAIT, "proc_exit_lpexit_check", NULL);
            proc_exit_lpexit_spinning = 0;
        }
        lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
        if (slept) {
            os_log(OS_LOG_DEFAULT,
                "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
        }
    }
}
#endif /* DEVELOPMENT || DEBUG */

/*
 * NOTE: Source and target may *NOT* overlap!
 * XXX Should share code with bsd/dev/ppc/unix_signal.c
 */
void
siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
{
    out->si_signo = in->si_signo;
    out->si_errno = in->si_errno;
    out->si_code = in->si_code;
    out->si_pid = in->si_pid;
    out->si_uid = in->si_uid;
    out->si_status = in->si_status;
    out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
    /* following cast works for sival_int because of padding */
    out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
    out->si_band = (user32_long_t)in->si_band;      /* range reduction */
}

void
siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
{
    out->si_signo = in->si_signo;
    out->si_errno = in->si_errno;
    out->si_code = in->si_code;
    out->si_pid = in->si_pid;
    out->si_uid = in->si_uid;
    out->si_status = in->si_status;
    out->si_addr = in->si_addr;
    /* following cast works for sival_int because of padding */
    out->si_value.sival_ptr = in->si_value.sival_ptr;
    out->si_band = in->si_band;     /* range reduction */
}

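/*
 * Copy a native siginfo out to user space in the layout matching the target
 * process's ABI, zeroing any padding bytes first.
 */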
static int
copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
{
    if (is64) {
        user64_siginfo_t sinfo64;

        bzero(&sinfo64, sizeof(sinfo64));
        siginfo_user_to_user64(native, &sinfo64);
        return copyout(&sinfo64, uaddr, sizeof(sinfo64));
    } else {
        user32_siginfo_t sinfo32;

        bzero(&sinfo32, sizeof(sinfo32));
        siginfo_user_to_user32(native, &sinfo32);
        return copyout(&sinfo32, uaddr, sizeof(sinfo32));
    }
}

void
gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
{
    struct rusage_superset rup;

    gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
    rup.ri.ri_phys_footprint = 0;
    populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
        udata_buffer, num_udata, reason, etype);
}

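/*
 * Rewrite the corpse exception code/subcode for exit reasons that have a
 * canonical EXC_RESOURCE encoding; currently only jetsam's per-process
 * memory-limit kills are translated into a high-watermark EXC_RESOURCE.
 */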
static void
proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
{
    mach_exception_data_type_t code_update = *code;
    mach_exception_data_type_t subcode_update = *subcode;
    if (p->p_exit_reason == OS_REASON_NULL) {
        return;
    }

    switch (p->p_exit_reason->osr_namespace) {
    case OS_REASON_JETSAM:
        if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
            /* Update the code with EXC_RESOURCE code for high memory watermark */
            EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
            EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
            EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
            subcode_update = 0;
            break;
        }

        break;
    default:
        break;
    }

    *code = code_update;
    *subcode = subcode_update;
    return;
}

mach_exception_data_type_t
proc_encode_exit_exception_code(proc_t p)
{
    uint64_t subcode = 0;

    if (p->p_exit_reason == OS_REASON_NULL) {
        return 0;
    }

    /* Embed first 32 bits of osr_namespace and osr_code in exception code */
    ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
    ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
    return (mach_exception_data_type_t)subcode;
}

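/*
 * Fill the corpse task's kcdata crash-info buffer with process metadata:
 * exception codes, identity, rusage, ledger balances, and (if available) an
 * exit-reason snapshot. Each field is emitted only if space can still be
 * reserved in the buffer, so a full buffer degrades gracefully.
 */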
static void
populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
{
    mach_vm_address_t uaddr = 0;
    mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
    exc_codes[0] = code;
    exc_codes[1] = subcode;
    cpu_type_t cputype;
    struct proc_uniqidentifierinfo p_uniqidinfo;
    struct proc_workqueueinfo pwqinfo;
    int retval = 0;
    uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
    bool is_corpse_fork;
    uint32_t csflags;
    unsigned int pflags = 0;
    uint64_t max_footprint_mb;
    uint64_t max_footprint;

    uint64_t ledger_internal;
    uint64_t ledger_internal_compressed;
    uint64_t ledger_iokit_mapped;
    uint64_t ledger_alternate_accounting;
    uint64_t ledger_alternate_accounting_compressed;
    uint64_t ledger_purgeable_nonvolatile;
    uint64_t ledger_purgeable_nonvolatile_compressed;
    uint64_t ledger_page_table;
    uint64_t ledger_phys_footprint;
    uint64_t ledger_phys_footprint_lifetime_max;
    uint64_t ledger_network_nonvolatile;
    uint64_t ledger_network_nonvolatile_compressed;
    uint64_t ledger_wired_mem;
    uint64_t ledger_tagged_footprint;
    uint64_t ledger_tagged_footprint_compressed;
    uint64_t ledger_media_footprint;
    uint64_t ledger_media_footprint_compressed;
    uint64_t ledger_graphics_footprint;
    uint64_t ledger_graphics_footprint_compressed;
    uint64_t ledger_neural_footprint;
    uint64_t ledger_neural_footprint_compressed;

    void *crash_info_ptr = task_get_corpseinfo(corpse_task);

#if CONFIG_MEMORYSTATUS
    int memstat_dirty_flags = 0;
#endif

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
        pid_t pid = proc_getpid(p);
        kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
    }

    /* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
    if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
        if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
            kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
        }
    }

    static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
    if (KERN_SUCCESS ==
        kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
        proc_piduniqidentifierinfo(p, &p_uniqidinfo);
        kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
    }

    csflags = (uint32_t)proc_getcsflags(p);
    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
        struct timeval64 t64;
        t64.tv_sec = (int64_t)p->p_start.tv_sec;
        t64.tv_usec = (int64_t)p->p_start.tv_usec;
        kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
        char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
        proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
        kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
        zfree(ZV_NAMEI, buf);
    }

    pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
    }

    cputype = cpu_type() & ~CPU_ARCH_MASK;
    if (IS_64BIT_PROCESS(p)) {
        cputype |= CPU_ARCH_ABI64;
    } else if (proc_is64bit_data(p)) {
        cputype |= CPU_ARCH_ABI64_32;
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
        max_footprint = get_task_phys_footprint_limit(proc_task(p));
        max_footprint_mb = max_footprint >> 20;
        kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
        ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
    }

    // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
        ledger_internal = get_task_internal(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
        ledger_internal_compressed = get_task_internal_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
        ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
        ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
        ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
        ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
        ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
        ledger_page_table = get_task_page_table(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
        ledger_phys_footprint = get_task_phys_footprint(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
        ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
        ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
        ledger_wired_mem = get_task_wired_mem(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
    }

    bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
    retval = fill_procworkqueue(p, &pwqinfo);
    if (retval == 0) {
        if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
            kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
        }
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
        uid_t persona_id = proc_persona_id(p);
        kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
    }

#if CONFIG_COALITIONS
    if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
        uint64_t coalition_ids[COALITION_NUM_TYPES];
        task_coalition_ids(proc_task(p), coalition_ids);
        kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
    }
#endif /* CONFIG_COALITIONS */

#if CONFIG_MEMORYSTATUS
    memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
    }
#endif

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
        ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
        ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
        ledger_media_footprint = get_task_media_footprint(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
        ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
        ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
        ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
        ledger_neural_footprint = get_task_neural_footprint(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
        ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
        char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
        ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
        kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
        is_corpse_fork = is_corpsefork(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
        kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
    }

    if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
        reason = p->p_exit_reason;
    }
    if (reason != OS_REASON_NULL) {
        if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
            struct exit_reason_snapshot ers = {
                .ers_namespace = reason->osr_namespace,
                .ers_code = reason->osr_code,
                .ers_flags = reason->osr_flags
            };

            kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
        }

        if (reason->osr_kcd_buf != 0) {
            uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
            assert(reason_buf_size != 0);

            if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
                kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
            }
        }
    }

    if (num_udata > 0) {
        if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
            sizeof(uint64_t), num_udata, &uaddr)) {
            kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
        }
    }
}

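/*
 * Recover the exception type previously stored in a corpse's crash-info
 * kcdata buffer by populate_corpse_crashinfo().
 */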
exception_type_t
get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
{
    kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
        corpse_info->kcd_length);
    __assert_only uint32_t type = kcdata_iter_type(iter);
    assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);

    iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
    exception_type_t *etype = kcdata_iter_payload(iter);
    return *etype;
}

/*
 * Collect the information required to generate a lightweight corpse for the
 * current task, which may already be terminating.
 */
kern_return_t
current_thread_collect_backtrace_info(
    kcdata_descriptor_t *new_desc,
    exception_type_t etype,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    void *reasonp)
{
    kcdata_descriptor_t kcdata;
    kern_return_t kr;
    int frame_count = 0, max_frames = 100;
    mach_vm_address_t uuid_info_addr = 0;
    uint32_t uuid_info_count = 0;
    uint32_t btinfo_flag = 0;
    mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
    natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
    mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
    thread_identifier_info_data_t th_info;
    char threadname[MAXTHREADNAMESIZE];
    void *btdata_kernel = NULL;
    typedef uintptr_t user_btframe_t __kernel_data_semantics;
    user_btframe_t *btframes = NULL;
    os_reason_t reason = (os_reason_t)reasonp;
    struct backtrace_user_info info = BTUINFO_INIT;
    struct rusage_superset rup;
    uint32_t platform;

    task_t task = current_task();
    proc_t p = current_proc();

    bool has_64bit_addr = task_get_64bit_addr(current_task());
    bool has_64bit_data = task_get_64bit_data(current_task());

    if (new_desc == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /* First, collect backtrace frames */
    btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
    if (!btframes) {
        return KERN_RESOURCE_SHORTAGE;
    }

    frame_count = backtrace_user(btframes, max_frames, NULL, &info);
    if (info.btui_error || frame_count == 0) {
        kfree_data(btframes, max_frames * sizeof(btframes[0]));
        return KERN_FAILURE;
    }

    if ((info.btui_info & BTI_TRUNCATED) != 0) {
        btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
    }

    /* Captured in kcdata descriptor below */
    btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
    if (!btdata_kernel) {
        kfree_data(btframes, max_frames * sizeof(btframes[0]));
        return KERN_RESOURCE_SHORTAGE;
    }

    kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
    if (!kcdata) {
        kfree_data(btdata_kernel, alloc_size);
        kfree_data(btframes, max_frames * sizeof(btframes[0]));
        return KERN_RESOURCE_SHORTAGE;
    }

    /* First reserve space in kcdata blob for the btinfo flag fields */
    if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
        sizeof(uint32_t), &btinfo_flag_addr)) {
        kfree_data(btdata_kernel, alloc_size);
        kfree_data(btframes, max_frames * sizeof(btframes[0]));
        kcdata_memory_destroy(kcdata);
        return KERN_RESOURCE_SHORTAGE;
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
        (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
        sizeof(uintptr_t), frame_count, &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
    }

#if __LP64__
    /* We only support async stacks on 64-bit kernels */
    frame_count = 0;

    if (info.btui_async_frame_addr != 0) {
        if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
            sizeof(uint32_t), &kaddr)) {
            uint32_t idx = info.btui_async_start_index;
            kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
        }
        struct backtrace_control ctl = {
            .btc_frame_addr = info.btui_async_frame_addr,
            .btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
        };

        info = BTUINFO_INIT;
        frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
        if (info.btui_error == 0 && frame_count > 0) {
            if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
                TASK_BTINFO_ASYNC_BACKTRACE64,
                sizeof(uintptr_t), frame_count, &kaddr)) {
                kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
            }
        }

        if ((info.btui_info & BTI_TRUNCATED) != 0) {
            btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
        }
    }
#endif

    /* Backtrace collection done, free the frames buffer */
    kfree_data(btframes, max_frames * sizeof(btframes[0]));
    btframes = NULL;

    /* Next, suspend the task briefly and collect image load infos */
    task_suspend_internal(task);

    /* all_image_info struct is ABI, in agreement with address width */
    if (has_64bit_addr) {
        struct user64_dyld_all_image_infos task_image_infos = {};
        struct btinfo_sc_load_info64 sc_info;
        (void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
            sizeof(struct user64_dyld_all_image_infos));
        uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
        uuid_info_addr = task_image_infos.uuidArray;

        sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
        sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
        memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
            sizeof(task_image_infos.sharedCacheUUID));

        if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
            TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
            kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
        }
    } else {
        struct user32_dyld_all_image_infos task_image_infos = {};
        struct btinfo_sc_load_info sc_info;
        (void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
            sizeof(struct user32_dyld_all_image_infos));
        uuid_info_count = task_image_infos.uuidArrayCount;
        uuid_info_addr = task_image_infos.uuidArray;

        sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
        sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
        memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
            sizeof(task_image_infos.sharedCacheUUID));

        if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
            TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
            kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
        }
    }

    if (!uuid_info_addr) {
        /*
         * Can happen when we catch dyld in the middle of updating
         * this data structure, or copyin of all_image_info struct failed.
         */
        task_resume_internal(task);
        kfree_data(btdata_kernel, alloc_size);
        kcdata_memory_destroy(kcdata);
        return KERN_MEMORY_ERROR;
    }

    if (uuid_info_count > 0) {
        uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
            sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));

        if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
            (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
            uuid_info_size, uuid_info_count, &kaddr)) {
            if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
                task_resume_internal(task);
                kfree_data(btdata_kernel, alloc_size);
                kcdata_memory_destroy(kcdata);
                return KERN_MEMORY_ERROR;
            }
        }
    }

    task_resume_internal(task);

    /* Next, collect all other information */
    thread_flavor_t tsflavor;
    mach_msg_type_number_t tscount;

#if defined(__x86_64__) || defined(__i386__)
    tsflavor = x86_THREAD_STATE;    /* unified */
    tscount = x86_THREAD_STATE_COUNT;
#else
    tsflavor = ARM_THREAD_STATE;    /* unified */
    tscount = ARM_UNIFIED_THREAD_STATE_COUNT;
#endif

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
        sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
        struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
        bt_thread_state->flavor = tsflavor;
        bt_thread_state->count = tscount;
        /* variable-sized tstate array follows */

        kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
            (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
        if (kr != KERN_SUCCESS) {
            bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
            if (kr == KERN_TERMINATED) {
                btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
            }
        }
    }

#if defined(__x86_64__) || defined(__i386__)
    tsflavor = x86_EXCEPTION_STATE;    /* unified */
    tscount = x86_EXCEPTION_STATE_COUNT;
#else
#if defined(__arm64__)
    if (has_64bit_data) {
        tsflavor = ARM_EXCEPTION_STATE64;
        tscount = ARM_EXCEPTION_STATE64_COUNT;
    } else
#endif /* defined(__arm64__) */
    {
        tsflavor = ARM_EXCEPTION_STATE;
        tscount = ARM_EXCEPTION_STATE_COUNT;
    }
#endif /* defined(__x86_64__) || defined(__i386__) */

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
        sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
        struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
        bt_thread_state->flavor = tsflavor;
        bt_thread_state->count = tscount;
        /* variable-sized tstate array follows */

        kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
            (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
        if (kr != KERN_SUCCESS) {
            bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
            if (kr == KERN_TERMINATED) {
                btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
            }
        }
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
        pid_t pid = proc_getpid(p);
        kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
    }

#if CONFIG_COALITIONS
    if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
        uint64_t coalition_ids[COALITION_NUM_TYPES];
        task_coalition_ids(proc_task(p), coalition_ids);
        kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
    }
#endif /* CONFIG_COALITIONS */

    /* V0 is sufficient for ReportCrash */
    gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
    rup.ri.ri_phys_footprint = 0;
    /* Soft crash, proc did not exit */
    rup.ri.ri_proc_exit_abstime = 0;
    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
    }

    platform = proc_platform(current_proc());
    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
        char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
        proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
        kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
        zfree(ZV_NAMEI, buf);
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
        unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
        kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
        cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
        if (has_64bit_addr) {
            cputype |= CPU_ARCH_ABI64;
        } else if (has_64bit_data) {
            cputype |= CPU_ARCH_ABI64_32;
        }
        kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
    }

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
    }

    assert(codeCnt <= EXCEPTION_CODE_MAX);

    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
        sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
        kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
    }

    if (reason != OS_REASON_NULL) {
        if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
            struct exit_reason_snapshot ers = {
                .ers_namespace = reason->osr_namespace,
                .ers_code = reason->osr_code,
                .ers_flags = reason->osr_flags
            };

            kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
        }

        if (reason->osr_kcd_buf != 0) {
            uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
            assert(reason_buf_size != 0);

            if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
                kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
            }
        }
    }

    threadname[0] = '\0';
    if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
        sizeof(threadname), &kaddr)) {
        bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
        kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
    }

    kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
    if (kr == KERN_TERMINATED) {
        btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
    }

    kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
        sizeof(uint64_t), &kaddr);

    /*
     * If the last kcdata_get_memory_addr() failed (unlikely), signal to the
     * exception handler (ReportCrash) that lightweight corpse collection ran
     * out of space and the result is incomplete.
     */
    if (last_kr != KERN_SUCCESS) {
        btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
    }

    if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
        kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
    }

    /* Lastly, copy the flags to the address we reserved at the beginning. */
    kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));

    *new_desc = kcdata;

    return KERN_SUCCESS;
}

/*
 * We only parse exit reason kcdata blobs for critical processes that are
 * about to die when we're going to panic, or for opt-in, limited diagnostic
 * tools.
 *
 * Meant to be called immediately before panicking, or in limited diagnostic
 * scenarios.
 */
char *
exit_reason_get_string_desc(os_reason_t exit_reason)
{
    kcdata_iter_t iter;

    if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
        exit_reason->osr_bufsize == 0) {
        return NULL;
    }

    iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
    if (!kcdata_iter_valid(iter)) {
#if DEBUG || DEVELOPMENT
        printf("exit reason has invalid exit reason buffer\n");
#endif
        return NULL;
    }

    if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
#if DEBUG || DEVELOPMENT
        printf("exit reason buffer type mismatch, expected %d got %d\n",
            KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
#endif
        return NULL;
    }

    iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
    if (!kcdata_iter_valid(iter)) {
        return NULL;
    }

    return (char *)kcdata_iter_payload(iter);
}

static int initproc_spawned = 0;

static int
sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
        // Can only ever be set by launchd, and only once at boot
        return EPERM;
    }
    return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
}

SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
    CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
    sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");

#if DEVELOPMENT || DEBUG

/* disable user faults */
static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
#endif /* DEVELOPMENT || DEBUG */

#define OS_REASON_IFLAG_USER_FAULT 0x1

#define OS_REASON_TOTAL_USER_FAULTS_PER_PROC 5

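/*
 * Common backend for abort_with_payload() and os_fault_with_payload().
 * The user-fault path (OS_REASON_IFLAG_USER_FAULT) raises a simulated
 * EXC_GUARD without killing the process and is capped at
 * OS_REASON_TOTAL_USER_FAULTS_PER_PROC faults per process; the abort path
 * delivers SIGABRT with the constructed exit reason attached.
 */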
1299 static int
abort_with_payload_internal(proc_t p,uint32_t reason_namespace,uint64_t reason_code,user_addr_t payload,uint32_t payload_size,user_addr_t reason_string,uint64_t reason_flags,uint32_t internal_flags)1300 abort_with_payload_internal(proc_t p,
1301 uint32_t reason_namespace, uint64_t reason_code,
1302 user_addr_t payload, uint32_t payload_size,
1303 user_addr_t reason_string, uint64_t reason_flags,
1304 uint32_t internal_flags)
1305 {
1306 os_reason_t exit_reason = OS_REASON_NULL;
1307 kern_return_t kr = KERN_SUCCESS;
1308
1309 if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1310 uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1311 memory_order_relaxed);
1312
1313 #if DEVELOPMENT || DEBUG
1314 if (bootarg_disable_user_faults) {
1315 return EQFULL;
1316 }
1317 #endif /* DEVELOPMENT || DEBUG */
1318
1319 for (;;) {
1320 if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1321 return EQFULL;
1322 }
1323 // on failure, the CAS reloads the current value into old_value
1324 if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1325 &old_value, old_value + 1, memory_order_relaxed,
1326 memory_order_relaxed)) {
1327 break;
1328 }
1329 }
1330 }
1331
1332 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1333 proc_getpid(p), reason_namespace,
1334 reason_code, 0, 0);
1335
1336 exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1337 payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1338
1339 if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1340 mach_exception_code_t code = 0;
1341
1342 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1343 EXC_GUARD_ENCODE_FLAVOR(code, 0);
1344 EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1345
1346 if (exit_reason == OS_REASON_NULL) {
1347 kr = KERN_RESOURCE_SHORTAGE;
1348 } else {
1349 kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1350 }
1351 os_reason_free(exit_reason);
1352 } else {
1353 /*
1354 * We use SIGABRT (rather than calling exit directly from here) so that
1355 * the debugger can catch abort_with_{reason,payload} calls.
1356 */
1357 psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1358 }
1359
1360 switch (kr) {
1361 case KERN_SUCCESS:
1362 return 0;
1363 case KERN_NOT_SUPPORTED:
1364 return ENOTSUP;
1365 case KERN_INVALID_ARGUMENT:
1366 return EINVAL;
1367 case KERN_RESOURCE_SHORTAGE:
1368 default:
1369 return EBUSY;
1370 }
1371 }
1372
1373 int
1374 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1375 __unused void *retval)
1376 {
1377 abort_with_payload_internal(cur_proc, args->reason_namespace,
1378 args->reason_code, args->payload, args->payload_size,
1379 args->reason_string, args->reason_flags, 0);
1380
1381 return 0;
1382 }
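/*
 * Illustrative only (userspace, hedged): the matching libsyscall wrapper is
 * declared in <sys/reason.h>. A process might terminate itself with, e.g.,
 *
 *     abort_with_payload(OS_REASON_TEST, 42, NULL, 0, "example failure", 0);
 *
 * which reaches this handler with internal_flags == 0 and thus delivers
 * SIGABRT with the constructed exit reason attached.
 */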
1383
1384 int
1385 os_fault_with_payload(struct proc *cur_proc,
1386 struct os_fault_with_payload_args *args, __unused int *retval)
1387 {
1388 return abort_with_payload_internal(cur_proc, args->reason_namespace,
1389 args->reason_code, args->payload, args->payload_size,
1390 args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1391 }
1392
1393
1394 /*
1395 * exit --
1396 * Death of process.
1397 */
1398 __attribute__((noreturn))
1399 void
1400 exit(proc_t p, struct exit_args *uap, int *retval)
1401 {
1402 p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1403 exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1404
1405 thread_exception_return();
1406 /* NOTREACHED */
1407 while (TRUE) {
1408 thread_block(THREAD_CONTINUE_NULL);
1409 }
1410 /* NOTREACHED */
1411 }
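/*
 * Worked example (hedged): for exit(3), W_EXITCODE(3, 0) packs the status
 * as (3 << 8) | 0 == 0x300, so a later WEXITSTATUS(p->p_xstat) recovers 3
 * and WTERMSIG() yields 0. Bits 24-31 of the full 32-bit rval are stashed
 * in p_xhighbits above and re-attached to si_status in waitid_nocancel().
 */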
1412
1413 /*
1414 * Exit: deallocate address space and other resources, change proc state
1415 * to zombie, and unlink proc from allproc and parent's lists. Save exit
1416 * status and rusage for wait(). Check for child processes and orphan them.
1417 */
1418 int
1419 exit1(proc_t p, int rv, int *retval)
1420 {
1421 return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1422 }
1423
1424 int
1425 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1426 int jetsam_flags)
1427 {
1428 return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1429 }
1430
1431 /*
1432 * NOTE: exit_with_reason drops a reference on the passed exit_reason
1433 */
1434 int
1435 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1436 int jetsam_flags, struct os_reason *exit_reason)
1437 {
1438 thread_t self = current_thread();
1439 struct task *task = proc_task(p);
1440 struct uthread *ut;
1441 int error = 0;
1442 bool proc_exiting = false;
1443
1444 #if DEVELOPMENT || DEBUG
1445 /*
1446 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1447 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1448 */
1449 if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1450 panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1451 __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1452 }
1453 #endif
1454
1455 /*
1456 * If a thread in this task has already
1457 * called exit(), then halt any others
1458 * right here.
1459 */
1460
1461 ut = get_bsdthread_info(self);
1462 (void)retval;
1463
1464 /*
1465 * The parameter list of audit_syscall_exit() was augmented to
1466 * take the Darwin syscall number as the first parameter,
1467 * which is currently required by mac_audit_postselect().
1468 */
1469
1470 /*
1471 * The BSM token contains two components: an exit status as passed
1472 * to exit(), and a return value to indicate what sort of exit it
1473 * was. The exit status is WEXITSTATUS(rv), but it's not clear
1474 * what the return value is.
1475 */
1476 AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1477 /*
1478 * TODO: what to audit here when jetsam calls exit and the uthread
1479 * 'ut' does not belong to the proc 'p'.
1480 */
1481 AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1482
1483 DTRACE_PROC1(exit, int, CLD_EXITED);
1484
1485 /* mark that the process is going to exit and pull it out of DBG/disk throttling */
1486 /* TODO: This should be done after becoming exit thread */
1487 proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1488 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1489
1490 proc_lock(p);
1491 error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1492 if (error == EDEADLK) {
1493 /*
1494 * If proc_transstart() returns EDEADLK, then another thread
1495 * is either exec'ing or exiting. Return an error and allow
1496 * the other thread to continue.
1497 */
1498 proc_unlock(p);
1499 os_reason_free(exit_reason);
1500 if (current_proc() == p) {
1501 if (p->exit_thread == self) {
1502 panic("exit_thread failed to exit");
1503 }
1504
1505 if (thread_can_terminate) {
1506 thread_exception_return();
1507 }
1508 }
1509
1510 return error;
1511 }
1512
1513 proc_exiting = !!(p->p_lflag & P_LEXIT);
1514
1515 while (proc_exiting || p->exit_thread != self) {
1516 if (proc_exiting || sig_try_locked(p) <= 0) {
1517 proc_transend(p, 1);
1518 os_reason_free(exit_reason);
1519
1520 if (get_threadtask(self) != task) {
1521 proc_unlock(p);
1522 return 0;
1523 }
1524 proc_unlock(p);
1525
1526 thread_terminate(self);
1527 if (!thread_can_terminate) {
1528 return 0;
1529 }
1530
1531 thread_exception_return();
1532 /* NOTREACHED */
1533 }
1534 sig_lock_to_exit(p);
1535 }
1536
1537 if (exit_reason != OS_REASON_NULL) {
1538 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1539 proc_getpid(p), exit_reason->osr_namespace,
1540 exit_reason->osr_code, 0, 0);
1541 }
1542
1543 assert(p->p_exit_reason == OS_REASON_NULL);
1544 p->p_exit_reason = exit_reason;
1545
1546 p->p_lflag |= P_LEXIT;
1547 p->p_xstat = rv;
1548 p->p_lflag |= jetsam_flags;
1549
1550 proc_transend(p, 1);
1551 proc_unlock(p);
1552
1553 proc_prepareexit(p, rv, perf_notify);
1554
1555 /* Last thread to terminate will call proc_exit() */
1556 task_terminate_internal(task);
1557
1558 return 0;
1559 }
1560
1561 #if CONFIG_MEMORYSTATUS
1562 /*
1563 * Remove this process from the jetsam bands for freezing or exiting. Note this will block if the process
1564 * is currently being frozen.
1565 * The proc_list_lock is held by the caller.
1566 * NB: If the process should be ineligible for future freezing or jetsam, the caller should first set
1567 * the p_refcount P_REF_DEAD bit.
1568 */
1569 static void
1570 proc_memorystatus_remove(proc_t p)
1571 {
1572 LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1573 while (memorystatus_remove(p) == EAGAIN) {
1574 os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1575 msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1576 }
1577 }
1578 #endif
1579
1580 #if DEVELOPMENT
1581 boolean_t crash_behavior_test_mode = FALSE;
1582 boolean_t crash_behavior_test_would_panic = FALSE;
1583 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1584 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1585 #endif /* DEVELOPMENT */
1586
1587 static bool
1588 _proc_is_crashing_signal(int sig)
1589 {
1590 bool result = false;
1591 switch (sig) {
1592 case SIGILL:
1593 case SIGABRT:
1594 case SIGFPE:
1595 case SIGBUS:
1596 case SIGSEGV:
1597 case SIGSYS:
1598 /*
1599 * If SIGTRAP is the terminating signal, then we can safely assume the
1600 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1601 * a process calls __builtin_trap(), which will abort.)
1602 */
1603 case SIGTRAP:
1604 result = true;
1605 }
1606
1607 return result;
1608 }
1609
1610 static bool
1611 _proc_is_fatal_reason(os_reason_t reason)
1612 {
1613 if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1614 /* Abort is always fatal even if there is no crash report generated */
1615 return true;
1616 }
1617 if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1618 /*
1619 * No crash report means this reason shouldn't be considered fatal
1620 * unless we are in test mode
1621 */
1622 #if DEVELOPMENT
1623 if (crash_behavior_test_mode) {
1624 return true;
1625 }
1626 #endif /* DEVELOPMENT */
1627 return false;
1628 }
1629 // By default all OS_REASON are fatal
1630 return true;
1631 }
1632
1633 static bool
1634 proc_should_trigger_panic(proc_t p, int rv)
1635 {
1636 if (p == initproc || (p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1637 /* Always panic for launchd or equivalents */
1638 return true;
1639 }
1640
1641 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1642 return true;
1643 }
1644
1645 if (p->p_posix_spawn_failed) {
1646 /* posix_spawn failures normally don't qualify for panics */
1647 return false;
1648 }
1649
1650 bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1651 if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1652 return false;
1653 }
1654
1655 if (WIFEXITED(rv)) {
1656 int code = WEXITSTATUS(rv);
1657
1658 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1659 if (code == 0) {
1660 /* No panic if we exit 0 */
1661 return false;
1662 } else {
1663 /* Panic on non-zero exit */
1664 return true;
1665 }
1666 } else {
1667 /* No panic on normal exit if the process doesn't have the non-zero flag set */
1668 return false;
1669 }
1670 } else if (WIFSIGNALED(rv)) {
1671 int signal = WTERMSIG(rv);
1672 /* This is a crash (non-normal exit) */
1673 if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1674 os_reason_t reason = p->p_exit_reason;
1675 if (reason != OS_REASON_NULL) {
1676 if (!_proc_is_fatal_reason(reason)) {
1677 // Skip non-fatal terminate_with_reason
1678 return false;
1679 }
1680 if (reason->osr_namespace == OS_REASON_SIGNAL) {
1681 return _proc_is_crashing_signal(signal);
1682 } else {
1683 /*
1684 * This branch covers the case of terminate_with_reason, which
1685 * delivers a SIGTERM that is still considered a crash even
1686 * though the signal is not a crashing signal.
1687 */
1688 return true;
1689 }
1690 }
1691 return _proc_is_crashing_signal(signal);
1692 } else {
1693 return false;
1694 }
1695 } else {
1696 /*
1697 * This branch implies that we didn't exit normally nor did we receive
1698 * a signal. This should be unreachable.
1699 */
1700 return true;
1701 }
1702 }
1703
1704 static void
1705 proc_crash_coredump(proc_t p)
1706 {
1707 if (p != initproc) {
1708 /* Core dumps are only enabled for launchd for now */
1709 return;
1710 }
1711
1712 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1713 /*
1714 * For debugging purposes, generate a core file of initproc before
1715 * panicking. Leave at least 300 MB free on the root volume, and ignore
1716 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1717 * before the panic hits.
1718 */
1719
1720 int err;
1721 uint64_t coredump_start = mach_absolute_time();
1722 uint64_t coredump_end;
1723 clock_sec_t tv_sec;
1724 clock_usec_t tv_usec;
1725 uint32_t tv_msec;
1726
1727
1728 err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1729
1730 coredump_end = mach_absolute_time();
1731
1732 absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1733
1734 tv_msec = tv_usec / 1000;
1735
1736 if (err != 0) {
1737 printf("Failed to generate initproc core file: error %d, took %d.%03d seconds\n",
1738 err, (uint32_t)tv_sec, tv_msec);
1739 } else {
1740 printf("Generated initproc core file in %d.%03d seconds\n",
1741 (uint32_t)tv_sec, tv_msec);
1742 }
1743 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1744 }
1745
1746 static void
1747 proc_handle_critical_exit(proc_t p, int rv)
1748 {
1749 if (!proc_should_trigger_panic(p, rv)) {
1750 // No panic, bail out
1751 return;
1752 }
1753
1754 #if DEVELOPMENT
1755 if (crash_behavior_test_mode) {
1756 crash_behavior_test_would_panic = TRUE;
1757 // Force test mode off after hitting a panic
1758 crash_behavior_test_mode = FALSE;
1759 return;
1760 }
1761 #endif /* DEVELOPMENT */
1762
1763 char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1764
1765 if (p->p_exit_reason == OS_REASON_NULL) {
1766 printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1767 proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1768 } else {
1769 printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1770 p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1771 exit_reason_desc : "none");
1772 }
1773
1774 const char *prefix_str;
1775 char prefix_str_buf[128];
1776
1777 if (p == initproc) {
1778 if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1779 prefix_str = "LTE preinit process exited";
1780 } else if (initproc_spawned) {
1781 prefix_str = "initproc exited";
1782 } else {
1783 prefix_str = "initproc failed to start";
1784 }
1785 } else {
1786 /* For processes that aren't launchd, just use the process name and pid */
1787 snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1788 prefix_str = prefix_str_buf;
1789 }
1790
1791 proc_crash_coredump(p);
1792
1793 sync(p, (void *)NULL, (int *)NULL);
1794
1795 if (p->p_exit_reason == OS_REASON_NULL) {
1796 panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1797 prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1798 } else {
1799 panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1800 ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1801 prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1802 exit_reason_desc ? exit_reason_desc : "none");
1803 }
1804 }
1805
1806 void
1807 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1808 {
1809 mach_exception_data_type_t code = 0, subcode = 0;
1810 exception_type_t etype;
1811
1812 struct uthread *ut;
1813 thread_t self = current_thread();
1814 ut = get_bsdthread_info(self);
1815 struct rusage_superset *rup;
1816 int kr = 0;
1817 int create_corpse = FALSE;
1818
1819 if (p->p_crash_behavior != 0 || p == initproc) {
1820 proc_handle_critical_exit(p, rv);
1821 }
1822
1823 /*
1824 * Generate a corefile/crashlog if:
1825 * The process doesn't have an exit reason that indicates no crash report should be created
1826 * AND any of the following are true:
1827 * - The process was terminated due to a fatal signal that generates a core
1828 * - The process was killed due to a code signing violation
1829 * - The process has an exit reason that indicates we should generate a crash report
1830 *
1831 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1832 * (which normally triggers a core) but may indicate that no crash report should be created.
1833 */
1834 if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1835 (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1836 (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1837 OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1838 /*
1839 * Workaround for processes checking up on PT_DENY_ATTACH:
1840 * should be backed out post-Leopard (details in 5431025).
1841 */
1842 if ((SIGSEGV == WTERMSIG(rv)) &&
1843 (p->p_pptr->p_lflag & P_LNOATTACH)) {
1844 goto skipcheck;
1845 }
1846
1847 /*
1848 * Crash Reporter looks for the signal value, original exception
1849 * type, and low 20 bits of the original code in code[0]
1850 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1851 */
1852 code = ((WTERMSIG(rv) & 0xff) << 24) |
1853 ((ut->uu_exception & 0x0f) << 20) |
1854 ((int)ut->uu_code & 0xfffff);
1855 subcode = ut->uu_subcode;
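/*
 * Worked example (hedged): with WTERMSIG(rv) == SIGSEGV (11),
 * uu_exception == EXC_BAD_ACCESS (1) and uu_code == 2, the packed word is
 * (11 << 24) | (1 << 20) | 2 == 0x0b100002; a consumer recovers the pieces
 * with (code >> 24) & 0xff, (code >> 20) & 0xf, and code & 0xfffff.
 */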
1856 etype = ut->uu_exception;
1857
1858 /* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1859 if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1860 etype = EXC_CRASH;
1861 }
1862
1863 #if (DEVELOPMENT || DEBUG)
1864 if (p->p_pid <= exception_log_max_pid) {
1865 char *proc_name = proc_best_name(p);
1866 if (PROC_HAS_EXITREASON(p)) {
1867 record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1868 "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1869 proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1870 exit_reason_get_string_desc(p->p_exit_reason));
1871 } else {
1872 record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1873 "pid: %d -- process name: %s -- exit status %d",
1874 proc_getpid(p), proc_name, WEXITSTATUS(rv));
1875 }
1876 }
1877 #endif
1878
1879 kr = task_exception_notify(EXC_CRASH, code, subcode);
1880
1881 /* Nobody handled EXC_CRASH? Remember to make a corpse. */
1882 if (kr != 0 && p == current_proc()) {
1883 /*
1884 * Do not create corpse when exit is called from jetsam thread.
1885 * Corpse creation code requires that proc_prepareexit is
1886 * called by the exiting proc and not the kernel_proc.
1887 */
1888 create_corpse = TRUE;
1889 }
1890
1891 /*
1892 * Revalidate the code signing of the text pages around current PC.
1893 * This is an attempt to detect and repair faults due to memory
1894 * corruption of text pages.
1895 *
1896 * The goal here is to fixup infrequent memory corruptions due to
1897 * things like aging RAM bit flips. So the approach is to only expect
1898 * to have to fixup one thing per crash. This also limits the amount
1899 * of extra work we cause in case this is a development kernel with an
1900 * active memory stomp happening.
1901 */
1902 task_t task = proc_task(p);
1903 uintptr_t bt[2];
1904 struct backtrace_user_info btinfo = BTUINFO_INIT;
1905 unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
1906 int bt_err = btinfo.btui_error;
1907 if (bt_err == 0 && frame_count >= 1) {
1908 /*
1909 * First check the page containing the current PC.
1910 * This passes if the page code-signs correctly -or- if we can't figure out
1911 * what is at that address. The latter is so we continue checking
1912 * previous pages, which may be corrupt and have caused a wild branch.
1913 */
1914 kr = revalidate_text_page(task, bt[0]);
1915
1916 /* No corruption found, check the previous sequential page */
1917 if (kr == KERN_SUCCESS) {
1918 kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
1919 }
1920
1921 /* Still no corruption found, check the current function's caller */
1922 if (kr == KERN_SUCCESS) {
1923 if (frame_count > 1 &&
1924 atop(bt[0]) != atop(bt[1]) && /* don't recheck PC page */
1925 atop(bt[0]) - 1 != atop(bt[1])) { /* don't recheck page before */
1926 kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
1927 }
1928 }
1929
1930 /*
1931 * Log that we found a corruption.
1932 */
1933 if (kr != KERN_SUCCESS) {
1934 os_log(OS_LOG_DEFAULT,
1935 "Text page corruption detected in dying process %d\n", proc_getpid(p));
1936 }
1937 }
1938 }
1939
1940 skipcheck:
1941 if (task_is_driver(proc_task(p)) && PROC_HAS_EXITREASON(p)) {
1942 IOUserServerRecordExitReason(proc_task(p), p->p_exit_reason);
1943 }
1944
1945 /* Notify the perf server? */
1946 if (perf_notify) {
1947 (void)sys_perf_notify(self, proc_getpid(p));
1948 }
1949
1950
1951 /* stash the usage into corpse data if create_corpse == TRUE */
1952 if (create_corpse == TRUE) {
1953 kr = task_mark_corpse(proc_task(p));
1954 if (kr != KERN_SUCCESS) {
1955 if (kr == KERN_NO_SPACE) {
1956 printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
1957 } else if (kr == KERN_NOT_SUPPORTED) {
1958 printf("Process[%d] was destined to be corpse. But corpse is disabled by config.\n", proc_getpid(p));
1959 } else if (kr == KERN_TERMINATED) {
1960 printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
1961 } else {
1962 printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
1963 }
1964 create_corpse = FALSE;
1965 }
1966 }
1967
1968 if (!proc_is_shadow(p)) {
1969 /*
1970 * Before this process becomes a zombie, stash resource usage
1971 * stats in the proc for external observers to query
1972 * via proc_pid_rusage().
1973 *
1974 * If the zombie allocation fails, just punt the stats.
1975 */
1976 rup = zalloc(zombie_zone);
1977 gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
1978 rup->ri.ri_phys_footprint = 0;
1979 rup->ri.ri_proc_exit_abstime = mach_absolute_time();
1980 /*
1981 * Make the rusage_info visible to external observers
1982 * only after it has been completely filled in.
1983 */
1984 p->p_ru = rup;
1985 }
1986
1987 if (create_corpse) {
1988 int est_knotes = 0, num_knotes = 0;
1989 uint64_t *buffer = NULL;
1990 uint32_t buf_size = 0;
1991
1992 /* Get all the udata pointers from kqueue */
1993 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
1994 if (est_knotes > 0) {
1995 buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
1996 buffer = kalloc_data(buf_size, Z_WAITOK);
1997 if (buffer) {
1998 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
1999 if (num_knotes > est_knotes + 32) {
2000 num_knotes = est_knotes + 32;
2001 }
2002 }
2003 }
2004
2005 /* Update the code, subcode based on exit reason */
2006 proc_update_corpse_exception_codes(p, &code, &subcode);
2007 populate_corpse_crashinfo(p, proc_task(p), rup,
2008 code, subcode, buffer, num_knotes, NULL, etype);
2009 kfree_data(buffer, buf_size);
2010 }
2011 /*
2012 * Remove proc from allproc queue and from pidhash chain.
2013 * Need to do this before we do anything that can block.
2014 * Not doing so causes things like mount() to find this proc on allproc
2015 * in a partially cleaned state.
2016 */
2017
2018 proc_list_lock();
2019
2020 #if CONFIG_MEMORYSTATUS
2021 proc_memorystatus_remove(p);
2022 #endif
2023
2024 LIST_REMOVE(p, p_list);
2025 LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2026 /* will not be visible via proc_find */
2027 os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2028
2029 proc_list_unlock();
2030
2031 /*
2032 * If parent is waiting for us to exit or exec,
2033 * P_LPPWAIT is set; we will wakeup the parent below.
2034 */
2035 proc_lock(p);
2036 p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2037 p->p_sigignore = ~(sigcantmask);
2038
2039 /*
2040 * If a thread is already waiting for us in proc_exit,
2041 * P_LTERM is set; wake up that thread.
2042 */
2043 if (p->p_lflag & P_LTERM) {
2044 wakeup(&p->exit_thread);
2045 } else {
2046 p->p_lflag |= P_LTERM;
2047 }
2048
2049 /* If current proc is exiting, ignore signals on the exit thread */
2050 if (p == current_proc()) {
2051 ut->uu_siglist = 0;
2052 }
2053 proc_unlock(p);
2054 }
2055
2056 void
2057 proc_exit(proc_t p)
2058 {
2059 proc_t q;
2060 proc_t pp;
2061 struct task *task = proc_task(p);
2062 vnode_t tvp = NULLVP;
2063 struct pgrp * pg;
2064 struct session *sessp;
2065 struct uthread * uth;
2066 pid_t pid;
2067 int exitval;
2068 int knote_hint;
2069
2070 uth = current_uthread();
2071
2072 proc_lock(p);
2073 proc_transstart(p, 1, 0);
2074 if (!(p->p_lflag & P_LEXIT)) {
2075 /*
2076 * This can happen if a thread_terminate() occurs
2077 * in a single-threaded process.
2078 */
2079 p->p_lflag |= P_LEXIT;
2080 proc_transend(p, 1);
2081 proc_unlock(p);
2082 proc_prepareexit(p, 0, TRUE);
2083 (void) task_terminate_internal(task);
2084 proc_lock(p);
2085 } else if (!(p->p_lflag & P_LTERM)) {
2086 proc_transend(p, 1);
2087 /* Jetsam is in the middle of calling proc_prepareexit, wait for it */
2088 p->p_lflag |= P_LTERM;
2089 msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2090 } else {
2091 proc_transend(p, 1);
2092 }
2093
2094 p->p_lflag |= P_LPEXIT;
2095
2096 /*
2097 * Other kernel threads may be in the middle of signalling this process.
2098 * Wait for those threads to wrap it up before making the process
2099 * disappear on them.
2100 */
2101 if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2102 p->p_sigwaitcnt++;
2103 while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2104 msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2105 }
2106 p->p_sigwaitcnt--;
2107 }
2108
2109 proc_unlock(p);
2110 pid = proc_getpid(p);
2111 exitval = p->p_xstat;
2112 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2113 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2114 pid, exitval, 0, 0, 0);
2115
2116 #if DEVELOPMENT || DEBUG
2117 proc_exit_lpexit_check(pid, PELS_POS_START);
2118 #endif
2119
2120 #if CONFIG_DTRACE
2121 dtrace_proc_exit(p);
2122 #endif
2123
2124 /*
2125 * need to cancel async IO requests that can be cancelled and wait for those
2126 * already active. MAY BLOCK!
2127 */
2128
2129 proc_refdrain(p);
2130
2131 /* if any pending cpu limits action, clear it */
2132 task_clear_cpuusage(proc_task(p), TRUE);
2133
2134 workq_mark_exiting(p);
2135
2136 _aio_exit( p );
2137
2138 /*
2139 * Close open files and release open-file table.
2140 * This may block!
2141 */
2142 fdt_invalidate(p);
2143
2144 /*
2145 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2146 * workqueue.
2147 */
2148 workq_exit(p);
2149
2150 if (uth->uu_lowpri_window) {
2151 /*
2152 * task is marked as a low priority I/O type
2153 * and the I/O we issued while flushing files on close
2154 * collided with normal I/O operations...
2155 * no need to throttle this thread since it's going away,
2156 * but we do need to update our bookkeeping w.r.t. throttled threads
2157 */
2158 throttle_lowpri_io(0);
2159 }
2160
2161 if (p->p_lflag & P_LNSPACE_RESOLVER) {
2162 /*
2163 * The namespace resolver is exiting; there may be
2164 * outstanding materialization requests to clean up.
2165 */
2166 nspace_resolver_exited(p);
2167 }
2168
2169 #if SYSV_SHM
2170 /* Close ref SYSV shared memory */
2171 if (p->vm_shm) {
2172 shmexit(p);
2173 }
2174 #endif
2175 #if SYSV_SEM
2176 /* Release SYSV semaphores */
2177 semexit(p);
2178 #endif
2179
2180 #if PSYNCH
2181 pth_proc_hashdelete(p);
2182 #endif /* PSYNCH */
2183
2184 pg = proc_pgrp(p, &sessp);
2185 if (SESS_LEADER(p, sessp)) {
2186 if (sessp->s_ttyvp != NULLVP) {
2187 struct vnode *ttyvp;
2188 int ttyvid;
2189 int cttyflag = 0;
2190 struct vfs_context context;
2191 struct tty *tp;
2192 struct pgrp *tpgrp = PGRP_NULL;
2193
2194 /*
2195 * Controlling process.
2196 * Signal foreground pgrp,
2197 * drain controlling terminal
2198 * and revoke access to controlling terminal.
2199 */
2200
2201 proc_list_lock(); /* prevent any t_pgrp from changing */
2202 session_lock(sessp);
2203 if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2204 tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2205 }
2206 proc_list_unlock();
2207
2208 if (tpgrp != PGRP_NULL) {
2209 session_unlock(sessp);
2210 pgsignal(tpgrp, SIGHUP, 1);
2211 pgrp_rele(tpgrp);
2212 session_lock(sessp);
2213 }
2214
2215 cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2216 S_CTTYREF, relaxed) & S_CTTYREF);
2217 ttyvp = sessp->s_ttyvp;
2218 ttyvid = sessp->s_ttyvid;
2219 tp = session_clear_tty_locked(sessp);
2220 if (ttyvp) {
2221 vnode_hold(ttyvp);
2222 }
2223 session_unlock(sessp);
2224
2225 if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2226 if (tp != TTY_NULL) {
2227 tty_lock(tp);
2228 (void) ttywait(tp);
2229 tty_unlock(tp);
2230 }
2231
2232 context.vc_thread = NULL;
2233 context.vc_ucred = kauth_cred_proc_ref(p);
2234 VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2235 if (cttyflag) {
2236 /*
2237 * Release the extra usecount taken in cttyopen.
2238 * usecount should be released after VNOP_REVOKE is called.
2239 * This usecount was taken to ensure that
2240 * the VNOP_REVOKE results in a close to
2241 * the tty since cttyclose is a no-op.
2242 */
2243 vnode_rele(ttyvp);
2244 }
2245 vnode_put(ttyvp);
2246 kauth_cred_unref(&context.vc_ucred);
2247 vnode_drop(ttyvp);
2248 ttyvp = NULLVP;
2249 }
2250 if (ttyvp) {
2251 vnode_drop(ttyvp);
2252 }
2253 if (tp) {
2254 ttyfree(tp);
2255 }
2256 }
2257 session_lock(sessp);
2258 sessp->s_leader = NULL;
2259 session_unlock(sessp);
2260 }
2261
2262 if (!proc_is_shadow(p)) {
2263 fixjobc(p, pg, 0);
2264 }
2265 pgrp_rele(pg);
2266
2267 /*
2268 * Change RLIMIT_FSIZE for accounting/debugging.
2269 */
2270 proc_limitsetcur_fsize(p, RLIM_INFINITY);
2271
2272 (void)acct_process(p);
2273
2274 proc_list_lock();
2275
2276 if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2277 p->p_listflag &= ~P_LIST_EXITCOUNT;
2278 proc_shutdown_exitcount--;
2279 if (proc_shutdown_exitcount == 0) {
2280 wakeup(&proc_shutdown_exitcount);
2281 }
2282 }
2283
2284 /* wait till parentrefs are dropped and grant no more */
2285 proc_childdrainstart(p);
2286 while ((q = p->p_children.lh_first) != NULL) {
2287 if (q->p_stat == SZOMB) {
2288 if (p != q->p_pptr) {
2289 panic("parent child linkage broken");
2290 }
2291 /* check for sysctl zomb lookup */
2292 while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2293 msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2294 }
2295 q->p_listflag |= P_LIST_WAITING;
2296 /*
2297 * This is a named reference and it is not granted
2298 * if the reap is already in progress. So we get
2299 * the reference here exclusively and there can be
2300 * no waiters, so there is no need for a wakeup
2301 * after we are done. Also, the reap frees the structure,
2302 * so the proc struct cannot be used for wakeups either.
2303 * It is safe to use q here as this is a system reap.
2304 */
2305 reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2306 REAP_REPARENTED_TO_INIT : 0;
2307 reap_child_locked(p, q,
2308 REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2309 } else {
2310 /*
2311 * Traced processes are killed
2312 * since their existence means someone is messing up.
2313 */
2314 if (q->p_lflag & P_LTRACED) {
2315 struct proc *opp;
2316
2317 /*
2318 * Take a reference on the child process to
2319 * ensure it doesn't exit and disappear between
2320 * the time we drop the list_lock and attempt
2321 * to acquire its proc_lock.
2322 */
2323 if (proc_ref(q, true) != q) {
2324 continue;
2325 }
2326
2327 proc_list_unlock();
2328
2329 opp = proc_find(q->p_oppid);
2330 if (opp != PROC_NULL) {
2331 proc_list_lock();
2332 q->p_oppid = 0;
2333 proc_list_unlock();
2334 proc_reparentlocked(q, opp, 0, 0);
2335 proc_rele(opp);
2336 } else {
2337 /* original parent exited while traced */
2338 proc_list_lock();
2339 q->p_listflag |= P_LIST_DEADPARENT;
2340 q->p_oppid = 0;
2341 proc_list_unlock();
2342 proc_reparentlocked(q, initproc, 0, 0);
2343 }
2344
2345 proc_lock(q);
2346 q->p_lflag &= ~P_LTRACED;
2347
2348 if (q->sigwait_thread) {
2349 thread_t thread = q->sigwait_thread;
2350
2351 proc_unlock(q);
2352 /*
2353 * The sigwait_thread could be stopped at a
2354 * breakpoint. Wake it up to kill.
2355 * Need to do this as it could be a thread which is not
2356 * the first thread in the task, so any attempt to kill
2357 * the process would result in a deadlock on q->sigwait.
2358 */
2359 thread_resume(thread);
2360 clear_wait(thread, THREAD_INTERRUPTED);
2361 threadsignal(thread, SIGKILL, 0, TRUE);
2362 } else {
2363 proc_unlock(q);
2364 }
2365
2366 psignal(q, SIGKILL);
2367 proc_list_lock();
2368 proc_rele(q);
2369 } else {
2370 q->p_listflag |= P_LIST_DEADPARENT;
2371 proc_reparentlocked(q, initproc, 0, 1);
2372 }
2373 }
2374 }
2375
2376 proc_childdrainend(p);
2377 proc_list_unlock();
2378
2379 #if CONFIG_MACF
2380 if (!proc_is_shadow(p)) {
2381 /*
2382 * Notify MAC policies that proc is dead.
2383 * This should be replaced with proper label management
2384 * (rdar://problem/32126399).
2385 */
2386 mac_proc_notify_exit(p);
2387 }
2388 #endif
2389
2390 /*
2391 * Release reference to text vnode
2392 */
2393 tvp = p->p_textvp;
2394 p->p_textvp = NULL;
2395 if (tvp != NULLVP) {
2396 vnode_rele(tvp);
2397 }
2398
2399 /*
2400 * Save exit status and final rusage info, adding in child rusage
2401 * info and self times. If we were unable to allocate a zombie
2402 * structure, this information is lost.
2403 */
2404 if (p->p_ru != NULL) {
2405 calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2406 p->p_ru->ru = p->p_stats->p_ru;
2407
2408 ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2409 }
2410
2411 /*
2412 * Free up profiling buffers.
2413 */
2414 {
2415 struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2416
2417 p1 = p0->pr_next;
2418 p0->pr_next = NULL;
2419 p0->pr_scale = 0;
2420
2421 for (; p1 != NULL; p1 = pn) {
2422 pn = p1->pr_next;
2423 kfree_type(struct uprof, p1);
2424 }
2425 }
2426
2427 proc_free_realitimer(p);
2428
2429 /*
2430 * Other substructures are freed from wait().
2431 */
2432 zfree(proc_stats_zone, p->p_stats);
2433 p->p_stats = NULL;
2434
2435 zfree_ro(ZONE_ID_PROC_SIGACTS_RO, p->p_sigacts.ps_ro);
2436
2437 proc_limitdrop(p);
2438
2439 #if DEVELOPMENT || DEBUG
2440 proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2441 #endif
2442
2443 /*
2444 * Finish up by terminating the task
2445 * and halting this thread (only if it is
2446 * a member of the exiting task).
2447 */
2448 proc_set_task(p, TASK_NULL);
2449 set_bsdtask_info(task, NULL);
2450 clear_thread_ro_proc(get_machthread(uth));
2451
2452 #if DEVELOPMENT || DEBUG
2453 proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2454 #endif
2455
2456 knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2457 proc_knote(p, knote_hint);
2458
2459 /* mark the thread as the one that is doing proc_exit
2460 * no need to hold proc lock in uthread_free
2461 */
2462 uth->uu_flag |= UT_PROCEXIT;
2463 /*
2464 * Notify parent that we're gone.
2465 */
2466 pp = proc_parent(p);
2467 if (proc_is_shadow(p)) {
2468 /* kernel can reap this one, no need to move it to launchd */
2469 proc_list_lock();
2470 p->p_listflag |= P_LIST_DEADPARENT;
2471 proc_list_unlock();
2472 } else if (pp->p_flag & P_NOCLDWAIT) {
2473 if (p->p_ru != NULL) {
2474 proc_lock(pp);
2475 #if 3839178
2476 /*
2477 * If the parent is ignoring SIGCHLD, then POSIX requires
2478 * us to not add the resource usage to the parent process -
2479 * we are only going to hand it off to init to get reaped.
2480 * We should contest the standard in this case on the basis
2481 * of RLIMIT_CPU.
2482 */
2483 #else /* !3839178 */
2484 /*
2485 * Add child resource usage to parent before giving
2486 * zombie to init. If we were unable to allocate a
2487 * zombie structure, this information is lost.
2488 */
2489 ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2490 #endif /* !3839178 */
2491 update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2492 proc_unlock(pp);
2493 }
2494
2495 /* kernel can reap this one, no need to move it to launchd */
2496 proc_list_lock();
2497 p->p_listflag |= P_LIST_DEADPARENT;
2498 proc_list_unlock();
2499 }
2500 if (!proc_is_shadow(p) &&
2501 ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2502 if (pp != initproc) {
2503 proc_lock(pp);
2504 pp->si_pid = proc_getpid(p);
2505 pp->p_xhighbits = p->p_xhighbits;
2506 p->p_xhighbits = 0;
2507 pp->si_status = p->p_xstat;
2508 pp->si_code = CLD_EXITED;
2509 /*
2510 * p_ucred usage is safe as it is an exiting process
2511 * and reference is dropped in reap
2512 */
2513 pp->si_uid = kauth_cred_getruid(proc_ucred(p));
2514 proc_unlock(pp);
2515 }
2516 /* mark as a zombie */
2517 /* No need to take proc lock as all refs are drained and
2518 * no one except parent (reaping) can look at this.
2519 * The write is to an int and is coherent. Also parent is
2520 * keyed off of list lock for reaping
2521 */
2522 DTRACE_PROC2(exited, proc_t, p, int, exitval);
2523 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2524 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2525 pid, exitval, 0, 0, 0);
2526 p->p_stat = SZOMB;
2527 /*
2528 * The current process can be reaped, so no one
2529 * can depend on it.
2530 */
2531
2532 psignal(pp, SIGCHLD);
2533
2534 /* and now wakeup the parent */
2535 proc_list_lock();
2536 wakeup((caddr_t)pp);
2537 proc_list_unlock();
2538 } else {
2539 /* should be fine as parent proc would be initproc */
2540 /* mark as a zombie */
2541 /* No need to take proc lock as all refs are drained and
2542 * no one except parent (reaping) can look at this.
2543 * The write is to an int and is coherent. Also parent is
2544 * keyed off of list lock for reaping
2545 */
2546 DTRACE_PROC2(exited, proc_t, p, int, exitval);
2547 proc_list_lock();
2548 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2549 BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2550 pid, exitval, 0, 0, 0);
2551 /* check for sysctl zomb lookup */
2552 while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2553 msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2554 }
2555 /* safe to use p as this is a system reap */
2556 p->p_stat = SZOMB;
2557 p->p_listflag |= P_LIST_WAITING;
2558
2559 /*
2560 * This is a named reference and it is not granted
2561 * if the reap is already in progress. So we get
2562 * the reference here exclusively and there can be
2563 * no waiters, so there is no need for a wakeup
2564 * after we are done. Also, the reap frees the structure,
2565 * so the proc struct cannot be used for wakeups either.
2566 * It is safe to use p here as this is a system reap.
2567 */
2568 reap_child_locked(pp, p,
2569 REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2570 }
2571 if (uth->uu_lowpri_window) {
2572 /*
2573 * task is marked as a low priority I/O type and we've
2574 * somehow picked up another throttle during exit processing...
2575 * no need to throttle this thread since it's going away,
2576 * but we do need to update our bookkeeping w.r.t. throttled threads
2577 */
2578 throttle_lowpri_io(0);
2579 }
2580
2581 proc_rele(pp);
2582 #if DEVELOPMENT || DEBUG
2583 proc_exit_lpexit_check(pid, PELS_POS_END);
2584 #endif
2585 }
2586
2587
2588 /*
2589 * reap_child_locked
2590 *
2591 * Finalize a child exit once its status has been saved.
2592 *
2593 * If ptrace has attached, detach it and return it to its real parent. Free any
2594 * remaining resources.
2595 *
2596 * Parameters:
2597 * - proc_t parent Parent of process being reaped
2598 * - proc_t child Process to reap
2599 * - reap_flags_t flags Control locking and re-parenting behavior
2600 */
2601 static void
2602 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2603 {
2604 struct pgrp *pg;
2605 kauth_cred_t cred;
2606 boolean_t shadow_proc = proc_is_shadow(child);
2607
2608 if (flags & REAP_LOCKED) {
2609 proc_list_unlock();
2610 }
2611
2612 /*
2613 * Under ptrace, the child should now be re-parented back to its original
2614 * parent, unless that parent is initproc and the child only came to be
2615 * initproc's child through re-parenting.
2616 */
2617 bool child_ptraced = child->p_oppid != 0;
2618 if (!shadow_proc && child_ptraced) {
2619 int knote_hint;
2620 pid_t orig_ppid = 0;
2621 proc_t orig_parent = PROC_NULL;
2622
2623 proc_lock(child);
2624 orig_ppid = child->p_oppid;
2625 child->p_oppid = 0;
2626 knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2627 proc_unlock(child);
2628
2629 orig_parent = proc_find(orig_ppid);
2630 if (orig_parent) {
2631 /*
2632 * Re-parent the process unless its original parent was initproc
2633 * and it only became initproc's child through re-parenting.
2634 */
2635 bool reparenting = orig_parent != initproc ||
2636 (flags & REAP_REPARENTED_TO_INIT) == 0;
2637 if (reparenting) {
2638 if (orig_parent != initproc) {
2639 /*
2640 * Internal fields should be safe to access here because the
2641 * child is exited and not reaped or re-parented yet.
2642 */
2643 proc_lock(orig_parent);
2644 orig_parent->si_pid = proc_getpid(child);
2645 orig_parent->si_status = child->p_xstat;
2646 orig_parent->si_code = CLD_CONTINUED;
2647 orig_parent->si_uid = kauth_cred_getruid(proc_ucred(child));
2648 proc_unlock(orig_parent);
2649 }
2650 proc_reparentlocked(child, orig_parent, 1, 0);
2651
2652 /*
2653 * After re-parenting, re-send the child's NOTE_EXIT to the
2654 * original parent.
2655 */
2656 proc_knote(child, knote_hint);
2657 psignal(orig_parent, SIGCHLD);
2658
2659 proc_list_lock();
2660 wakeup((caddr_t)orig_parent);
2661 child->p_listflag &= ~P_LIST_WAITING;
2662 wakeup(&child->p_stat);
2663 proc_list_unlock();
2664
2665 proc_rele(orig_parent);
2666 if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2667 proc_list_lock();
2668 }
2669 return;
2670 } else {
2671 /*
2672 * Satisfy the knote lifecycle because ptraced processes don't
2673 * broadcast NOTE_EXIT during initial child termination.
2674 */
2675 proc_knote(child, knote_hint);
2676 proc_rele(orig_parent);
2677 }
2678 }
2679 }
2680
2681 #pragma clang diagnostic push
2682 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2683 proc_knote(child, NOTE_REAP);
2684 #pragma clang diagnostic pop
2685
2686 proc_knote_drain(child);
2687
2688 child->p_xstat = 0;
2689 if (!shadow_proc && child->p_ru) {
2690 /*
2691 * Roll up the rusage statistics to the parent, unless the parent is
2692 * ignoring SIGCHLD. POSIX requires the children's resources of such a
2693 * parent to not be included in the parent's usage (seems odd given
2694 * RLIMIT_CPU, though).
2695 */
2696 proc_lock(parent);
2697 bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2698 if (rollup_child) {
2699 ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2700 }
2701 update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2702 proc_unlock(parent);
2703 zfree(zombie_zone, child->p_ru);
2704 child->p_ru = NULL;
2705 } else if (!shadow_proc) {
2706 printf("Warning : lost p_ru for %s\n", child->p_comm);
2707 } else {
2708 assert(child->p_ru == NULL);
2709 }
2710
2711 AUDIT_SESSION_PROCEXIT(child);
2712
2713 #if CONFIG_PERSONAS
2714 persona_proc_drop(child);
2715 #endif /* CONFIG_PERSONAS */
2716 (void)chgproccnt(kauth_cred_getruid(proc_ucred(child)), -1);
2717
2718 os_reason_free(child->p_exit_reason);
2719
2720 proc_list_lock();
2721
2722 pg = pgrp_leave_locked(child);
2723 LIST_REMOVE(child, p_list);
2724 parent->p_childrencnt--;
2725 LIST_REMOVE(child, p_sibling);
2726 bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2727 LIST_EMPTY(&parent->p_children);
2728 if (no_more_children) {
2729 wakeup((caddr_t)parent);
2730 }
2731 child->p_listflag &= ~P_LIST_WAITING;
2732 wakeup(&child->p_stat);
2733
2734 /* Take it out of process hash */
2735 if (!shadow_proc) {
2736 phash_remove_locked(proc_getpid(child), child);
2737 }
2738 proc_checkdeadrefs(child);
2739 nprocs--;
2740 if (flags & REAP_DEAD_PARENT) {
2741 child->p_listflag |= P_LIST_DEADPARENT;
2742 }
2743 cred = proc_ucred(child);
2744 child->p_proc_ro = proc_ro_release_proc(child->p_proc_ro);
2745
2746 proc_list_unlock();
2747
2748 pgrp_rele(pg);
2749 if (child->p_proc_ro != NULL) {
2750 proc_ro_free(child->p_proc_ro);
2751 child->p_proc_ro = NULL;
2752 }
2753 kauth_cred_set(&cred, NOCRED);
2754 fdt_destroy(child);
2755 lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2756 lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2757 #if CONFIG_DTRACE
2758 lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2759 #endif
2760 lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2761 proc_wait_release(child);
2762
2763 if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2764 proc_list_lock();
2765 }
2766 }
2767
2768 int
2769 wait1continue(int result)
2770 {
2771 proc_t p;
2772 thread_t thread;
2773 uthread_t uth;
2774 struct _wait4_data *wait4_data;
2775 struct wait4_nocancel_args *uap;
2776 int *retval;
2777
2778 if (result) {
2779 return result;
2780 }
2781
2782 p = current_proc();
2783 thread = current_thread();
2784 uth = (struct uthread *)get_bsdthread_info(thread);
2785
2786 wait4_data = &uth->uu_save.uus_wait4_data;
2787 uap = wait4_data->args;
2788 retval = wait4_data->retval;
2789 return wait4_nocancel(p, uap, retval);
2790 }
2791
2792 int
2793 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2794 {
2795 __pthread_testcancel(1);
2796 return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2797 }
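/*
 * Illustrative only (userspace): classic usage of this syscall via libc:
 *
 *     int status;
 *     struct rusage ru;
 *     pid_t pid = wait4(-1, &status, WNOHANG | WUNTRACED, &ru);
 *
 * pid == 0 with WNOHANG means children exist but none has changed state.
 */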
2798
2799 int
2800 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2801 {
2802 int nfound;
2803 int sibling_count;
2804 proc_t p;
2805 int status, error;
2806 uthread_t uth;
2807 struct _wait4_data *wait4_data;
2808
2809 AUDIT_ARG(pid, uap->pid);
2810
2811 if (uap->pid == 0) {
2812 uap->pid = -q->p_pgrpid;
2813 }
2814
2815 if (uap->pid == INT_MIN) {
2816 return EINVAL;
2817 }
2818
2819 loop:
2820 proc_list_lock();
2821 loop1:
2822 nfound = 0;
2823 sibling_count = 0;
2824
2825 PCHILDREN_FOREACH(q, p) {
2826 if (p->p_sibling.le_next != 0) {
2827 sibling_count++;
2828 }
2829 if (uap->pid != WAIT_ANY &&
2830 proc_getpid(p) != uap->pid &&
2831 p->p_pgrpid != -(uap->pid)) {
2832 continue;
2833 }
2834
2835 if (proc_is_shadow(p)) {
2836 continue;
2837 }
2838
2839 nfound++;
2840
2841 /* XXX This is racy because we don't get the lock!!!! */
2842
2843 if (p->p_listflag & P_LIST_WAITING) {
2844 /* we're not using a continuation here but we still need to stash
2845 * the args for stackshot. */
2846 uth = current_uthread();
2847 wait4_data = &uth->uu_save.uus_wait4_data;
2848 wait4_data->args = uap;
2849 thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2850
2851 (void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2852 goto loop1;
2853 }
2854 p->p_listflag |= P_LIST_WAITING; /* only allow single thread to wait() */
2855
2856
2857 if (p->p_stat == SZOMB) {
2858 reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2859 REAP_REPARENTED_TO_INIT : 0;
2860
2861 proc_list_unlock();
2862 #if CONFIG_MACF
2863 if ((error = mac_proc_check_wait(q, p)) != 0) {
2864 goto out;
2865 }
2866 #endif
2867 retval[0] = proc_getpid(p);
2868 if (uap->status) {
2869 /* Legacy apps expect only the low 16 bits of status */
2870 status = 0xffff & p->p_xstat; /* convert to int */
2871 error = copyout((caddr_t)&status,
2872 uap->status,
2873 sizeof(status));
2874 if (error) {
2875 goto out;
2876 }
2877 }
2878 if (uap->rusage) {
2879 if (p->p_ru == NULL) {
2880 error = ENOMEM;
2881 } else {
2882 if (IS_64BIT_PROCESS(q)) {
2883 struct user64_rusage my_rusage = {};
2884 munge_user64_rusage(&p->p_ru->ru, &my_rusage);
2885 error = copyout((caddr_t)&my_rusage,
2886 uap->rusage,
2887 sizeof(my_rusage));
2888 } else {
2889 struct user32_rusage my_rusage = {};
2890 munge_user32_rusage(&p->p_ru->ru, &my_rusage);
2891 error = copyout((caddr_t)&my_rusage,
2892 uap->rusage,
2893 sizeof(my_rusage));
2894 }
2895 }
2896 /* information unavailable? */
2897 if (error) {
2898 goto out;
2899 }
2900 }
2901
2902 /* Conformance change for 6577252.
2903 * When SIGCHLD is blocked and wait() returns because the status
2904 * of a child process is available and there are no other
2905 * child processes, then any pending SIGCHLD signal is cleared.
2906 */
2907 if (sibling_count == 0) {
2908 int mask = sigmask(SIGCHLD);
2909 uth = current_uthread();
2910
2911 if ((uth->uu_sigmask & mask) != 0) {
2912 /* we are blocking SIGCHLD signals. clear any pending SIGCHLD.
2913 * This locking looks funny but it is protecting access to the
2914 * thread via p_uthlist.
2915 */
2916 proc_lock(q);
2917 uth->uu_siglist &= ~mask; /* clear pending signal */
2918 proc_unlock(q);
2919 }
2920 }
2921
2922 /* Clean up */
2923 (void)reap_child_locked(q, p, reap_flags);
2924
2925 return 0;
2926 }
2927 if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
2928 (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
2929 proc_list_unlock();
2930 #if CONFIG_MACF
2931 if ((error = mac_proc_check_wait(q, p)) != 0) {
2932 goto out;
2933 }
2934 #endif
2935 proc_lock(p);
2936 p->p_lflag |= P_LWAITED;
2937 proc_unlock(p);
2938 retval[0] = proc_getpid(p);
2939 if (uap->status) {
2940 status = W_STOPCODE(p->p_xstat);
2941 error = copyout((caddr_t)&status,
2942 uap->status,
2943 sizeof(status));
2944 } else {
2945 error = 0;
2946 }
2947 goto out;
2948 }
2949 /*
2950 * If we are waiting for continued processes and this
2951 * process was continued.
2952 */
2953 if ((uap->options & WCONTINUED) &&
2954 (p->p_flag & P_CONTINUED)) {
2955 proc_list_unlock();
2956 #if CONFIG_MACF
2957 if ((error = mac_proc_check_wait(q, p)) != 0) {
2958 goto out;
2959 }
2960 #endif
2961
2962 /* Prevent other processes from waiting for this event */
2963 OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
2964 retval[0] = proc_getpid(p);
2965 if (uap->status) {
2966 status = W_STOPCODE(SIGCONT);
2967 error = copyout((caddr_t)&status,
2968 uap->status,
2969 sizeof(status));
2970 } else {
2971 error = 0;
2972 }
2973 goto out;
2974 }
2975 p->p_listflag &= ~P_LIST_WAITING;
2976 wakeup(&p->p_stat);
2977 }
2978 /* list lock is held when we get here any which way */
2979 if (nfound == 0) {
2980 proc_list_unlock();
2981 return ECHILD;
2982 }
2983
2984 if (uap->options & WNOHANG) {
2985 retval[0] = 0;
2986 proc_list_unlock();
2987 return 0;
2988 }
2989
2990 /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */
2991 uth = current_uthread();
2992 wait4_data = &uth->uu_save.uus_wait4_data;
2993 wait4_data->args = uap;
2994 wait4_data->retval = retval;
2995
2996 thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2997 if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
2998 return error;
2999 }
3000
3001 goto loop;
3002 out:
3003 proc_list_lock();
3004 p->p_listflag &= ~P_LIST_WAITING;
3005 wakeup(&p->p_stat);
3006 proc_list_unlock();
3007 return error;
3008 }
3009
3010 #if DEBUG
3011 #define ASSERT_LCK_MTX_OWNED(lock) \
3012 lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
3013 #else
3014 #define ASSERT_LCK_MTX_OWNED(lock) /* nothing */
3015 #endif
3016
3017 int
3018 waitidcontinue(int result)
3019 {
3020 proc_t p;
3021 thread_t thread;
3022 uthread_t uth;
3023 struct _waitid_data *waitid_data;
3024 struct waitid_nocancel_args *uap;
3025 int *retval;
3026
3027 if (result) {
3028 return result;
3029 }
3030
3031 p = current_proc();
3032 thread = current_thread();
3033 uth = (struct uthread *)get_bsdthread_info(thread);
3034
3035 waitid_data = &uth->uu_save.uus_waitid_data;
3036 uap = waitid_data->args;
3037 retval = waitid_data->retval;
3038 return waitid_nocancel(p, uap, retval);
3039 }
3040
3041 /*
3042 * Description: Suspend the calling thread until one child of the process
3043 * containing the calling thread changes state.
3044 *
3045 * Parameters: uap->idtype one of P_PID, P_PGID, P_ALL
3046 * uap->id pid_t or gid_t or ignored
3047 * uap->infop Address of siginfo_t struct in
3048 * user space into which to return status
3049 * uap->options flag values
3050 *
3051 * Returns: 0 Success
3052 * !0 Error returning status to user space
3053 */
3054 int
3055 waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
3056 {
3057 __pthread_testcancel(1);
3058 return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
3059 }
3060
3061 int
3062 waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
3063 __unused int32_t *retval)
3064 {
3065 user_siginfo_t siginfo; /* siginfo data to return to caller */
3066 boolean_t caller64 = IS_64BIT_PROCESS(q);
3067 int nfound;
3068 proc_t p;
3069 int error;
3070 uthread_t uth;
3071 struct _waitid_data *waitid_data;
3072
3073 if (uap->options == 0 ||
3074 (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
3075 return EINVAL; /* bits set that aren't recognized */
3076 }
3077 switch (uap->idtype) {
3078 case P_PID: /* child with process ID equal to... */
3079 case P_PGID: /* child with process group ID equal to... */
3080 if (((int)uap->id) < 0) {
3081 return EINVAL;
3082 }
3083 break;
3084 case P_ALL: /* any child */
3085 break;
3086 }
3087
3088 loop:
3089 proc_list_lock();
3090 loop1:
3091 nfound = 0;
3092
3093 PCHILDREN_FOREACH(q, p) {
3094 switch (uap->idtype) {
3095 case P_PID: /* child with process ID equal to... */
3096 if (proc_getpid(p) != (pid_t)uap->id) {
3097 continue;
3098 }
3099 break;
3100 case P_PGID: /* child with process group ID equal to... */
3101 if (p->p_pgrpid != (pid_t)uap->id) {
3102 continue;
3103 }
3104 break;
3105 case P_ALL: /* any child */
3106 break;
3107 }
3108
3109 if (proc_is_shadow(p)) {
3110 continue;
3111 }
3112 /* XXX This is racy because we don't get the lock!!!! */
3113
3114 /*
3115 * Wait collision; go to sleep and restart; used to maintain
3116 * the single return for waited process guarantee.
3117 */
3118 if (p->p_listflag & P_LIST_WAITING) {
3119 (void) msleep(&p->p_stat, &proc_list_mlock,
3120 PWAIT, "waitidcoll", 0);
3121 goto loop1;
3122 }
3123 p->p_listflag |= P_LIST_WAITING; /* mark busy */
3124
3125 nfound++;
3126
3127 bzero(&siginfo, sizeof(siginfo));
3128
3129 switch (p->p_stat) {
3130 case SZOMB: /* Exited */
3131 if (!(uap->options & WEXITED)) {
3132 break;
3133 }
3134 proc_list_unlock();
3135 #if CONFIG_MACF
3136 if ((error = mac_proc_check_wait(q, p)) != 0) {
3137 goto out;
3138 }
3139 #endif
3140 siginfo.si_signo = SIGCHLD;
3141 siginfo.si_pid = proc_getpid(p);
3142
3143 /* If the child terminated abnormally due to a signal, the signum
3144 * needs to be preserved in the exit status.
3145 */
3146 if (WIFSIGNALED(p->p_xstat)) {
3147 siginfo.si_code = WCOREDUMP(p->p_xstat) ?
3148 CLD_DUMPED : CLD_KILLED;
3149 siginfo.si_status = WTERMSIG(p->p_xstat);
3150 } else {
3151 siginfo.si_code = CLD_EXITED;
3152 siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
3153 }
3154 siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
3155 p->p_xhighbits = 0;
3156
3157 if ((error = copyoutsiginfo(&siginfo,
3158 caller64, uap->infop)) != 0) {
3159 goto out;
3160 }
3161
3162 /* Prevent other processes from waiting for this event? */
3163 if (!(uap->options & WNOWAIT)) {
3164 reap_child_locked(q, p, 0);
3165 return 0;
3166 }
3167 goto out;
3168
3169 case SSTOP: /* Stopped */
3170 /*
3171 * If we are not interested in stopped processes, then
3172 * ignore this one.
3173 */
3174 if (!(uap->options & WSTOPPED)) {
3175 break;
3176 }
3177
3178 /*
3179 * If someone has already waited it, we lost a race
3180 * to be the one to return status.
3181 */
3182 if ((p->p_lflag & P_LWAITED) != 0) {
3183 break;
3184 }
3185 proc_list_unlock();
3186 #if CONFIG_MACF
3187 if ((error = mac_proc_check_wait(q, p)) != 0) {
3188 goto out;
3189 }
3190 #endif
3191 siginfo.si_signo = SIGCHLD;
3192 siginfo.si_pid = proc_getpid(p);
3193 siginfo.si_status = p->p_xstat; /* signal number */
3194 siginfo.si_code = CLD_STOPPED;
3195
3196 if ((error = copyoutsiginfo(&siginfo,
3197 caller64, uap->infop)) != 0) {
3198 goto out;
3199 }
3200
3201 /* Prevent other process for waiting for this event? */
3202 if (!(uap->options & WNOWAIT)) {
3203 proc_lock(p);
3204 p->p_lflag |= P_LWAITED;
3205 proc_unlock(p);
3206 }
3207 goto out;
3208
3209 default: /* All other states => Continued */
3210 if (!(uap->options & WCONTINUED)) {
3211 break;
3212 }
3213
3214 /*
3215 * If the flag isn't set, then this process has not
3216 * been stopped and continued, or the status has
3217 * already been reaped by another caller of waitid().
3218 */
3219 if ((p->p_flag & P_CONTINUED) == 0) {
3220 break;
3221 }
3222 proc_list_unlock();
3223 #if CONFIG_MACF
3224 if ((error = mac_proc_check_wait(q, p)) != 0) {
3225 goto out;
3226 }
3227 #endif
3228 siginfo.si_signo = SIGCHLD;
3229 siginfo.si_code = CLD_CONTINUED;
3230 proc_lock(p);
3231 siginfo.si_pid = p->p_contproc;
3232 siginfo.si_status = p->p_xstat;
3233 proc_unlock(p);
3234
3235 if ((error = copyoutsiginfo(&siginfo,
3236 caller64, uap->infop)) != 0) {
3237 goto out;
3238 }
3239
3240 /* Prevent other process for waiting for this event? */
3241 if (!(uap->options & WNOWAIT)) {
3242 OSBitAndAtomic(~((uint32_t)P_CONTINUED),
3243 &p->p_flag);
3244 }
3245 goto out;
3246 }
3247 ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3248
3249 /* Not a process we are interested in; go on to next child */
3250
3251 p->p_listflag &= ~P_LIST_WAITING;
3252 wakeup(&p->p_stat);
3253 }
3254 ASSERT_LCK_MTX_OWNED(&proc_list_mlock);
3255
3256 /* No child processes that could possibly satisfy the request? */
3257
3258 if (nfound == 0) {
3259 proc_list_unlock();
3260 return ECHILD;
3261 }
3262
3263 if (uap->options & WNOHANG) {
3264 proc_list_unlock();
3265 #if CONFIG_MACF
3266 if ((error = mac_proc_check_wait(q, p)) != 0) {
3267 return error;
3268 }
3269 #endif
3270 /*
3271 * The state of the siginfo structure in this case
3272 * is undefined. Some implementations bzero it, some
3273 * (like here) leave it untouched for efficiency.
3274 *
3275 * Thus the most portable check for "no matching pid with
3276 * WNOHANG" is to store a zero into si_pid before
3277 * invocation, then check for a non-zero value afterwards.
3278 */
3279 return 0;
3280 }
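
	/*
	 * For illustration only (userspace, not kernel code): a minimal
	 * sketch of the portable WNOHANG check described above, assuming
	 * only the standard waitid(2)/siginfo_t interface:
	 *
	 *	siginfo_t si;
	 *	si.si_pid = 0;		// sentinel, cleared before the call
	 *	if (waitid(P_ALL, 0, &si, WEXITED | WNOHANG) == 0 &&
	 *	    si.si_pid != 0) {
	 *		// a child really changed state; si is valid
	 *	}
	 */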

	/*
	 * Save the arguments for the continuation.  Backing storage is in
	 * uthread->uu_arg and will not be deallocated, so waitidcontinue()
	 * can re-issue the wait after the continuation-based sleep below.
	 */
	uth = current_uthread();
	waitid_data = &uth->uu_save.uus_waitid_data;
	waitid_data->args = uap;
	waitid_data->retval = retval;

	if ((error = msleep0(q, &proc_list_mlock,
	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
		return error;
	}

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return error;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 */
void
proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
{
	proc_t oldparent = PROC_NULL;

	if (child->p_pptr == parent) {
		return;
	}

	if (locked == 0) {
		proc_list_lock();
	}

	oldparent = child->p_pptr;
#if __PROC_INTERNAL_DEBUG
	if (oldparent == PROC_NULL) {
		panic("proc_reparent: process %p does not have a parent", child);
	}
#endif

	LIST_REMOVE(child, p_sibling);
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt == 0) {
		panic("process children count already 0");
	}
#endif
	oldparent->p_childrencnt--;
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt < 0) {
		panic("process children count went negative");
	}
#endif
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	parent->p_childrencnt++;
	child->p_pptr = parent;
	child->p_ppid = proc_getpid(parent);

	proc_list_unlock();

	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
		psignal(initproc, SIGCHLD);
	}
	if (locked == 1) {
		proc_list_lock();
	}
}
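
/*
 * For illustration only (hypothetical caller, not code from this file):
 * the 'locked' parameter encodes the caller's lock state.  A caller that
 * does not hold proc_list_lock passes locked == 0 and the function takes
 * and drops the lock itself; a caller that already holds it passes
 * locked == 1, in which case the lock is still dropped internally around
 * the psignal() but re-taken before returning:
 *
 *	proc_list_lock();
 *	...
 *	proc_reparentlocked(child, initproc, 0, 1);	// lock still held on return
 *	...
 *	proc_list_unlock();
 */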

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */


/*
 * munge_user64_rusage
 * LP64 support - 'long' is 64 bits if we are dealing with a 64 bit user
 * process.  We munge the kernel version of rusage into the 64 bit version.
 */
__private_extern__ void
munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
{
	/* Zero out the struct so that padding is cleared */
	bzero(a_user_rusage_p, sizeof(struct user64_rusage));

	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * Everything else can be a direct assign, since there is no loss
	 * of precision implied going from 32 to 64 bits.
	 */
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}

/* For a 64-bit kernel and 32-bit userspace, munging may be needed */
__private_extern__ void
munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
{
	bzero(a_user_rusage_p, sizeof(struct user32_rusage));

	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * Everything else can be a direct assign.  We currently ignore
	 * the loss of precision.
	 */
	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
}
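
/*
 * For illustration only, a hedged sketch (the locals 'kr', 'u32', and
 * 'u64' are hypothetical): callers on the wait4/getrusage paths are
 * expected to pick the munger by the target process's bitness and copy
 * out the correspondingly sized structure:
 *
 *	struct rusage kr;			// kernel-native rusage
 *	if (IS_64BIT_PROCESS(p)) {
 *		struct user64_rusage u64;
 *		munge_user64_rusage(&kr, &u64);
 *		error = copyout(&u64, uap->rusage, sizeof(u64));
 *	} else {
 *		struct user32_rusage u32;
 *		munge_user32_rusage(&kr, &u32);
 *		error = copyout(&u32, uap->rusage, sizeof(u32));
 *	}
 */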

void
kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
{
	assert(thread != NULL);
	assert(waitinfo != NULL);

	struct uthread *ut = get_bsdthread_info(thread);
	waitinfo->context = 0;
	// ensure wmesg is consistent with a thread waiting in wait4
	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
	// May not actually contain a pid; this is just the argument to wait4.
	// See man wait4 for other valid wait4 arguments.
	waitinfo->owner = args->pid;
}

int
exit_with_guard_exception(
	proc_t p,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode)
{
	os_reason_t reason = os_reason_create(OS_REASON_GUARD, (uint64_t)code);
	assert(reason != OS_REASON_NULL);

	return exit_with_mach_exception(p, reason, EXC_GUARD, code, subcode);
}

#if __has_feature(ptrauth_calls)
int
exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_code_t code,
    mach_exception_subcode_t subcode)
{
	os_reason_t reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code);
	assert(reason != OS_REASON_NULL);

	return exit_with_mach_exception(p, reason, exception, code, subcode);
}
#endif /* __has_feature(ptrauth_calls) */

int
exit_with_port_space_exception(proc_t p, mach_exception_data_type_t code,
    mach_exception_data_type_t subcode)
{
	os_reason_t reason = os_reason_create(OS_REASON_PORT_SPACE, (uint64_t)code);
	assert(reason != OS_REASON_NULL);

	return exit_with_mach_exception(p, reason, EXC_RESOURCE, code, subcode);
}

static int
exit_with_mach_exception(proc_t p, os_reason_t reason, exception_type_t exception, mach_exception_code_t code,
    mach_exception_subcode_t subcode)
{
	thread_t self = current_thread();
	struct uthread *ut = get_bsdthread_info(self);

	ut->uu_exception = exception;
	ut->uu_code = code;
	ut->uu_subcode = subcode;

	reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
	return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
	    TRUE, FALSE, 0, reason);
}

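/*
 * For illustration only (OS_REASON_FOO and EXC_FOO are hypothetical
 * placeholders, not real constants): each exit_with_*_exception entry
 * point above follows the same pattern -- create an os_reason in the
 * matching domain, then delegate to exit_with_mach_exception(), which
 * records the Mach exception triple in the current uthread and forces a
 * SIGKILL-coded exit with a crash report:
 *
 *	int
 *	exit_with_foo_exception(proc_t p, mach_exception_data_type_t code,
 *	    mach_exception_data_type_t subcode)
 *	{
 *		os_reason_t reason = os_reason_create(OS_REASON_FOO, (uint64_t)code);
 *		assert(reason != OS_REASON_NULL);
 *		return exit_with_mach_exception(p, reason, EXC_FOO, code, subcode);
 *	}
 */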