/*
 * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


/*
 * Corpses Overview
 * ================
 *
 * A corpse is the state of a process past the point of its death. The process has completed all of
 * its termination operations, such as releasing file descriptors, mach ports, sockets and other
 * constructs used to identify it. To all other processes it appears as if the process has died and
 * is no longer available by any means.
 *
 * Why do we need Corpses?
 * -----------------------
 * For crash inspection we need to inspect the state and data associated with the process so that
 * the crash reporting infrastructure can build backtraces, find leaks, etc.
 *
 * Corpses functionality in kernel
 * ===============================
 * The corpse functionality is an extension of the existing exception reporting mechanisms. The
 * exception_triage calls try to deliver the first round of exceptions, allowing
 * task/debugger/ReportCrash/launchd level exception handlers to respond to the exception. If the
 * exception is still unhandled after notification, then the process begins its death operations
 * and, during proc_prepareexit, we decide to create a corpse for inspection. Following is a sample
 * run through of the events and data shuffling that happen when corpses are enabled (a userspace
 * registration sketch follows the list).
 *
 * * A process causes an exception during normal execution of threads.
 * * The exception generated by either the mach (e.g. GUARDED_MACHPORT) or the bsd (e.g. SIGABRT,
 * GUARDED_FD etc.) side is passed through the exception_triage() function to follow the
 * thread -> task -> host level exception handling system. This set of steps is the same as before
 * and allows existing crash reporting systems (both internal and 3rd party) to catch and create
 * reports as required.
 * * If the above exception handling fails (nobody handles the notification), then the
 * proc_prepareexit path has logic to decide whether to create a corpse.
 * * The task_mark_corpse function allocates userspace vm memory and attaches the information
 * kcdata_descriptor_t to the task->corpse_info field of the task.
 * - All the task's threads are marked with the "inspection" flag which signals the termination
 * daemon not to reap them but to hold them until they have been inspected.
 * - The task flags t_flags reflect the corpse bit and also a PENDING_CORPSE bit. PENDING_CORPSE
 * prevents task_terminate from stripping important data from the task.
 * - It marks all the threads to terminate and return to AST for termination.
 * - The allocation logic takes into account the rate limiting policy of allowing only
 * `total_corpses_allowed` in flight.
 * * The proc exit thread continues and collects the required information in the allocated vm
 * region. Once complete it marks itself for termination.
 * * In thread_terminate_self(), the last thread to enter will make a call to proc_exit().
 * Following this is a check to see if the task is marked for corpse notification, which will
 * invoke task_deliver_crash_notification().
 * * Once EXC_CORPSE_NOTIFY is delivered, the PENDING_CORPSE flag is removed from the task (and
 * the inspection flag from all its threads), allowing task_terminate to go ahead and continue
 * the mach task termination process.
 * * ASIDE: The rest of the threads that reach thread_terminate_daemon() with the inspection
 * flag set are just bounced to another holding queue (crashed_threads_queue).
 * Only after the corpse notification are these pulled out of the holding queue and enqueued
 * back onto the termination queue.
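 *
 * Userspace opt-in sketch
 * -----------------------
 * A crash reporting agent receives corpses by registering an exception port for
 * EXC_CORPSE_NOTIFY. The following is a minimal, hypothetical userspace sketch (error
 * handling elided; host-level registration would additionally require the host_priv port):
 *
 *     mach_port_t exc_port = MACH_PORT_NULL;
 *     mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *     mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *         MACH_MSG_TYPE_MAKE_SEND);
 *     task_set_exception_ports(mach_task_self(), EXC_MASK_CORPSE_NOTIFY, exc_port,
 *         EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
 *     // a mach_msg() receive loop on exc_port then gets the corpse task port
 *     // as the task argument of the exception message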
 *
 *
 * Corpse info format
 * ==================
 * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
 * VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
 * subsystems, for example:
 * * the bsd proc exit path may write down the pid, parent pid, number of file descriptors etc.
 * * the mach side may append data regarding ledger usage, memory stats etc.
 * See detailed info about the memory structure and format in the kern_cdata.h documentation.
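 *
 * As a consumer-side illustration only (assuming the kcdata iterator helpers from kcdata.h
 * are available to the reader), a handler holding a send right to the corpse task could map
 * and walk the corpse info roughly like this:
 *
 *     mach_vm_address_t addr = 0;
 *     mach_vm_size_t size = 0;
 *     if (task_map_corpse_info_64(mach_task_self(), corpse_task, &addr, &size) == KERN_SUCCESS) {
 *         kcdata_iter_t iter = kcdata_iter((void *)addr, size);
 *         for (; kcdata_iter_valid(iter); iter = kcdata_iter_next(iter)) {
 *             uint32_t type = kcdata_iter_type(iter);   // e.g. TASK_CRASHINFO_PID
 *             // decode the item payload via kcdata_iter_payload() and kcdata_iter_size()
 *             (void)type;
 *         }
 *         mach_vm_deallocate(mach_task_self(), addr, size);
 *     }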
 *
 * Configuring Corpses functionality
 * =================================
 * boot-arg: -no_corpses disables corpse generation. This can be added/removed without affecting
 * any other subsystem.
 * DEFAULT_TOTAL_CORPSES_ALLOWED: Controls the number of corpse instances to be held for
 * inspection before allowing memory to be reclaimed by the system.
 * On a live system, the maximum corpse count can be reconfigured via the `kern.total_corpses_allowed` sysctl.
 * CORPSEINFO_ALLOCATION_SIZE: the default size of the vm allocation. If in the future there is much
 * more data to be put in, then please re-tune this parameter.
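 *
 * For example, a userspace tool could read (or, with sufficient privileges, set) that limit
 * via sysctlbyname(); a minimal sketch, not taken from any particular tool:
 *
 *     uint32_t allowed = 0;
 *     size_t len = sizeof(allowed);
 *     if (sysctlbyname("kern.total_corpses_allowed", &allowed, &len, NULL, 0) == 0) {
 *         printf("corpses allowed: %u\n", allowed);
 *     }
 *     // privileged write: sysctlbyname("kern.total_corpses_allowed", NULL, NULL,
 *     //                                &new_limit, sizeof(new_limit));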
 *
 * Debugging/Visibility
 * ====================
 * * lldbmacros for thread and task summary are updated to show a "C" flag for corpse tasks/threads.
 * * There are macros to see the list of threads in the termination queue (dumpthread_terminate_queue)
 * and in the holding queue (dumpcrashed_thread_queue).
 * * If corpse creation is disabled or ignored, the system log is updated with printf data
 * giving the reason.
 *
 * Limitations of Corpses
 * ======================
 * Holding memory for inspection creates vm pressure, which might not be desirable on low memory
 * devices. The maximum number of corpses being inspected at a time is limited by
 * `total_corpses_allowed`.
 *
 */

#include <stdatomic.h>
#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/host_priv.h>
#include <kern/host.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/policy_internal.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <corpses/task_corpse.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <mach/mach_vm.h>
#include <kern/exc_guard.h>
#include <os/log.h>
#include <sys/kdebug_triage.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map_xnu.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

/*
 * Exported interfaces
 */
#include <mach/task_server.h>

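/*
 * Gate that packs the in-flight corpse and user-fault counters into a single 32-bit
 * value so that both can be examined and updated together with one atomic
 * compare-exchange (see task_crashinfo_get_ref() / task_crashinfo_release_ref()).
 */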
union corpse_creation_gate {
	struct {
		uint16_t user_faults;
		uint16_t corpses;
	};
	uint32_t value;
};

static _Atomic uint32_t inflight_corpses;
unsigned long total_corpses_created = 0;

uint32_t total_corpses_allowed = DEFAULT_TOTAL_CORPSES_ALLOWED;

static TUNABLE(bool, corpses_disabled, "-no_corpses", false);

#if !XNU_TARGET_OS_OSX
/* Use lightweight corpse on embedded */
static TUNABLE(bool, lw_corpses_enabled, "lw_corpses", true);
#else
static TUNABLE(bool, lw_corpses_enabled, "lw_corpses", false);
#endif

#if DEBUG || DEVELOPMENT
/* bootarg to generate corpse with size up to max_footprint_mb */
TUNABLE(bool, corpse_threshold_system_limit, "corpse_threshold_system_limit", false);
#endif /* DEBUG || DEVELOPMENT */

/* bootarg to turn on corpse forking for EXC_RESOURCE */
TUNABLE(bool, exc_via_corpse_forking, "exc_via_corpse_forking", true);

/* bootarg to generate corpse for fatal high memory watermark violation */
TUNABLE(bool, corpse_for_fatal_memkill, "corpse_for_fatal_memkill", true);

extern int IS_64BIT_PROCESS(void *);
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
extern void *proc_find(int pid);
extern int proc_rele(void *p);
extern task_t proc_get_task_raw(void *proc);
extern const char *proc_best_name(struct proc *proc);


/*
 * Routine: corpses_enabled
 * returns FALSE if not enabled
 */
boolean_t
corpses_enabled(void)
{
	return !corpses_disabled;
}

unsigned long
total_corpses_count(void)
{
	union corpse_creation_gate gate;

	gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	return gate.corpses;
}

extern int proc_pid(struct proc *);

/*
 * Routine: task_crashinfo_get_ref()
 * Grab a slot for creating a corpse.
 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
 */
static kern_return_t
task_crashinfo_get_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;
	struct proc *p = (void *)current_proc();

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many faults %d\n",
				    proc_best_name(p), proc_pid(p), newgate.user_faults);
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		if (newgate.corpses++ >= total_corpses_allowed) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses);
			return KERN_RESOURCE_SHORTAGE;
		}

		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse allowed %d of %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses, total_corpses_allowed);
			return KERN_SUCCESS;
		}
	}
}

/*
 * Routine: task_crashinfo_release_ref
 * Release the slot for the corpse being used.
 */
static kern_return_t
task_crashinfo_release_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "Corpse released, count at %d\n", newgate.corpses);
			return KERN_SUCCESS;
		}
	}
}


kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
    corpse_flags_t kc_u_flags, unsigned kc_flags)
{
	kcdata_descriptor_t kcdata;

	if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
			return NULL;
		}
	}

	kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
	    kc_flags);
	if (kcdata) {
		kcdata->kcd_user_flags = kc_u_flags;
	} else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(kc_u_flags);
	}
	return kcdata;
}

kcdata_descriptor_t
task_btinfo_alloc_init(mach_vm_address_t addr, unsigned size)
{
	kcdata_descriptor_t kcdata;

	kcdata = kcdata_memory_alloc_init(addr, TASK_BTINFO_BEGIN, size, KCFLAG_USE_MEMCOPY);

	return kcdata;
}


/*
 * Free up the memory associated with task_crashinfo_data
 */
kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}
	if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(data->kcd_user_flags);
	}
	return kcdata_memory_destroy(data);
}

/*
 * Routine: task_get_corpseinfo
 * params: task - task which has corpse info setup.
 * returns: crash info data attached to task.
 *          NULL if task is null or has no corpse info
 */
kcdata_descriptor_t
task_get_corpseinfo(task_t task)
{
	kcdata_descriptor_t retval = NULL;
	if (task != NULL) {
		retval = task->corpse_info;
	}
	return retval;
}

/*
 * Routine: task_add_to_corpse_task_list
 * params: task - task to be added to corpse task list
 * returns: None.
 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_remove_from_corpse_task_list
 * params: task - task to be removed from corpse task list
 * returns: None.
 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_purge_all_corpses
 * params: None.
 * returns: None.
 */
void
task_purge_all_corpses(void)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		os_log(OS_LOG_DEFAULT, "Memory pressure corpse purge for pid %d.\n", task_pid(task));
		vm_map_terminate(task->map);
	}
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: find_corpse_task_by_uniqueid_grp
 * params: task_uniqueid - uniqueid of the corpse
 *         target - target task [Out Param]
 *         grp - task reference group
 * returns:
 *         KERN_SUCCESS if a matching corpse is found; returns with a ref on it.
 *         KERN_FAILURE if a corpse with the given uniqueid is not found.
 */
kern_return_t
find_corpse_task_by_uniqueid_grp(
	uint64_t task_uniqueid,
	task_t *target,
	task_grp_t grp)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);

	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		if (task->task_uniqueid == task_uniqueid) {
			task_reference_grp(task, grp);
			lck_mtx_unlock(&tasks_corpse_lock);
			*target = task;
			return KERN_SUCCESS;
		}
	}

	lck_mtx_unlock(&tasks_corpse_lock);
	return KERN_FAILURE;
}

/*
 * Routine: task_generate_corpse
 * params: task - task to fork a corpse
 *         corpse_task - task port of the generated corpse
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;

	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	thread_set_exec_promotion(current_thread());
	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	thread_clear_exec_promotion(current_thread());
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_corpse_to_port_and_nsrequest(new_task);
	assert(IP_NULL != corpse_port);

	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}
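
/*
 * Example (illustrative only): from userspace, the MIG wrapper for the routine above can be
 * used to snapshot a process, assuming the caller already holds a send right to the target
 * task and has whatever entitlements/privileges the kernel demands:
 *
 *     mach_port_t corpse = MACH_PORT_NULL;
 *     if (task_generate_corpse(target_task, &corpse) == KERN_SUCCESS) {
 *         // inspect with task_map_corpse_info_64(), then drop the right:
 *         mach_port_deallocate(mach_task_self(), corpse);
 *     }
 */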

/*
 * Only generate a lightweight corpse if any of the thread, task, or host levels registers
 * EXC_CORPSE_NOTIFY with behavior EXCEPTION_BACKTRACE.
 *
 * Save a send right and the behavior of those ports in the out param EXC_PORTS.
 */
static boolean_t
task_should_generate_lightweight_corpse(
	task_t task,
	ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT])
{
	kern_return_t kr;
	boolean_t should_generate = FALSE;

	exception_mask_t mask;
	mach_msg_type_number_t nmasks;
	exception_port_t exc_port = IP_NULL;
	exception_behavior_t behavior;
	thread_state_flavor_t flavor;

	if (task != current_task()) {
		return FALSE;
	}

	if (!lw_corpses_enabled) {
		return FALSE;
	}

	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
		nmasks = 1;

		/* thread, task, and host level, in this order */
		if (i == 0) {
			kr = thread_get_exception_ports(current_thread(), EXC_MASK_CORPSE_NOTIFY,
			    &mask, &nmasks, &exc_port, &behavior, &flavor);
		} else if (i == 1) {
			kr = task_get_exception_ports(current_task(), EXC_MASK_CORPSE_NOTIFY,
			    &mask, &nmasks, &exc_port, &behavior, &flavor);
		} else {
			kr = host_get_exception_ports(host_priv_self(), EXC_MASK_CORPSE_NOTIFY,
			    &mask, &nmasks, &exc_port, &behavior, &flavor);
		}

		if (kr != KERN_SUCCESS || nmasks == 0) {
			exc_port = IP_NULL;
		}

		/* thread level can return KERN_SUCCESS && nmasks 0 */
		assert(nmasks == 1 || i == 0);

		if (IP_VALID(exc_port) && (behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED)) {
			assert(behavior & MACH_EXCEPTION_CODES);
			exc_ports[i] = exc_port; /* transfers right to array */
			exc_port = NULL;
			should_generate = TRUE;
		} else {
			exc_ports[i] = IP_NULL;
		}

		ipc_port_release_send(exc_port);
	}

	return should_generate;
}

/*
 * Routine: task_enqueue_exception_with_corpse
 * params: task - task to generate a corpse and enqueue it
 *         etype - EXC_RESOURCE or EXC_GUARD
 *         code - exception code to be enqueued
 *         codeCnt - code array count - code and subcode
 *
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments passed.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason,
	boolean_t lightweight)
{
	kern_return_t kr;
	ipc_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights in thread, task, host order */
	const char *procname = proc_best_name(get_bsdtask_info(task));

	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	if (lightweight && task_should_generate_lightweight_corpse(task, exc_ports)) {
		/* port rights captured in exc_ports */
		kcdata_descriptor_t desc = NULL;
		kcdata_object_t obj = KCDATA_OBJECT_NULL;
		bool lw_corpse_enqueued = false;

		assert(task == current_task());
		assert(etype == EXC_GUARD);

		kr = kcdata_object_throttle_get(KCDATA_OBJECT_TYPE_LW_CORPSE);
		if (kr != KERN_SUCCESS) {
			goto out;
		}

		kr = current_thread_collect_backtrace_info(&desc, etype, code, codeCnt, reason);
		if (kr != KERN_SUCCESS) {
			kcdata_object_throttle_release(KCDATA_OBJECT_TYPE_LW_CORPSE);
			goto out;
		}

		kr = kcdata_create_object(desc, KCDATA_OBJECT_TYPE_LW_CORPSE, BTINFO_ALLOCATION_SIZE, &obj);
		assert(kr == KERN_SUCCESS);
		/* desc ref and throttle slot captured in obj ref */

		thread_backtrace_enqueue(obj, exc_ports, etype);
		os_log(OS_LOG_DEFAULT, "Lightweight corpse enqueued for %s\n", procname);
		/* obj ref and exc_ports send rights consumed */
		lw_corpse_enqueued = true;

out:
		if (!lw_corpse_enqueued) {
			for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
				ipc_port_release_send(exc_ports[i]);
			}
		}
	} else {
		task_t corpse = TASK_NULL;
		thread_t thread = THREAD_NULL;

		thread_set_exec_promotion(current_thread());
		/* Generate a corpse for the given task, will return with a ref on corpse task */
		kr = task_generate_corpse_internal(task, &corpse, &thread, etype,
		    code[0], code[1], reason);
		thread_clear_exec_promotion(current_thread());
		if (kr == KERN_SUCCESS) {
			if (thread == THREAD_NULL) {
				return KERN_FAILURE;
			}
			assert(corpse != TASK_NULL);
			assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
			thread_exception_enqueue(corpse, thread, etype);
			os_log(OS_LOG_DEFAULT, "Full corpse enqueued for %s\n", procname);
		}
	}

	return kr;
}

/*
 * Routine: task_generate_corpse_internal
 * params: task - task to fork a corpse
 *         corpse_task - task of the generated corpse
 *         exc_thread - equivalent thread in corpse enqueuing exception
 *         etype - EXC_RESOURCE or EXC_GUARD or 0
 *         code - mach exception code to be passed in corpse blob
 *         subcode - mach exception subcode to be passed in corpse blob
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	uint32_t t_flags;
	uint32_t t_flags_ro;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;
	void *corpse_proc = NULL;
	thread_t self = current_thread();

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	if (!corpses_enabled()) {
		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSES_DISABLED), 0 /* arg */);
		return KERN_NOT_SUPPORTED;
	}

	if (task_corpse_forking_disabled(task)) {
		os_log(OS_LOG_DEFAULT, "corpse for pid %d disabled via SPI\n", task_pid(task));
		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_DISABLED_FOR_PROC), 0 /* arg */);
		return KERN_FAILURE;
	}

	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);
	t_flags_ro = TFRO_CORPSE;

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	corpse_proc = zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
	new_task = proc_get_task_raw(corpse_proc);

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    t_flags_ro,
	    TPF_NONE,
	    TWF_NONE,
	    new_task);
	if (kr != KERN_SUCCESS) {
		new_task = TASK_NULL;
		goto error_task_generate_corpse;
	}

	/* Enable IPC access to the corpse task */
	vm_map_setup(get_task_map(new_task), new_task);
	ipc_task_enable(new_task);

	/* new task is now referenced, do not free the struct in error case */
	corpse_proc = NULL;

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata, (etype != 0));
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* transfer our references to the corpse info */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0;

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason, etype);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

error_task_generate_corpse:
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	if (corpse_proc != NULL) {
		zfree(proc_task_zone, corpse_proc);
	}

	if (kr != KERN_SUCCESS) {
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	kfree_data(udata_buffer, size);

	return kr;
}

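/*
 * Routine: task_map_kcdata_64
 * Helper: allocates a VM region (tagged with `tag`) in the given task's map and copies the
 * kernel kcdata buffer of `kcd_size` bytes into it, returning the user address in `uaddr`.
 */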
static kern_return_t
task_map_kcdata_64(
	task_t task,
	void *kcdata_addr,
	mach_vm_address_t *uaddr,
	mach_vm_size_t kcd_size,
	vm_tag_t tag)
{
	kern_return_t kr;
	mach_vm_offset_t udata_ptr;

	kr = mach_vm_allocate_kernel(task->map, &udata_ptr, (size_t)kcd_size,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = tag));
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	copyout(kcdata_addr, (user_addr_t)udata_ptr, (size_t)kcd_size);
	*uaddr = udata_ptr;

	return KERN_SUCCESS;
}

/*
 * Routine: task_map_corpse_info
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info
 *         kcd_size - size of the mapped corpse info
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 * Note: Temporary function, will be deleted soon.
 */
kern_return_t
task_map_corpse_info(
	task_t task,
	task_t corpse_task,
	vm_address_t *kcd_addr_begin,
	uint32_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_address_t kcd_addr_begin_64;
	mach_vm_size_t size_64;

	kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
	*kcd_size = (uint32_t) size_64;
	return KERN_SUCCESS;
}

/*
 * Routine: task_map_corpse_info_64
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info (takes mach_vm_address_t *)
 *         kcd_size - size of the mapped corpse info (takes mach_vm_size_t *)
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_corpse_info_64(
	task_t task,
	task_t corpse_task,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t crash_data_ptr = 0;
	const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
	void *corpse_info_kernel = NULL;

	if (task == TASK_NULL || task_is_a_corpse(task) ||
	    corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task)) {
		return KERN_INVALID_ARGUMENT;
	}

	corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
	if (corpse_info_kernel == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = task_map_kcdata_64(task, corpse_info_kernel, &crash_data_ptr, size,
	    VM_MEMORY_CORPSEINFO);

	if (kr == KERN_SUCCESS) {
		*kcd_addr_begin = crash_data_ptr;
		*kcd_size = size;
	}

	return kr;
}

/*
 * Routine: task_map_kcdata_object_64
 * params: task - Map the underlying kcdata in task's address space
 *         kcdata_obj - Object representing the data
 *         kcd_addr_begin - Address of the mapped kcdata
 *         kcd_size - Size of the mapped kcdata
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_kcdata_object_64(
	task_t task,
	kcdata_object_t kcdata_obj,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t bt_data_ptr = 0;
	const mach_vm_size_t size = BTINFO_ALLOCATION_SIZE;
	void *bt_info_kernel = NULL;

	if (task == TASK_NULL || task_is_a_corpse(task) ||
	    kcdata_obj == KCDATA_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	bt_info_kernel = kcdata_memory_get_begin_addr(kcdata_obj->ko_data);
	if (bt_info_kernel == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = task_map_kcdata_64(task, bt_info_kernel, &bt_data_ptr, size,
	    VM_MEMORY_BTINFO);

	if (kr == KERN_SUCCESS) {
		*kcd_addr_begin = bt_data_ptr;
		*kcd_size = size;
	}

	return kr;
}

uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)
{
	return corpse_task->crashed_thread_id;
}
