1 /*
2 * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 /*
31 * Corpses Overview
32 * ================
33 *
34 * A corpse is a state of process that is past the point of its death. This means that process has
35 * completed all its termination operations like releasing file descriptors, mach ports, sockets and
36 * other constructs used to identify a process. To every other process this mimics the behavior as if
37 * the process has died and is no longer available by any means.
38 *
39 * Why do we need Corpses?
40 * -----------------------
41 * For crash inspection we need to inspect the state and data that is associated with process so that
42 * crash reporting infrastructure can build backtraces, find leaks etc. For example, a crash
42 * report can be generated from the preserved state even though the process itself is gone.
43 *
44 * Corpses functionality in kernel
45 * ===============================
46 * The corpse functionality is an extension of existing exception reporting mechanisms we have. The
47 * exception_triage calls will try to deliver the first round of exceptions allowing
48 * task/debugger/ReportCrash/launchd level exception handlers to respond to exception. If even after
49 * notification the exception is not handled, then the process begins the death operations and during
50 * proc_prepareexit, we decide to create a corpse for inspection. Following is a sample run through
51 * of events and data shuffling that happens when corpses is enabled.
52 *
53 * * a process causes an exception during normal execution of threads.
54 * * The exception generated by either mach (e.g. GUARDED_MACHPORT) or bsd (e.g. SIGABRT, GUARDED_FD
55 * etc) side is passed through the exception_triage() function to follow the thread -> task -> host
56 * level exception handling system. This set of steps are same as before and allow for existing
57 * crash reporting systems (both internal and 3rd party) to catch and create reports as required.
58 * * If above exception handling returns failed (when nobody handles the notification), then the
59 * proc_prepareexit path has logic to decide to create corpse.
60 * * The task_mark_corpse function allocates userspace vm memory and attaches the information
61 * kcdata_descriptor_t to task->corpse_info field of task.
62 * - All the task's threads are marked with the "inspection" flag which signals the termination
63 * daemon to not reap them but hold until they are being inspected.
64 * - task flags t_flags reflect the corpse bit and also a PENDING_CORPSE bit. PENDING_CORPSE
65 * prevents task_terminate from stripping important data from task.
66 * - It marks all the threads to terminate and return to AST for termination.
67 * - The allocation logic takes into account the rate limiting policy of allowing only
68 * TOTAL_CORPSES_ALLOWED in flight.
69 * * The proc exit threads continues and collects required information in the allocated vm region.
70 * Once complete it marks itself for termination.
71 * * In the thread_terminate_self(), the last thread to enter will do a call to proc_exit().
72 * Following this is a check to see if task is marked for corpse notification and will
73 * invoke the task_deliver_crash_notification().
74 * * Once EXC_CORPSE_NOTIFY is delivered, it removes the PENDING_CORPSE flag from task (and
75 * inspection flag from all its threads) and allows task_terminate to go ahead and continue
76 * the mach task termination process.
77 * * ASIDE: The rest of the threads that are reaching the thread_terminate_daemon() with the
78 * inspection flag set are just bounced to another holding queue (crashed_threads_queue).
79 * Only after the corpse notification these are pulled out from holding queue and enqueued
80 * back to termination queue
81 *
82 *
83 * Corpse info format
84 * ==================
85 * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
86 * VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
87 * subsystems like
88 * * bsd proc exit path may write down pid, parent pid, number of file descriptors etc
89 * * mach side may append data regarding ledger usage, memory stats etc
90 * See detailed info about the memory structure and format in kern_cdata.h documentation.
91 *
92 * Configuring Corpses functionality
93 * =================================
94 * boot-arg: -no_corpses disables the corpse generation. This can be added/removed without affecting
95 * any other subsystem.
96 * TOTAL_CORPSES_ALLOWED : (recompilation required) - Changing this number allows for controlling
97 * the number of corpse instances to be held for inspection before allowing memory to be reclaimed
98 * by system.
99 * CORPSEINFO_ALLOCATION_SIZE: is the default size of vm allocation. If in future there is much more
100 * data to be put in, then please re-tune this parameter.
101 *
102 * Debugging/Visibility
103 * ====================
104 * * lldbmacros for thread and task summary are updated to show "C" flag for corpse task/threads.
105 * * there are macros to see list of threads in termination queue (dumpthread_terminate_queue)
106 * and holding queue (dumpcrashed_thread_queue).
107 * * In case corpse creation is disabled or ignored, the system log is updated with
108 * printf data with reason.
109 *
110 * Limitations of Corpses
111 * ======================
112 * With holding off memory for inspection, it creates vm pressure which might not be desirable
113 * on low memory devices. There are limits to max corpses being inspected at a time which is
114 * marked by TOTAL_CORPSES_ALLOWED.
115 *
116 */
117
118
119 #include <stdatomic.h>
120 #include <kern/assert.h>
121 #include <mach/mach_types.h>
122 #include <mach/boolean.h>
123 #include <mach/vm_param.h>
124 #include <kern/kern_types.h>
125 #include <kern/mach_param.h>
126 #include <kern/thread.h>
127 #include <kern/task.h>
128 #include <corpses/task_corpse.h>
129 #include <kern/kalloc.h>
130 #include <kern/kern_cdata.h>
131 #include <mach/mach_vm.h>
132 #include <kern/exc_guard.h>
133 #include <os/log.h>
134
135 #if CONFIG_MACF
136 #include <security/mac_mach_internal.h>
137 #endif
138
139 /*
140 * Exported interfaces
141 */
142 #include <mach/task_server.h>
143
/*
 * Rate-limiting gate for corpse creation: two 16-bit counters (user-fault
 * corpses and total corpses in flight) overlaid on one 32-bit value so that
 * both can be checked and updated with a single atomic compare-exchange.
 */
union corpse_creation_gate {
	struct {
		uint16_t user_faults;
		uint16_t corpses;
	};
	uint32_t value;
};

/* Current gate value; accessed only through C11 atomics below. */
static _Atomic uint32_t inflight_corpses;
/* Running total of corpses ever created (statistics only). */
unsigned long total_corpses_created = 0;

/* boot-arg: -no_corpses disables corpse generation entirely. */
static TUNABLE(bool, corpses_disabled, "-no_corpses", false);

#if DEBUG || DEVELOPMENT
/* bootarg to generate corpse with size up to max_footprint_mb */
TUNABLE(bool, corpse_threshold_system_limit, "corpse_threshold_system_limit", false);
#endif /* DEBUG || DEVELOPMENT */

/* bootarg to turn on corpse forking for EXC_RESOURCE */
TUNABLE(bool, exc_via_corpse_forking, "exc_via_corpse_forking", true);

/* bootarg to generate corpse for fatal high memory watermark violation */
TUNABLE(bool, corpse_for_fatal_memkill, "corpse_for_fatal_memkill", true);
167
#ifdef __arm__
/* 32-bit ARM has no 64-bit user processes; stub always answers "no". */
static inline int
IS_64BIT_PROCESS(__unused void *p)
{
	return 0;
}
#else
extern int IS_64BIT_PROCESS(void *);
#endif /* __arm__ */
/* BSD-side helpers, declared as externs here — presumably to avoid pulling
 * BSD headers into Mach code; verify against the bsd/ implementations. */
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
extern void *proc_find(int pid);
extern int proc_rele(void *p);
182
183 /*
184 * Routine: corpses_enabled
185 * returns FALSE if not enabled
186 */
187 boolean_t
corpses_enabled(void)188 corpses_enabled(void)
189 {
190 return !corpses_disabled;
191 }
192
193 unsigned long
total_corpses_count(void)194 total_corpses_count(void)
195 {
196 union corpse_creation_gate gate;
197
198 gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
199 return gate.corpses;
200 }
201
/* BSD proc accessors used below only to decorate log messages. */
extern char *proc_best_name(struct proc *);
extern int proc_pid(struct proc *);
204
205 /*
206 * Routine: task_crashinfo_get_ref()
207 * Grab a slot at creating a corpse.
208 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
209 */
static kern_return_t
task_crashinfo_get_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;
	struct proc *p = (void *)current_proc();

	/* The caller must be taking a ref; this flag is what release_ref checks. */
	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	/*
	 * Lock-free reservation: snapshot the packed counters, bump them in a
	 * local copy, then publish with CAS.  On CAS failure, oldgate has been
	 * reloaded with the current value, so the quota checks re-run fresh.
	 */
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			/* User-fault corpses draw from their own, tighter quota. */
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many faults %d\n",
				    proc_best_name(p), proc_pid(p), newgate.user_faults);
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		/* Global in-flight limit applies to every corpse kind. */
		if (newgate.corpses++ >= TOTAL_CORPSES_ALLOWED) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses);
			return KERN_RESOURCE_SHORTAGE;
		}

		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse allowed %d of %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses, TOTAL_CORPSES_ALLOWED);
			return KERN_SUCCESS;
		}
	}
}
244
245 /*
246 * Routine: task_crashinfo_release_ref
247 * release the slot for corpse being used.
248 */
static kern_return_t
task_crashinfo_release_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	/* Only a holder of a ref (flag set at get_ref time) may release. */
	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	/*
	 * Mirror of task_crashinfo_get_ref(): decrement the packed counters
	 * in a local copy and publish with CAS, retrying on contention.
	 * An underflow means a release without a matching get — panic.
	 */
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "Corpse released, count at %d\n", newgate.corpses);
			return KERN_SUCCESS;
		}
	}
}
276
277
278 kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p,unsigned size,corpse_flags_t kc_u_flags,unsigned kc_flags)279 task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
280 corpse_flags_t kc_u_flags, unsigned kc_flags)
281 {
282 kcdata_descriptor_t kcdata;
283
284 if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
285 if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
286 return NULL;
287 }
288 }
289
290 kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
291 kc_flags);
292 if (kcdata) {
293 kcdata->kcd_user_flags = kc_u_flags;
294 } else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
295 task_crashinfo_release_ref(kc_u_flags);
296 }
297 return kcdata;
298 }
299
300
301 /*
302 * Free up the memory associated with task_crashinfo_data
303 */
304 kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)305 task_crashinfo_destroy(kcdata_descriptor_t data)
306 {
307 if (!data) {
308 return KERN_INVALID_ARGUMENT;
309 }
310 if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
311 task_crashinfo_release_ref(data->kcd_user_flags);
312 }
313 return kcdata_memory_destroy(data);
314 }
315
316 /*
317 * Routine: task_get_corpseinfo
318 * params: task - task which has corpse info setup.
319 * returns: crash info data attached to task.
320 * NULL if task is null or has no corpse info
321 */
322 kcdata_descriptor_t
task_get_corpseinfo(task_t task)323 task_get_corpseinfo(task_t task)
324 {
325 kcdata_descriptor_t retval = NULL;
326 if (task != NULL) {
327 retval = task->corpse_info;
328 }
329 return retval;
330 }
331
332 /*
333 * Routine: task_add_to_corpse_task_list
334 * params: task - task to be added to corpse task list
335 * returns: None.
336 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	/* Global corpse list; membership is guarded by tasks_corpse_lock. */
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}
344
345 /*
346 * Routine: task_remove_from_corpse_task_list
347 * params: task - task to be removed from corpse task list
348 * returns: None.
349 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	/* Counterpart of task_add_to_corpse_task_list(); same lock discipline. */
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}
357
358 /*
359 * Routine: task_purge_all_corpses
360 * params: None.
361 * returns: None.
362 */
void
task_purge_all_corpses(void)
{
	task_t task;

	printf("Purging corpses......\n\n");

	/*
	 * Reclaim memory held by corpses: tear down each corpse's VM map
	 * entries while the corpse task objects themselves stay on the list.
	 * Presumably invoked under memory pressure — confirm with callers.
	 */
	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		vm_map_terminate(task->map);
	}
	lck_mtx_unlock(&tasks_corpse_lock);
}
377
378 /*
379 * Routine: find_corpse_task_by_uniqueid_grp
380 * params: task_uniqueid - uniqueid of the corpse
381 * target - target task [Out Param]
382 * grp - task reference group
383 * returns:
384 * KERN_SUCCESS if a matching corpse if found, gives a ref.
385 * KERN_FAILURE corpse with given uniqueid is not found.
386 */
kern_return_t
find_corpse_task_by_uniqueid_grp(
	uint64_t task_uniqueid,
	task_t *target,
	task_grp_t grp)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);

	/* Linear scan of the global corpse list for a matching unique id. */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		if (task->task_uniqueid == task_uniqueid) {
			/*
			 * NOTE(review): the task reference is taken after the
			 * lock is dropped — presumably safe because a corpse
			 * stays alive while on this list; confirm against the
			 * corpse teardown path.
			 */
			lck_mtx_unlock(&tasks_corpse_lock);
			task_reference_grp(task, grp);
			*target = task;
			return KERN_SUCCESS;
		}
	}

	lck_mtx_unlock(&tasks_corpse_lock);
	return KERN_FAILURE;
}
409
410 /*
411 * Routine: task_generate_corpse
412 * params: task - task to fork a corpse
413 * corpse_task - task port of the generated corpse
414 * returns: KERN_SUCCESS on Success.
415 * KERN_FAILURE on Failure.
416 * KERN_NOT_SUPPORTED on corpse disabled.
417 * KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
418 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;

	/* Never corpse the kernel task; require a valid task. */
	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* A corpse fork cannot itself be forked into another corpse. */
	task_lock(task);
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* No exception is delivered on this path, so the thread ref is unneeded. */
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_corpse_to_port_and_nsrequest(new_task);
	assert(IP_NULL != corpse_port);

	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}
470
471 /*
472 * Routine: task_enqueue_exception_with_corpse
473 * params: task - task to generate a corpse and enqueue it
474 * etype - EXC_RESOURCE or EXC_GUARD
475 * code - exception code to be enqueued
476 * codeCnt - code array count - code and subcode
477 *
478 * returns: KERN_SUCCESS on Success.
479 * KERN_FAILURE on Failure.
480 * KERN_INVALID_ARGUMENT on invalid arguments passed.
481 * KERN_NOT_SUPPORTED on corpse disabled.
482 * KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
483 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	kern_return_t kr;

	/* Both code and subcode are consumed below as code[0] and code[1]. */
	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread,
	    etype, code[0], code[1], reason);
	if (kr == KERN_SUCCESS) {
		if (thread == THREAD_NULL) {
			/*
			 * NOTE(review): new_task carries a reference here; this
			 * early return appears to leak it — confirm whether a
			 * later no-senders/termination path reclaims the corpse.
			 */
			return KERN_FAILURE;
		}
		assert(new_task != TASK_NULL);
		assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
		/* Hand the corpse and its crashed thread off for exception delivery. */
		thread_exception_enqueue(new_task, thread, etype);
	}
	return kr;
}
513
514 /*
515 * Routine: task_generate_corpse_internal
516 * params: task - task to fork a corpse
517 * corpse_task - task of the generated corpse
518 * exc_thread - equivalent thread in corpse enqueuing exception
519 * etype - EXC_RESOURCE or EXC_GUARD or 0
520 * code - mach exception code to be passed in corpse blob
521 * subcode - mach exception subcode to be passed in corpse blob
522 * returns: KERN_SUCCESS on Success.
523 * KERN_FAILURE on Failure.
524 * KERN_NOT_SUPPORTED on corpse disabled.
525 * KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
526 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	int t_flags;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	/* Corpse generation can be disabled with the -no_corpses boot-arg. */
	if (!corpses_enabled()) {
		return KERN_NOT_SUPPORTED;
	}

	/* Per-task opt-out takes precedence over everything else. */
	if (task_corpse_forking_disabled(task)) {
		os_log(OS_LOG_DEFAULT, "corpse for pid %d disabled via SPI\n", task_pid(task));
		return KERN_FAILURE;
	}

	/* User-generated guard exceptions draw from a separate, tighter quota. */
	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	/* Reserve an in-flight corpse slot (rate limiting). */
	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	/* Mirror the source task's address/data width into the corpse flags. */
	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    TF_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    TPF_NONE,
	    TWF_NONE,
	    &new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata, (etype != 0));
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* Set up the crash-info buffer in the corpse's address space. */
	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* transfer our references to the corpse info */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0; /* ownership moved; error path must not double-release */

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason, etype);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

error_task_generate_corpse:
	/* Shared exit: on success only the label/proc cleanup below applies. */
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	if (kr != KERN_SUCCESS) {
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		/* Give back the corpse slot unless ownership moved to corpse_info. */
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	kfree_data(udata_buffer, size);

	return kr;
}
686
687 /*
688 * Routine: task_map_corpse_info
689 * params: task - Map the corpse info in task's address space
690 * corpse_task - task port of the corpse
691 * kcd_addr_begin - address of the mapped corpse info
692 * kcd_addr_begin - size of the mapped corpse info
693 * returns: KERN_SUCCESS on Success.
694 * KERN_FAILURE on Failure.
695 * KERN_INVALID_ARGUMENT on invalid arguments.
696 * Note: Temporary function, will be deleted soon.
697 */
698 kern_return_t
task_map_corpse_info(task_t task,task_t corpse_task,vm_address_t * kcd_addr_begin,uint32_t * kcd_size)699 task_map_corpse_info(
700 task_t task,
701 task_t corpse_task,
702 vm_address_t *kcd_addr_begin,
703 uint32_t *kcd_size)
704 {
705 kern_return_t kr;
706 mach_vm_address_t kcd_addr_begin_64;
707 mach_vm_size_t size_64;
708
709 kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
710 if (kr != KERN_SUCCESS) {
711 return kr;
712 }
713
714 *kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
715 *kcd_size = (uint32_t) size_64;
716 return KERN_SUCCESS;
717 }
718
719 /*
720 * Routine: task_map_corpse_info_64
721 * params: task - Map the corpse info in task's address space
722 * corpse_task - task port of the corpse
723 * kcd_addr_begin - address of the mapped corpse info (takes mach_vm_addess_t *)
724 * kcd_addr_begin - size of the mapped corpse info (takes mach_vm_size_t *)
725 * returns: KERN_SUCCESS on Success.
726 * KERN_FAILURE on Failure.
727 * KERN_INVALID_ARGUMENT on invalid arguments.
728 */
729 kern_return_t
task_map_corpse_info_64(task_t task,task_t corpse_task,mach_vm_address_t * kcd_addr_begin,mach_vm_size_t * kcd_size)730 task_map_corpse_info_64(
731 task_t task,
732 task_t corpse_task,
733 mach_vm_address_t *kcd_addr_begin,
734 mach_vm_size_t *kcd_size)
735 {
736 kern_return_t kr;
737 mach_vm_offset_t crash_data_ptr = 0;
738 const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
739 void *corpse_info_kernel = NULL;
740
741 if (task == TASK_NULL || task_is_a_corpse_fork(task)) {
742 return KERN_INVALID_ARGUMENT;
743 }
744
745 if (corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task) ||
746 kcdata_memory_get_begin_addr(corpse_task->corpse_info) == NULL) {
747 return KERN_INVALID_ARGUMENT;
748 }
749 corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
750 kr = mach_vm_allocate_kernel(task->map, &crash_data_ptr, size,
751 VM_FLAGS_ANYWHERE, VM_MEMORY_CORPSEINFO);
752 if (kr != KERN_SUCCESS) {
753 return kr;
754 }
755 copyout(corpse_info_kernel, (user_addr_t)crash_data_ptr, (size_t)size);
756 *kcd_addr_begin = crash_data_ptr;
757 *kcd_size = size;
758
759 return KERN_SUCCESS;
760 }
761
762 uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)763 task_corpse_get_crashed_thread_id(task_t corpse_task)
764 {
765 return corpse_task->crashed_thread_id;
766 }
767