xref: /xnu-8020.101.4/osfmk/corpses/corpse.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 
30 /*
31  * Corpses Overview
32  * ================
33  *
34  * A corpse is a state of process that is past the point of its death. This means that process has
35  * completed all its termination operations like releasing file descriptors, mach ports, sockets and
36  * other constructs used to identify a process. For all the processes this mimics the behavior as if
 * the process has died and is no longer available by any means.
38  *
39  * Why do we need Corpses?
40  * -----------------------
 * For crash inspection we need to inspect the state and data that is associated with a process so
 * that the crash reporting infrastructure can build backtraces, find leaks etc.
43  *
44  * Corpses functionality in kernel
45  * ===============================
46  * The corpse functionality is an extension of existing exception reporting mechanisms we have. The
47  * exception_triage calls will try to deliver the first round of exceptions allowing
48  * task/debugger/ReportCrash/launchd level exception handlers to  respond to exception. If even after
49  * notification the exception is not handled, then the process begins the death operations and during
50  * proc_prepareexit, we decide to create a corpse for inspection. Following is a sample run through
51  * of events and data shuffling that happens when corpses is enabled.
52  *
53  *   * a process causes an exception during normal execution of threads.
 *   * The exception generated by either mach (e.g. a GUARDED_MACHPORT violation) or bsd (e.g. SIGABRT, GUARDED_FD
55  *     etc) side is passed through the exception_triage() function to follow the thread -> task -> host
56  *     level exception handling system. This set of steps are same as before and allow for existing
57  *     crash reporting systems (both internal and 3rd party) to catch and create reports as required.
58  *   * If above exception handling returns failed (when nobody handles the notification), then the
59  *     proc_prepareexit path has logic to decide to create corpse.
60  *   * The task_mark_corpse function allocates userspace vm memory and attaches the information
61  *     kcdata_descriptor_t to task->corpse_info field of task.
62  *     - All the task's threads are marked with the "inspection" flag which signals the termination
63  *       daemon to not reap them but hold until they are being inspected.
64  *     - task flags t_flags reflect the corpse bit and also a PENDING_CORPSE bit. PENDING_CORPSE
65  *       prevents task_terminate from stripping important data from task.
66  *     - It marks all the threads to terminate and return to AST for termination.
67  *     - The allocation logic takes into account the rate limiting policy of allowing only
68  *       TOTAL_CORPSES_ALLOWED in flight.
69  *   * The proc exit threads continues and collects required information in the allocated vm region.
70  *     Once complete it marks itself for termination.
71  *   * In the thread_terminate_self(), the last thread to enter will do a call to proc_exit().
72  *     Following this is a check to see if task is marked for corpse notification and will
 *     invoke the task_deliver_crash_notification().
74  *   * Once EXC_CORPSE_NOTIFY is delivered, it removes the PENDING_CORPSE flag from task (and
75  *     inspection flag from all its threads) and allows task_terminate to go ahead and continue
76  *     the mach task termination process.
77  *   * ASIDE: The rest of the threads that are reaching the thread_terminate_daemon() with the
78  *     inspection flag set are just bounced to another holding queue (crashed_threads_queue).
79  *     Only after the corpse notification these are pulled out from holding queue and enqueued
80  *     back to termination queue
81  *
82  *
83  * Corpse info format
84  * ==================
85  * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
86  *     VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
87  *     subsystems like
88  *   * bsd proc exit path may write down pid, parent pid, number of file descriptors etc
89  *   * mach side may append data regarding ledger usage, memory stats etc
90  * See detailed info about the memory structure and format in kern_cdata.h documentation.
91  *
92  * Configuring Corpses functionality
93  * =================================
94  *   boot-arg: -no_corpses disables the corpse generation. This can be added/removed without affecting
95  *     any other subsystem.
96  *   TOTAL_CORPSES_ALLOWED : (recompilation required) - Changing this number allows for controlling
97  *     the number of corpse instances to be held for inspection before allowing memory to be reclaimed
98  *     by system.
99  *   CORPSEINFO_ALLOCATION_SIZE: is the default size of vm allocation. If in future there is much more
100  *     data to be put in, then please re-tune this parameter.
101  *
102  * Debugging/Visibility
103  * ====================
104  *   * lldbmacros for thread and task summary are updated to show "C" flag for corpse task/threads.
105  *   * there are macros to see list of threads in termination queue (dumpthread_terminate_queue)
106  *     and holding queue (dumpcrashed_thread_queue).
 *   * In case corpse creation is disabled or ignored, the system log is updated with
108  *     printf data with reason.
109  *
110  * Limitations of Corpses
111  * ======================
112  *   With holding off memory for inspection, it creates vm pressure which might not be desirable
113  *   on low memory devices. There are limits to max corpses being inspected at a time which is
114  *   marked by TOTAL_CORPSES_ALLOWED.
115  *
116  */
117 
118 
119 #include <stdatomic.h>
120 #include <kern/assert.h>
121 #include <mach/mach_types.h>
122 #include <mach/boolean.h>
123 #include <mach/vm_param.h>
124 #include <kern/kern_types.h>
125 #include <kern/mach_param.h>
126 #include <kern/thread.h>
127 #include <kern/task.h>
128 #include <corpses/task_corpse.h>
129 #include <kern/kalloc.h>
130 #include <kern/kern_cdata.h>
131 #include <mach/mach_vm.h>
132 #include <kern/exc_guard.h>
133 #include <os/log.h>
134 
135 #if CONFIG_MACF
136 #include <security/mac_mach_internal.h>
137 #endif
138 
139 /*
140  * Exported interfaces
141  */
142 #include <mach/task_server.h>
143 
/*
 * Gate used to rate-limit in-flight corpses.
 *
 * Two 16-bit counters are packed into one 32-bit value so that both can be
 * read and published together with a single atomic compare-and-swap (see
 * task_crashinfo_get_ref / task_crashinfo_release_ref):
 *   user_faults - in-flight user-fault (GUARD_TYPE_USER) corpses
 *   corpses     - all in-flight corpses
 */
union corpse_creation_gate {
	struct {
		uint16_t user_faults;
		uint16_t corpses;
	};
	uint32_t value;
};
151 
/* packed union corpse_creation_gate value; only ever updated atomically */
static _Atomic uint32_t inflight_corpses;
/* lifetime statistic; incremented by the corpse creation path (outside this file) */
unsigned long  total_corpses_created = 0;

/* boot-arg: -no_corpses disables corpse generation entirely */
static TUNABLE(bool, corpses_disabled, "-no_corpses", false);

#if DEBUG || DEVELOPMENT
/* bootarg to generate corpse with size up to max_footprint_mb */
TUNABLE(bool, corpse_threshold_system_limit, "corpse_threshold_system_limit", false);
#endif /* DEBUG || DEVELOPMENT */

/* bootarg to turn on corpse forking for EXC_RESOURCE */
TUNABLE(bool, exc_via_corpse_forking, "exc_via_corpse_forking", true);

/* bootarg to generate corpse for fatal high memory watermark violation */
TUNABLE(bool, corpse_for_fatal_memkill, "corpse_for_fatal_memkill", true);
167 
#ifdef  __arm__
/* 32-bit arm has no 64-bit processes; stub always reports 32-bit */
static inline int
IS_64BIT_PROCESS(__unused void *p)
{
	return 0;
}
#else
extern int IS_64BIT_PROCESS(void *);
#endif /* __arm__ */
/* bsd side interfaces used below (no exported header for these) */
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
extern void *proc_find(int pid);
extern int proc_rele(void *p);
182 
183 /*
184  * Routine: corpses_enabled
185  * returns FALSE if not enabled
186  */
187 boolean_t
corpses_enabled(void)188 corpses_enabled(void)
189 {
190 	return !corpses_disabled;
191 }
192 
193 unsigned long
total_corpses_count(void)194 total_corpses_count(void)
195 {
196 	union corpse_creation_gate gate;
197 
198 	gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
199 	return gate.corpses;
200 }
201 
202 extern char *proc_best_name(struct proc *);
203 extern int proc_pid(struct proc *);
204 
/*
 * Routine: task_crashinfo_get_ref()
 *          Grab a slot at creating a corpse.
 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
 *          KERN_RESOURCE_SHORTAGE if the in-flight limit (TOTAL_CORPSES_ALLOWED,
 *          and additionally TOTAL_USER_FAULTS_ALLOWED for user-fault corpses)
 *          has been reached.
 */
static kern_return_t
task_crashinfo_get_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;
	struct proc *p = (void *)current_proc();

	/* callers must actually be asking for a counted reference */
	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	/*
	 * Lock-free reservation: snapshot the packed gate, bump the relevant
	 * counter(s) in a local copy, then publish with compare-and-swap,
	 * retrying on contention.
	 */
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			/* user-fault corpses are capped on their own counter too */
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many faults %d\n",
				    proc_best_name(p), proc_pid(p), newgate.user_faults);
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		if (newgate.corpses++ >= TOTAL_CORPSES_ALLOWED) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses);
			return KERN_RESOURCE_SHORTAGE;
		}

		// on failure this reloads the current value into oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse allowed %d of %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses, TOTAL_CORPSES_ALLOWED);
			return KERN_SUCCESS;
		}
	}
}
244 
/*
 * Routine: task_crashinfo_release_ref
 *          release the slot for corpse being used.
 * Panics on over-release (counter already zero); always returns KERN_SUCCESS
 * otherwise.
 */
static kern_return_t
task_crashinfo_release_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	/* only counted references may be released */
	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	/* mirror of task_crashinfo_get_ref: CAS loop decrementing the packed counters */
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			/* post-decrement: the test sees the value before the decrement */
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// on failure this reloads the current value into oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "Corpse released, count at %d\n", newgate.corpses);
			return KERN_SUCCESS;
		}
	}
}
276 
277 
278 kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p,unsigned size,corpse_flags_t kc_u_flags,unsigned kc_flags)279 task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
280     corpse_flags_t kc_u_flags, unsigned kc_flags)
281 {
282 	kcdata_descriptor_t kcdata;
283 
284 	if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
285 		if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
286 			return NULL;
287 		}
288 	}
289 
290 	kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
291 	    kc_flags);
292 	if (kcdata) {
293 		kcdata->kcd_user_flags = kc_u_flags;
294 	} else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
295 		task_crashinfo_release_ref(kc_u_flags);
296 	}
297 	return kcdata;
298 }
299 
300 
301 /*
302  * Free up the memory associated with task_crashinfo_data
303  */
304 kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)305 task_crashinfo_destroy(kcdata_descriptor_t data)
306 {
307 	if (!data) {
308 		return KERN_INVALID_ARGUMENT;
309 	}
310 	if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
311 		task_crashinfo_release_ref(data->kcd_user_flags);
312 	}
313 	return kcdata_memory_destroy(data);
314 }
315 
316 /*
317  * Routine: task_get_corpseinfo
318  * params: task - task which has corpse info setup.
319  * returns: crash info data attached to task.
320  *          NULL if task is null or has no corpse info
321  */
322 kcdata_descriptor_t
task_get_corpseinfo(task_t task)323 task_get_corpseinfo(task_t task)
324 {
325 	kcdata_descriptor_t retval = NULL;
326 	if (task != NULL) {
327 		retval = task->corpse_info;
328 	}
329 	return retval;
330 }
331 
/*
 * Routine: task_add_to_corpse_task_list
 * params: corpse_task - task to be added to corpse task list
 * returns: None.
 * List mutation is serialized by tasks_corpse_lock.
 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}
344 
/*
 * Routine: task_remove_from_corpse_task_list
 * params: corpse_task - task to be removed from corpse task list
 * returns: None.
 * List mutation is serialized by tasks_corpse_lock.
 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}
357 
/*
 * Routine: task_purge_all_corpses
 * params: None.
 * returns: None.
 *
 * Reclaims memory by removing every map entry from each corpse task's
 * address space.  The corpse tasks themselves remain on the global list;
 * only their VM contents are torn down.
 */
void
task_purge_all_corpses(void)
{
	task_t task;

	printf("Purging corpses......\n\n");

	/* tasks_corpse_lock is held for the whole walk, including the map teardowns */
	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		vm_map_remove(task->map,
		    task->map->min_offset,
		    task->map->max_offset,
		    /*
		     * Final cleanup:
		     * + no unnesting
		     * + remove immutable mappings
		     * + allow gaps in the range
		     */
		    (VM_MAP_REMOVE_NO_UNNESTING |
		    VM_MAP_REMOVE_IMMUTABLE |
		    VM_MAP_REMOVE_GAPS_OK));
	}

	lck_mtx_unlock(&tasks_corpse_lock);
}
389 
/*
 * Routine: find_corpse_task_by_uniqueid_grp
 * params: task_uniqueid - uniqueid of the corpse
 *         target - target task [Out Param]
 *         grp - task reference group
 * returns:
 *         KERN_SUCCESS if a matching corpse is found, gives a ref.
 *         KERN_FAILURE corpse with given uniqueid is not found.
 */
kern_return_t
find_corpse_task_by_uniqueid_grp(
	uint64_t   task_uniqueid,
	task_t     *target,
	task_grp_t grp)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);

	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		if (task->task_uniqueid == task_uniqueid) {
			/*
			 * NOTE(review): the reference is taken after dropping
			 * tasks_corpse_lock; presumably something else keeps
			 * the corpse alive across this window — confirm
			 * against the corpse teardown path.
			 */
			lck_mtx_unlock(&tasks_corpse_lock);
			task_reference_grp(task, grp);
			*target = task;
			return KERN_SUCCESS;
		}
	}

	lck_mtx_unlock(&tasks_corpse_lock);
	return KERN_FAILURE;
}
421 
/*
 * Routine: task_generate_corpse
 * params: task - task to fork a corpse
 *         corpse_task_port - task port of the generated corpse [Out Param]
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT if task is the kernel task, TASK_NULL,
 *          or itself a corpse fork.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;

	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* refuse to fork a corpse from something that is already a corpse fork */
	task_lock(task);
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* etype == 0: no exception will be enqueued, so drop the thread ref now */
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_corpse_to_port_and_nsrequest(new_task);
	assert(IP_NULL != corpse_port);

	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}
482 
/*
 * Routine: task_enqueue_exception_with_corpse
 * params: task - task to generate a corpse and enqueue it
 *         etype - EXC_RESOURCE or EXC_GUARD
 *         code - exception code to be enqueued
 *         codeCnt - code array count - code and subcode
 *         reason - opaque exit-reason blob recorded in the corpse crash info
 *
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments passed.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	kern_return_t kr;

	/* both a code and a subcode are required */
	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread,
	    etype, code[0], code[1], reason);
	if (kr == KERN_SUCCESS) {
		if (thread == THREAD_NULL) {
			/*
			 * NOTE(review): this return keeps the corpse-task ref
			 * handed back by task_generate_corpse_internal;
			 * presumably thread is never NULL on success when
			 * etype != 0 — confirm.
			 */
			return KERN_FAILURE;
		}
		assert(new_task != TASK_NULL);
		assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
		/* hand the corpse and its crashed thread to the exception delivery queue */
		thread_exception_enqueue(new_task, thread, etype);
	}
	return kr;
}
525 
/*
 * Routine: task_generate_corpse_internal
 * params: task - task to fork a corpse
 *         corpse_task - task of the generated corpse [Out Param]
 *         exc_thread - equivalent thread in corpse enqueuing exception [Out Param]
 *         etype - EXC_RESOURCE or EXC_GUARD or 0
 *         code - mach exception code to be passed in corpse blob
 *         subcode - mach exception subcode to be passed in corpse blob
 *         reason - opaque exit-reason blob recorded in the corpse crash info
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 *
 * On success the caller receives a reference on *corpse_task and on
 * *exc_thread; on failure everything created here is torn down.
 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	int t_flags;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	/* honor the -no_corpses boot-arg */
	if (!corpses_enabled()) {
		return KERN_NOT_SUPPORTED;
	}

	/* per-task opt-out set through SPI */
	if (task_corpse_forking_disabled(task)) {
		os_log(OS_LOG_DEFAULT, "corpse for pid %d disabled via SPI\n", task_pid(task));
		return KERN_FAILURE;
	}

	/* user-fault guard exceptions are counted against their own, tighter limit */
	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	/* reserve an in-flight corpse slot; fails on rate limit */
	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	/* corpse forks carry the corpse + pending-corpse flags from birth */
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    TF_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    TPF_NONE,
	    TWF_NONE,
	    &new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata, (etype != 0));
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/*
	 * transfer our references to the corpse info: from here on the
	 * in-flight slot is owned by new_task->corpse_info and will be
	 * released by task_crashinfo_destroy()
	 */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0;

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason, etype);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

	/* success also falls through this label; cleanup below is gated on kr */
error_task_generate_corpse:
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	if (kr != KERN_SUCCESS) {
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		/* non-zero only if the slot was never transferred to corpse_info */
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	kfree_data(udata_buffer, size);

	return kr;
}
698 
699 /*
700  * Routine: task_map_corpse_info
701  * params: task - Map the corpse info in task's address space
702  *         corpse_task - task port of the corpse
703  *         kcd_addr_begin - address of the mapped corpse info
704  *         kcd_addr_begin - size of the mapped corpse info
705  * returns: KERN_SUCCESS on Success.
706  *          KERN_FAILURE on Failure.
707  *          KERN_INVALID_ARGUMENT on invalid arguments.
708  * Note: Temporary function, will be deleted soon.
709  */
710 kern_return_t
task_map_corpse_info(task_t task,task_t corpse_task,vm_address_t * kcd_addr_begin,uint32_t * kcd_size)711 task_map_corpse_info(
712 	task_t task,
713 	task_t corpse_task,
714 	vm_address_t *kcd_addr_begin,
715 	uint32_t *kcd_size)
716 {
717 	kern_return_t kr;
718 	mach_vm_address_t kcd_addr_begin_64;
719 	mach_vm_size_t size_64;
720 
721 	kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
722 	if (kr != KERN_SUCCESS) {
723 		return kr;
724 	}
725 
726 	*kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
727 	*kcd_size = (uint32_t) size_64;
728 	return KERN_SUCCESS;
729 }
730 
/*
 * Routine: task_map_corpse_info_64
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info (takes mach_vm_address_t *)
 *         kcd_size - size of the mapped corpse info (takes mach_vm_size_t *)
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_corpse_info_64(
	task_t task,
	task_t corpse_task,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t crash_data_ptr = 0;
	const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
	void *corpse_info_kernel = NULL;

	/* destination must be a live task, not itself a corpse fork */
	if (task == TASK_NULL || task_is_a_corpse_fork(task)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* source must be a corpse that actually has crash info attached */
	if (corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task) ||
	    kcdata_memory_get_begin_addr(corpse_task->corpse_info) == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
	kr = mach_vm_allocate_kernel(task->map, &crash_data_ptr, size,
	    VM_FLAGS_ANYWHERE, VM_MEMORY_CORPSEINFO);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/*
	 * NOTE(review): copyout() return value is ignored; a partial/failed
	 * copy still reports KERN_SUCCESS with the mapping in place — confirm
	 * whether that is intentional.
	 */
	copyout(corpse_info_kernel, (user_addr_t)crash_data_ptr, (size_t)size);
	*kcd_addr_begin = crash_data_ptr;
	*kcd_size = size;

	return KERN_SUCCESS;
}
773 
/*
 * Routine: task_corpse_get_crashed_thread_id
 * params: corpse_task - corpse to query (must be non-NULL; not checked here)
 * returns: thread id recorded as the crashed thread of the corpse.
 */
uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)
{
	return corpse_task->crashed_thread_id;
}
779