xref: /xnu-8019.80.24/osfmk/kern/exception.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/message.h>
63 #include <mach/port.h>
64 #include <mach/mig_errors.h>
65 #include <mach/task.h>
66 #include <mach/thread_status.h>
67 #include <mach/exception_types.h>
68 #include <mach/exc.h>
69 #include <mach/mach_exc.h>
70 
71 #include <ipc/port.h>
72 #include <ipc/ipc_entry.h>
73 #include <ipc/ipc_object.h>
74 #include <ipc/ipc_notify.h>
75 #include <ipc/ipc_space.h>
76 #include <ipc/ipc_pset.h>
77 #include <ipc/ipc_machdep.h>
78 
79 #include <kern/ipc_tt.h>
80 #include <kern/task.h>
81 #include <kern/thread.h>
82 #include <kern/processor.h>
83 #include <kern/sched.h>
84 #include <kern/sched_prim.h>
85 #include <kern/host.h>
86 #include <kern/misc_protos.h>
87 #include <kern/ux_handler.h>
88 #include <kern/task_ident.h>
89 
90 #include <vm/vm_map.h>
91 
92 #include <security/mac_mach_internal.h>
93 #include <string.h>
94 
95 #include <pexpert/pexpert.h>
96 
97 #include <os/log.h>
98 
99 #include <libkern/coreanalytics/coreanalytics.h>
100 
/* When true (set via the -panic_on_exception_triage boot-arg), panic instead
 * of delivering any exception — see exception_triage_thread(). */
bool panic_on_exception_triage = false;

/* Statistics: number of upcalls made for each exception behavior variant. */
unsigned long c_thr_exc_raise = 0;
unsigned long c_thr_exc_raise_identity_token = 0;
unsigned long c_thr_exc_raise_state = 0;
unsigned long c_thr_exc_raise_state_id = 0;

/* forward declarations */
kern_return_t exception_deliver(
	thread_t                thread,
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt,
	struct exception_action *excp,
	lck_mtx_t                       *mutex);

#ifdef MACH_BSD
kern_return_t bsd_exception(
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt);
#endif /* MACH_BSD */

#if __has_feature(ptrauth_calls)
/* BSD-layer hook used to terminate a task after a fatal PAC violation. */
extern int exit_with_pac_exception(
	void *proc,
	exception_type_t         exception,
	mach_exception_code_t    code,
	mach_exception_subcode_t subcode);
#endif /* __has_feature(ptrauth_calls) */

#ifdef MACH_BSD
/* BSD-layer helpers for proc identity/tracing state (defined in bsd/). */
extern bool proc_is_traced(void *p);
extern int      proc_selfpid(void);
extern char     *proc_name_address(struct proc *p);
#endif /* MACH_BSD */

#if (DEVELOPMENT || DEBUG)
/* Debug tunable: log every exception sent on behalf of a pid <= this value. */
TUNABLE_WRITEABLE(unsigned int, exception_log_max_pid, "exception_log_max_pid", 0);
#endif /* (DEVELOPMENT || DEBUG) */
141 
142 /*
143  * Routine: exception_init
144  * Purpose:
145  *   Global initialization of state for exceptions.
146  * Conditions:
147  *   None.
148  */
149 void
exception_init(void)150 exception_init(void)
151 {
152 	int tmp = 0;
153 
154 	if (PE_parse_boot_argn("-panic_on_exception_triage", &tmp, sizeof(tmp))) {
155 		panic_on_exception_triage = true;
156 	}
157 
158 #if (DEVELOPMENT || DEBUG)
159 	if (exception_log_max_pid) {
160 		printf("Logging all exceptions where pid < exception_log_max_pid (%d)\n", exception_log_max_pid);
161 	}
162 #endif /* (DEVELOPMENT || DEBUG) */
163 }
164 
/*
 *	Routine:	exception_deliver
 *	Purpose:
 *		Make an upcall to the exception server provided.
 *		`excp` is an exception_action array indexed by exception
 *		type; `mutex` is the lock protecting it (thread, task or
 *		host level, depending on the caller).
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		KERN_SUCCESS if the exception was handled
 */
kern_return_t
exception_deliver(
	thread_t                thread,
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt,
	struct exception_action *excp,
	lck_mtx_t                       *mutex)
{
	ipc_port_t              exc_port = IPC_PORT_NULL;
	exception_data_type_t   small_code[EXCEPTION_CODE_MAX];
	int                     code64;
	int                     behavior;
	int                     flavor;
	kern_return_t           kr;
	task_t task;
	task_id_token_t task_token;
	ipc_port_t thread_port = IPC_PORT_NULL,
	    task_port = IPC_PORT_NULL,
	    task_token_port = IPC_PORT_NULL;

	/*
	 *  Save work if we are terminating.
	 *  Just go back to our AST handler.
	 */
	if (!thread->active && !thread->inspection) {
		return KERN_SUCCESS;
	}

	/*
	 * If there are no exception actions defined for this entity,
	 * we can't deliver here.
	 */
	if (excp == NULL) {
		return KERN_FAILURE;
	}

	/* Guard the array index below; release kernels fail gracefully. */
	assert(exception < EXC_TYPES_COUNT);
	if (exception >= EXC_TYPES_COUNT) {
		return KERN_FAILURE;
	}

	excp = &excp[exception];

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	ip_mq_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_mq_unlock(exc_port);
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	/* Take both a port reference and a send right; released at out_release_right. */
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_mq_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

	/* Split the registered behavior into the code-width flag and the base behavior. */
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_MASK;

	if (!code64) {
		/* Handler expects 32-bit codes; truncate the 64-bit values. */
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	task = get_threadtask(thread);

#if CONFIG_MACF
	/* Now is a reasonably good time to check if the exception action is
	 * permitted for this process, because after this point we will send
	 * the message out almost certainly.
	 * As with other failures, exception_triage_thread will go on
	 * to the next level.
	 */

	/* The global exception-to-signal translation port is safe to be an exception handler. */
	if (is_ux_handler_port(exc_port) == FALSE &&
	    mac_exc_action_check_exception_send(task, excp) != 0) {
		kr = KERN_FAILURE;
		goto out_release_right;
	}
#endif

	/*
	 * Only behaviors that pass raw task/thread ports in the message need
	 * send rights for them; EXCEPTION_IDENTITY_PROTECTED sends a task
	 * identity token instead, and EXCEPTION_STATE sends no identity.
	 */
	if ((behavior != EXCEPTION_STATE) && (behavior != EXCEPTION_IDENTITY_PROTECTED)) {
		task_reference(task);
		task_port = convert_task_to_port(task);
		/* task ref consumed */
		thread_reference(thread);
		thread_port = convert_thread_to_port(thread);
		/* thread ref consumed */
	}

	if (behavior == EXCEPTION_IDENTITY_PROTECTED) {
		kr = task_create_identity_token(task, &task_token);
		/* task_token now represents a task, or corpse */
		assert(kr == KERN_SUCCESS);
		task_token_port = convert_task_id_token_to_port(task_token);
		/* task token ref consumed */
	}

	switch (behavior) {
	case EXCEPTION_STATE: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;
		state_cnt = _MachineStateCount[flavor];
		/* Capture the thread state of the requested flavor to ship with the message. */
		kr = thread_getstatus_to_user(thread, flavor,
		    (thread_state_t)state,
		    &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state(exc_port,
				    exception,
				    code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			}
			if (kr == KERN_SUCCESS) {
				/* Apply the (possibly modified) state returned by the handler. */
				if (exception != EXC_CORPSE_NOTIFY) {
					kr = thread_setstatus_from_user(thread, flavor,
					    (thread_state_t)state,
					    state_cnt);
				}
				goto out_release_right;
			}
		}

		goto out_release_right;
	}

	case EXCEPTION_DEFAULT: {
		c_thr_exc_raise++;
		if (code64) {
			kr = mach_exception_raise(exc_port,
			    thread_port,
			    task_port,
			    exception,
			    code,
			    codeCnt);
		} else {
			kr = exception_raise(exc_port,
			    thread_port,
			    task_port,
			    exception,
			    small_code,
			    codeCnt);
		}

		goto out_release_right;
	}

	case EXCEPTION_IDENTITY_PROTECTED: {
		c_thr_exc_raise_identity_token++;
		if (code64) {
			/* Identifies the thread by id and the task by token, not by raw port. */
			kr = mach_exception_raise_identity_protected(exc_port,
			    thread->thread_id,
			    task_token_port,
			    exception,
			    code,
			    codeCnt);
		} else {
			/* This behavior can only be registered with MACH_EXCEPTION_CODES. */
			panic("mach_exception_raise_identity_protected() must be code64");
		}

		goto out_release_right;
	}

	case EXCEPTION_STATE_IDENTITY: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus_to_user(thread, flavor,
		    (thread_state_t)state,
		    &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state_identity(
					exc_port,
					thread_port,
					task_port,
					exception,
					code,
					codeCnt,
					&flavor,
					state, state_cnt,
					state, &state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port,
				    thread_port,
				    task_port,
				    exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			}

			if (kr == KERN_SUCCESS) {
				/* Apply the (possibly modified) state returned by the handler. */
				if (exception != EXC_CORPSE_NOTIFY) {
					kr = thread_setstatus_from_user(thread, flavor,
					    (thread_state_t)state,
					    state_cnt);
				}
				goto out_release_right;
			}
		}

		goto out_release_right;
	}

	default:
		panic("bad exception behavior!");
		return KERN_FAILURE;
	}/* switch */

out_release_right:

	/* Drop every send right acquired above; kr carries the delivery result. */
	if (task_port) {
		ipc_port_release_send(task_port);
	}

	if (thread_port) {
		ipc_port_release_send(thread_port);
	}

	if (exc_port) {
		ipc_port_release_send(exc_port);
	}

	if (task_token_port) {
		ipc_port_release_send(task_token_port);
	}

	return kr;
}
439 
440 /*
441  * Routine: check_exc_receiver_dependency
442  * Purpose:
443  *      Verify that the port destined for receiving this exception is not
444  *      on the current task. This would cause hang in kernel for
445  *      EXC_CRASH primarily. Note: If port is transferred
446  *      between check and delivery then deadlock may happen.
447  *
448  * Conditions:
449  *		Nothing locked and no resources held.
450  *		Called from an exception context.
451  * Returns:
452  *      KERN_SUCCESS if its ok to send exception message.
453  */
454 static kern_return_t
check_exc_receiver_dependency(exception_type_t exception,struct exception_action * excp,lck_mtx_t * mutex)455 check_exc_receiver_dependency(
456 	exception_type_t exception,
457 	struct exception_action *excp,
458 	lck_mtx_t *mutex)
459 {
460 	kern_return_t retval = KERN_SUCCESS;
461 
462 	if (excp == NULL || exception != EXC_CRASH) {
463 		return retval;
464 	}
465 
466 	task_t task = current_task();
467 	lck_mtx_lock(mutex);
468 	ipc_port_t xport = excp[exception].port;
469 	if (IP_VALID(xport) && ip_in_space_noauth(xport, task->itk_space)) {
470 		retval = KERN_FAILURE;
471 	}
472 	lck_mtx_unlock(mutex);
473 	return retval;
474 }
475 
476 
/*
 *	Routine:	exception_triage_thread
 *	Purpose:
 *		The thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *		Delivery is attempted at the thread, then task, then host
 *		level, stopping at the first handler that accepts it.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		KERN_SUCCESS if exception is handled by any of the handlers.
 */
kern_return_t
exception_triage_thread(
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt,
	thread_t                thread)
{
	task_t                  task;
	thread_ro_t             tro;
	host_priv_t             host_priv;
	lck_mtx_t               *mutex;
	struct exception_action *actions;
	kern_return_t   kr = KERN_FAILURE;

	/* EXC_RPC_ALERT is delivered only through sys_perf_notify(). */
	assert(exception != EXC_RPC_ALERT);

	/*
	 * If this behavior has been requested by the kernel
	 * (due to the boot environment), we should panic if we
	 * enter this function.  This is intended as a debugging
	 * aid; it should allow us to debug why we caught an
	 * exception in environments where debugging is especially
	 * difficult.
	 */
	if (panic_on_exception_triage) {
		panic("called exception_triage when it was forbidden by the boot environment");
	}

	/*
	 * Try to raise the exception at the activation level.
	 */
	mutex   = &thread->mutex;
	tro     = get_thread_ro(thread);
	actions = tro->tro_exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		/* MACH_RCV_PORT_DIED counts as handled: the handler port is gone. */
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * Maybe the task level will handle it.
	 */
	task    = tro->tro_task;
	mutex   = &task->itk_lock_data;
	actions = task->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex     = &host_priv->lock;
	actions   = host_priv->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

out:
	/*
	 * Resumable exception types return straight to user mode; the rest
	 * (crash/resource/guard/corpse) continue through their callers for
	 * further processing.
	 */
	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
		thread_exception_return();
	}
	return kr;
}
564 
#if __has_feature(ptrauth_calls)

/* Telemetry event describing a PAC violation in an untraced process. */
CA_EVENT(pac_exception_event,
    CA_INT, exception,
    CA_INT, exception_code_0,
    CA_INT, exception_code_1,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);

/*
 * Routine: pac_exception_triage
 * Purpose:
 *	Record telemetry for a pointer-authentication (PAC) failure and,
 *	when the task is marked PAC-exception-fatal and is not being
 *	ptraced, terminate it instead of letting the exception be delivered.
 * Conditions:
 *	Called from exception context on the faulting thread.
 *	code must hold at least two entries (code and subcode).
 *	Does not return when the task is terminated.
 */
static void
pac_exception_triage(
	exception_type_t        exception,
	mach_exception_data_t   code)
{
	boolean_t traced_flag = FALSE;
	task_t task = current_task();
	void *proc = task->bsd_info;
	char *proc_name = (char *) "unknown";
	int pid = 0;

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (proc) {
		traced_flag = proc_is_traced(proc);
		/* Should only be called on current proc */
		proc_name = proc_name_address(proc);

		/*
		 * For a ptrauth violation, check if process isn't being ptraced and
		 * the task has the TF_PAC_EXC_FATAL flag set. If both conditions are true,
		 * terminate the task via exit_with_reason
		 */
		if (!traced_flag) {
			/* NOTE(review): assumes CA_EVENT_SEND consumes ca_event — confirm. */
			ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_exception_event);
			CA_EVENT_TYPE(pac_exception_event) * pexc_event = ca_event->data;
			pexc_event->exception = exception;
			pexc_event->exception_code_0 = code[0];
			pexc_event->exception_code_1 = code[1];
			strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
			CA_EVENT_SEND(ca_event);
			if (task_is_pac_exception_fatal(task)) {
				os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit a pac violation\n", __func__, proc_name, pid);
				exit_with_pac_exception(proc, exception, code[0], code[1]);
				thread_exception_return();
				/* NOT_REACHABLE */
			}
		}
	}
#endif /* MACH_BSD */
}
#endif /* __has_feature(ptrauth_calls) */
615 
/*
 *	Routine:	exception_triage
 *	Purpose:
 *		The current thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		KERN_SUCCESS if exception is handled by any of the handlers.
 */
/* Debug tunable: panic when a task using sub-kernel page size takes an exception. */
int debug4k_panic_on_exception = 0;
kern_return_t
exception_triage(
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt)
{
	thread_t thread = current_thread();
	task_t   task   = current_task();

	/* Extra diagnostics for tasks whose VM map page size is below the kernel's. */
	if (VM_MAP_PAGE_SIZE(task->map) < PAGE_SIZE) {
		DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx\n",
		    thread, task, task->map, exception, code[0], code[1]);
		if (debug4k_panic_on_exception) {
			panic("DEBUG4K thread %p task %p map %p exception %d codes 0x%llx 0x%llx",
			    thread, task, task->map, exception, code[0], code[1]);
		}
	}

#if (DEVELOPMENT || DEBUG)
#ifdef MACH_BSD
	if (proc_pid(task->bsd_info) <= exception_log_max_pid) {
		printf("exception_log_max_pid: pid %d (%s): sending exception %d (0x%llx 0x%llx)\n",
		    proc_pid(task->bsd_info), proc_name_address(task->bsd_info), exception, code[0], code[1]);
	}
#endif /* MACH_BSD */
#endif /* DEVELOPMENT || DEBUG */

#if __has_feature(ptrauth_calls)
	/*
	 * PAC failures arrive with EXC_PTRAUTH_BIT or'ed into the exception
	 * type; strip it and run the PAC-specific triage (which may not
	 * return) before normal delivery.
	 */
	if (exception & EXC_PTRAUTH_BIT) {
		exception &= ~EXC_PTRAUTH_BIT;
		assert(codeCnt == 2);
		pac_exception_triage(exception, code);
	}
#endif /* __has_feature(ptrauth_calls) */
	return exception_triage_thread(exception, code, codeCnt, thread);
}
666 
667 kern_return_t
bsd_exception(exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt)668 bsd_exception(
669 	exception_type_t        exception,
670 	mach_exception_data_t   code,
671 	mach_msg_type_number_t  codeCnt)
672 {
673 	task_t                  task;
674 	lck_mtx_t               *mutex;
675 	thread_t                self = current_thread();
676 	kern_return_t           kr;
677 
678 	/*
679 	 * Maybe the task level will handle it.
680 	 */
681 	task = current_task();
682 	mutex = &task->itk_lock_data;
683 
684 	kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
685 
686 	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
687 		return KERN_SUCCESS;
688 	}
689 	return KERN_FAILURE;
690 }
691 
692 
693 /*
694  * Raise an exception on a task.
695  * This should tell launchd to launch Crash Reporter for this task.
696  */
697 kern_return_t
task_exception_notify(exception_type_t exception,mach_exception_data_type_t exccode,mach_exception_data_type_t excsubcode)698 task_exception_notify(exception_type_t exception,
699     mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode)
700 {
701 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
702 	wait_interrupt_t                wsave;
703 	kern_return_t kr = KERN_SUCCESS;
704 
705 	code[0] = exccode;
706 	code[1] = excsubcode;
707 
708 	wsave = thread_interrupt_level(THREAD_UNINT);
709 	kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
710 	(void) thread_interrupt_level(wsave);
711 	return kr;
712 }
713 
714 
715 /*
716  *	Handle interface for special performance monitoring
717  *	This is a special case of the host exception handler
718  */
719 kern_return_t
sys_perf_notify(thread_t thread,int pid)720 sys_perf_notify(thread_t thread, int pid)
721 {
722 	host_priv_t             hostp;
723 	ipc_port_t              xport;
724 	wait_interrupt_t        wsave;
725 	kern_return_t           ret;
726 
727 	hostp = host_priv_self();       /* Get the host privileged ports */
728 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
729 	code[0] = 0xFF000001;           /* Set terminate code */
730 	code[1] = pid;          /* Pass out the pid */
731 
732 	lck_mtx_lock(&hostp->lock);
733 	xport = hostp->exc_actions[EXC_RPC_ALERT].port;
734 
735 	/* Make sure we're not catching our own exception */
736 	if (!IP_VALID(xport) ||
737 	    !ip_active(xport) ||
738 	    ip_in_space_noauth(xport, get_threadtask(thread)->itk_space)) {
739 		lck_mtx_unlock(&hostp->lock);
740 		return KERN_FAILURE;
741 	}
742 
743 	lck_mtx_unlock(&hostp->lock);
744 
745 	wsave = thread_interrupt_level(THREAD_UNINT);
746 	ret = exception_deliver(
747 		thread,
748 		EXC_RPC_ALERT,
749 		code,
750 		2,
751 		hostp->exc_actions,
752 		&hostp->lock);
753 	(void)thread_interrupt_level(wsave);
754 
755 	return ret;
756 }
757