xref: /xnu-12377.41.6/osfmk/kern/exception.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2024 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/message.h>
63 #include <mach/port.h>
64 #include <mach/mig_errors.h>
65 #include <mach/task.h>
66 #include <mach/thread_status.h>
67 #include <mach/exception_types.h>
68 #include <mach/exc.h>
69 #include <mach/mach_exc.h>
70 
71 #include <ipc/port.h>
72 #include <ipc/ipc_entry.h>
73 #include <ipc/ipc_object.h>
74 #include <ipc/ipc_notify.h>
75 #include <ipc/ipc_pset.h>
76 #include <ipc/ipc_machdep.h>
77 
78 #include <kern/ipc_tt.h>
79 #include <kern/task.h>
80 #include <kern/thread.h>
81 #include <kern/processor.h>
82 #include <kern/sched.h>
83 #include <kern/sched_prim.h>
84 #include <kern/host.h>
85 #include <kern/misc_protos.h>
86 #include <kern/ux_handler.h>
87 #include <kern/task_ident.h>
88 
89 #include <vm/vm_map_xnu.h>
90 #include <vm/vm_map.h>
91 #include <sys/reason.h>
92 #include <security/mac_mach_internal.h>
93 #include <string.h>
94 
95 #include <pexpert/pexpert.h>
96 
97 #include <os/log.h>
98 #include <os/system_event_log.h>
99 
100 #include <libkern/coreanalytics/coreanalytics.h>
101 
102 #include <sys/code_signing.h> /* for developer mode state */
103 
104 bool panic_on_exception_triage = false;
105 
/* Not used in code, only for inspection during debugging */
107 unsigned long c_thr_exc_raise = 0;
108 unsigned long c_thr_exc_raise_identity_token = 0;
109 unsigned long c_thr_exc_raise_state_identity_token = 0;
110 unsigned long c_thr_exc_raise_state = 0;
111 unsigned long c_thr_exc_raise_state_id = 0;
112 unsigned long c_thr_exc_raise_backtrace = 0;
113 
114 /* forward declarations */
115 kern_return_t exception_deliver(
116 	thread_t                thread,
117 	exception_type_t        exception,
118 	mach_exception_data_t   code,
119 	mach_msg_type_number_t  codeCnt,
120 	struct exception_action *excp,
121 	lck_mtx_t                       *mutex);
122 
123 #ifdef MACH_BSD
124 kern_return_t bsd_exception(
125 	exception_type_t        exception,
126 	mach_exception_data_t   code,
127 	mach_msg_type_number_t  codeCnt);
128 #endif /* MACH_BSD */
129 
130 #ifdef MACH_BSD
131 extern bool proc_is_traced(void *p);
132 extern int      proc_selfpid(void);
133 extern char     *proc_name_address(struct proc *p);
134 #endif /* MACH_BSD */
135 
136 #if (DEVELOPMENT || DEBUG)
137 TUNABLE_WRITEABLE(unsigned int, exception_log_max_pid, "exception_log_max_pid", 0);
138 #endif /* (DEVELOPMENT || DEBUG) */
139 
140 /*
141  * Routine: exception_init
142  * Purpose:
143  *   Global initialization of state for exceptions.
144  * Conditions:
145  *   None.
146  */
147 void
exception_init(void)148 exception_init(void)
149 {
150 	int tmp = 0;
151 
152 	if (PE_parse_boot_argn("-panic_on_exception_triage", &tmp, sizeof(tmp))) {
153 		panic_on_exception_triage = true;
154 	}
155 
156 #if (DEVELOPMENT || DEBUG)
157 	if (exception_log_max_pid) {
158 		printf("Logging all exceptions where pid < exception_log_max_pid (%d)\n", exception_log_max_pid);
159 	}
160 #endif /* (DEVELOPMENT || DEBUG) */
161 }
162 
163 static TUNABLE(bool, pac_replace_ptrs_user, "pac_replace_ptrs_user", true);
164 
165 ipc_port_t
exception_port_copy_send(ipc_port_t port)166 exception_port_copy_send(ipc_port_t port)
167 {
168 	if (IP_VALID(port)) {
169 		if (is_ux_handler_port(port)) {
170 			/* is_ux_handler_port() compares against __DATA_CONST */
171 			port = ipc_port_copy_send_any(port);
172 		} else {
173 			port = ipc_port_copy_send_mqueue(port);
174 		}
175 	}
176 	return port;
177 }
178 
179 /*
180  *	Routine:	exception_deliver
181  *	Purpose:
182  *		Make an upcall to the exception server provided.
183  *	Conditions:
184  *		Nothing locked and no resources held.
185  *		Called from an exception context, so
186  *		thread_exception_return and thread_kdb_return
187  *		are possible.
188  *	Returns:
189  *		KERN_SUCCESS if the exception was handled
190  */
191 kern_return_t
exception_deliver(thread_t thread,exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt,struct exception_action * excp,lck_mtx_t * mutex)192 exception_deliver(
193 	thread_t                thread,
194 	exception_type_t        exception,
195 	mach_exception_data_t   code,
196 	mach_msg_type_number_t  codeCnt,
197 	struct exception_action *excp,
198 	lck_mtx_t               *mutex)
199 {
200 	ipc_port_t              exc_port = IPC_PORT_NULL;
201 	exception_data_type_t   small_code[EXCEPTION_CODE_MAX];
202 	thread_state_t          new_state = NULL;
203 	int                     code64;
204 	int                     behavior;
205 	int                     flavor;
206 	kern_return_t           kr = KERN_FAILURE;
207 	task_t task;
208 	task_id_token_t task_token;
209 	ipc_port_t thread_port = IPC_PORT_NULL,
210 	    task_port = IPC_PORT_NULL,
211 	    task_token_port = IPC_PORT_NULL;
212 	thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
213 	thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
214 
215 	/*
216 	 *  Save work if we are terminating.
217 	 *  Just go back to our AST handler.
218 	 */
219 	if (!thread->active && !thread->inspection) {
220 		return KERN_SUCCESS;
221 	}
222 
223 	/*
224 	 * If there are no exception actions defined for this entity,
225 	 * we can't deliver here.
226 	 */
227 	if (excp == NULL) {
228 		return KERN_FAILURE;
229 	}
230 
231 	assert(exception < EXC_TYPES_COUNT);
232 	if (exception >= EXC_TYPES_COUNT) {
233 		return KERN_FAILURE;
234 	}
235 
236 	excp = &excp[exception];
237 
238 	/*
239 	 * Snapshot the exception action data under lock for consistency.
240 	 * Hold a reference to the port over the exception_raise_* calls
241 	 * so it can't be destroyed.  This seems like overkill, but keeps
242 	 * the port from disappearing between now and when
243 	 * ipc_object_copyin_from_kernel is finally called.
244 	 */
245 	lck_mtx_lock(mutex);
246 	exc_port = exception_port_copy_send(excp->port);
247 	if (!IP_VALID(exc_port)) {
248 		lck_mtx_unlock(mutex);
249 		return KERN_FAILURE;
250 	}
251 	task = get_threadtask(thread);
252 
253 	flavor = excp->flavor;
254 	behavior = excp->behavior;
255 	if (excp->hardened) {
256 		/*
257 		 * On arm64e devices we have protected the pc returned via exception
258 		 * handlers with PAC. We also want to protect all other thread state
259 		 * for hardened exceptions to prevent modification of any registers
260 		 * that could affect control flow integrity sometime in the future.
261 		 */
262 		set_flags |= TSSF_ONLY_PC;
263 	}
264 	lck_mtx_unlock(mutex);
265 
266 	code64 = (behavior & MACH_EXCEPTION_CODES);
267 	behavior &= ~MACH_EXCEPTION_MASK;
268 
269 	if (!code64) {
270 		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
271 		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
272 	}
273 
274 
275 #if CONFIG_MACF
276 	/* Now is a reasonably good time to check if the exception action is
277 	 * permitted for this process, because after this point we will send
278 	 * the message out almost certainly.
279 	 * As with other failures, exception_triage_thread will go on
280 	 * to the next level.
281 	 */
282 
283 	/* The global exception-to-signal translation port is safe to be an exception handler. */
284 	if (is_ux_handler_port(exc_port) == FALSE &&
285 	    mac_exc_action_check_exception_send(task, excp) != 0) {
286 		kr = KERN_FAILURE;
287 		goto out_release_right;
288 	}
289 #endif
290 
291 	thread->options |= TH_IN_MACH_EXCEPTION;
292 
293 	switch (behavior) {
294 	case EXCEPTION_STATE: {
295 		mach_msg_type_number_t old_state_cnt, new_state_cnt;
296 		thread_state_data_t old_state;
297 		bool task_allow_user_state = task_needs_user_signed_thread_state(task);
298 
299 		if (pac_replace_ptrs_user || task_allow_user_state) {
300 			get_flags |= TSSF_RANDOM_USER_DIV;
301 			set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
302 		}
303 
304 		c_thr_exc_raise_state++;
305 		assert(flavor < THREAD_STATE_FLAVORS);
306 		old_state_cnt = (flavor < THREAD_STATE_FLAVORS) ? _MachineStateCount[flavor] : 0;
307 		kr = thread_getstatus_to_user(thread, flavor,
308 		    (thread_state_t)old_state,
309 		    &old_state_cnt, get_flags);
310 		new_state_cnt = old_state_cnt;
311 		if (kr == KERN_SUCCESS) {
312 			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
313 			if (new_state == NULL) {
314 				kr = KERN_RESOURCE_SHORTAGE;
315 				goto out_release_right;
316 			}
317 			if (code64) {
318 				kr = mach_exception_raise_state(exc_port,
319 				    exception,
320 				    code,
321 				    codeCnt,
322 				    &flavor,
323 				    old_state, old_state_cnt,
324 				    new_state, &new_state_cnt);
325 			} else {
326 				kr = exception_raise_state(exc_port, exception,
327 				    small_code,
328 				    codeCnt,
329 				    &flavor,
330 				    old_state, old_state_cnt,
331 				    new_state, &new_state_cnt);
332 			}
333 			if (kr == KERN_SUCCESS) {
334 				if (exception != EXC_CORPSE_NOTIFY) {
335 					kr = thread_setstatus_from_user(thread, flavor,
336 					    (thread_state_t)new_state, new_state_cnt,
337 					    (thread_state_t)old_state, old_state_cnt,
338 					    set_flags);
339 				}
340 				goto out_release_right;
341 			}
342 		}
343 
344 		goto out_release_right;
345 	}
346 
347 	case EXCEPTION_DEFAULT: {
348 		c_thr_exc_raise++;
349 
350 		task_reference(task);
351 		thread_reference(thread);
352 		/*
353 		 * Only deliver control port if Developer Mode enabled,
354 		 * or task is a corpse. Otherwise we only deliver the
355 		 * (immovable) read port in exception handler (both in
356 		 * or out of process). (94669540)
357 		 */
358 		if (developer_mode_state() || task_is_a_corpse(task)) {
359 			task_port = convert_task_to_port(task);
360 			thread_port = convert_thread_to_port(thread);
361 		} else {
362 			task_port = convert_task_read_to_port(task);
363 			thread_port = convert_thread_read_to_port(thread);
364 		}
365 		/* task and thread ref consumed */
366 
367 		if (code64) {
368 			kr = mach_exception_raise(exc_port,
369 			    thread_port,
370 			    task_port,
371 			    exception,
372 			    code,
373 			    codeCnt);
374 		} else {
375 			kr = exception_raise(exc_port,
376 			    thread_port,
377 			    task_port,
378 			    exception,
379 			    small_code,
380 			    codeCnt);
381 		}
382 
383 		goto out_release_right;
384 	}
385 
386 	case EXCEPTION_IDENTITY_PROTECTED: {
387 		c_thr_exc_raise_identity_token++;
388 
389 		kr = task_create_identity_token(task, &task_token);
390 		if (!task->active && kr == KERN_INVALID_ARGUMENT) {
391 			/* The task is terminating, don't need to send more exceptions */
392 			kr = KERN_SUCCESS;
393 			goto out_release_right;
394 		}
395 		/* task_token now represents a task, or corpse */
396 		assert(kr == KERN_SUCCESS);
397 		task_token_port = convert_task_id_token_to_port(task_token);
398 		/* task token ref consumed */
399 
400 		if (code64) {
401 			kr = mach_exception_raise_identity_protected(exc_port,
402 			    thread->thread_id,
403 			    task_token_port,
404 			    exception,
405 			    code,
406 			    codeCnt);
407 		} else {
408 			panic("mach_exception_raise_identity_protected() must be code64");
409 		}
410 
411 		goto out_release_right;
412 	}
413 
414 
415 	case EXCEPTION_STATE_IDENTITY_PROTECTED: {
416 		mach_msg_type_number_t old_state_cnt, new_state_cnt;
417 		thread_state_data_t old_state;
418 		bool task_allow_user_state = task_needs_user_signed_thread_state(task);
419 
420 		if (pac_replace_ptrs_user || task_allow_user_state) {
421 			set_flags |= TSSF_ALLOW_ONLY_USER_PTRS;
422 			if (excp->hardened) {
423 				/* Use the signed_pc_key diversifier on the task for authentication. */
424 				set_flags |= TSSF_TASK_USER_DIV;
425 				get_flags |= TSSF_TASK_USER_DIV;
426 			} else {
427 				/* Otherwise we should use the random diversifier */
428 				set_flags |= TSSF_RANDOM_USER_DIV;
429 				get_flags |= TSSF_RANDOM_USER_DIV;
430 			}
431 		}
432 
433 		c_thr_exc_raise_state_identity_token++;
434 		kr = task_create_identity_token(task, &task_token);
435 
436 		if (!task->active && kr == KERN_INVALID_ARGUMENT) {
437 			/* The task is terminating, don't need to send more exceptions */
438 			kr = KERN_SUCCESS;
439 			goto out_release_right;
440 		}
441 
442 		/* task_token now represents a task, or corpse */
443 		assert(kr == KERN_SUCCESS);
444 		task_token_port = convert_task_id_token_to_port(task_token);
445 		/* task token ref consumed */
446 
447 		old_state_cnt = _MachineStateCount[flavor];
448 		kr = thread_getstatus_to_user(thread, flavor,
449 		    (thread_state_t)old_state,
450 		    &old_state_cnt, get_flags);
451 		new_state_cnt = old_state_cnt;
452 
453 		if (kr == KERN_SUCCESS) {
454 			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
455 			if (new_state == NULL) {
456 				kr = KERN_RESOURCE_SHORTAGE;
457 				goto out_release_right;
458 			}
459 
460 			if (code64) {
461 				kr = mach_exception_raise_state_identity_protected(exc_port,
462 				    thread->thread_id,
463 				    task_token_port,
464 				    exception,
465 				    code,
466 				    codeCnt,
467 				    &flavor,
468 				    old_state, old_state_cnt,
469 				    new_state, &new_state_cnt);
470 			} else {
471 				panic("mach_exception_raise_state_identity_protected() must be code64");
472 			}
473 
474 			if (kr == KERN_SUCCESS) {
475 				if (exception != EXC_CORPSE_NOTIFY) {
476 					kr = thread_setstatus_from_user(thread, flavor,
477 					    (thread_state_t)new_state, new_state_cnt,
478 					    (thread_state_t)old_state, old_state_cnt, set_flags);
479 				}
480 				goto out_release_right;
481 			}
482 		}
483 
484 		goto out_release_right;
485 	}
486 
487 	case EXCEPTION_STATE_IDENTITY: {
488 		mach_msg_type_number_t old_state_cnt, new_state_cnt;
489 		thread_state_data_t old_state;
490 		bool task_allow_user_state = task_needs_user_signed_thread_state(task);
491 
492 		if (pac_replace_ptrs_user || task_allow_user_state) {
493 			get_flags |= TSSF_RANDOM_USER_DIV;
494 			set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
495 		}
496 
497 		c_thr_exc_raise_state_id++;
498 
499 		task_reference(task);
500 		thread_reference(thread);
501 		/*
502 		 * Only deliver control port if Developer Mode enabled,
503 		 * or task is a corpse. Otherwise we only deliver the
504 		 * (immovable) read port in exception handler (both in
505 		 * or out of process). (94669540)
506 		 */
507 		if (developer_mode_state() || task_is_a_corpse(task)) {
508 			task_port = convert_task_to_port(task);
509 			thread_port = convert_thread_to_port(thread);
510 		} else {
511 			task_port = convert_task_read_to_port(task);
512 			thread_port = convert_thread_read_to_port(thread);
513 		}
514 		/* task and thread ref consumed */
515 
516 		assert(flavor < THREAD_STATE_FLAVORS);
517 		old_state_cnt = (flavor < THREAD_STATE_FLAVORS) ? _MachineStateCount[flavor] : 0;
518 		kr = thread_getstatus_to_user(thread, flavor,
519 		    (thread_state_t)old_state,
520 		    &old_state_cnt, get_flags);
521 		new_state_cnt = old_state_cnt;
522 		if (kr == KERN_SUCCESS) {
523 			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
524 			if (new_state == NULL) {
525 				kr = KERN_RESOURCE_SHORTAGE;
526 				goto out_release_right;
527 			}
528 			if (code64) {
529 				kr = mach_exception_raise_state_identity(
530 					exc_port,
531 					thread_port,
532 					task_port,
533 					exception,
534 					code,
535 					codeCnt,
536 					&flavor,
537 					old_state, old_state_cnt,
538 					new_state, &new_state_cnt);
539 			} else {
540 				kr = exception_raise_state_identity(exc_port,
541 				    thread_port,
542 				    task_port,
543 				    exception,
544 				    small_code,
545 				    codeCnt,
546 				    &flavor,
547 				    old_state, old_state_cnt,
548 				    new_state, &new_state_cnt);
549 			}
550 
551 			if (kr == KERN_SUCCESS) {
552 				if (exception != EXC_CORPSE_NOTIFY &&
553 				    ip_type(thread_port) == IKOT_THREAD_CONTROL) {
554 					kr = thread_setstatus_from_user(thread, flavor,
555 					    (thread_state_t)new_state, new_state_cnt,
556 					    (thread_state_t)old_state, old_state_cnt, set_flags);
557 				}
558 				goto out_release_right;
559 			}
560 		}
561 
562 		goto out_release_right;
563 	}
564 
565 	default:
566 		panic("bad exception behavior!");
567 		return KERN_FAILURE;
568 	}/* switch */
569 
570 out_release_right:
571 
572 	thread->options &= ~TH_IN_MACH_EXCEPTION;
573 
574 	if (task_port) {
575 		ipc_port_release_send(task_port);
576 	}
577 
578 	if (thread_port) {
579 		ipc_port_release_send(thread_port);
580 	}
581 
582 	if (exc_port) {
583 		ipc_port_release_send(exc_port);
584 	}
585 
586 	if (task_token_port) {
587 		ipc_port_release_send(task_token_port);
588 	}
589 
590 	if (new_state) {
591 		kfree_data(new_state, sizeof(thread_state_data_t));
592 	}
593 
594 	return kr;
595 }
596 
597 /*
598  * Attempt exception delivery with backtrace info to exception ports
599  * in exc_ports in order.
600  */
601 /*
602  *	Routine:	exception_deliver_backtrace
603  *	Purpose:
604  *      Attempt exception delivery with backtrace info to exception ports
605  *      in exc_ports in order.
606  *	Conditions:
607  *		Caller has a reference on bt_object, and send rights on exc_ports.
608  *		Does not consume any passed references or rights
609  */
610 void
exception_deliver_backtrace(kcdata_object_t bt_object,ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT],exception_type_t exception)611 exception_deliver_backtrace(
612 	kcdata_object_t  bt_object,
613 	ipc_port_t       exc_ports[static BT_EXC_PORTS_COUNT],
614 	exception_type_t exception)
615 {
616 	kern_return_t kr;
617 	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
618 	ipc_port_t target_port, bt_obj_port;
619 
620 	assert(exception == EXC_GUARD);
621 
622 	code[0] = exception;
623 	code[1] = 0;
624 
625 	kcdata_object_reference(bt_object);
626 	bt_obj_port = convert_kcdata_object_to_port(bt_object);
627 	/* backtrace object ref consumed, no-senders is armed */
628 
629 	if (!IP_VALID(bt_obj_port)) {
630 		return;
631 	}
632 
633 	/*
634 	 * We are guaranteed at task_enqueue_exception_with_corpse() time
635 	 * that the exception port prefers backtrace delivery.
636 	 */
637 	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
638 		target_port = exc_ports[i];
639 
640 		if (!IP_VALID(target_port)) {
641 			continue;
642 		}
643 
644 		ip_mq_lock(target_port);
645 		if (!ip_active(target_port)) {
646 			ip_mq_unlock(target_port);
647 			continue;
648 		}
649 		ip_mq_unlock(target_port);
650 
651 		kr = mach_exception_raise_backtrace(target_port,
652 		    bt_obj_port,
653 		    EXC_CORPSE_NOTIFY,
654 		    code,
655 		    EXCEPTION_CODE_MAX);
656 
657 		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
658 			/* Exception is handled at this level */
659 			break;
660 		}
661 	}
662 
663 	/* May trigger no-senders notification for backtrace object */
664 	ipc_port_release_send(bt_obj_port);
665 
666 	return;
667 }
668 
669 /*
670  * Routine: check_exc_receiver_dependency
671  * Purpose:
672  *      Verify that the port destined for receiving this exception is not
673  *      on the current task. This would cause hang in kernel for
674  *      EXC_CRASH primarily. Note: If port is transferred
675  *      between check and delivery then deadlock may happen.
676  *
677  * Conditions:
678  *		Nothing locked and no resources held.
679  *		Called from an exception context.
680  * Returns:
681  *      KERN_SUCCESS if its ok to send exception message.
682  */
683 static kern_return_t
check_exc_receiver_dependency(exception_type_t exception,struct exception_action * excp,lck_mtx_t * mutex)684 check_exc_receiver_dependency(
685 	exception_type_t exception,
686 	struct exception_action *excp,
687 	lck_mtx_t *mutex)
688 {
689 	kern_return_t retval = KERN_SUCCESS;
690 
691 	if (excp == NULL || exception != EXC_CRASH) {
692 		return retval;
693 	}
694 
695 	task_t task = current_task();
696 	lck_mtx_lock(mutex);
697 	ipc_port_t xport = excp[exception].port;
698 	if (IP_VALID(xport) && ip_in_space_noauth(xport, task->itk_space)) {
699 		retval = KERN_FAILURE;
700 	}
701 	lck_mtx_unlock(mutex);
702 	return retval;
703 }
704 
705 
706 /*
707  *	Routine:	exception_triage_thread
708  *	Purpose:
709  *		The thread caught an exception.
710  *		We make an up-call to the thread's exception server.
711  *	Conditions:
712  *		Nothing locked and no resources held.
713  *		Called from an exception context, so
714  *		thread_exception_return and thread_kdb_return
715  *		are possible.
716  *	Returns:
717  *		KERN_SUCCESS if exception is handled by any of the handlers.
718  */
719 kern_return_t
exception_triage_thread(exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt,thread_t thread)720 exception_triage_thread(
721 	exception_type_t        exception,
722 	mach_exception_data_t   code,
723 	mach_msg_type_number_t  codeCnt,
724 	thread_t                thread)
725 {
726 	task_t                  task;
727 	thread_ro_t             tro;
728 	host_priv_t             host_priv;
729 	lck_mtx_t               *mutex;
730 	struct exception_action *actions;
731 	kern_return_t   kr = KERN_FAILURE;
732 
733 	assert(exception != EXC_RPC_ALERT);
734 
735 	/*
736 	 * If this behavior has been requested by the the kernel
737 	 * (due to the boot environment), we should panic if we
738 	 * enter this function.  This is intended as a debugging
739 	 * aid; it should allow us to debug why we caught an
740 	 * exception in environments where debugging is especially
741 	 * difficult.
742 	 */
743 	if (panic_on_exception_triage) {
744 		panic("called exception_triage when it was forbidden by the boot environment");
745 	}
746 
747 	/*
748 	 * Try to raise the exception at the activation level.
749 	 */
750 	mutex   = &thread->mutex;
751 	tro     = get_thread_ro(thread);
752 	actions = tro->tro_exc_actions;
753 	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
754 		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
755 		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
756 			goto out;
757 		}
758 	}
759 
760 	/*
761 	 * Maybe the task level will handle it.
762 	 */
763 	task    = tro->tro_task;
764 	mutex   = &task->itk_lock_data;
765 	actions = task->exc_actions;
766 	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
767 		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
768 		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
769 			goto out;
770 		}
771 	}
772 
773 	/*
774 	 * How about at the host level?
775 	 */
776 	host_priv = host_priv_self();
777 	mutex     = &host_priv->lock;
778 	actions   = host_priv->exc_actions;
779 	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
780 		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
781 		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
782 			goto out;
783 		}
784 	}
785 
786 out:
787 	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
788 	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
789 		thread_exception_return();
790 	}
791 	return kr;
792 }
793 
794 #if __has_feature(ptrauth_calls)
795 static TUNABLE(bool, pac_exception_telemetry, "-pac_exception_telemetry", false);
796 
797 CA_EVENT(pac_exception_event,
798     CA_INT, exception,
799     CA_INT, exception_code_0,
800     CA_INT, exception_code_1,
801     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
802 
/*
 *	Routine:	pac_exception_triage
 *	Purpose:
 *		Policy handling for a pointer-authentication (PAC) violation on
 *		the current thread: optionally emit CoreAnalytics telemetry
 *		(gated by the -pac_exception_telemetry boot-arg), and if the
 *		process is not being ptraced and the task is marked
 *		PAC-exception-fatal, terminate the task.
 *	Conditions:
 *		Must be called on the faulting (current) thread.  May not
 *		return: the fatal path calls thread_exception_return().
 */
static void
pac_exception_triage(
	exception_type_t        exception,
	mach_exception_data_t   code)
{
	boolean_t traced_flag = FALSE;
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";
	int pid = 0;

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (proc) {
		traced_flag = proc_is_traced(proc);
		/* Should only be called on current proc */
		proc_name = proc_name_address(proc);

		/*
		 * For a ptrauth violation, check if process isn't being ptraced and
		 * the task has the TFRO_PAC_EXC_FATAL flag set. If both conditions are true,
		 * terminate the task via exit_with_reason
		 */
		if (!traced_flag) {
			if (pac_exception_telemetry) {
				/* Report the violation to CoreAnalytics before any exit. */
				ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_exception_event);
				CA_EVENT_TYPE(pac_exception_event) * pexc_event = ca_event->data;
				pexc_event->exception = exception;
				pexc_event->exception_code_0 = code[0];
				pexc_event->exception_code_1 = code[1];
				strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
				CA_EVENT_SEND(ca_event);
			}
			if (task_is_pac_exception_fatal(task)) {
				os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit a pac violation\n", __func__, proc_name, pid);

				exception_info_t info = {
					.os_reason = OS_REASON_PAC_EXCEPTION,
					.exception_type = exception,
					.mx_code = code[0],
					.mx_subcode = code[1]
				};
				exit_with_mach_exception(proc, info, PX_FLAGS_NONE);
				thread_exception_return();
				/* NOT_REACHABLE */
			}
		}
	}
#endif /* MACH_BSD */
}
853 #endif /* __has_feature(ptrauth_calls) */
854 
/*
 *	Routine:	maybe_unrecoverable_exception_triage
 *	Purpose:
 *		Policy handling for exceptions tagged EXC_MAY_BE_UNRECOVERABLE:
 *		if the process's address space is not being debugged, terminate
 *		the task immediately instead of delivering the exception.
 *	Conditions:
 *		Must be called on the faulting (current) thread.  May not
 *		return: the unrecoverable path calls thread_exception_return().
 */
static void
maybe_unrecoverable_exception_triage(
	exception_type_t        exception,
	mach_exception_data_t   code)
{
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);

#ifdef MACH_BSD
	/* Without a BSD proc there is no exit path to take; deliver normally. */
	if (!proc) {
		return;
	}

	/*
	 * Note that the below policy to decide whether this should be unrecoverable is
	 * likely conceptually specific to the particular exception.
	 * If you find yourself adding another user_brk_..._descriptor and want to customize the
	 * policy for whether it should be unrecoverable, consider attaching each policy to
	 * the corresponding descriptor and somehow carrying it through to here.
	 */
	/* These exceptions are deliverable (and potentially recoverable) if the process is being debugged. */
	if (is_address_space_debugged(proc)) {
		return;
	}

	/*
	 * By policy, this exception is uncatchable by exception/signal handlers.
	 * Therefore exit immediately.
	 */
	/* Should only be called on current proc */
	int pid = proc_selfpid();
	char *proc_name = proc_name_address(proc);
	os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit an unrecoverable exception\n", __func__, proc_name, pid);

	exception_info_t info = {
		/*
		 * For now, hard-code this to OS_REASON_FOUNDATION as that's the path we expect to be on today.
		 * In the future this should probably be carried by the user_brk_..._descriptor and piped through.
		 */
		.os_reason = OS_REASON_FOUNDATION,
		.exception_type = exception,
		.mx_code = code[0],
		.mx_subcode = code[1]
	};
	exit_with_mach_exception(proc, info, PX_FLAGS_NONE);
	thread_exception_return();
	/* NOT_REACHABLE */
#endif /* MACH_BSD */
}
904 
905 /*
906  *	Routine:	exception_triage
907  *	Purpose:
908  *		The current thread caught an exception.
909  *		We make an up-call to the thread's exception server.
910  *	Conditions:
911  *		Nothing locked and no resources held.
912  *		Called from an exception context, so
913  *		thread_exception_return and thread_kdb_return
914  *		are possible.
915  *	Returns:
916  *		KERN_SUCCESS if exception is handled by any of the handlers.
917  */
/* Debug knob: panic on any exception raised by a 4K-page-size task. */
int debug4k_panic_on_exception = 0;
kern_return_t
exception_triage(
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt)
{
	thread_t thread = current_thread();
	task_t   task   = current_task();

	assert(codeCnt > 0);

	/* Extra diagnostics for tasks running with a sub-native page size. */
	if (VM_MAP_PAGE_SIZE(task->map) < PAGE_SIZE) {
		DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx\n",
		    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		if (debug4k_panic_on_exception) {
			panic("DEBUG4K thread %p task %p map %p exception %d codes 0x%llx 0x%llx",
			    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		}
	}

#if DEVELOPMENT || DEBUG
#ifdef MACH_BSD
	/* Log exceptions for low pids when the exception_log_max_pid tunable is set. */
	if (proc_pid(get_bsdtask_info(task)) <= exception_log_max_pid) {
		record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
		    "exception_log_max_pid: pid %d (%s): sending exception %d (0x%llx 0x%llx)",
		    proc_pid(get_bsdtask_info(task)), proc_name_address(get_bsdtask_info(task)),
		    exception, code[0], codeCnt > 1 ? code[1] : 0);
	}
#endif /* MACH_BSD */
#endif /* DEVELOPMENT || DEBUG */

#if __has_feature(ptrauth_calls)
	if (exception & EXC_PTRAUTH_BIT) {
		exception &= ~EXC_PTRAUTH_BIT;
		assert(codeCnt == 2);
		/* Note this may consume control flow if it decides the exception is unrecoverable. */
		pac_exception_triage(exception, code);
	}
#endif /* __has_feature(ptrauth_calls) */
	if (exception & EXC_MAY_BE_UNRECOVERABLE_BIT) {
		exception &= ~EXC_MAY_BE_UNRECOVERABLE_BIT;
		assert(codeCnt == 2);
		/* Note this may consume control flow if it decides the exception is unrecoverable. */
		maybe_unrecoverable_exception_triage(exception, code);
	}
	return exception_triage_thread(exception, code, codeCnt, thread);
}
966 
967 kern_return_t
bsd_exception(exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt)968 bsd_exception(
969 	exception_type_t        exception,
970 	mach_exception_data_t   code,
971 	mach_msg_type_number_t  codeCnt)
972 {
973 	task_t                  task;
974 	lck_mtx_t               *mutex;
975 	thread_t                self = current_thread();
976 	kern_return_t           kr;
977 
978 	/*
979 	 * Maybe the task level will handle it.
980 	 */
981 	task = current_task();
982 	mutex = &task->itk_lock_data;
983 
984 	kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
985 
986 	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
987 		return KERN_SUCCESS;
988 	}
989 	return KERN_FAILURE;
990 }
991 
992 
993 /*
994  * Raise an exception on a task.
995  * This should tell launchd to launch Crash Reporter for this task.
996  * If the exception is fatal, we should be careful about sending a synchronous exception
997  */
998 kern_return_t
task_exception_notify(exception_type_t exception,mach_exception_data_type_t exccode,mach_exception_data_type_t excsubcode,const bool fatal)999 task_exception_notify(exception_type_t exception,
1000     mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode, const bool fatal)
1001 {
1002 	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
1003 	wait_interrupt_t                wsave;
1004 	kern_return_t kr = KERN_SUCCESS;
1005 
1006 	/*
1007 	 * If we are not in dev mode, nobody should be allowed to synchronously handle
1008 	 * a fatal EXC_GUARD - they might stall on it indefinitely
1009 	 */
1010 	if (fatal && !developer_mode_state() && exception == EXC_GUARD) {
1011 		return KERN_DENIED;
1012 	}
1013 
1014 	code[0] = exccode;
1015 	code[1] = excsubcode;
1016 
1017 	wsave = thread_interrupt_level(THREAD_UNINT);
1018 	kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
1019 	(void) thread_interrupt_level(wsave);
1020 	return kr;
1021 }
1022 
1023 
/*
 *	Handle interface for special performance monitoring
 *	This is a special case of the host exception handler
 */
kern_return_t
sys_perf_notify(thread_t thread, int pid)
{
	host_priv_t             hostp;
	ipc_port_t              xport;
	/*
	 * Local snapshot of the host's EXC_RPC_ALERT action.  It is filled
	 * in under the host lock so the delivery itself can run unlocked.
	 */
	struct exception_action saved_exc_actions[EXC_TYPES_COUNT] = {};
	wait_interrupt_t        wsave;
	kern_return_t           ret;
	struct label            *temp_label;

	hostp = host_priv_self();       /* Get the host privileged ports */
	mach_exception_data_type_t      code[EXCEPTION_CODE_MAX];
	code[0] = 0xFF000001;           /* Set terminate code */
	code[1] = pid;          /* Pass out the pid */

#if CONFIG_MACF
	/* Create new label for saved_exc_actions[EXC_RPC_ALERT] */
	mac_exc_associate_action_label(&saved_exc_actions[EXC_RPC_ALERT],
	    mac_exc_create_label(&saved_exc_actions[EXC_RPC_ALERT]));
#endif /* CONFIG_MACF */

	lck_mtx_lock(&hostp->lock);
	xport = hostp->exc_actions[EXC_RPC_ALERT].port;

	/* Make sure we're not catching our own exception */
	if (!IP_VALID(xport) ||
	    !ip_active(xport) ||
	    ip_in_space_noauth(xport, get_threadtask(thread)->itk_space)) {
		lck_mtx_unlock(&hostp->lock);
#if CONFIG_MACF
		/* Drop the label created above before bailing out. */
		mac_exc_free_action_label(&saved_exc_actions[EXC_RPC_ALERT]);
#endif /* CONFIG_MACF */
		return KERN_FAILURE;
	}

	/* Save hostp->exc_actions and hold a sright to xport so it can't be dropped after unlock */
	temp_label = saved_exc_actions[EXC_RPC_ALERT].label;
	saved_exc_actions[EXC_RPC_ALERT] = hostp->exc_actions[EXC_RPC_ALERT];
	saved_exc_actions[EXC_RPC_ALERT].port = exception_port_copy_send(xport);
	/* The struct copy above overwrote the label created earlier; restore it. */
	saved_exc_actions[EXC_RPC_ALERT].label = temp_label;

#if CONFIG_MACF
	mac_exc_inherit_action_label(&hostp->exc_actions[EXC_RPC_ALERT], &saved_exc_actions[EXC_RPC_ALERT]);
#endif /* CONFIG_MACF */

	lck_mtx_unlock(&hostp->lock);

	/* Deliver uninterruptibly while waiting on the monitoring server. */
	wsave = thread_interrupt_level(THREAD_UNINT);
	ret = exception_deliver(
		thread,
		EXC_RPC_ALERT,
		code,
		2,
		saved_exc_actions,
		&hostp->lock);
	(void)thread_interrupt_level(wsave);

#if CONFIG_MACF
	mac_exc_free_action_label(&saved_exc_actions[EXC_RPC_ALERT]);
#endif /* CONFIG_MACF */
	/* Release the send right copied under the lock above. */
	ipc_port_release_send(saved_exc_actions[EXC_RPC_ALERT].port);

	return ret;
}
1092