1 /*
2 * Copyright (c) 2000-2024 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/message.h>
63 #include <mach/port.h>
64 #include <mach/mig_errors.h>
65 #include <mach/task.h>
66 #include <mach/thread_status.h>
67 #include <mach/exception_types.h>
68 #include <mach/exc.h>
69 #include <mach/mach_exc.h>
70
71 #include <ipc/port.h>
72 #include <ipc/ipc_entry.h>
73 #include <ipc/ipc_object.h>
74 #include <ipc/ipc_notify.h>
75 #include <ipc/ipc_space.h>
76 #include <ipc/ipc_pset.h>
77 #include <ipc/ipc_machdep.h>
78
79 #include <kern/ipc_tt.h>
80 #include <kern/task.h>
81 #include <kern/thread.h>
82 #include <kern/processor.h>
83 #include <kern/sched.h>
84 #include <kern/sched_prim.h>
85 #include <kern/host.h>
86 #include <kern/misc_protos.h>
87 #include <kern/ux_handler.h>
88 #include <kern/task_ident.h>
89
90 #include <vm/vm_map_xnu.h>
91 #include <vm/vm_map.h>
92 #include <sys/reason.h>
93 #include <security/mac_mach_internal.h>
94 #include <string.h>
95
96 #include <pexpert/pexpert.h>
97
98 #include <os/log.h>
99 #include <os/system_event_log.h>
100
101 #include <libkern/coreanalytics/coreanalytics.h>
102
103 #include <sys/code_signing.h> /* for developer mode state */
104
105 bool panic_on_exception_triage = false;
106
/* Not used in code; only for inspection during debugging */
108 unsigned long c_thr_exc_raise = 0;
109 unsigned long c_thr_exc_raise_identity_token = 0;
110 unsigned long c_thr_exc_raise_state_identity_token = 0;
111 unsigned long c_thr_exc_raise_state = 0;
112 unsigned long c_thr_exc_raise_state_id = 0;
113 unsigned long c_thr_exc_raise_backtrace = 0;
114
115 /* forward declarations */
116 kern_return_t exception_deliver(
117 thread_t thread,
118 exception_type_t exception,
119 mach_exception_data_t code,
120 mach_msg_type_number_t codeCnt,
121 struct exception_action *excp,
122 lck_mtx_t *mutex);
123
124 #ifdef MACH_BSD
125 kern_return_t bsd_exception(
126 exception_type_t exception,
127 mach_exception_data_t code,
128 mach_msg_type_number_t codeCnt);
129 #endif /* MACH_BSD */
130
131 #ifdef MACH_BSD
132 extern bool proc_is_traced(void *p);
133 extern int proc_selfpid(void);
134 extern char *proc_name_address(struct proc *p);
135 #endif /* MACH_BSD */
136
137 #if (DEVELOPMENT || DEBUG)
138 TUNABLE_WRITEABLE(unsigned int, exception_log_max_pid, "exception_log_max_pid", 0);
139 #endif /* (DEVELOPMENT || DEBUG) */
140
141 /*
142 * Routine: exception_init
143 * Purpose:
144 * Global initialization of state for exceptions.
145 * Conditions:
146 * None.
147 */
148 void
exception_init(void)149 exception_init(void)
150 {
151 int tmp = 0;
152
153 if (PE_parse_boot_argn("-panic_on_exception_triage", &tmp, sizeof(tmp))) {
154 panic_on_exception_triage = true;
155 }
156
157 #if (DEVELOPMENT || DEBUG)
158 if (exception_log_max_pid) {
159 printf("Logging all exceptions where pid < exception_log_max_pid (%d)\n", exception_log_max_pid);
160 }
161 #endif /* (DEVELOPMENT || DEBUG) */
162 }
163
164 static TUNABLE(bool, pac_replace_ptrs_user, "pac_replace_ptrs_user", true);
165
166 ipc_port_t
exception_port_copy_send(ipc_port_t port)167 exception_port_copy_send(ipc_port_t port)
168 {
169 if (IP_VALID(port)) {
170 if (is_ux_handler_port(port)) {
171 /* is_ux_handler_port() compares against __DATA_CONST */
172 port = ipc_port_copy_send_any(port);
173 } else {
174 port = ipc_port_copy_send_mqueue(port);
175 }
176 }
177 return port;
178 }
179
180 /*
181 * Routine: exception_deliver
182 * Purpose:
183 * Make an upcall to the exception server provided.
184 * Conditions:
185 * Nothing locked and no resources held.
186 * Called from an exception context, so
187 * thread_exception_return and thread_kdb_return
188 * are possible.
189 * Returns:
190 * KERN_SUCCESS if the exception was handled
191 */
192 kern_return_t
exception_deliver(thread_t thread,exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt,struct exception_action * excp,lck_mtx_t * mutex)193 exception_deliver(
194 thread_t thread,
195 exception_type_t exception,
196 mach_exception_data_t code,
197 mach_msg_type_number_t codeCnt,
198 struct exception_action *excp,
199 lck_mtx_t *mutex)
200 {
201 ipc_port_t exc_port = IPC_PORT_NULL;
202 exception_data_type_t small_code[EXCEPTION_CODE_MAX];
203 thread_state_t new_state = NULL;
204 int code64;
205 int behavior;
206 int flavor;
207 kern_return_t kr = KERN_FAILURE;
208 task_t task;
209 task_id_token_t task_token;
210 ipc_port_t thread_port = IPC_PORT_NULL,
211 task_port = IPC_PORT_NULL,
212 task_token_port = IPC_PORT_NULL;
213 thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
214 thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
215
216 /*
217 * Save work if we are terminating.
218 * Just go back to our AST handler.
219 */
220 if (!thread->active && !thread->inspection) {
221 return KERN_SUCCESS;
222 }
223
224 /*
225 * If there are no exception actions defined for this entity,
226 * we can't deliver here.
227 */
228 if (excp == NULL) {
229 return KERN_FAILURE;
230 }
231
232 assert(exception < EXC_TYPES_COUNT);
233 if (exception >= EXC_TYPES_COUNT) {
234 return KERN_FAILURE;
235 }
236
237 excp = &excp[exception];
238
239 /*
240 * Snapshot the exception action data under lock for consistency.
241 * Hold a reference to the port over the exception_raise_* calls
242 * so it can't be destroyed. This seems like overkill, but keeps
243 * the port from disappearing between now and when
244 * ipc_object_copyin_from_kernel is finally called.
245 */
246 lck_mtx_lock(mutex);
247 exc_port = exception_port_copy_send(excp->port);
248 if (!IP_VALID(exc_port)) {
249 lck_mtx_unlock(mutex);
250 return KERN_FAILURE;
251 }
252 task = get_threadtask(thread);
253
254 flavor = excp->flavor;
255 behavior = excp->behavior;
256 if (excp->hardened) {
257 /*
258 * On arm64e devices we have protected the pc returned via exception
259 * handlers with PAC. We also want to protect all other thread state
260 * for hardened exceptions to prevent modification of any registers
261 * that could affect control flow integrity sometime in the future.
262 */
263 set_flags |= TSSF_ONLY_PC;
264 }
265 lck_mtx_unlock(mutex);
266
267 code64 = (behavior & MACH_EXCEPTION_CODES);
268 behavior &= ~MACH_EXCEPTION_MASK;
269
270 if (!code64) {
271 small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
272 small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
273 }
274
275
276 #if CONFIG_MACF
277 /* Now is a reasonably good time to check if the exception action is
278 * permitted for this process, because after this point we will send
279 * the message out almost certainly.
280 * As with other failures, exception_triage_thread will go on
281 * to the next level.
282 */
283
284 /* The global exception-to-signal translation port is safe to be an exception handler. */
285 if (is_ux_handler_port(exc_port) == FALSE &&
286 mac_exc_action_check_exception_send(task, excp) != 0) {
287 kr = KERN_FAILURE;
288 goto out_release_right;
289 }
290 #endif
291
292 thread->options |= TH_IN_MACH_EXCEPTION;
293
294 switch (behavior) {
295 case EXCEPTION_STATE: {
296 mach_msg_type_number_t old_state_cnt, new_state_cnt;
297 thread_state_data_t old_state;
298 bool task_allow_user_state = task_needs_user_signed_thread_state(task);
299
300 if (pac_replace_ptrs_user || task_allow_user_state) {
301 get_flags |= TSSF_RANDOM_USER_DIV;
302 set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
303 }
304
305 c_thr_exc_raise_state++;
306 assert(flavor < THREAD_STATE_FLAVORS);
307 old_state_cnt = (flavor < THREAD_STATE_FLAVORS) ? _MachineStateCount[flavor] : 0;
308 kr = thread_getstatus_to_user(thread, flavor,
309 (thread_state_t)old_state,
310 &old_state_cnt, get_flags);
311 new_state_cnt = old_state_cnt;
312 if (kr == KERN_SUCCESS) {
313 new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
314 if (new_state == NULL) {
315 kr = KERN_RESOURCE_SHORTAGE;
316 goto out_release_right;
317 }
318 if (code64) {
319 kr = mach_exception_raise_state(exc_port,
320 exception,
321 code,
322 codeCnt,
323 &flavor,
324 old_state, old_state_cnt,
325 new_state, &new_state_cnt);
326 } else {
327 kr = exception_raise_state(exc_port, exception,
328 small_code,
329 codeCnt,
330 &flavor,
331 old_state, old_state_cnt,
332 new_state, &new_state_cnt);
333 }
334 if (kr == KERN_SUCCESS) {
335 if (exception != EXC_CORPSE_NOTIFY) {
336 kr = thread_setstatus_from_user(thread, flavor,
337 (thread_state_t)new_state, new_state_cnt,
338 (thread_state_t)old_state, old_state_cnt,
339 set_flags);
340 }
341 goto out_release_right;
342 }
343 }
344
345 goto out_release_right;
346 }
347
348 case EXCEPTION_DEFAULT: {
349 c_thr_exc_raise++;
350
351 task_reference(task);
352 thread_reference(thread);
353 /*
354 * Only deliver control port if Developer Mode enabled,
355 * or task is a corpse. Otherwise we only deliver the
356 * (immovable) read port in exception handler (both in
357 * or out of process). (94669540)
358 */
359 if (developer_mode_state() || task_is_a_corpse(task)) {
360 task_port = convert_task_to_port(task);
361 thread_port = convert_thread_to_port(thread);
362 } else {
363 task_port = convert_task_read_to_port(task);
364 thread_port = convert_thread_read_to_port(thread);
365 }
366 /* task and thread ref consumed */
367
368 if (code64) {
369 kr = mach_exception_raise(exc_port,
370 thread_port,
371 task_port,
372 exception,
373 code,
374 codeCnt);
375 } else {
376 kr = exception_raise(exc_port,
377 thread_port,
378 task_port,
379 exception,
380 small_code,
381 codeCnt);
382 }
383
384 goto out_release_right;
385 }
386
387 case EXCEPTION_IDENTITY_PROTECTED: {
388 c_thr_exc_raise_identity_token++;
389
390 kr = task_create_identity_token(task, &task_token);
391 if (!task->active && kr == KERN_INVALID_ARGUMENT) {
392 /* The task is terminating, don't need to send more exceptions */
393 kr = KERN_SUCCESS;
394 goto out_release_right;
395 }
396 /* task_token now represents a task, or corpse */
397 assert(kr == KERN_SUCCESS);
398 task_token_port = convert_task_id_token_to_port(task_token);
399 /* task token ref consumed */
400
401 if (code64) {
402 kr = mach_exception_raise_identity_protected(exc_port,
403 thread->thread_id,
404 task_token_port,
405 exception,
406 code,
407 codeCnt);
408 } else {
409 panic("mach_exception_raise_identity_protected() must be code64");
410 }
411
412 goto out_release_right;
413 }
414
415
416 case EXCEPTION_STATE_IDENTITY_PROTECTED: {
417 mach_msg_type_number_t old_state_cnt, new_state_cnt;
418 thread_state_data_t old_state;
419 bool task_allow_user_state = task_needs_user_signed_thread_state(task);
420
421 if (pac_replace_ptrs_user || task_allow_user_state) {
422 set_flags |= TSSF_ALLOW_ONLY_USER_PTRS;
423 if (excp->hardened) {
424 /* Use the signed_pc_key diversifier on the task for authentication. */
425 set_flags |= TSSF_TASK_USER_DIV;
426 get_flags |= TSSF_TASK_USER_DIV;
427 } else {
428 /* Otherwise we should use the random diversifier */
429 set_flags |= TSSF_RANDOM_USER_DIV;
430 get_flags |= TSSF_RANDOM_USER_DIV;
431 }
432 }
433
434 c_thr_exc_raise_state_identity_token++;
435 kr = task_create_identity_token(task, &task_token);
436
437 if (!task->active && kr == KERN_INVALID_ARGUMENT) {
438 /* The task is terminating, don't need to send more exceptions */
439 kr = KERN_SUCCESS;
440 goto out_release_right;
441 }
442
443 /* task_token now represents a task, or corpse */
444 assert(kr == KERN_SUCCESS);
445 task_token_port = convert_task_id_token_to_port(task_token);
446 /* task token ref consumed */
447
448 old_state_cnt = _MachineStateCount[flavor];
449 kr = thread_getstatus_to_user(thread, flavor,
450 (thread_state_t)old_state,
451 &old_state_cnt, get_flags);
452 new_state_cnt = old_state_cnt;
453
454 if (kr == KERN_SUCCESS) {
455 new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
456 if (new_state == NULL) {
457 kr = KERN_RESOURCE_SHORTAGE;
458 goto out_release_right;
459 }
460
461 if (code64) {
462 kr = mach_exception_raise_state_identity_protected(exc_port,
463 thread->thread_id,
464 task_token_port,
465 exception,
466 code,
467 codeCnt,
468 &flavor,
469 old_state, old_state_cnt,
470 new_state, &new_state_cnt);
471 } else {
472 panic("mach_exception_raise_state_identity_protected() must be code64");
473 }
474
475 if (kr == KERN_SUCCESS) {
476 if (exception != EXC_CORPSE_NOTIFY) {
477 kr = thread_setstatus_from_user(thread, flavor,
478 (thread_state_t)new_state, new_state_cnt,
479 (thread_state_t)old_state, old_state_cnt, set_flags);
480 }
481 goto out_release_right;
482 }
483 }
484
485 goto out_release_right;
486 }
487
488 case EXCEPTION_STATE_IDENTITY: {
489 mach_msg_type_number_t old_state_cnt, new_state_cnt;
490 thread_state_data_t old_state;
491 bool task_allow_user_state = task_needs_user_signed_thread_state(task);
492
493 if (pac_replace_ptrs_user || task_allow_user_state) {
494 get_flags |= TSSF_RANDOM_USER_DIV;
495 set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
496 }
497
498 c_thr_exc_raise_state_id++;
499
500 task_reference(task);
501 thread_reference(thread);
502 /*
503 * Only deliver control port if Developer Mode enabled,
504 * or task is a corpse. Otherwise we only deliver the
505 * (immovable) read port in exception handler (both in
506 * or out of process). (94669540)
507 */
508 if (developer_mode_state() || task_is_a_corpse(task)) {
509 task_port = convert_task_to_port(task);
510 thread_port = convert_thread_to_port(thread);
511 } else {
512 task_port = convert_task_read_to_port(task);
513 thread_port = convert_thread_read_to_port(thread);
514 }
515 /* task and thread ref consumed */
516
517 assert(flavor < THREAD_STATE_FLAVORS);
518 old_state_cnt = (flavor < THREAD_STATE_FLAVORS) ? _MachineStateCount[flavor] : 0;
519 kr = thread_getstatus_to_user(thread, flavor,
520 (thread_state_t)old_state,
521 &old_state_cnt, get_flags);
522 new_state_cnt = old_state_cnt;
523 if (kr == KERN_SUCCESS) {
524 new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
525 if (new_state == NULL) {
526 kr = KERN_RESOURCE_SHORTAGE;
527 goto out_release_right;
528 }
529 if (code64) {
530 kr = mach_exception_raise_state_identity(
531 exc_port,
532 thread_port,
533 task_port,
534 exception,
535 code,
536 codeCnt,
537 &flavor,
538 old_state, old_state_cnt,
539 new_state, &new_state_cnt);
540 } else {
541 kr = exception_raise_state_identity(exc_port,
542 thread_port,
543 task_port,
544 exception,
545 small_code,
546 codeCnt,
547 &flavor,
548 old_state, old_state_cnt,
549 new_state, &new_state_cnt);
550 }
551
552 if (kr == KERN_SUCCESS) {
553 if (exception != EXC_CORPSE_NOTIFY &&
554 ip_kotype(thread_port) == IKOT_THREAD_CONTROL) {
555 kr = thread_setstatus_from_user(thread, flavor,
556 (thread_state_t)new_state, new_state_cnt,
557 (thread_state_t)old_state, old_state_cnt, set_flags);
558 }
559 goto out_release_right;
560 }
561 }
562
563 goto out_release_right;
564 }
565
566 default:
567 panic("bad exception behavior!");
568 return KERN_FAILURE;
569 }/* switch */
570
571 out_release_right:
572
573 thread->options &= ~TH_IN_MACH_EXCEPTION;
574
575 if (task_port) {
576 ipc_port_release_send(task_port);
577 }
578
579 if (thread_port) {
580 ipc_port_release_send(thread_port);
581 }
582
583 if (exc_port) {
584 ipc_port_release_send(exc_port);
585 }
586
587 if (task_token_port) {
588 ipc_port_release_send(task_token_port);
589 }
590
591 if (new_state) {
592 kfree_data(new_state, sizeof(thread_state_data_t));
593 }
594
595 return kr;
596 }
597
/*
 *	Routine:	exception_deliver_backtrace
 *	Purpose:
 *		Attempt exception delivery with backtrace info to exception ports
 *		in exc_ports in order.
 *	Conditions:
 *		Caller has a reference on bt_object, and send rights on exc_ports.
 *		Does not consume any passed references or rights.
 */
void
exception_deliver_backtrace(
	kcdata_object_t bt_object,
	ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT],
	exception_type_t exception)
{
	kern_return_t kr;
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	ipc_port_t target_port, bt_obj_port;

	/* Backtrace delivery is currently only used for EXC_GUARD. */
	assert(exception == EXC_GUARD);

	code[0] = exception;
	code[1] = 0;

	/*
	 * Take an extra object reference for the port conversion so the
	 * caller's reference is left untouched; the conversion consumes
	 * the reference we just took.
	 */
	kcdata_object_reference(bt_object);
	bt_obj_port = convert_kcdata_object_to_port(bt_object);
	/* backtrace object ref consumed, no-senders is armed */

	if (!IP_VALID(bt_obj_port)) {
		return;
	}

	/*
	 * We are guaranteed at task_enqueue_exception_with_corpse() time
	 * that the exception port prefers backtrace delivery.
	 */
	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
		target_port = exc_ports[i];

		if (!IP_VALID(target_port)) {
			continue;
		}

		/*
		 * Skip ports that have already died.  This check is advisory:
		 * the port may still die after the unlock, in which case the
		 * raise below reports MACH_RCV_PORT_DIED.
		 */
		ip_mq_lock(target_port);
		if (!ip_active(target_port)) {
			ip_mq_unlock(target_port);
			continue;
		}
		ip_mq_unlock(target_port);

		kr = mach_exception_raise_backtrace(target_port,
		    bt_obj_port,
		    EXC_CORPSE_NOTIFY,
		    code,
		    EXCEPTION_CODE_MAX);

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			/* Exception is handled at this level */
			break;
		}
	}

	/* May trigger no-senders notification for backtrace object */
	ipc_port_release_send(bt_obj_port);

	return;
}
669
670 /*
671 * Routine: check_exc_receiver_dependency
672 * Purpose:
673 * Verify that the port destined for receiving this exception is not
674 * on the current task. This would cause hang in kernel for
675 * EXC_CRASH primarily. Note: If port is transferred
676 * between check and delivery then deadlock may happen.
677 *
678 * Conditions:
679 * Nothing locked and no resources held.
680 * Called from an exception context.
681 * Returns:
682 * KERN_SUCCESS if its ok to send exception message.
683 */
684 static kern_return_t
check_exc_receiver_dependency(exception_type_t exception,struct exception_action * excp,lck_mtx_t * mutex)685 check_exc_receiver_dependency(
686 exception_type_t exception,
687 struct exception_action *excp,
688 lck_mtx_t *mutex)
689 {
690 kern_return_t retval = KERN_SUCCESS;
691
692 if (excp == NULL || exception != EXC_CRASH) {
693 return retval;
694 }
695
696 task_t task = current_task();
697 lck_mtx_lock(mutex);
698 ipc_port_t xport = excp[exception].port;
699 if (IP_VALID(xport) && ip_in_space_noauth(xport, task->itk_space)) {
700 retval = KERN_FAILURE;
701 }
702 lck_mtx_unlock(mutex);
703 return retval;
704 }
705
706
707 /*
708 * Routine: exception_triage_thread
709 * Purpose:
710 * The thread caught an exception.
711 * We make an up-call to the thread's exception server.
712 * Conditions:
713 * Nothing locked and no resources held.
714 * Called from an exception context, so
715 * thread_exception_return and thread_kdb_return
716 * are possible.
717 * Returns:
718 * KERN_SUCCESS if exception is handled by any of the handlers.
719 */
kern_return_t
exception_triage_thread(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	thread_t thread)
{
	task_t task;
	thread_ro_t tro;
	host_priv_t host_priv;
	lck_mtx_t *mutex;
	struct exception_action *actions;
	kern_return_t kr = KERN_FAILURE;

	/* EXC_RPC_ALERT is delivered only via sys_perf_notify(). */
	assert(exception != EXC_RPC_ALERT);

	/*
	 * If this behavior has been requested by the kernel
	 * (due to the boot environment), we should panic if we
	 * enter this function.  This is intended as a debugging
	 * aid; it should allow us to debug why we caught an
	 * exception in environments where debugging is especially
	 * difficult.
	 */
	if (panic_on_exception_triage) {
		panic("called exception_triage when it was forbidden by the boot environment");
	}

	/*
	 * Try to raise the exception at the activation level.
	 */
	mutex = &thread->mutex;
	tro = get_thread_ro(thread);
	actions = tro->tro_exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * Maybe the task level will handle it.
	 */
	task = tro->tro_task;
	mutex = &task->itk_lock_data;
	actions = task->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = &host_priv->lock;
	actions = host_priv->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

out:
	/*
	 * For fatal-style exceptions (crash/resource/guard/corpse-notify) the
	 * caller continues its termination path; everything else returns
	 * straight to user mode.
	 */
	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
		thread_exception_return();
	}
	return kr;
}
794
795 #if __has_feature(ptrauth_calls)
796 static TUNABLE(bool, pac_exception_telemetry, "-pac_exception_telemetry", false);
797
798 CA_EVENT(pac_exception_event,
799 CA_INT, exception,
800 CA_INT, exception_code_0,
801 CA_INT, exception_code_1,
802 CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
803
/*
 *	Routine:	pac_exception_triage
 *	Purpose:
 *		Handle a pointer-authentication (PAC) violation on the current
 *		thread: optionally report telemetry, and terminate the task when
 *		it is marked PAC-exception-fatal and is not being ptraced.
 *		May not return (thread_exception_return on the fatal path).
 */
static void
pac_exception_triage(
	exception_type_t exception,
	mach_exception_data_t code)
{
	boolean_t traced_flag = FALSE;
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";
	int pid = 0;

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (proc) {
		traced_flag = proc_is_traced(proc);
		/* Should only be called on current proc */
		proc_name = proc_name_address(proc);

		/*
		 * For a ptrauth violation, check if process isn't being ptraced and
		 * the task has the TFRO_PAC_EXC_FATAL flag set. If both conditions are true,
		 * terminate the task via exit_with_reason
		 */
		if (!traced_flag) {
			if (pac_exception_telemetry) {
				/* Report the violation via CoreAnalytics before any exit. */
				ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_exception_event);
				CA_EVENT_TYPE(pac_exception_event) * pexc_event = ca_event->data;
				pexc_event->exception = exception;
				pexc_event->exception_code_0 = code[0];
				pexc_event->exception_code_1 = code[1];
				strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
				CA_EVENT_SEND(ca_event);
			}
			if (task_is_pac_exception_fatal(task)) {
				os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit a pac violation\n", __func__, proc_name, pid);

				exception_info_t info = {
					.os_reason = OS_REASON_PAC_EXCEPTION,
					.exception_type = exception,
					.mx_code = code[0],
					.mx_subcode = code[1]
				};
				/* Fatal: terminate and never return to the faulting context. */
				exit_with_mach_exception(proc, info, PX_FLAGS_NONE);
				thread_exception_return();
				/* NOT_REACHABLE */
			}
		}
	}
#endif /* MACH_BSD */
}
854 #endif /* __has_feature(ptrauth_calls) */
855
/*
 *	Routine:	maybe_unrecoverable_exception_triage
 *	Purpose:
 *		Decide whether an exception flagged EXC_MAY_BE_UNRECOVERABLE_BIT
 *		should terminate the current process.  When the address space is
 *		not being debugged, exits immediately and does not return.
 */
static void
maybe_unrecoverable_exception_triage(
	exception_type_t exception,
	mach_exception_data_t code)
{
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);

#ifdef MACH_BSD
	if (!proc) {
		return;
	}

	/*
	 * Note that the below policy to decide whether this should be unrecoverable is
	 * likely conceptually specific to the particular exception.
	 * If you find yourself adding another user_brk_..._descriptor and want to customize the
	 * policy for whether it should be unrecoverable, consider attaching each policy to
	 * the corresponding descriptor and somehow carrying it through to here.
	 */
	/* These exceptions are deliverable (and potentially recoverable) if the process is being debugged. */
	if (is_address_space_debugged(proc)) {
		return;
	}

	/*
	 * By policy, this exception is uncatchable by exception/signal handlers.
	 * Therefore exit immediately.
	 */
	/* Should only be called on current proc */
	int pid = proc_selfpid();
	char *proc_name = proc_name_address(proc);
	os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit an unrecoverable exception\n", __func__, proc_name, pid);

	exception_info_t info = {
		/*
		 * For now, hard-code this to OS_REASON_FOUNDATION as that's the path we expect to be on today.
		 * In the future this should probably be carried by the user_brk_..._descriptor and piped through.
		 */
		.os_reason = OS_REASON_FOUNDATION,
		.exception_type = exception,
		.mx_code = code[0],
		.mx_subcode = code[1]
	};
	exit_with_mach_exception(proc, info, PX_FLAGS_NONE);
	thread_exception_return();
	/* NOT_REACHABLE */
#endif /* MACH_BSD */
}
905
906 /*
907 * Routine: exception_triage
908 * Purpose:
909 * The current thread caught an exception.
910 * We make an up-call to the thread's exception server.
911 * Conditions:
912 * Nothing locked and no resources held.
913 * Called from an exception context, so
914 * thread_exception_return and thread_kdb_return
915 * are possible.
916 * Returns:
917 * KERN_SUCCESS if exception is handled by any of the handlers.
918 */
919 int debug4k_panic_on_exception = 0;
kern_return_t
exception_triage(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt)
{
	thread_t thread = current_thread();
	task_t task = current_task();

	assert(codeCnt > 0);

	/* Extra diagnostics for tasks running with a sub-kernel page size. */
	if (VM_MAP_PAGE_SIZE(task->map) < PAGE_SIZE) {
		DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx\n",
		    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		if (debug4k_panic_on_exception) {
			panic("DEBUG4K thread %p task %p map %p exception %d codes 0x%llx 0x%llx",
			    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		}
	}

#if DEVELOPMENT || DEBUG
#ifdef MACH_BSD
	/* Optional boot-arg driven logging for low-numbered (early) pids. */
	if (proc_pid(get_bsdtask_info(task)) <= exception_log_max_pid) {
		record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
		    "exception_log_max_pid: pid %d (%s): sending exception %d (0x%llx 0x%llx)",
		    proc_pid(get_bsdtask_info(task)), proc_name_address(get_bsdtask_info(task)),
		    exception, code[0], codeCnt > 1 ? code[1] : 0);
	}
#endif /* MACH_BSD */
#endif /* DEVELOPMENT || DEBUG */

#if __has_feature(ptrauth_calls)
	/* Strip the PAC marker bit before normal triage. */
	if (exception & EXC_PTRAUTH_BIT) {
		exception &= ~EXC_PTRAUTH_BIT;
		assert(codeCnt == 2);
		/* Note this may consume control flow if it decides the exception is unrecoverable. */
		pac_exception_triage(exception, code);
	}
#endif /* __has_feature(ptrauth_calls) */
	/* Strip the maybe-unrecoverable marker bit before normal triage. */
	if (exception & EXC_MAY_BE_UNRECOVERABLE_BIT) {
		exception &= ~EXC_MAY_BE_UNRECOVERABLE_BIT;
		assert(codeCnt == 2);
		/* Note this may consume control flow if it decides the exception is unrecoverable. */
		maybe_unrecoverable_exception_triage(exception, code);
	}
	return exception_triage_thread(exception, code, codeCnt, thread);
}
967
968 kern_return_t
bsd_exception(exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt)969 bsd_exception(
970 exception_type_t exception,
971 mach_exception_data_t code,
972 mach_msg_type_number_t codeCnt)
973 {
974 task_t task;
975 lck_mtx_t *mutex;
976 thread_t self = current_thread();
977 kern_return_t kr;
978
979 /*
980 * Maybe the task level will handle it.
981 */
982 task = current_task();
983 mutex = &task->itk_lock_data;
984
985 kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
986
987 if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
988 return KERN_SUCCESS;
989 }
990 return KERN_FAILURE;
991 }
992
993
994 /*
995 * Raise an exception on a task.
996 * This should tell launchd to launch Crash Reporter for this task.
997 * If the exception is fatal, we should be careful about sending a synchronous exception
998 */
999 kern_return_t
task_exception_notify(exception_type_t exception,mach_exception_data_type_t exccode,mach_exception_data_type_t excsubcode,const bool fatal)1000 task_exception_notify(exception_type_t exception,
1001 mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode, const bool fatal)
1002 {
1003 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
1004 wait_interrupt_t wsave;
1005 kern_return_t kr = KERN_SUCCESS;
1006
1007 /*
1008 * If we are not in dev mode, nobody should be allowed to synchronously handle
1009 * a fatal EXC_GUARD - they might stall on it indefinitely
1010 */
1011 if (fatal && !developer_mode_state() && exception == EXC_GUARD) {
1012 return KERN_DENIED;
1013 }
1014
1015 code[0] = exccode;
1016 code[1] = excsubcode;
1017
1018 wsave = thread_interrupt_level(THREAD_UNINT);
1019 kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
1020 (void) thread_interrupt_level(wsave);
1021 return kr;
1022 }
1023
1024
1025 /*
1026 * Handle interface for special performance monitoring
1027 * This is a special case of the host exception handler
1028 */
kern_return_t
sys_perf_notify(thread_t thread, int pid)
{
	host_priv_t hostp;
	ipc_port_t xport;
	/* Local snapshot of the host's EXC_RPC_ALERT action, used after unlock. */
	struct exception_action saved_exc_actions[EXC_TYPES_COUNT] = {};
	wait_interrupt_t wsave;
	kern_return_t ret;
	struct label *temp_label;

	hostp = host_priv_self();       /* Get the host privileged ports */
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	code[0] = 0xFF000001;           /* Set terminate code */
	code[1] = pid;          /* Pass out the pid */

#if CONFIG_MACF
	/* Create new label for saved_exc_actions[EXC_RPC_ALERT] */
	mac_exc_associate_action_label(&saved_exc_actions[EXC_RPC_ALERT],
	    mac_exc_create_label(&saved_exc_actions[EXC_RPC_ALERT]));
#endif /* CONFIG_MACF */

	lck_mtx_lock(&hostp->lock);
	xport = hostp->exc_actions[EXC_RPC_ALERT].port;

	/* Make sure we're not catching our own exception */
	if (!IP_VALID(xport) ||
	    !ip_active(xport) ||
	    ip_in_space_noauth(xport, get_threadtask(thread)->itk_space)) {
		lck_mtx_unlock(&hostp->lock);
#if CONFIG_MACF
		/* Free the label created above before bailing out. */
		mac_exc_free_action_label(&saved_exc_actions[EXC_RPC_ALERT]);
#endif /* CONFIG_MACF */
		return KERN_FAILURE;
	}

	/* Save hostp->exc_actions and hold a sright to xport so it can't be dropped after unlock */
	temp_label = saved_exc_actions[EXC_RPC_ALERT].label;
	saved_exc_actions[EXC_RPC_ALERT] = hostp->exc_actions[EXC_RPC_ALERT];
	saved_exc_actions[EXC_RPC_ALERT].port = exception_port_copy_send(xport);
	/* Restore our own label; the struct copy above clobbered it with the host's. */
	saved_exc_actions[EXC_RPC_ALERT].label = temp_label;

#if CONFIG_MACF
	mac_exc_inherit_action_label(&hostp->exc_actions[EXC_RPC_ALERT], &saved_exc_actions[EXC_RPC_ALERT]);
#endif /* CONFIG_MACF */

	lck_mtx_unlock(&hostp->lock);

	/* Deliver uninterruptibly against the saved (stable) action snapshot. */
	wsave = thread_interrupt_level(THREAD_UNINT);
	ret = exception_deliver(
		thread,
		EXC_RPC_ALERT,
		code,
		2,
		saved_exc_actions,
		&hostp->lock);
	(void)thread_interrupt_level(wsave);

#if CONFIG_MACF
	mac_exc_free_action_label(&saved_exc_actions[EXC_RPC_ALERT]);
#endif /* CONFIG_MACF */
	/* Drop the send right we copied before the unlock. */
	ipc_port_release_send(saved_exc_actions[EXC_RPC_ALERT].port);

	return ret;
}
1093