1 /*
2 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/message.h>
63 #include <mach/port.h>
64 #include <mach/mig_errors.h>
65 #include <mach/task.h>
66 #include <mach/thread_status.h>
67 #include <mach/exception_types.h>
68 #include <mach/exc.h>
69 #include <mach/mach_exc.h>
70
71 #include <ipc/port.h>
72 #include <ipc/ipc_entry.h>
73 #include <ipc/ipc_object.h>
74 #include <ipc/ipc_notify.h>
75 #include <ipc/ipc_space.h>
76 #include <ipc/ipc_pset.h>
77 #include <ipc/ipc_machdep.h>
78
79 #include <kern/ipc_tt.h>
80 #include <kern/task.h>
81 #include <kern/thread.h>
82 #include <kern/processor.h>
83 #include <kern/sched.h>
84 #include <kern/sched_prim.h>
85 #include <kern/host.h>
86 #include <kern/misc_protos.h>
87 #include <kern/ux_handler.h>
88 #include <kern/task_ident.h>
89
90 #include <vm/vm_map.h>
91
92 #include <security/mac_mach_internal.h>
93 #include <string.h>
94
95 #include <pexpert/pexpert.h>
96
97 #include <os/log.h>
98 #include <os/system_event_log.h>
99
100 #include <libkern/coreanalytics/coreanalytics.h>
101
102 #include <sys/code_signing.h> /* for developer mode state */
103
104 bool panic_on_exception_triage = false;
105
/* Not used in code, only for inspection during debugging */
107 unsigned long c_thr_exc_raise = 0;
108 unsigned long c_thr_exc_raise_identity_token = 0;
109 unsigned long c_thr_exc_raise_state = 0;
110 unsigned long c_thr_exc_raise_state_id = 0;
111 unsigned long c_thr_exc_raise_backtrace = 0;
112
113 /* forward declarations */
114 kern_return_t exception_deliver(
115 thread_t thread,
116 exception_type_t exception,
117 mach_exception_data_t code,
118 mach_msg_type_number_t codeCnt,
119 struct exception_action *excp,
120 lck_mtx_t *mutex);
121
122 #ifdef MACH_BSD
123 kern_return_t bsd_exception(
124 exception_type_t exception,
125 mach_exception_data_t code,
126 mach_msg_type_number_t codeCnt);
127 #endif /* MACH_BSD */
128
129 #if __has_feature(ptrauth_calls)
130 extern int exit_with_pac_exception(
131 void *proc,
132 exception_type_t exception,
133 mach_exception_code_t code,
134 mach_exception_subcode_t subcode);
135 #endif /* __has_feature(ptrauth_calls) */
136
137 #ifdef MACH_BSD
138 extern bool proc_is_traced(void *p);
139 extern int proc_selfpid(void);
140 extern char *proc_name_address(struct proc *p);
141 #endif /* MACH_BSD */
142
143 #if (DEVELOPMENT || DEBUG)
144 TUNABLE_WRITEABLE(unsigned int, exception_log_max_pid, "exception_log_max_pid", 0);
145 #endif /* (DEVELOPMENT || DEBUG) */
146
147 /*
148 * Routine: exception_init
149 * Purpose:
150 * Global initialization of state for exceptions.
151 * Conditions:
152 * None.
153 */
154 void
exception_init(void)155 exception_init(void)
156 {
157 int tmp = 0;
158
159 if (PE_parse_boot_argn("-panic_on_exception_triage", &tmp, sizeof(tmp))) {
160 panic_on_exception_triage = true;
161 }
162
163 #if (DEVELOPMENT || DEBUG)
164 if (exception_log_max_pid) {
165 printf("Logging all exceptions where pid < exception_log_max_pid (%d)\n", exception_log_max_pid);
166 }
167 #endif /* (DEVELOPMENT || DEBUG) */
168 }
169
170 static TUNABLE(bool, pac_replace_ptrs_user, "pac_replace_ptrs_user", true);
171
172 ipc_port_t
exception_port_copy_send(ipc_port_t port)173 exception_port_copy_send(ipc_port_t port)
174 {
175 if (IP_VALID(port)) {
176 if (is_ux_handler_port(port)) {
177 /* is_ux_handler_port() compares against __DATA_CONST */
178 port = ipc_port_copy_send_any(port);
179 } else {
180 port = ipc_port_copy_send_mqueue(port);
181 }
182 }
183 return port;
184 }
185
186 /*
187 * Routine: exception_deliver
188 * Purpose:
189 * Make an upcall to the exception server provided.
190 * Conditions:
191 * Nothing locked and no resources held.
192 * Called from an exception context, so
193 * thread_exception_return and thread_kdb_return
194 * are possible.
195 * Returns:
196 * KERN_SUCCESS if the exception was handled
197 */
kern_return_t
exception_deliver(
	thread_t thread,
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	struct exception_action *excp,
	lck_mtx_t *mutex)
{
	ipc_port_t exc_port = IPC_PORT_NULL;
	exception_data_type_t small_code[EXCEPTION_CODE_MAX];
	thread_state_t new_state = NULL;
	int code64;             /* nonzero: handler wants 64-bit codes */
	int behavior;           /* EXCEPTION_DEFAULT / STATE / STATE_IDENTITY / IDENTITY_PROTECTED */
	int flavor;             /* thread-state flavor for the STATE behaviors */
	kern_return_t kr;
	task_t task;
	task_id_token_t task_token;
	ipc_port_t thread_port = IPC_PORT_NULL,
	    task_port = IPC_PORT_NULL,
	    task_token_port = IPC_PORT_NULL;

	/*
	 * Save work if we are terminating.
	 * Just go back to our AST handler.
	 */
	if (!thread->active && !thread->inspection) {
		return KERN_SUCCESS;
	}

	/*
	 * If there are no exception actions defined for this entity,
	 * we can't deliver here.
	 */
	if (excp == NULL) {
		return KERN_FAILURE;
	}

	/* Guard the array index below even on release kernels. */
	assert(exception < EXC_TYPES_COUNT);
	if (exception >= EXC_TYPES_COUNT) {
		return KERN_FAILURE;
	}

	/* Select the action registered for this specific exception type. */
	excp = &excp[exception];

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed. This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);
	exc_port = exception_port_copy_send(excp->port);
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}

	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

	/* MACH_EXCEPTION_CODES in the behavior requests 64-bit codes. */
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_MASK;

	if (!code64) {
		/* Legacy handler: narrow the 64-bit codes to 32-bit. */
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	task = get_threadtask(thread);

#if CONFIG_MACF
	/* Now is a reasonably good time to check if the exception action is
	 * permitted for this process, because after this point we will send
	 * the message out almost certainly.
	 * As with other failures, exception_triage_thread will go on
	 * to the next level.
	 */

	/* The global exception-to-signal translation port is safe to be an exception handler. */
	if (is_ux_handler_port(exc_port) == FALSE &&
	    mac_exc_action_check_exception_send(task, excp) != 0) {
		kr = KERN_FAILURE;
		goto out_release_right;
	}
#endif

	switch (behavior) {
	case EXCEPTION_STATE: {
		/*
		 * State round-trip without identity ports: send the thread's
		 * current state to the handler, and (unless this is a corpse
		 * notification) apply the state the handler sends back.
		 */
		mach_msg_type_number_t old_state_cnt, new_state_cnt;
		thread_state_data_t old_state;
		thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
		thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
		bool task_allow_user_state = task_needs_user_signed_thread_state(task);

		if (pac_replace_ptrs_user || task_allow_user_state) {
			/* NOTE(review): presumably diversifies/validates user-signed
			 * pointers in the exchanged state — confirm against
			 * thread_getstatus_to_user()/thread_setstatus_from_user(). */
			get_flags |= TSSF_RANDOM_USER_DIV;
			set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
		}

		c_thr_exc_raise_state++;
		old_state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus_to_user(thread, flavor,
		    (thread_state_t)old_state,
		    &old_state_cnt, get_flags);
		new_state_cnt = old_state_cnt;
		if (kr == KERN_SUCCESS) {
			/* Heap buffer for the handler's returned state; freed in
			 * the common epilogue below. */
			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
			if (new_state == NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out_release_right;
			}
			if (code64) {
				kr = mach_exception_raise_state(exc_port,
				    exception,
				    code,
				    codeCnt,
				    &flavor,
				    old_state, old_state_cnt,
				    new_state, &new_state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    old_state, old_state_cnt,
				    new_state, &new_state_cnt);
			}
			if (kr == KERN_SUCCESS) {
				if (exception != EXC_CORPSE_NOTIFY) {
					kr = thread_setstatus_from_user(thread, flavor,
					    (thread_state_t)new_state, new_state_cnt,
					    (thread_state_t)old_state, old_state_cnt,
					    set_flags);
				}
				goto out_release_right;
			}
		}

		goto out_release_right;
	}

	case EXCEPTION_DEFAULT: {
		/* Identity-carrying delivery: handler receives task and thread
		 * ports along with the codes; no state exchange. */
		c_thr_exc_raise++;

		task_reference(task);
		thread_reference(thread);
		/*
		 * Only deliver control port if Developer Mode enabled,
		 * or task is a corpse. Otherwise we only deliver the
		 * (immovable) read port in exception handler (both in
		 * or out of process). (94669540)
		 */
		if (developer_mode_state() || task_is_a_corpse(task)) {
			task_port = convert_task_to_port(task);
			thread_port = convert_thread_to_port(thread);
		} else {
			task_port = convert_task_read_to_port(task);
			thread_port = convert_thread_read_to_port(thread);
		}
		/* task and thread ref consumed */

		if (code64) {
			kr = mach_exception_raise(exc_port,
			    thread_port,
			    task_port,
			    exception,
			    code,
			    codeCnt);
		} else {
			kr = exception_raise(exc_port,
			    thread_port,
			    task_port,
			    exception,
			    small_code,
			    codeCnt);
		}

		goto out_release_right;
	}

	case EXCEPTION_IDENTITY_PROTECTED: {
		/* Deliver a task identity token plus the raw thread id instead
		 * of task/thread ports; 64-bit codes are mandatory here. */
		c_thr_exc_raise_identity_token++;

		kr = task_create_identity_token(task, &task_token);
		/* task_token now represents a task, or corpse */
		assert(kr == KERN_SUCCESS);
		task_token_port = convert_task_id_token_to_port(task_token);
		/* task token ref consumed */

		if (code64) {
			kr = mach_exception_raise_identity_protected(exc_port,
			    thread->thread_id,
			    task_token_port,
			    exception,
			    code,
			    codeCnt);
		} else {
			panic("mach_exception_raise_identity_protected() must be code64");
		}

		goto out_release_right;
	}

	case EXCEPTION_STATE_IDENTITY: {
		/* Combination of the two behaviors above: task/thread ports
		 * AND a thread-state round-trip. */
		mach_msg_type_number_t old_state_cnt, new_state_cnt;
		thread_state_data_t old_state;
		thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
		thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
		bool task_allow_user_state = task_needs_user_signed_thread_state(task);

		if (pac_replace_ptrs_user || task_allow_user_state) {
			get_flags |= TSSF_RANDOM_USER_DIV;
			set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
		}

		c_thr_exc_raise_state_id++;

		task_reference(task);
		thread_reference(thread);
		/*
		 * Only deliver control port if Developer Mode enabled,
		 * or task is a corpse. Otherwise we only deliver the
		 * (immovable) read port in exception handler (both in
		 * or out of process). (94669540)
		 */
		if (developer_mode_state() || task_is_a_corpse(task)) {
			task_port = convert_task_to_port(task);
			thread_port = convert_thread_to_port(thread);
		} else {
			task_port = convert_task_read_to_port(task);
			thread_port = convert_thread_read_to_port(thread);
		}
		/* task and thread ref consumed */

		old_state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus_to_user(thread, flavor,
		    (thread_state_t)old_state,
		    &old_state_cnt, get_flags);
		new_state_cnt = old_state_cnt;
		if (kr == KERN_SUCCESS) {
			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
			if (new_state == NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out_release_right;
			}
			if (code64) {
				kr = mach_exception_raise_state_identity(
					exc_port,
					thread_port,
					task_port,
					exception,
					code,
					codeCnt,
					&flavor,
					old_state, old_state_cnt,
					new_state, &new_state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port,
				    thread_port,
				    task_port,
				    exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    old_state, old_state_cnt,
				    new_state, &new_state_cnt);
			}

			if (kr == KERN_SUCCESS) {
				if (exception != EXC_CORPSE_NOTIFY) {
					kr = thread_setstatus_from_user(thread, flavor,
					    (thread_state_t)new_state, new_state_cnt,
					    (thread_state_t)old_state, old_state_cnt, set_flags);
				}
				goto out_release_right;
			}
		}

		goto out_release_right;
	}

	default:
		panic("bad exception behavior!");
		return KERN_FAILURE;
	}/* switch */

out_release_right:
	/*
	 * Common epilogue: drop every send right and buffer this path may
	 * have created.  All port variables start as IPC_PORT_NULL, so it
	 * is safe to reach here from any behavior.
	 */

	if (task_port) {
		ipc_port_release_send(task_port);
	}

	if (thread_port) {
		ipc_port_release_send(thread_port);
	}

	if (exc_port) {
		ipc_port_release_send(exc_port);
	}

	if (task_token_port) {
		ipc_port_release_send(task_token_port);
	}

	if (new_state) {
		kfree_data(new_state, sizeof(thread_state_data_t));
	}

	return kr;
}
511
512 /*
513 * Attempt exception delivery with backtrace info to exception ports
514 * in exc_ports in order.
515 */
516 /*
517 * Routine: exception_deliver_backtrace
518 * Purpose:
519 * Attempt exception delivery with backtrace info to exception ports
520 * in exc_ports in order.
521 * Conditions:
522 * Caller has a reference on bt_object, and send rights on exc_ports.
523 * Does not consume any passed references or rights
524 */
void
exception_deliver_backtrace(
	kcdata_object_t bt_object,
	ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT],
	exception_type_t exception)
{
	kern_return_t kr;
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	ipc_port_t target_port, bt_obj_port;

	/* Backtrace delivery is only used for EXC_GUARD today. */
	assert(exception == EXC_GUARD);

	/* The original exception type rides in code[0]; the message itself
	 * is raised as EXC_CORPSE_NOTIFY below. */
	code[0] = exception;
	code[1] = 0;

	/* Take our own ref on bt_object (caller keeps theirs); the convert
	 * call consumes it. */
	kcdata_object_reference(bt_object);
	bt_obj_port = convert_kcdata_object_to_port(bt_object);
	/* backtrace object ref consumed, no-senders is armed */

	if (!IP_VALID(bt_obj_port)) {
		return;
	}

	/*
	 * We are guaranteed at task_enqueue_exception_with_corpse() time
	 * that the exception port prefers backtrace delivery.
	 */
	for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
		target_port = exc_ports[i];

		if (!IP_VALID(target_port)) {
			continue;
		}

		/* Cheap liveness check only; the port may still die before the
		 * raise, which the MACH_RCV_PORT_DIED handling below tolerates. */
		ip_mq_lock(target_port);
		if (!ip_active(target_port)) {
			ip_mq_unlock(target_port);
			continue;
		}
		ip_mq_unlock(target_port);

		kr = mach_exception_raise_backtrace(target_port,
		    bt_obj_port,
		    EXC_CORPSE_NOTIFY,
		    code,
		    EXCEPTION_CODE_MAX);

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			/* Exception is handled at this level */
			break;
		}
	}

	/* May trigger no-senders notification for backtrace object */
	ipc_port_release_send(bt_obj_port);

	return;
}
583
584 /*
585 * Routine: check_exc_receiver_dependency
586 * Purpose:
587 * Verify that the port destined for receiving this exception is not
588 * on the current task. This would cause hang in kernel for
589 * EXC_CRASH primarily. Note: If port is transferred
590 * between check and delivery then deadlock may happen.
591 *
592 * Conditions:
593 * Nothing locked and no resources held.
594 * Called from an exception context.
595 * Returns:
596 * KERN_SUCCESS if its ok to send exception message.
597 */
598 static kern_return_t
check_exc_receiver_dependency(exception_type_t exception,struct exception_action * excp,lck_mtx_t * mutex)599 check_exc_receiver_dependency(
600 exception_type_t exception,
601 struct exception_action *excp,
602 lck_mtx_t *mutex)
603 {
604 kern_return_t retval = KERN_SUCCESS;
605
606 if (excp == NULL || exception != EXC_CRASH) {
607 return retval;
608 }
609
610 task_t task = current_task();
611 lck_mtx_lock(mutex);
612 ipc_port_t xport = excp[exception].port;
613 if (IP_VALID(xport) && ip_in_space_noauth(xport, task->itk_space)) {
614 retval = KERN_FAILURE;
615 }
616 lck_mtx_unlock(mutex);
617 return retval;
618 }
619
620
621 /*
622 * Routine: exception_triage_thread
623 * Purpose:
624 * The thread caught an exception.
625 * We make an up-call to the thread's exception server.
626 * Conditions:
627 * Nothing locked and no resources held.
628 * Called from an exception context, so
629 * thread_exception_return and thread_kdb_return
630 * are possible.
631 * Returns:
632 * KERN_SUCCESS if exception is handled by any of the handlers.
633 */
kern_return_t
exception_triage_thread(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	thread_t thread)
{
	task_t task;
	thread_ro_t tro;
	host_priv_t host_priv;
	lck_mtx_t *mutex;
	struct exception_action *actions;
	kern_return_t kr = KERN_FAILURE;

	/* EXC_RPC_ALERT is raised directly via sys_perf_notify(), never triaged. */
	assert(exception != EXC_RPC_ALERT);

	/*
	 * If this behavior has been requested by the kernel
	 * (due to the boot environment), we should panic if we
	 * enter this function. This is intended as a debugging
	 * aid; it should allow us to debug why we caught an
	 * exception in environments where debugging is especially
	 * difficult.
	 */
	if (panic_on_exception_triage) {
		panic("called exception_triage when it was forbidden by the boot environment");
	}

	/*
	 * Try to raise the exception at the activation level.
	 */
	mutex = &thread->mutex;
	tro = get_thread_ro(thread);
	actions = tro->tro_exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * Maybe the task level will handle it.
	 */
	task = tro->tro_task;
	mutex = &task->itk_lock_data;
	actions = task->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = &host_priv->lock;
	actions = host_priv->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

out:
	/*
	 * For ordinary exception types, return to user mode here.  Crash,
	 * resource, guard and corpse-notify exceptions instead return to
	 * the caller, which needs the result for further processing.
	 */
	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
		thread_exception_return();
	}
	return kr;
}
708
709 #if __has_feature(ptrauth_calls)
710 static TUNABLE(bool, pac_exception_telemetry, "-pac_exception_telemetry", false);
711
712 CA_EVENT(pac_exception_event,
713 CA_INT, exception,
714 CA_INT, exception_code_0,
715 CA_INT, exception_code_1,
716 CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
717
/*
 * Handle a pointer-authentication (PAC) violation before normal triage:
 * optionally report it to CoreAnalytics, and when the task is marked
 * PAC-exception-fatal (and is not being ptraced) terminate the process
 * outright instead of letting a handler catch it.
 */
static void
pac_exception_triage(
	exception_type_t exception,
	mach_exception_data_t code)
{
	boolean_t traced_flag = FALSE;
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";
	int pid = 0;

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (proc) {
		/* A ptraced process is left alone so the debugger sees the fault. */
		traced_flag = proc_is_traced(proc);
		/* Should only be called on current proc */
		proc_name = proc_name_address(proc);

		/*
		 * For a ptrauth violation, check if process isn't being ptraced and
		 * the task has the TFRO_PAC_EXC_FATAL flag set. If both conditions are true,
		 * terminate the task via exit_with_reason
		 */
		if (!traced_flag) {
			if (pac_exception_telemetry) {
				/* Opted in via "-pac_exception_telemetry" boot-arg. */
				ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_exception_event);
				CA_EVENT_TYPE(pac_exception_event) * pexc_event = ca_event->data;
				pexc_event->exception = exception;
				pexc_event->exception_code_0 = code[0];
				pexc_event->exception_code_1 = code[1];
				strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
				CA_EVENT_SEND(ca_event);
			}
			if (task_is_pac_exception_fatal(task)) {
				os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit a pac violation\n", __func__, proc_name, pid);
				exit_with_pac_exception(proc, exception, code[0], code[1]);
				thread_exception_return();
				/* NOT_REACHABLE */
			}
		}
	}
#endif /* MACH_BSD */
}
761 #endif /* __has_feature(ptrauth_calls) */
762
763 /*
764 * Routine: exception_triage
765 * Purpose:
766 * The current thread caught an exception.
767 * We make an up-call to the thread's exception server.
768 * Conditions:
769 * Nothing locked and no resources held.
770 * Called from an exception context, so
771 * thread_exception_return and thread_kdb_return
772 * are possible.
773 * Returns:
774 * KERN_SUCCESS if exception is handled by any of the handlers.
775 */
776 int debug4k_panic_on_exception = 0;
kern_return_t
exception_triage(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt)
{
	thread_t thread = current_thread();
	task_t task = current_task();

	/* Callers always pass at least code[0]; code[1] accesses are guarded. */
	assert(codeCnt > 0);

	if (VM_MAP_PAGE_SIZE(task->map) < PAGE_SIZE) {
		/* Sub-native-page (debug4k) task: trace, and optionally panic
		 * when debug4k_panic_on_exception is set. */
		DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx\n",
		    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		if (debug4k_panic_on_exception) {
			panic("DEBUG4K thread %p task %p map %p exception %d codes 0x%llx 0x%llx",
			    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		}
	}

#if (DEVELOPMENT || DEBUG)
#ifdef MACH_BSD
	/* Development aid: log exceptions for low pids, gated by the
	 * exception_log_max_pid tunable. */
	if (proc_pid(get_bsdtask_info(task)) <= exception_log_max_pid) {
		record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
		    "exception_log_max_pid: pid %d (%s): sending exception %d (0x%llx 0x%llx)",
		    proc_pid(get_bsdtask_info(task)), proc_name_address(get_bsdtask_info(task)),
		    exception, code[0], codeCnt > 1 ? code[1] : 0);
	}
#endif /* MACH_BSD */
#endif /* DEVELOPMENT || DEBUG */

#if __has_feature(ptrauth_calls)
	/* PAC violations arrive with EXC_PTRAUTH_BIT or'ed into the type;
	 * strip it and run PAC triage first (may terminate and not return). */
	if (exception & EXC_PTRAUTH_BIT) {
		exception &= ~EXC_PTRAUTH_BIT;
		assert(codeCnt == 2);
		pac_exception_triage(exception, code);
	}
#endif /* __has_feature(ptrauth_calls) */
	return exception_triage_thread(exception, code, codeCnt, thread);
}
817
818 kern_return_t
bsd_exception(exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt)819 bsd_exception(
820 exception_type_t exception,
821 mach_exception_data_t code,
822 mach_msg_type_number_t codeCnt)
823 {
824 task_t task;
825 lck_mtx_t *mutex;
826 thread_t self = current_thread();
827 kern_return_t kr;
828
829 /*
830 * Maybe the task level will handle it.
831 */
832 task = current_task();
833 mutex = &task->itk_lock_data;
834
835 kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
836
837 if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
838 return KERN_SUCCESS;
839 }
840 return KERN_FAILURE;
841 }
842
843
844 /*
845 * Raise an exception on a task.
846 * This should tell launchd to launch Crash Reporter for this task.
847 */
848 kern_return_t
task_exception_notify(exception_type_t exception,mach_exception_data_type_t exccode,mach_exception_data_type_t excsubcode)849 task_exception_notify(exception_type_t exception,
850 mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode)
851 {
852 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
853 wait_interrupt_t wsave;
854 kern_return_t kr = KERN_SUCCESS;
855
856 code[0] = exccode;
857 code[1] = excsubcode;
858
859 wsave = thread_interrupt_level(THREAD_UNINT);
860 kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
861 (void) thread_interrupt_level(wsave);
862 return kr;
863 }
864
865
866 /*
867 * Handle interface for special performance monitoring
868 * This is a special case of the host exception handler
869 */
kern_return_t
sys_perf_notify(thread_t thread, int pid)
{
	host_priv_t hostp;
	ipc_port_t xport;
	wait_interrupt_t wsave;
	kern_return_t ret;

	hostp = host_priv_self();       /* Get the host privileged ports */
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	code[0] = 0xFF000001;           /* Set terminate code */
	code[1] = pid;                  /* Pass out the pid */

	lck_mtx_lock(&hostp->lock);
	xport = hostp->exc_actions[EXC_RPC_ALERT].port;

	/* Make sure we're not catching our own exception */
	if (!IP_VALID(xport) ||
	    !ip_active(xport) ||
	    ip_in_space_noauth(xport, get_threadtask(thread)->itk_space)) {
		lck_mtx_unlock(&hostp->lock);
		return KERN_FAILURE;
	}

	lck_mtx_unlock(&hostp->lock);

	/*
	 * NOTE(review): the action can change after the unlock above;
	 * exception_deliver() re-snapshots the port under the same lock,
	 * so the self-delivery check here is best-effort only (matching
	 * the caveat in check_exc_receiver_dependency()).
	 */
	wsave = thread_interrupt_level(THREAD_UNINT);
	ret = exception_deliver(
		thread,
		EXC_RPC_ALERT,
		code,
		2,
		hostp->exc_actions,
		&hostp->lock);
	(void)thread_interrupt_level(wsave);

	return ret;
}
908