1 /*
2 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/message.h>
63 #include <mach/port.h>
64 #include <mach/mig_errors.h>
65 #include <mach/task.h>
66 #include <mach/thread_status.h>
67 #include <mach/exception_types.h>
68 #include <mach/exc.h>
69 #include <mach/mach_exc.h>
70
71 #include <ipc/port.h>
72 #include <ipc/ipc_entry.h>
73 #include <ipc/ipc_object.h>
74 #include <ipc/ipc_notify.h>
75 #include <ipc/ipc_space.h>
76 #include <ipc/ipc_pset.h>
77 #include <ipc/ipc_machdep.h>
78
79 #include <kern/ipc_tt.h>
80 #include <kern/task.h>
81 #include <kern/thread.h>
82 #include <kern/processor.h>
83 #include <kern/sched.h>
84 #include <kern/sched_prim.h>
85 #include <kern/host.h>
86 #include <kern/misc_protos.h>
87 #include <kern/ux_handler.h>
88 #include <kern/task_ident.h>
89
90 #include <vm/vm_map.h>
91
92 #include <security/mac_mach_internal.h>
93 #include <string.h>
94
95 #include <pexpert/pexpert.h>
96
97 #include <os/log.h>
98
99 #include <libkern/coreanalytics/coreanalytics.h>
100
/* When true (set via the -panic_on_exception_triage boot-arg, see
 * exception_init()), any trip through exception triage panics instead of
 * delivering — a debugging aid for environments where attaching a
 * debugger is hard. */
bool panic_on_exception_triage = false;

/* Delivery counters, one per exception-message flavor (statistics only). */
unsigned long c_thr_exc_raise = 0;
unsigned long c_thr_exc_raise_identity_token = 0;
unsigned long c_thr_exc_raise_state = 0;
unsigned long c_thr_exc_raise_state_id = 0;
107
/* forward declarations */

/* Deliver one exception to the handler registered in excp[exception];
 * mutex guards the exception_action array while it is snapshotted. */
kern_return_t exception_deliver(
	thread_t thread,
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	struct exception_action *excp,
	lck_mtx_t *mutex);

#ifdef MACH_BSD
/* BSD-layer entry point: attempts task-level delivery only (defined below). */
kern_return_t bsd_exception(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt);
#endif /* MACH_BSD */

#if __has_feature(ptrauth_calls)
/* BSD routine used to terminate a process after a fatal PAC violation. */
extern int exit_with_pac_exception(
	void *proc,
	exception_type_t exception,
	mach_exception_code_t code,
	mach_exception_subcode_t subcode);
#endif /* __has_feature(ptrauth_calls) */

#ifdef MACH_BSD
/* BSD proc accessors; only valid for the current process unless noted. */
extern bool proc_is_traced(void *p);
extern int proc_selfpid(void);
extern char *proc_name_address(struct proc *p);
#endif /* MACH_BSD */

#if (DEVELOPMENT || DEBUG)
/* Boot-arg: log every exception sent on behalf of a pid <= this value. */
TUNABLE_WRITEABLE(unsigned int, exception_log_max_pid, "exception_log_max_pid", 0);
#endif /* (DEVELOPMENT || DEBUG) */
141
142 /*
143 * Routine: exception_init
144 * Purpose:
145 * Global initialization of state for exceptions.
146 * Conditions:
147 * None.
148 */
149 void
exception_init(void)150 exception_init(void)
151 {
152 int tmp = 0;
153
154 if (PE_parse_boot_argn("-panic_on_exception_triage", &tmp, sizeof(tmp))) {
155 panic_on_exception_triage = true;
156 }
157
158 #if (DEVELOPMENT || DEBUG)
159 if (exception_log_max_pid) {
160 printf("Logging all exceptions where pid < exception_log_max_pid (%d)\n", exception_log_max_pid);
161 }
162 #endif /* (DEVELOPMENT || DEBUG) */
163 }
164
165 static TUNABLE(bool, pac_replace_ptrs_user, "-pac_replace_ptrs_user", false);
166
167 /*
168 * Routine: exception_deliver
169 * Purpose:
170 * Make an upcall to the exception server provided.
171 * Conditions:
172 * Nothing locked and no resources held.
173 * Called from an exception context, so
174 * thread_exception_return and thread_kdb_return
175 * are possible.
176 * Returns:
177 * KERN_SUCCESS if the exception was handled
178 */
kern_return_t
exception_deliver(
	thread_t thread,
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	struct exception_action *excp,
	lck_mtx_t *mutex)
{
	ipc_port_t exc_port = IPC_PORT_NULL;
	exception_data_type_t small_code[EXCEPTION_CODE_MAX];
	thread_state_t new_state = NULL;
	int code64;
	int behavior;
	int flavor;
	kern_return_t kr;
	task_t task;
	task_id_token_t task_token;
	ipc_port_t thread_port = IPC_PORT_NULL,
	    task_port = IPC_PORT_NULL,
	    task_token_port = IPC_PORT_NULL;

	/*
	 * Save work if we are terminating.
	 * Just go back to our AST handler.
	 */
	if (!thread->active && !thread->inspection) {
		return KERN_SUCCESS;
	}

	/*
	 * If there are no exception actions defined for this entity,
	 * we can't deliver here.
	 */
	if (excp == NULL) {
		return KERN_FAILURE;
	}

	/* Belt-and-suspenders: assert in DEBUG, fail gracefully otherwise. */
	assert(exception < EXC_TYPES_COUNT);
	if (exception >= EXC_TYPES_COUNT) {
		return KERN_FAILURE;
	}

	/* Index to the action entry for this specific exception type. */
	excp = &excp[exception];

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	ip_mq_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_mq_unlock(exc_port);
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	/* Manually take a port reference plus a send right; the raise call
	 * consumes nothing here — out_release_right drops the right (which
	 * also drops the reference). */
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_mq_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

	/* MACH_EXCEPTION_CODES selects the 64-bit code variants; strip all
	 * modifier flags to recover the base behavior for the switch below. */
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_MASK;

	if (!code64) {
		/* Legacy handler expects 32-bit codes: truncate explicitly. */
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	task = get_threadtask(thread);

#if CONFIG_MACF
	/* Now is a reasonably good time to check if the exception action is
	 * permitted for this process, because after this point we will send
	 * the message out almost certainly.
	 * As with other failures, exception_triage_thread will go on
	 * to the next level.
	 */

	/* The global exception-to-signal translation port is safe to be an exception handler. */
	if (is_ux_handler_port(exc_port) == FALSE &&
	    mac_exc_action_check_exception_send(task, excp) != 0) {
		kr = KERN_FAILURE;
		goto out_release_right;
	}
#endif

	/* Only behaviors that carry full identity need task/thread ports;
	 * EXCEPTION_STATE sends no identity, EXCEPTION_IDENTITY_PROTECTED
	 * sends an identity token instead. */
	if ((behavior != EXCEPTION_STATE) && (behavior != EXCEPTION_IDENTITY_PROTECTED)) {
		task_reference(task);
		task_port = convert_task_to_port(task);
		/* task ref consumed */
		thread_reference(thread);
		thread_port = convert_thread_to_port(thread);
		/* thread ref consumed */
	}

	if (behavior == EXCEPTION_IDENTITY_PROTECTED) {
		kr = task_create_identity_token(task, &task_token);
		/* task_token now represents a task, or corpse */
		assert(kr == KERN_SUCCESS);
		task_token_port = convert_task_id_token_to_port(task_token);
		/* task token ref consumed */
	}

	switch (behavior) {
	case EXCEPTION_STATE: {
		mach_msg_type_number_t old_state_cnt, new_state_cnt;
		thread_state_data_t old_state;
		thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
		thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
		bool task_allow_user_state = task_needs_user_signed_thread_state(task);

		/* PAC: divert pointer signing to the user-diversified key when
		 * requested by boot-arg or by the task itself. */
		if (pac_replace_ptrs_user || task_allow_user_state) {
			get_flags |= TSSF_RANDOM_USER_DIV;
			set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
		}

		c_thr_exc_raise_state++;
		old_state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus_to_user(thread, flavor,
		    (thread_state_t)old_state,
		    &old_state_cnt, get_flags);
		new_state_cnt = old_state_cnt;
		if (kr == KERN_SUCCESS) {
			/* Heap-allocate the reply state buffer; freed at
			 * out_release_right. */
			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
			if (new_state == NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out_release_right;
			}
			if (code64) {
				kr = mach_exception_raise_state(exc_port,
				    exception,
				    code,
				    codeCnt,
				    &flavor,
				    old_state, old_state_cnt,
				    new_state, &new_state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    old_state, old_state_cnt,
				    new_state, &new_state_cnt);
			}
			if (kr == KERN_SUCCESS) {
				/* Apply the state returned by the handler —
				 * except for corpse notifications, where the
				 * thread is not resuming. */
				if (exception != EXC_CORPSE_NOTIFY) {
					kr = thread_setstatus_from_user(thread, flavor,
					    (thread_state_t)new_state, new_state_cnt,
					    (thread_state_t)old_state, old_state_cnt,
					    set_flags);
				}
				goto out_release_right;
			}
		}

		goto out_release_right;
	}

	case EXCEPTION_DEFAULT: {
		c_thr_exc_raise++;
		if (code64) {
			kr = mach_exception_raise(exc_port,
			    thread_port,
			    task_port,
			    exception,
			    code,
			    codeCnt);
		} else {
			kr = exception_raise(exc_port,
			    thread_port,
			    task_port,
			    exception,
			    small_code,
			    codeCnt);
		}

		goto out_release_right;
	}

	case EXCEPTION_IDENTITY_PROTECTED: {
		c_thr_exc_raise_identity_token++;
		/* This behavior is only registrable with MACH_EXCEPTION_CODES,
		 * so a 32-bit variant can never be reached. */
		if (code64) {
			kr = mach_exception_raise_identity_protected(exc_port,
			    thread->thread_id,
			    task_token_port,
			    exception,
			    code,
			    codeCnt);
		} else {
			panic("mach_exception_raise_identity_protected() must be code64");
		}

		goto out_release_right;
	}

	case EXCEPTION_STATE_IDENTITY: {
		mach_msg_type_number_t old_state_cnt, new_state_cnt;
		thread_state_data_t old_state;
		thread_set_status_flags_t get_flags = TSSF_TRANSLATE_TO_USER;
		thread_set_status_flags_t set_flags = TSSF_CHECK_USER_FLAGS;
		bool task_allow_user_state = task_needs_user_signed_thread_state(task);

		/* Same PAC diversification policy as EXCEPTION_STATE above. */
		if (pac_replace_ptrs_user || task_allow_user_state) {
			get_flags |= TSSF_RANDOM_USER_DIV;
			set_flags |= (TSSF_ALLOW_ONLY_USER_PTRS | TSSF_RANDOM_USER_DIV);
		}

		c_thr_exc_raise_state_id++;
		old_state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus_to_user(thread, flavor,
		    (thread_state_t)old_state,
		    &old_state_cnt, get_flags);
		new_state_cnt = old_state_cnt;
		if (kr == KERN_SUCCESS) {
			new_state = (thread_state_t)kalloc_data(sizeof(thread_state_data_t), Z_WAITOK | Z_ZERO);
			if (new_state == NULL) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out_release_right;
			}
			if (code64) {
				kr = mach_exception_raise_state_identity(
					exc_port,
					thread_port,
					task_port,
					exception,
					code,
					codeCnt,
					&flavor,
					old_state, old_state_cnt,
					new_state, &new_state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port,
				    thread_port,
				    task_port,
				    exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    old_state, old_state_cnt,
				    new_state, &new_state_cnt);
			}

			if (kr == KERN_SUCCESS) {
				if (exception != EXC_CORPSE_NOTIFY) {
					kr = thread_setstatus_from_user(thread, flavor,
					    (thread_state_t)new_state, new_state_cnt,
					    (thread_state_t)old_state, old_state_cnt, set_flags);
				}
				goto out_release_right;
			}
		}

		goto out_release_right;
	}

	default:
		panic("bad exception behavior!");
		return KERN_FAILURE;
	}/* switch */

out_release_right:
	/* Drop every send right taken above; each release also drops the
	 * associated port reference.  NULL-initialized ports are skipped. */

	if (task_port) {
		ipc_port_release_send(task_port);
	}

	if (thread_port) {
		ipc_port_release_send(thread_port);
	}

	if (exc_port) {
		ipc_port_release_send(exc_port);
	}

	if (task_token_port) {
		ipc_port_release_send(task_token_port);
	}

	if (new_state) {
		kfree_data(new_state, sizeof(thread_state_data_t));
	}

	return kr;
}
475
476 /*
477 * Routine: check_exc_receiver_dependency
478 * Purpose:
479 * Verify that the port destined for receiving this exception is not
480 * on the current task. This would cause hang in kernel for
481 * EXC_CRASH primarily. Note: If port is transferred
482 * between check and delivery then deadlock may happen.
483 *
484 * Conditions:
485 * Nothing locked and no resources held.
486 * Called from an exception context.
487 * Returns:
 * KERN_SUCCESS if it is OK to send the exception message.
489 */
490 static kern_return_t
check_exc_receiver_dependency(exception_type_t exception,struct exception_action * excp,lck_mtx_t * mutex)491 check_exc_receiver_dependency(
492 exception_type_t exception,
493 struct exception_action *excp,
494 lck_mtx_t *mutex)
495 {
496 kern_return_t retval = KERN_SUCCESS;
497
498 if (excp == NULL || exception != EXC_CRASH) {
499 return retval;
500 }
501
502 task_t task = current_task();
503 lck_mtx_lock(mutex);
504 ipc_port_t xport = excp[exception].port;
505 if (IP_VALID(xport) && ip_in_space_noauth(xport, task->itk_space)) {
506 retval = KERN_FAILURE;
507 }
508 lck_mtx_unlock(mutex);
509 return retval;
510 }
511
512
513 /*
514 * Routine: exception_triage_thread
515 * Purpose:
516 * The thread caught an exception.
517 * We make an up-call to the thread's exception server.
518 * Conditions:
519 * Nothing locked and no resources held.
520 * Called from an exception context, so
521 * thread_exception_return and thread_kdb_return
522 * are possible.
523 * Returns:
524 * KERN_SUCCESS if exception is handled by any of the handlers.
525 */
kern_return_t
exception_triage_thread(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	thread_t thread)
{
	task_t task;
	thread_ro_t tro;
	host_priv_t host_priv;
	lck_mtx_t *mutex;
	struct exception_action *actions;
	kern_return_t kr = KERN_FAILURE;

	/* EXC_RPC_ALERT is delivered only through sys_perf_notify(). */
	assert(exception != EXC_RPC_ALERT);

	/*
	 * If this behavior has been requested by the kernel
	 * (due to the boot environment), we should panic if we
	 * enter this function.  This is intended as a debugging
	 * aid; it should allow us to debug why we caught an
	 * exception in environments where debugging is especially
	 * difficult.
	 */
	if (panic_on_exception_triage) {
		panic("called exception_triage when it was forbidden by the boot environment");
	}

	/*
	 * Try to raise the exception at the activation level.
	 * Escalation order is thread -> task -> host; MACH_RCV_PORT_DIED
	 * (handler port died mid-delivery) is treated as handled and
	 * stops the escalation, matching KERN_SUCCESS.
	 */
	mutex = &thread->mutex;
	tro = get_thread_ro(thread);
	actions = tro->tro_exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * Maybe the task level will handle it.
	 */
	task = tro->tro_task;
	mutex = &task->itk_lock_data;
	actions = task->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = &host_priv->lock;
	actions = host_priv->exc_actions;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, actions, mutex)) {
		kr = exception_deliver(thread, exception, code, codeCnt, actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
			goto out;
		}
	}

out:
	/* For ordinary exceptions (not crash/resource/guard/corpse), return
	 * straight to user mode from the exception context rather than to
	 * the caller. */
	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
		thread_exception_return();
	}
	return kr;
}
600
#if __has_feature(ptrauth_calls)
/* Boot-arg: emit a CoreAnalytics event for every untraced PAC violation. */
static TUNABLE(bool, pac_exception_telemetry, "-pac_exception_telemetry", false);

/* CoreAnalytics event layout for PAC-violation telemetry. */
CA_EVENT(pac_exception_event,
    CA_INT, exception,
    CA_INT, exception_code_0,
    CA_INT, exception_code_1,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);

/*
 * Pre-triage handling for pointer-authentication (PAC) violations:
 * optionally report telemetry and, when the task has opted into fatal
 * PAC exceptions and is not being debugged, terminate the process
 * instead of letting normal exception triage run.
 */
static void
pac_exception_triage(
	exception_type_t exception,
	mach_exception_data_t code)
{
	boolean_t traced_flag = FALSE;
	task_t task = current_task();
	void *proc = task->bsd_info;
	char *proc_name = (char *) "unknown";
	int pid = 0;

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (proc) {
		traced_flag = proc_is_traced(proc);
		/* Should only be called on current proc */
		proc_name = proc_name_address(proc);

		/*
		 * For a ptrauth violation, check if process isn't being ptraced and
		 * the task has the TF_PAC_EXC_FATAL flag set. If both conditions are true,
		 * terminate the task via exit_with_reason
		 */
		if (!traced_flag) {
			if (pac_exception_telemetry) {
				ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_exception_event);
				CA_EVENT_TYPE(pac_exception_event) * pexc_event = ca_event->data;
				pexc_event->exception = exception;
				pexc_event->exception_code_0 = code[0];
				pexc_event->exception_code_1 = code[1];
				strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
				CA_EVENT_SEND(ca_event);
			}
			if (task_is_pac_exception_fatal(task)) {
				os_log_error(OS_LOG_DEFAULT, "%s: process %s[%d] hit a pac violation\n", __func__, proc_name, pid);
				exit_with_pac_exception(proc, exception, code[0], code[1]);
				thread_exception_return();
				/* NOT_REACHABLE */
			}
		}
	}
#endif /* MACH_BSD */
}
#endif /* __has_feature(ptrauth_calls) */
654
655 /*
656 * Routine: exception_triage
657 * Purpose:
658 * The current thread caught an exception.
659 * We make an up-call to the thread's exception server.
660 * Conditions:
661 * Nothing locked and no resources held.
662 * Called from an exception context, so
663 * thread_exception_return and thread_kdb_return
664 * are possible.
665 * Returns:
666 * KERN_SUCCESS if exception is handled by any of the handlers.
667 */
/* Debug aid for sub-page (4K-in-16K) tasks: panic on any exception. */
int debug4k_panic_on_exception = 0;

kern_return_t
exception_triage(
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt)
{
	thread_t thread = current_thread();
	task_t task = current_task();

	assert(codeCnt > 0);

	/* Extra diagnostics for tasks running with a smaller-than-native
	 * VM page size. */
	if (VM_MAP_PAGE_SIZE(task->map) < PAGE_SIZE) {
		DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx\n",
		    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		if (debug4k_panic_on_exception) {
			panic("DEBUG4K thread %p task %p map %p exception %d codes 0x%llx 0x%llx",
			    thread, task, task->map, exception, code[0], codeCnt > 1 ? code[1] : 0);
		}
	}

#if (DEVELOPMENT || DEBUG)
#ifdef MACH_BSD
	/* Optional per-pid exception logging (see exception_log_max_pid). */
	if (proc_pid(task->bsd_info) <= exception_log_max_pid) {
		printf("exception_log_max_pid: pid %d (%s): sending exception %d (0x%llx 0x%llx)\n",
		    proc_pid(task->bsd_info), proc_name_address(task->bsd_info),
		    exception, code[0], codeCnt > 1 ? code[1] : 0);
	}
#endif /* MACH_BSD */
#endif /* DEVELOPMENT || DEBUG */

#if __has_feature(ptrauth_calls)
	/* PAC violations arrive with EXC_PTRAUTH_BIT set; strip it and run
	 * the PAC-specific pre-triage (telemetry / fatal termination)
	 * before normal delivery. */
	if (exception & EXC_PTRAUTH_BIT) {
		exception &= ~EXC_PTRAUTH_BIT;
		assert(codeCnt == 2);
		pac_exception_triage(exception, code);
	}
#endif /* __has_feature(ptrauth_calls) */
	return exception_triage_thread(exception, code, codeCnt, thread);
}
708
709 kern_return_t
bsd_exception(exception_type_t exception,mach_exception_data_t code,mach_msg_type_number_t codeCnt)710 bsd_exception(
711 exception_type_t exception,
712 mach_exception_data_t code,
713 mach_msg_type_number_t codeCnt)
714 {
715 task_t task;
716 lck_mtx_t *mutex;
717 thread_t self = current_thread();
718 kern_return_t kr;
719
720 /*
721 * Maybe the task level will handle it.
722 */
723 task = current_task();
724 mutex = &task->itk_lock_data;
725
726 kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
727
728 if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) {
729 return KERN_SUCCESS;
730 }
731 return KERN_FAILURE;
732 }
733
734
735 /*
736 * Raise an exception on a task.
737 * This should tell launchd to launch Crash Reporter for this task.
738 */
739 kern_return_t
task_exception_notify(exception_type_t exception,mach_exception_data_type_t exccode,mach_exception_data_type_t excsubcode)740 task_exception_notify(exception_type_t exception,
741 mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode)
742 {
743 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
744 wait_interrupt_t wsave;
745 kern_return_t kr = KERN_SUCCESS;
746
747 code[0] = exccode;
748 code[1] = excsubcode;
749
750 wsave = thread_interrupt_level(THREAD_UNINT);
751 kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
752 (void) thread_interrupt_level(wsave);
753 return kr;
754 }
755
756
757 /*
758 * Handle interface for special performance monitoring
759 * This is a special case of the host exception handler
760 */
kern_return_t
sys_perf_notify(thread_t thread, int pid)
{
	host_priv_t hostp;
	ipc_port_t xport;
	wait_interrupt_t wsave;
	kern_return_t ret;

	hostp = host_priv_self(); /* Get the host privileged ports */
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	code[0] = 0xFF000001; /* Set terminate code */
	code[1] = pid; /* Pass out the pid */

	lck_mtx_lock(&hostp->lock);
	xport = hostp->exc_actions[EXC_RPC_ALERT].port;

	/* Make sure we're not catching our own exception */
	if (!IP_VALID(xport) ||
	    !ip_active(xport) ||
	    ip_in_space_noauth(xport, get_threadtask(thread)->itk_space)) {
		lck_mtx_unlock(&hostp->lock);
		return KERN_FAILURE;
	}

	/* NOTE(review): the lock is dropped before delivery, so this
	 * self-receive check is advisory only — the EXC_RPC_ALERT action
	 * could change before exception_deliver re-snapshots it (same
	 * caveat as check_exc_receiver_dependency above). */
	lck_mtx_unlock(&hostp->lock);

	/* Deliver uninterruptibly at the host level; exception_deliver
	 * re-validates the port under hostp->lock. */
	wsave = thread_interrupt_level(THREAD_UNINT);
	ret = exception_deliver(
		thread,
		EXC_RPC_ALERT,
		code,
		2,
		hostp->exc_actions,
		&hostp->lock);
	(void)thread_interrupt_level(wsave);

	return ret;
}
799