/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>
#include <arm64/instructions.h>

#include <kern/debug.h>
#include <kern/socd_client.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/arm/traps.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <prng/entropy.h>

#if CONFIG_KERNEL_TBI && KASAN_TBI
#include <san/kasan.h>
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
#include <san/ubsan_minimal.h>
#endif /* CONFIG_UBSAN_MINIMAL */

#ifndef __arm64__
#error Should only be compiling for arm64.
#endif

#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))

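/*
 * COPYIN/COPYOUT dispatch on the saved CPSR: an exception taken from EL1
 * (e.g. while reading the faulting instruction below) must use the kernel
 * copy routines, since the source address is then a kernel address. Both
 * macros expect a local `state` variable to be in scope at the call site.
 */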
#define COPYIN(src, dst, size)                           \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyin_kern(src, dst, size)                    : \
	copyin(src, dst, size)

#define COPYOUT(src, dst, size)                          \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyout_kern(src, dst, size)                   : \
	copyout(src, dst, size)

// Stringizes a macro parameter so it can be concatenated with a string literal.
#define STR1(x) #x
#define STR(x) STR1(x)

#define ARM64_KDBG_CODE_KERNEL (0 << 8)
#define ARM64_KDBG_CODE_USER   (1 << 8)
#define ARM64_KDBG_CODE_GUEST  (2 << 8)

_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);

void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);
#ifdef __ARM_ARCH_8_6__
static void handle_pac_fail(arm_saved_state_t *state, uint32_t esr) __dead2;
#endif

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);

/*
 * For UBSan trap and continue handling, we must be able to recover
 * from handle_kernel_breakpoint().
 */
#if !CONFIG_UBSAN_MINIMAL
__dead2
#endif /* CONFIG_UBSAN_MINIMAL */
static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t);

static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_thread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct proc;

typedef uint32_t arm64_instr_t;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h,
 * so the trap instructions are copied over from that file here. They must
 * be kept in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

extern void arm64_thread_exception_return(void) __dead2;

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif

#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

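/*
 * Records the saved state of the first fault that led to a panic, so a
 * nested panic cannot overwrite it (see the cmpxchg in
 * panic_with_thread_kernel_state()).
 */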
static arm_saved_state64_t *original_faulting_state = NULL;

TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);

extern vm_offset_t static_memory_end;

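/*
 * Predicates classifying the fault status code (FSC) field of the ESR; the
 * abort handlers use these to pick between the VM fault, alignment, and
 * parity/platform-error paths.
 */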
static inline int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_translation_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_alignment_fault(fault_status_t status)
{
	return status == FSC_ALIGNMENT_FAULT;
}

static inline int
is_parity_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_PARITY:
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

__dead2 __unused
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n");
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
			    "walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
			    far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
			    (unsigned) far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
#pragma clang diagnostic pop
#endif

static void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if CONFIG_KERNEL_INTEGRITY
	kernel_integrity_error_handler(esr, far);
#endif

	cpu_data_t *cdp = getCpuDatap();

	if (PE_handle_platform_error(far)) {
		return;
	} else if (cdp->platform_error_handler != NULL) {
		cdp->platform_error_handler(cdp->cpu_id, far);
	} else {
		arm64_implementation_specific_error(state, esr, far);
	}
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);

	// rdar://80659177
	// Emit the SoCD tracepoint up to twice: once for the first call to
	// panic, and once more if a nested panic follows it.
	static int twice = 2;
	if (twice > 0) {
		twice--;
		SOCD_TRACE_XNU(KERNEL_STATE_PANIC, ADDR(state->pc),
		    PACK_LSB(VALUE(state->lr), VALUE(ss_valid)),
		    PACK_2X32(VALUE(state->esr), VALUE(state->cpsr)),
		    VALUE(state->far));
	}

	panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
	    "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
	    "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
	    "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
	    "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
	    "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
	    "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
	    "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
	    "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
	    "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
	    msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
	    state->x[0], state->x[1], state->x[2], state->x[3],
	    state->x[4], state->x[5], state->x[6], state->x[7],
	    state->x[8], state->x[9], state->x[10], state->x[11],
	    state->x[12], state->x[13], state->x[14], state->x[15],
	    state->x[16], state->x[17], state->x[18], state->x[19],
	    state->x[20], state->x[21], state->x[22], state->x[23],
	    state->x[24], state->x[25], state->x[26], state->x[27],
	    state->x[28], state->fp, state->lr, state->sp,
	    state->pc, state->cpsr, state->esr, state->far);
}

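/*
 * A synchronous exception taken while running on SP1 (the exception stack)
 * is fatal; the only concession is giving a kernel-debugger breakpoint a
 * chance to run before panicking.
 */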
void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t * state = &context->ss;

	switch (class) {
	case ESR_EC_UNCATEGORIZED:
	{
		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
		if (IS_ARM_GDB_TRAP(instr)) {
			DebuggerCall(EXC_BREAKPOINT, state);
		}
	}
		OS_FALLTHROUGH; // panic if we return from the debugger
	default:
		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
	}
}

__attribute__((noreturn))
void
thread_exception_return()
{
	thread_t thread = current_thread();
	if (thread->machine.exception_trace_code != 0) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
		thread->machine.exception_trace_code = 0;
	}

	arm64_thread_exception_return();
	__builtin_unreachable();
}

/*
 * Check whether task vtimers are running and set the thread and CPU BSD AST.
 *
 * Must be called with interrupts masked so updates of fields are atomic.
 * Must be emitted inline to avoid generating an FBT probe on the exception path.
 */
__attribute__((__always_inline__))
static inline void
task_vtimer_check(thread_t thread)
{
	task_t task = get_threadtask_early(thread);

	if (__improbable(task != NULL && task->vtimers)) {
		thread->ast |= AST_BSD;
		thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
	}
}

#if MACH_ASSERT
/**
 * A version of get_preemption_level() that works in early boot.
 *
 * If an exception is raised in early boot before the initial thread has been
 * set up, then calling get_preemption_level() in the SLEH will trigger an
 * infinitely-recursing exception. This function handles this edge case.
 */
static inline int
sleh_get_preemption_level(void)
{
	if (__improbable(current_thread() == NULL)) {
		return 0;
	}
	return get_preemption_level();
}
#endif // MACH_ASSERT

void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t * state = &context->ss;
	vm_offset_t recover = 0;
	thread_t thread = current_thread();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif
	expected_fault_handler_t expected_fault_handler = NULL;
#ifdef CONFIG_XNUPOST
	expected_fault_handler_t saved_expected_fault_handler = NULL;
	uintptr_t saved_expected_fault_addr = 0;
#endif /* CONFIG_XNUPOST */

	ASSERT_CONTEXT_SANITY(context);

	task_vtimer_check(thread);

#if CONFIG_DTRACE
	/*
	 * Handle kernel DTrace probes as early as possible to minimize the likelihood
	 * that this path will itself trigger a DTrace probe, which would lead to infinite
	 * probe recursion.
	 */
	if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
	    (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
		return;
	}
#endif
	bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	/*
	 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
	 * that would disclose the behavior of PT_DENY_ATTACH processes.
	 */
	if (is_user) {
		thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
		    esr, far, get_saved_state_pc(state), 0, 0);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}

	if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
		/*
		 * We no longer support 32-bit, which means no 2-byte
		 * instructions.
		 */
		if (is_user) {
			panic("Exception on 2-byte instruction, "
			    "context=%p, esr=%#x, far=%p",
			    context, esr, (void *)far);
		} else {
			panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
		}
	}

	/* Don't run exception handler with recover handler set in case of double fault */
	if (thread->recover) {
		recover = thread->recover;
		thread->recover = (vm_offset_t)NULL;
	}

#ifdef CONFIG_XNUPOST
	if (thread->machine.expected_fault_handler != NULL) {
		saved_expected_fault_handler = thread->machine.expected_fault_handler;
		saved_expected_fault_addr = thread->machine.expected_fault_addr;

		thread->machine.expected_fault_handler = NULL;
		thread->machine.expected_fault_addr = 0;

		if (saved_expected_fault_addr == far) {
			expected_fault_handler = saved_expected_fault_handler;
		}
	}
#endif /* CONFIG_XNUPOST */

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
		ml_set_interrupts_enabled(TRUE);
	}

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !is_user) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, esr);
		break;

#ifdef __ARM_ARCH_8_6__
	case ESR_EC_PAC_FAIL:
		handle_pac_fail(state, esr);
		__builtin_unreachable();

#endif /* __ARM_ARCH_8_6__ */

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_IABORT_EL1:
#ifdef CONFIG_XNUPOST
		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */

		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		__builtin_unreachable();

	case ESR_EC_DABORT_EL1:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

		handle_uncategorized(&context->ss);
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		__builtin_unreachable();

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state, esr);
		__builtin_unreachable();

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
			handle_kernel_breakpoint(state, esr);
#if CONFIG_UBSAN_MINIMAL
			/* UBSan breakpoints are recoverable */
			break;
#endif /* CONFIG_UBSAN_MINIMAL */
		} else {
			handle_breakpoint(state, esr);
			__builtin_unreachable();
		}

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state, esr);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL1:
		panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL1:
		panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, it is probably in a
		 * copyin/copyout, which we don't want to abort. Turn off
		 * watchpoints and keep going; they are turned back on in
		 * return_from_exception.
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		__builtin_unreachable();

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
		    ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
			    state, class, esr, (void *)far, get_saved_state_cpsr(state));
		}
		// must clear this fault in PSR to re-run
		mask_saved_state_cpsr(state, 0, PSR64_IL);
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		__builtin_unreachable();

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		__builtin_unreachable();

	case ESR_EC_FLOATING_POINT_64:
		handle_fp_trap(state, esr);
		__builtin_unreachable();

	default:
		handle_uncategorized(state);
	}

#ifdef CONFIG_XNUPOST
	if (saved_expected_fault_handler != NULL) {
		thread->machine.expected_fault_handler = saved_expected_fault_handler;
		thread->machine.expected_fault_addr = saved_expected_fault_addr;
	}
#endif /* CONFIG_XNUPOST */

	if (recover) {
		thread->recover = recover;
	}
	if (is_user) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
		    esr, far, get_saved_state_pc(state), 0, 0);
		thread->machine.exception_trace_code = 0;
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("synchronous exception changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instr.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS) {
				return;
			}
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32-bit user process, we check for thumb mode, in
		 * which case we only care about a 2-byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}

#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			current_thread()->machine.kpcb = state;

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request).
			 */
			DebuggerCall(exception, state);

			current_thread()->machine.kpcb = NULL;
			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (IS_ARM_GDB_TRAP(instr)) {
		exception = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_BREAKPOINT;
		codes[1] = instr;
	} else {
		codes[1] = instr;
	}

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
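/*
 * Pointer-authentication failure brk instructions encode the key that failed
 * as an offset from this base comment: comment_base + 0 for the IA key
 * through comment_base + ptrauth_key_asdb for the DB key.
 */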
static const uint16_t ptrauth_brk_comment_base = 0xc470;

static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
	return comment >= ptrauth_brk_comment_base &&
	       comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
}

static inline const char *
ptrauth_key_to_string(ptrauth_key key)
{
	switch (key) {
	case ptrauth_key_asia:
		return "IA";
	case ptrauth_key_asib:
		return "IB";
	case ptrauth_key_asda:
		return "DA";
	case ptrauth_key_asdb:
		return "DB";
	default:
		__builtin_unreachable();
	}
}
#endif /* __has_feature(ptrauth_calls) */

#if CONFIG_KERNEL_TBI && KASAN_TBI
static inline bool
brk_comment_is_kasan_failure(uint16_t comment)
{
	return comment >= KASAN_TBI_ESR_BASE &&
	       comment <= KASAN_TBI_ESR_TOP;
}
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
static inline bool
brk_comment_is_ubsan(uint16_t comment)
{
	return comment >= UBSAN_MINIMAL_TRAPS_START &&
	       comment < UBSAN_MINIMAL_TRAPS_END;
}
#endif /* CONFIG_UBSAN_MINIMAL */

static void
handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
{
	uint16_t comment = ISS_BRK_COMMENT(esr);

#if __has_feature(ptrauth_calls)
	if (brk_comment_is_ptrauth(comment)) {
#define MSG_FMT "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx"
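		/*
		 * Size the message buffer at compile time: start from the
		 * format string's length, then swap each specifier's length
		 * for its worst-case expansion, plus one byte for the NUL.
		 */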
		char msg[strlen(MSG_FMT)
		- strlen("0x%04X") + strlen("0xFFFF")
		- strlen("%s") + strlen("IA")
		- strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
		+ 1];
		ptrauth_key key = (ptrauth_key)(comment - ptrauth_brk_comment_base);
		const char *key_str = ptrauth_key_to_string(key);
		snprintf(msg, sizeof(msg), MSG_FMT, comment, key_str, saved_state64(state)->x[16]);

		panic_with_thread_kernel_state(msg, state);
		__builtin_unreachable();
#undef MSG_FMT
	}
#endif /* __has_feature(ptrauth_calls) */

#if CONFIG_KERNEL_TBI && KASAN_TBI
	if (brk_comment_is_kasan_failure(comment)) {
		kasan_handle_brk_failure(saved_state64(state)->x[0], comment);
		__builtin_unreachable();
	}
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
	if (brk_comment_is_ubsan(comment)) {
		ubsan_handle_brk_trap(comment, get_saved_state_pc(state),
		    get_saved_state_fp(state));
		add_saved_state_pc(state, 4);
		return;
	}
#endif /* CONFIG_UBSAN_MINIMAL */

#define MSG_FMT "Break 0x%04X instruction exception from kernel. Panic (by design)"
	char msg[strlen(MSG_FMT) - strlen("0x%04X") + strlen("0xFFFF") + 1];
	snprintf(msg, sizeof(msg), MSG_FMT, comment);
#undef MSG_FMT

	panic_with_thread_kernel_state(msg, state);
	__builtin_unreachable();
}

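/*
 * User breakpoints are delivered to userspace as EXC_BREAKPOINT. On
 * pre-ARMv8.6 ptrauth hardware, brk comments in the PAC failure range are
 * additionally flagged with EXC_PTRAUTH_BIT.
 */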
static void
handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
{
	exception_type_t exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
	mach_msg_type_number_t numcodes = 2;

#if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
	if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
	    brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
		exception |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */

	codes[1] = get_saved_state_pc(state);
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_watchpoint(vm_offset_t fault_addr)
{
	exception_type_t exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
	mach_msg_type_number_t numcodes = 2;

	codes[1] = fault_addr;
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
{
	fault_status_t fault_code;
	vm_prot_t fault_type;

	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
	handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler);
}

static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.instr_ex_cnt++;
	*fault_code = ISS_IA_FSC(iss);
	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.data_ex_cnt++;
	*fault_code = ISS_DA_FSC(iss);

	/*
	 * Cache maintenance operations always report faults as write access.
	 * Change these to read access, unless they report a permission fault.
	 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
	 * access to the mapping, but if a cache maintenance operation that only requires
	 * read access generates a permission fault, then we will not be able to handle
	 * the fault regardless of whether we treat it as a read or write fault.
	 */
	if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
	} else {
		*fault_type = (VM_PROT_READ);
	}
}

#if __has_feature(ptrauth_calls)
#ifdef __ARM_ARCH_8_6__
static inline uint64_t
fault_addr_bitmask(unsigned int bit_from, unsigned int bit_to)
{
	return ((1ULL << (bit_to - bit_from + 1)) - 1) << bit_from;
}
#else
static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
	return (bool)((fault_addr >> bit) & 1);
}
#endif /* __ARM_ARCH_8_6__ */

/**
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
	bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
	bool tbi = data_key || __improbable(instruction_tbi);
#ifdef __ARM_ARCH_8_6__
	/*
	 * EnhancedPAC2 CPUs don't encode error codes at fixed positions, so
	 * treat all non-canonical address bits like potential poison bits.
	 */
	uint64_t mask = fault_addr_bitmask(T0SZ_BOOT, 54);
	if (!tbi) {
		mask |= fault_addr_bitmask(56, 63);
	}
	return (fault_addr & mask) != 0;
#else /* !__ARM_ARCH_8_6__ */
	unsigned int poison_shift;
	if (tbi) {
		poison_shift = 53;
	} else {
		poison_shift = 61;
	}

	/* PAC error codes are always in the form key_number:NOT(key_number) */
	bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
	bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
	return poison_bit_1 != poison_bit_2;
#endif /* __ARM_ARCH_8_6__ */
}
#endif /* __has_feature(ptrauth_calls) */

static void
handle_pc_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_DA_ALIGN;
	codes[1] = get_saved_state_pc(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_SP_ALIGN;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_wf_trap(arm_saved_state_t *state)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	exc = EXC_BAD_INSTRUCTION;
	codes[0] = EXC_ARM_UNDEFINED;
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exc = EXC_ARITHMETIC;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("Floating point exception from kernel", state);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	/* The floating point trap flags are only valid if TFV is set. */
	if (!fp_exceptions_enabled) {
		exc = EXC_BAD_INSTRUCTION;
		codes[0] = EXC_ARM_UNDEFINED;
	} else if (!(esr & ISS_FP_TFV)) {
		codes[0] = EXC_ARM_FP_UNDEFINED;
	} else if (esr & ISS_FP_UFF) {
		codes[0] = EXC_ARM_FP_UF;
	} else if (esr & ISS_FP_OFF) {
		codes[0] = EXC_ARM_FP_OF;
	} else if (esr & ISS_FP_IOF) {
		codes[0] = EXC_ARM_FP_IO;
	} else if (esr & ISS_FP_DZF) {
		codes[0] = EXC_ARM_FP_DZ;
	} else if (esr & ISS_FP_IDF) {
		codes[0] = EXC_ARM_FP_ID;
	} else if (esr & ISS_FP_IXF) {
		codes[0] = EXC_ARM_FP_IX;
	} else {
		panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
	}

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

/*
 * handle_alignment_fault_from_user:
 *   state: Saved state
 *
 * Attempts to deal with an alignment fault from userspace (possibly by
 * emulating the faulting instruction). If emulation failed due to an
 * unserviceable fault, the ESR for that fault will be stored in the
 * recovery_esr field of the thread by the exception code.
 *
 * Returns:
 *   -1:     Emulation failed (emulation of state/instr not supported)
 *   0:      Successfully emulated the instruction
 *   EFAULT: Emulation failed (probably due to permissions)
 *   EINVAL: Emulation failed (probably due to a bad address)
 */
static int
handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
{
	int ret = -1;

#pragma unused (state)
#pragma unused (vmfr)

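	/*
	 * No emulation is attempted in this configuration: returning -1
	 * unconditionally makes handle_user_abort() report the abort to
	 * userspace as EXC_ARM_DA_ALIGN.
	 */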
	return ret;
}

static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t thread = current_thread();
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
	}

	mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_ALL);

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

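/*
 * Redirects the saved exception PC to a copyio/fault recovery handler. With
 * pointer authentication, the signed `recover` pointer is authenticated (and,
 * on pre-ARMv8.6 hardware, checked against its stripped form so a corrupted
 * value panics rather than being re-signed) before being written into the
 * signed thread state.
 */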
static void
set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
	thread_t thread = current_thread();
	const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
	const char *panic_msg = "Illegal thread->recover value %p";

	MANIPULATE_SIGNED_THREAD_STATE(iss,
	    // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
	    //     ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
	    "mov	x1, %[recover]		\n"
	    "mov	x6, %[disc]		\n"
	    "autia	x1, x6			\n"
#if !__ARM_ARCH_8_6__
	    // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
	    "mov	x6, x1			\n"
	    "xpaci	x6			\n"
	    "cmp	x1, x6			\n"
	    "beq	1f			\n"
	    //     panic("Illegal thread->recover value %p", (void *)recover);
	    "mov	x0, %[panic_msg]	\n"
	    "bl	_panic			\n"
	    // }
	    "1:				\n"
#endif /* !__ARM_ARCH_8_6__ */
	    "str	x1, [x0, %[SS64_PC]]	\n",
	    [recover] "r"(recover),
	    [disc] "r"(disc),
	    [panic_msg] "r"(panic_msg)
	    );
#else
	set_saved_state_pc(iss, recover);
#endif
}

static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
{
	exception_type_t exc = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	thread_t thread = current_thread();

	(void)esr;
	(void)expected_fault_handler;

	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
	}

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

	if (is_vm_fault(fault_code)) {
		vm_map_t map = thread->map;
		vm_offset_t vm_fault_addr = fault_addr;
		kern_return_t result = KERN_FAILURE;

		assert(map != kernel_map);

		if (!(fault_type & VM_PROT_EXECUTE)) {
			vm_fault_addr = tbi_clear(fault_addr);
		}

#if CONFIG_DTRACE
		if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					thread->machine.recover_esr = esr;
					thread->machine.recover_far = vm_fault_addr;
					set_saved_state_pc_to_recovery_handler(state, recover);
				} else {
					panic_with_thread_kernel_state("copyin/out has no recovery point", state);
				}
				return;
			} else {
				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
			}
		}
#else
		(void)recover;
#endif

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap,
			    vm_fault_addr,
			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
		}
		if (result == KERN_SUCCESS) {
			return;
		}

		{
			/* We have to fault the page in */
			result = vm_fault(map, vm_fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			return;
		}

		/*
		 * vm_fault() should never return KERN_FAILURE for page faults from user space.
		 * If it does, we're leaking preemption disables somewhere in the kernel.
		 */
		if (__improbable(result == KERN_FAILURE)) {
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		codes[0] = result;
	} else if (is_alignment_fault(fault_code)) {
		kern_return_t vmfkr = KERN_SUCCESS;
		thread->machine.recover_esr = 0;
		thread->machine.recover_far = 0;
		int result = handle_alignment_fault_from_user(state, &vmfkr);
		if (result == 0) {
			/* Successfully emulated, or instruction
			 * copyin() for decode/emulation failed.
			 * Continue, or redrive instruction.
			 */
			thread_exception_return();
		} else if (((result == EFAULT) || (result == EINVAL)) &&
		    (thread->machine.recover_esr == 0)) {
			/*
			 * If we didn't actually take a fault, but got one of
			 * these errors, then we failed basic sanity checks of
			 * the fault address. Treat this as an invalid
			 * address.
			 */
			codes[0] = KERN_INVALID_ADDRESS;
		} else if ((result == EFAULT) &&
		    (thread->machine.recover_esr)) {
			/*
			 * Since alignment aborts are prioritized
			 * ahead of translation aborts, the misaligned
			 * atomic emulation flow may have triggered a
			 * VM pagefault, which the VM could not resolve.
			 * Report the VM fault error in codes[]
			 */

			codes[0] = vmfkr;
			assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
			/* Cause ESR_EC to reflect an EL0 abort */
			thread->machine.recover_esr &= ~ESR_EC_MASK;
			thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
			set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
			set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
			fault_addr = thread->machine.recover_far;
		} else {
			/* This was just an unsupported alignment
			 * exception. Misaligned atomic emulation
			 * timeouts fall in this category.
			 */
			codes[0] = EXC_ARM_DA_ALIGN;
		}
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic("User parity error.");
#endif
	} else {
		codes[0] = KERN_FAILURE;
	}

	codes[1] = fault_addr;
#if __has_feature(ptrauth_calls)
	bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
	if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */
	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	// A PAN (Privileged Access Never) fault occurs for a data read/write at EL1
	// to a virtual address that is readable/writeable from both EL1 and EL0.

	// To check for a PAN fault, we evaluate if the following conditions are true:
	// 1. This is a permission fault
	// 2. PAN is enabled
	// 3. An AT instruction (on which PAN has no effect) on the same faulting
	//    address succeeds

	vm_offset_t pa;

	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
		return FALSE;
	}

	if (esr & ISS_DA_WNR) {
		pa = mmu_kvtop_wpreflight(fault_addr);
	} else {
		pa = mmu_kvtop(fault_addr);
	}
	return pa ? TRUE : FALSE;
}
#endif

static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
{
	thread_t thread = current_thread();
	(void)esr;

#ifndef CONFIG_XNUPOST
	(void)expected_fault_handler;
#endif /* CONFIG_XNUPOST */

#if CONFIG_DTRACE
	if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
			/*
			 * Point to next instruction, or recovery handler if set.
			 */
			if (recover) {
				thread->machine.recover_esr = esr;
				thread->machine.recover_far = fault_addr;
				set_saved_state_pc_to_recovery_handler(state, recover);
			} else {
				add_saved_state_pc(state, 4);
			}
			return;
		} else {
			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
		}
	}
#endif

	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
	}

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t map;
		int interruptible;

		/*
		 * Ensure no faults in the physical aperture. This could happen if
		 * a page table is incorrectly allocated from the read only region
		 * when running with KTRR.
		 */

#ifdef CONFIG_XNUPOST
		if (expected_fault_handler && expected_fault_handler(state)) {
			return;
		}
#endif /* CONFIG_XNUPOST */

		if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
		}

		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;

			/**
			 * In the case that the recovery handler is set (e.g., during copyio
			 * and dtrace probes), we don't want the vm_fault() operation to be
			 * aborted early. Those code paths can't handle restarting the
			 * vm_fault() operation so don't allow it to return early without
			 * creating the wanted mapping.
			 */
			interruptible = (recover) ? THREAD_UNINT : THREAD_ABORTSAFE;
		}

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap,
			    fault_addr,
			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
			if (result == KERN_SUCCESS) {
				return;
			}
		}

		if (result != KERN_PROTECTION_FAILURE) {
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) {
			return;
		}

		/*
		 * If we have a recover handler, invoke it now.
		 */
		if (recover) {
			thread->machine.recover_esr = esr;
			thread->machine.recover_far = fault_addr;
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
			panic_with_thread_kernel_state("Privileged access never abort.", state);
		}
#endif
	} else if (is_alignment_fault(fault_code)) {
		if (recover) {
			thread->machine.recover_esr = esr;
			thread->machine.recover_far = fault_addr;
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

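/*
 * Dispatches an SVC from userspace: negative trap numbers are Mach traps
 * (with a couple of fast traps handled inline below), and non-negative
 * numbers are BSD syscalls.
 */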
static void
handle_svc(arm_saved_state_t *state)
{
	int trap_no = get_saved_state_svc_number(state);
	thread_t thread = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?");
	}

	mach_kauth_cred_thread_update();

	if (trap_no < 0) {
		switch (trap_no) {
		case MACH_ARM_TRAP_ABSTIME:
			handle_mach_absolute_time_trap(state);
			return;
		case MACH_ARM_TRAP_CONTTIME:
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		assert(p);

		unix_syscall(state, thread, p);
	}
}

static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

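/*
 * MACH_ARM_TRAP_CONTTIME: as above, but returns mach_continuous_time(),
 * which (unlike mach_absolute_time()) continues to advance across sleep.
 */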
static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}

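/*
 * An MSR/MRS instruction trapped to EL1. This is only expected from
 * 64-bit user mode; the faulting opcode is copied in and attached to
 * the EXC_BAD_INSTRUCTION exception as the second code.
 */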
__attribute__((noreturn))
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (ESR 0x%x) from 32-bit state", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (ESR 0x%x) from kernel", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#ifdef __ARM_ARCH_8_6__
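/*
 * Decode the destination register of an AUT* instruction into a short
 * printable name ("fp", "lr", "xzr", or "xN") for the panic message
 * constructed in handle_pac_fail() below.
 */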
static void
autxx_instruction_extract_reg(uint32_t instr, char reg[4])
{
	unsigned int rd = ARM64_INSTR_AUTxx_RD_GET(instr);
	switch (rd) {
	case 29:
		strncpy(reg, "fp", 4);
		return;

	case 30:
		strncpy(reg, "lr", 4);
		return;

	case 31:
		strncpy(reg, "xzr", 4);
		return;

	default:
		snprintf(reg, 4, "x%u", rd);
		return;
	}
}

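/*
 * The AUTIA1716/AUTIB1716 system forms authenticate x17 (using x16 as
 * the modifier); the remaining system-encoded AUTI* forms operate on lr.
 */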
static const char *
autix_system_instruction_extract_reg(uint32_t instr)
{
	unsigned int crm_op2 = ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_GET(instr);
	if (crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIA1716 ||
	    crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIB1716) {
		return "x17";
	} else {
		return "lr";
	}
}

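/*
 * Pointer authentication failure: on cores where a failed AUT* traps,
 * a user-mode failure is delivered as EXC_BAD_ACCESS with
 * EXC_PTRAUTH_BIT set, while a kernel-mode failure panics with a
 * message naming the key (and, where decodable, the register) that
 * failed to authenticate.
 */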
static void
handle_pac_fail(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_ACCESS | EXC_PTRAUTH_BIT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_PAC_FAIL};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (!is_saved_state64(state)) {
		panic("PAC failure (ESR 0x%x) from 32-bit state", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
#define GENERIC_PAC_FAILURE_MSG_FMT "PAC failure from kernel with %s key"
#define AUTXX_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT " while authing %s"
#define GENERIC_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT

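		/*
		 * Size msg[] for the worst-case expansion of the format:
		 * each "%s" is removed and replaced by its longest possible
		 * argument (a two-character key name such as "IA", and "xzr"
		 * for the register), plus one byte for the terminating NUL.
		 * E.g. "PAC failure from kernel with IA key while authing xzr"
		 * is 54 characters, so the buffer works out to 55 bytes.
		 */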
		char msg[strlen(AUTXX_MSG_FMT)
		- strlen("%s") + strlen("IA")
		- strlen("%s") + strlen("xzr")
		+ 1];
		ptrauth_key key = (ptrauth_key)(esr & 0x3);
		const char *key_str = ptrauth_key_to_string(key);

		if (ARM64_INSTR_IS_AUTxx(instr)) {
			char reg[4];
			autxx_instruction_extract_reg(instr, reg);
			snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
		} else if (ARM64_INSTR_IS_AUTIx_SYSTEM(instr)) {
			const char *reg = autix_system_instruction_extract_reg(instr);
			snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
		} else {
			snprintf(msg, sizeof(msg), GENERIC_MSG_FMT, key_str);
		}
		panic_with_thread_kernel_state(msg, state);
	}

	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
#endif /* __ARM_ARCH_8_6__ */

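/*
 * An instruction executed by a 32-bit (AArch32) user process trapped
 * to the kernel; deliver EXC_BAD_INSTRUCTION with the faulting opcode
 * attached rather than emulating it.
 */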
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

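/*
 * A SIMD/FP access from user space trapped; it is reported to user
 * space as an undefined instruction with the faulting opcode attached.
 */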
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

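/*
 * Top-level IRQ handler: wraps the platform interrupt dispatch in the
 * tracing/telemetry prologue and epilogue, feeds the interrupt into the
 * entropy pool, and (on MACH_ASSERT kernels) verifies that the handler
 * left the preemption level unchanged.
 */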
void
sleh_irq(arm_saved_state_t *state)
{
	cpu_data_t * cdp __unused = getCpuDatap();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

#if USE_APPLEARMSMP
	PE_handle_ext_interrupt();
#else
	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);
#endif

	entropy_collect();

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, sleh_get_preemption_level());
	}
#endif
}

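/*
 * Top-level FIQ handler. FIQs multiplex several sources, so the source
 * is classified first (fast IPI, then pending timer, then PMI) and the
 * matching handler dispatched; anything unclassified is treated as a
 * timer expiration.
 */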
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
	boolean_t is_ipi = FALSE;
	uint64_t ipi_sr = 0;

	if (gFastIPI) {
		MRS(ipi_sr, "S3_5_C15_C1_1");

		if (ipi_sr & 1) {
			is_ipi = TRUE;
		}
	}

	if (is_ipi) {
		type = DBG_INTR_TYPE_IPI;
	} else
#endif /* defined(HAS_IPI) */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}
#if MONOTONIC_FIQ
	/*
	 * Consult the PMI sysregs last, after IPI/timer classification.
	 */
	else if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	}
#endif /* MONOTONIC_FIQ */

	sleh_interrupt_handler_prologue(state, type);

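	/*
	 * On the virtual platform FIQs arrive via the GIC: reading
	 * ICC_IAR0_EL1 acknowledges the highest-priority pending interrupt,
	 * and the matching ICC_EOIR0_EL1 write below signals end-of-interrupt
	 * once it has been handled.
	 */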
#if APPLEVIRTUALPLATFORM
	uint64_t iar = __builtin_arm_rsr64("ICC_IAR0_EL1");
#endif

#if defined(HAS_IPI)
	if (is_ipi) {
		/*
		 * Order is important here: we must ack the IPI by writing IPI_SR
		 * before we call cpu_signal_handler(). Otherwise, there will be
		 * a window between the completion of pending-signal processing in
		 * cpu_signal_handler() and the ack during which a newly-issued
		 * IPI to this CPU may be lost. ISB is required to ensure the msr
		 * is retired before execution of cpu_signal_handler().
		 */
		MSR("S3_5_C15_C1_1", ipi_sr);
		__builtin_arm_isb(ISB_SY);
		cpu_signal_handler();
	} else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
		INTERRUPT_MASKED_DEBUG_END();
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
		rtclock_intr(TRUE);
		INTERRUPT_MASKED_DEBUG_END();
	}

#if APPLEVIRTUALPLATFORM
	if (iar != GIC_SPURIOUS_IRQ) {
		__builtin_arm_wsr64("ICC_EOIR0_EL1", iar);
		__builtin_arm_isb(ISB_SY);
	}
#endif

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, sleh_get_preemption_level());
	}
#endif
}

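/*
 * SError (asynchronous external abort) entry point: classification and
 * any possible recovery are delegated to arm64_platform_error(), with
 * kdebug markers and a preemption-level check bracketing the call.
 */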
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	task_vtimer_check(current_thread());

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
	    esr, VM_KERNEL_ADDRHIDE(far));
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
	    esr, VM_KERNEL_ADDRHIDE(far));
}

void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
	    DBG_FUNC_END, retval, 0, 0, 0, 0);
}

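/*
 * Finish a Mach syscall from kernel code: deposit the return value in
 * the saved user x0, emit the syscall-exit tracepoint, and return to
 * user space via thread_exception_return() (which does not return).
 */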
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}

void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}

static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	boolean_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	task_vtimer_check(current_thread());

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread(is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}

static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

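/*
 * Taken when an exception finds SP_EL1 outside the thread's kernel
 * stack. If SP lies within one page below the stack's bottom we most
 * likely ran off the end of the stack (a guard-page hit); anything
 * else is treated as outright corruption of the stack pointer.
 */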
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}