/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29 #include <arm/caches_internal.h>
30 #include <arm/cpu_data.h>
31 #include <arm/cpu_data_internal.h>
32 #include <arm/misc_protos.h>
33 #include <arm/thread.h>
34 #include <arm/rtclock.h>
35 #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
36 #include <arm64/proc_reg.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/monotonic.h>
39 #include <arm64/instructions.h>
40
41 #include <kern/debug.h>
42 #include <kern/restartable.h>
43 #include <kern/socd_client.h>
44 #include <kern/thread.h>
45 #include <kern/zalloc_internal.h>
46 #include <mach/exception.h>
47 #include <mach/arm/traps.h>
48 #include <mach/vm_types.h>
49 #include <mach/machine/thread_status.h>
50
51 #include <machine/atomic.h>
52 #include <machine/limits.h>
53
54 #include <pexpert/arm/protos.h>
55 #include <pexpert/arm64/apple_arm64_regs.h>
56 #include <pexpert/arm64/board_config.h>
57
58 #include <vm/vm_page.h>
59 #include <vm/pmap.h>
60 #include <vm/vm_fault.h>
61 #include <vm/vm_kern.h>
62
63 #include <sys/errno.h>
64 #include <sys/kdebug.h>
65 #include <kperf/kperf.h>
66
67 #include <kern/policy_internal.h>
68 #if CONFIG_TELEMETRY
69 #include <kern/telemetry.h>
70 #endif
71
72 #include <prng/entropy.h>
73
74
75
76
77 #include <arm64/platform_error_handler.h>
78
79 #if KASAN_TBI
80 #include <san/kasan.h>
81 #endif /* KASAN_TBI */
82
83 #if CONFIG_UBSAN_MINIMAL
84 #include <san/ubsan_minimal.h>
85 #endif /* CONFIG_UBSAN_MINIMAL */
86
87
88 #ifndef __arm64__
89 #error Should only be compiling for arm64.
90 #endif
91
92 #define TEST_CONTEXT32_SANITY(context) \
93 (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
94 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)
95
96 #define TEST_CONTEXT64_SANITY(context) \
97 (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
98 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)
99
100 #define ASSERT_CONTEXT_SANITY(context) \
101 assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
102
103
104 #define COPYIN(src, dst, size) \
105 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
106 copyin_kern(src, dst, size) : \
107 copyin(src, dst, size)
108
109 #define COPYOUT(src, dst, size) \
110 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
111 copyout_kern(src, dst, size) : \
112 copyout(src, dst, size)
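
/*
 * Note: COPYIN/COPYOUT implicitly use the local "state" saved-state pointer.
 * When the faulting context was EL1 (kernel), the copy is routed through the
 * copyin_kern()/copyout_kern() variants, which operate on kernel addresses
 * rather than the current task's user map.
 */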

// Stringify a macro parameter so it can be spliced into a string literal.
#define STR1(x) #x
#define STR(x) STR1(x)
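
/*
 * Illustrative example: STR() expands its argument before stringifying, so
 * with a hypothetical "#define PAGE_SHIFT_CONST 14", STR(PAGE_SHIFT_CONST)
 * yields "14", whereas STR1(PAGE_SHIFT_CONST) would yield "PAGE_SHIFT_CONST".
 */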

#define ARM64_KDBG_CODE_KERNEL (0 << 8)
#define ARM64_KDBG_CODE_USER   (1 << 8)
#define ARM64_KDBG_CODE_GUEST  (2 << 8)

_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);



void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);
#if __has_feature(ptrauth_calls)
static void handle_pac_fail(arm_saved_state_t *state, uint32_t esr) __dead2;
#endif

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);

/*
 * For UBSan trap and continue handling, we must be able to recover
 * from handle_kernel_breakpoint().
 */
#if !CONFIG_UBSAN_MINIMAL
__dead2
#endif /* CONFIG_UBSAN_MINIMAL */
static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t);

static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_thread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct proc;

typedef uint32_t arm64_instr_t;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so
 * copy and paste the trap instructions over from that file. Need to keep
 * these in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif



extern void arm64_thread_exception_return(void) __dead2;

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif

#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

static arm_saved_state64_t *original_faulting_state = NULL;

TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);

extern vm_offset_t static_memory_end;

/*
 * Fault recovery entries for the copyin/copyout routines.
 *
 * Offsets are expressed in bytes from &copyio_recover_table.
 */
struct copyio_recovery_entry {
    ptrdiff_t cre_start;
    ptrdiff_t cre_end;
    ptrdiff_t cre_recovery;
};

extern struct copyio_recovery_entry copyio_recover_table[];
extern struct copyio_recovery_entry copyio_recover_table_end[];
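
/*
 * Storing table-relative offsets rather than absolute addresses plausibly
 * keeps the entries independent of the kernel slide (an inference; the
 * sources don't state the motivation here).
 */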

static inline ptrdiff_t
copyio_recovery_offset(uintptr_t addr)
{
    return (ptrdiff_t)(addr - (uintptr_t)copyio_recover_table);
}

static inline uintptr_t
copyio_recovery_addr(ptrdiff_t offset)
{
    return (uintptr_t)copyio_recover_table + (uintptr_t)offset;
}

static inline struct copyio_recovery_entry *
find_copyio_recovery_entry(arm_saved_state_t *state)
{
    ptrdiff_t offset = copyio_recovery_offset(get_saved_state_pc(state));
    struct copyio_recovery_entry *e;

    for (e = copyio_recover_table; e < copyio_recover_table_end; e++) {
        if (offset >= e->cre_start && offset < e->cre_end) {
            return e;
        }
    }

    return NULL;
}

static inline uintptr_t
copyio_recovery_get_recover_addr(
    arm_saved_state_t *state)
{
    struct copyio_recovery_entry *e = find_copyio_recovery_entry(state);
    if (e == NULL) {
        panic("copyio recovery: couldn't find a range for %p",
            (void *)get_saved_state_pc(state));
    }
    return copyio_recovery_addr(e->cre_recovery);
}
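
/*
 * Lookup contract, as implemented above: a fault at copyio_recover_table +
 * off is covered when cre_start <= off < cre_end, and execution resumes at
 * copyio_recover_table + cre_recovery, i.e. the recovery stub for that copy
 * routine.
 */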

static inline int
is_vm_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
    case FSC_ACCESS_FLAG_FAULT_L1:
    case FSC_ACCESS_FLAG_FAULT_L2:
    case FSC_ACCESS_FLAG_FAULT_L3:
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static inline int
is_translation_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static inline int
is_permission_fault(fault_status_t status)
{
    switch (status) {
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static inline int
is_alignment_fault(fault_status_t status)
{
    return status == FSC_ALIGNMENT_FAULT;
}

static inline int
is_parity_error(fault_status_t status)
{
    switch (status) {
    /*
     * TODO: According to the ARM ARM, Async Parity (0b011001) is a DFSC that
     * is only applicable to the AArch32 HSR register. Can this be removed?
     */
    case FSC_ASYNC_PARITY:
    case FSC_SYNC_PARITY:
    case FSC_SYNC_PARITY_TT_L1:
    case FSC_SYNC_PARITY_TT_L2:
    case FSC_SYNC_PARITY_TT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}


__dead2 __unused
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#pragma unused (state, esr, far)
    panic_plain("Unhandled implementation specific error\n");
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
    if (ESR_WT_SERROR(esr)) {
        switch (ESR_WT_REASON(esr)) {
        case WT_REASON_INTEGRITY_FAIL:
            panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
        case WT_REASON_BAD_SYSCALL:
            panic_plain("Kernel integrity, bad syscall.");
        case WT_REASON_NOT_LOCKED:
            panic_plain("Kernel integrity, not locked.");
        case WT_REASON_ALREADY_LOCKED:
            panic_plain("Kernel integrity, already locked.");
        case WT_REASON_SW_REQ:
            panic_plain("Kernel integrity, software request.");
        case WT_REASON_PT_INVALID:
            panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
                "walking 0x%016lx.", far);
        case WT_REASON_PT_VIOLATION:
            panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
                far);
        case WT_REASON_REG_VIOLATION:
            panic_plain("Kernel integrity, violation in system register %d.",
                (unsigned) far);
        default:
            panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
        }
    }
#else
    if (ESR_WT_SERROR(esr)) {
        panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
    }
#endif
#endif
}
#pragma clang diagnostic pop
#endif

static void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far, platform_error_source_t source)
{
#if CONFIG_KERNEL_INTEGRITY
    kernel_integrity_error_handler(esr, far);
#endif

    (void)source;
    cpu_data_t *cdp = getCpuDatap();

    if (PE_handle_platform_error(far)) {
        return;
    } else if (cdp->platform_error_handler != NULL) {
        cdp->platform_error_handler(cdp->cpu_id, far);
    } else {
        arm64_implementation_specific_error(state, esr, far);
    }
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
    boolean_t ss_valid;

    ss_valid = is_saved_state64(ss);
    arm_saved_state64_t *state = saved_state64(ss);

    os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);
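    /*
     * The compare-exchange above records only the first faulting state; for
     * a nested panic it simply fails and the original capture is kept.
     */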

    // rdar://80659177
    // Record the SoCD panic tracepoint at most twice: once the first time we
    // call panic, and once more if we encounter a nested panic after that.
    static int twice = 2;
    if (twice > 0) {
        twice--;
        SOCD_TRACE_XNU(KERNEL_STATE_PANIC, ADDR(state->pc),
            PACK_LSB(VALUE(state->lr), VALUE(ss_valid)),
            PACK_2X32(VALUE(state->esr), VALUE(state->cpsr)),
            VALUE(state->far));
    }

    panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
        "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
        "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
        "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
        "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
        "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
        "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
        "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
        "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
        "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
        msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
        state->x[0], state->x[1], state->x[2], state->x[3],
        state->x[4], state->x[5], state->x[6], state->x[7],
        state->x[8], state->x[9], state->x[10], state->x[11],
        state->x[12], state->x[13], state->x[14], state->x[15],
        state->x[16], state->x[17], state->x[18], state->x[19],
        state->x[20], state->x[21], state->x[22], state->x[23],
        state->x[24], state->x[25], state->x[26], state->x[27],
        state->x[28], state->fp, state->lr, state->sp,
        state->pc, state->cpsr, state->esr, state->far);
}

void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
    esr_exception_class_t class = ESR_EC(esr);
    arm_saved_state_t *state = &context->ss;

    switch (class) {
    case ESR_EC_UNCATEGORIZED:
    {
        uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
        if (IS_ARM_GDB_TRAP(instr)) {
            DebuggerCall(EXC_BREAKPOINT, state);
        }
    }
        OS_FALLTHROUGH; // panic if we return from the debugger
    default:
        panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
    }
}


__attribute__((noreturn))
void
thread_exception_return()
{
    thread_t thread = current_thread();
    if (thread->machine.exception_trace_code != 0) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
        thread->machine.exception_trace_code = 0;
    }

#if KASAN_TBI
    kasan_unpoison_curstack(true);
#endif /* KASAN_TBI */
    arm64_thread_exception_return();
    __builtin_unreachable();
}

/*
 * check whether task vtimers are running and set thread and CPU BSD AST
 *
 * must be called with interrupts masked so updates of fields are atomic
 * must be emitted inline to avoid generating an FBT probe on the exception path
 */
__attribute__((__always_inline__))
static inline void
task_vtimer_check(thread_t thread)
{
    task_t task = get_threadtask_early(thread);

    if (__improbable(task != NULL && task->vtimers)) {
        thread_ast_set(thread, AST_BSD);
        thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
    }
}

#if MACH_ASSERT
/**
 * A version of get_preemption_level() that works in early boot.
 *
 * If an exception is raised in early boot before the initial thread has been
 * set up, then calling get_preemption_level() in the SLEH will trigger an
 * infinitely-recursing exception. This function handles this edge case.
 */
static inline int
sleh_get_preemption_level(void)
{
    if (__improbable(current_thread() == NULL)) {
        return 0;
    }
    return get_preemption_level();
}
#endif // MACH_ASSERT

static inline bool
is_platform_error(uint32_t esr)
{
    esr_exception_class_t class = ESR_EC(esr);
    uint32_t iss = ESR_ISS(esr);
    fault_status_t fault_code;

    if (class == ESR_EC_DABORT_EL0 || class == ESR_EC_DABORT_EL1) {
        fault_code = ISS_DA_FSC(iss);
    } else if (class == ESR_EC_IABORT_EL0 || class == ESR_EC_IABORT_EL1) {
        fault_code = ISS_IA_FSC(iss);
    } else {
        return false;
    }

    return fault_code == FSC_SYNC_PARITY;
}

void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
    esr_exception_class_t class = ESR_EC(esr);
    arm_saved_state_t *state = &context->ss;
    thread_t thread = current_thread();
#if MACH_ASSERT
    int preemption_level = sleh_get_preemption_level();
#endif
    expected_fault_handler_t expected_fault_handler = NULL;
#ifdef CONFIG_XNUPOST
    expected_fault_handler_t saved_expected_fault_handler = NULL;
    uintptr_t saved_expected_fault_addr = 0;
#endif /* CONFIG_XNUPOST */

    ASSERT_CONTEXT_SANITY(context);

    task_vtimer_check(thread);

#if CONFIG_DTRACE
    /*
     * Handle kernel DTrace probes as early as possible to minimize the likelihood
     * that this path will itself trigger a DTrace probe, which would lead to infinite
     * probe recursion.
     */
    if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
        (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
        return;
    }
#endif
    bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

    /*
     * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
     * that would disclose the behavior of PT_DENY_ATTACH processes.
     */
    if (is_user) {
        thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
            esr, far, get_saved_state_pc(state), 0, 0);
    } else {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
            esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
    }

    if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
        /*
         * We no longer support 32-bit, which means no 2-byte
         * instructions.
         */
        if (is_user) {
            panic("Exception on 2-byte instruction, "
                "context=%p, esr=%#x, far=%p",
                context, esr, (void *)far);
        } else {
            panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
        }
    }

#ifdef CONFIG_XNUPOST
    if (thread->machine.expected_fault_handler != NULL) {
        saved_expected_fault_handler = thread->machine.expected_fault_handler;
        saved_expected_fault_addr = thread->machine.expected_fault_addr;

        thread->machine.expected_fault_handler = NULL;
        thread->machine.expected_fault_addr = 0;

        if (saved_expected_fault_addr == far) {
            expected_fault_handler = saved_expected_fault_handler;
        }
    }
#endif /* CONFIG_XNUPOST */

    if (__improbable(is_platform_error(esr))) {
        /*
         * Must gather error info in platform error handler before
         * thread is preempted to another core/cluster to guarantee
         * accurate error details
         */

        arm64_platform_error(state, esr, far, PLAT_ERR_SRC_SYNC);
        return;
    }

    if (is_user && class == ESR_EC_DABORT_EL0) {
        thread_reset_pcs_will_fault(thread);
    }

    /* Inherit the interrupt masks from previous context */
    if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
        ml_set_interrupts_enabled(TRUE);
    }

    switch (class) {
    case ESR_EC_SVC_64:
        if (!is_saved_state64(state) || !is_user) {
            panic("Invalid SVC_64 context");
        }

        handle_svc(state);
        break;

    case ESR_EC_DABORT_EL0:
        handle_abort(state, esr, far, inspect_data_abort, handle_user_abort, expected_fault_handler);
        break;

    case ESR_EC_MSR_TRAP:
        handle_msr_trap(state, esr);
        break;
    /**
     * Some APPLEVIRTUALPLATFORM targets do not specify armv8.6, but it's
     * still possible for them to be hosted by a host that implements
     * ARM_FPAC. There's no way for such a host to disable FPAC or trap it
     * without a substantial performance penalty. Therefore, the FPAC handler
     * here needs to be built into the guest kernels to keep the exception
     * from falling through unhandled.
     */
#if __has_feature(ptrauth_calls)
    case ESR_EC_PAC_FAIL:
        handle_pac_fail(state, esr);
        __builtin_unreachable();

#endif /* __has_feature(ptrauth_calls) */

    case ESR_EC_IABORT_EL0:
        handle_abort(state, esr, far, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
        break;

    case ESR_EC_IABORT_EL1:
#ifdef CONFIG_XNUPOST
        if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
            break;
        }
#endif /* CONFIG_XNUPOST */

        panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

    case ESR_EC_PC_ALIGN:
        handle_pc_align(state);
        __builtin_unreachable();

    case ESR_EC_DABORT_EL1:
        handle_abort(state, esr, far, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
        break;

    case ESR_EC_UNCATEGORIZED:
        assert(!ESR_ISS(esr));

        handle_uncategorized(&context->ss);
        break;

    case ESR_EC_SP_ALIGN:
        handle_sp_align(state);
        __builtin_unreachable();

    case ESR_EC_BKPT_AARCH32:
        handle_breakpoint(state, esr);
        __builtin_unreachable();

    case ESR_EC_BRK_AARCH64:
        if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
            handle_kernel_breakpoint(state, esr);
#if CONFIG_UBSAN_MINIMAL
            /* UBSan breakpoints are recoverable */
            break;
#endif /* CONFIG_UBSAN_MINIMAL */
        } else {
            handle_breakpoint(state, esr);
            __builtin_unreachable();
        }

    case ESR_EC_BKPT_REG_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_breakpoint(state, esr);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL1:
        panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_sw_step_debug(state);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL1:
        panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_watchpoint(far);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL1:
        /*
         * If we hit a watchpoint in kernel mode, it was probably in a
         * copyin/copyout path that we don't want to abort. Turn off
         * watchpoints and keep going; we'll turn them back on in
         * return_from_exception.
         */
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            arm_debug_set(NULL);
            break; /* return to first level handler */
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_TRAP_SIMD_FP:
        handle_simd_trap(state, esr);
        __builtin_unreachable();

    case ESR_EC_ILLEGAL_INSTR_SET:
        if (EXCB_ACTION_RERUN !=
            ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
            // instruction is not re-executed
            panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
                state, class, esr, (void *)far, get_saved_state_cpsr(state));
        }
        // must clear this fault in PSR to re-run
        mask_saved_state_cpsr(state, 0, PSR64_IL);
        break;

    case ESR_EC_MCR_MRC_CP15_TRAP:
    case ESR_EC_MCRR_MRRC_CP15_TRAP:
    case ESR_EC_MCR_MRC_CP14_TRAP:
    case ESR_EC_LDC_STC_CP14_TRAP:
    case ESR_EC_MCRR_MRRC_CP14_TRAP:
        handle_user_trapped_instruction32(state, esr);
        __builtin_unreachable();

    case ESR_EC_WFI_WFE:
        // Use of WFI or WFE instruction when they have been disabled for EL0
        handle_wf_trap(state);
        __builtin_unreachable();

    case ESR_EC_FLOATING_POINT_64:
        handle_fp_trap(state, esr);
        __builtin_unreachable();

    default:
        handle_uncategorized(state);
    }

#ifdef CONFIG_XNUPOST
    if (saved_expected_fault_handler != NULL) {
        thread->machine.expected_fault_handler = saved_expected_fault_handler;
        thread->machine.expected_fault_addr = saved_expected_fault_addr;
    }
#endif /* CONFIG_XNUPOST */

    if (is_user) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
            esr, far, get_saved_state_pc(state), 0, 0);
        thread->machine.exception_trace_code = 0;
    } else {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
            esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
    }
#if MACH_ASSERT
    if (preemption_level != sleh_get_preemption_level()) {
        panic("synchronous exception changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
    }
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE

    if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
        /*
         * For a 64-bit user process, we care about all 4 bytes of the
         * instr.
         */
        if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
            if (dtrace_user_probe(state) == KERN_SUCCESS) {
                return;
            }
        }
    } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
        /*
         * For a 32-bit user process, we check for thumb mode, in
         * which case we only care about a 2-byte instruction length.
         * For non-thumb mode, we care about all 4 bytes of the
         * instruction.
         */
        if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
            if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
                ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        } else {
            if ((instr == FASTTRAP_ARM32_INSTR) ||
                (instr == FASTTRAP_ARM32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        }
    }

#endif /* CONFIG_DTRACE */

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        if (IS_ARM_GDB_TRAP(instr)) {
            boolean_t interrupt_state;
            exception = EXC_BREAKPOINT;

            interrupt_state = ml_set_interrupts_enabled(FALSE);

            /* Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            current_thread()->machine.kpcb = state;

            /* Hop into the debugger (typically either due to a
             * fatal exception, an explicit panic, or a stackshot
             * request).
             */
            DebuggerCall(exception, state);

            current_thread()->machine.kpcb = NULL;
            (void) ml_set_interrupts_enabled(interrupt_state);
            return;
        } else {
            panic("Undefined kernel instruction: pc=%p instr=%x", (void*)get_saved_state_pc(state), instr);
        }
    }

    /*
     * Check for GDB breakpoint via illegal opcode.
     */
    if (IS_ARM_GDB_TRAP(instr)) {
        exception = EXC_BREAKPOINT;
        codes[0] = EXC_ARM_BREAKPOINT;
        codes[1] = instr;
    } else {
        codes[1] = instr;
    }

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
static const uint16_t ptrauth_brk_comment_base = 0xc470;

static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
    return comment >= ptrauth_brk_comment_base &&
        comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
}
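
/*
 * 0xc470 + key matches the BRK immediates the compiler emits for failed
 * pointer-authentication checks (an LLVM convention, one immediate per
 * ptrauth key from IA through DB), which is what the range check above
 * relies on.
 */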

static inline const char *
ptrauth_key_to_string(ptrauth_key key)
{
    switch (key) {
    case ptrauth_key_asia:
        return "IA";
    case ptrauth_key_asib:
        return "IB";
    case ptrauth_key_asda:
        return "DA";
    case ptrauth_key_asdb:
        return "DB";
    default:
        __builtin_unreachable();
    }
}
#endif /* __has_feature(ptrauth_calls) */

#if KASAN_TBI
static inline bool
brk_comment_is_kasan_failure(uint16_t comment)
{
    return comment >= KASAN_TBI_ESR_BASE &&
        comment <= KASAN_TBI_ESR_TOP;
}
#endif /* KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
static inline bool
brk_comment_is_ubsan(uint16_t comment)
{
    return comment >= UBSAN_MINIMAL_TRAPS_START &&
        comment < UBSAN_MINIMAL_TRAPS_END;
}
#endif /* CONFIG_UBSAN_MINIMAL */

static void
handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
{
    uint16_t comment = ISS_BRK_COMMENT(esr);

#if __has_feature(ptrauth_calls)
    if (brk_comment_is_ptrauth(comment)) {
#define MSG_FMT "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx"
        char msg[strlen(MSG_FMT)
            - strlen("0x%04X") + strlen("0xFFFF")
            - strlen("%s") + strlen("IA")
            - strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
            + 1];
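        /*
         * The array size above is computed at compile time: each format
         * specifier in MSG_FMT is swapped for the width of its widest
         * possible expansion (e.g. "0xFFFF" for "0x%04X"), plus one byte
         * for the terminating NUL.
         */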
        ptrauth_key key = (ptrauth_key)(comment - ptrauth_brk_comment_base);
        const char *key_str = ptrauth_key_to_string(key);
        snprintf(msg, sizeof(msg), MSG_FMT, comment, key_str, saved_state64(state)->x[16]);

        panic_with_thread_kernel_state(msg, state);
        __builtin_unreachable();
#undef MSG_FMT
    }
#endif /* __has_feature(ptrauth_calls) */

#if KASAN_TBI
    if (brk_comment_is_kasan_failure(comment)) {
        kasan_handle_brk_failure(saved_state64(state)->x[0], comment);
        __builtin_unreachable();
    }
#endif /* KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
    if (brk_comment_is_ubsan(comment)) {
        ubsan_handle_brk_trap(comment, get_saved_state_pc(state),
            get_saved_state_fp(state));
        add_saved_state_pc(state, 4);
        return;
    }
#endif /* CONFIG_UBSAN_MINIMAL */

#define MSG_FMT "Break 0x%04X instruction exception from kernel. Panic (by design)"
    char msg[strlen(MSG_FMT) - strlen("0x%04X") + strlen("0xFFFF") + 1];
    snprintf(msg, sizeof(msg), MSG_FMT, comment);
#undef MSG_FMT

    panic_with_thread_kernel_state(msg, state);
    __builtin_unreachable();
}

static void
handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
{
    exception_type_t exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
    mach_msg_type_number_t numcodes = 2;

#if __has_feature(ptrauth_calls)
    if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
        brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
        exception |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[1] = get_saved_state_pc(state);
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_watchpoint(vm_offset_t fault_addr)
{
    exception_type_t exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
    mach_msg_type_number_t numcodes = 2;

    codes[1] = fault_addr;
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
{
    fault_status_t fault_code;
    vm_prot_t fault_type;

    inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
    handler(state, esr, fault_addr, fault_code, fault_type, expected_fault_handler);
}

static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.instr_ex_cnt++;
    *fault_code = ISS_IA_FSC(iss);
    *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.data_ex_cnt++;
    *fault_code = ISS_DA_FSC(iss);

    /*
     * Cache maintenance operations always report faults as write access.
     * Change these to read access, unless they report a permission fault.
     * Only certain cache maintenance operations (e.g. 'dc ivac') require write
     * access to the mapping, but if a cache maintenance operation that only requires
     * read access generates a permission fault, then we will not be able to handle
     * the fault regardless of whether we treat it as a read or write fault.
     */
    if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
        *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
    } else {
        *fault_type = (VM_PROT_READ);
    }
}

#if __has_feature(ptrauth_calls)
static inline uint64_t
fault_addr_bitmask(unsigned int bit_from, unsigned int bit_to)
{
    return ((1ULL << (bit_to - bit_from + 1)) - 1) << bit_from;
}
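
/* For example, fault_addr_bitmask(56, 63) == 0xff00000000000000ULL. */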

static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
    return (bool)((fault_addr >> bit) & 1);
}

extern int gARM_FEAT_PAuth2;

/**
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
    bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
    bool tbi = data_key || __improbable(instruction_tbi);

    if (gARM_FEAT_PAuth2) {
        /*
         * EnhancedPAC2 CPUs don't encode error codes at fixed positions, so
         * treat all non-canonical address bits like potential poison bits.
         */
        uint64_t mask = fault_addr_bitmask(T0SZ_BOOT, 54);
        if (!tbi) {
            mask |= fault_addr_bitmask(56, 63);
        }
        return (fault_addr & mask) != 0;
    } else {
        unsigned int poison_shift;
        if (tbi) {
            poison_shift = 53;
        } else {
            poison_shift = 61;
        }

        /* PAC error codes are always in the form key_number:NOT(key_number) */
        bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
        bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
        return poison_bit_1 != poison_bit_2;
    }
}
#endif /* __has_feature(ptrauth_calls) */

static void
handle_pc_align(arm_saved_state_t *ss)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
    if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[0] = EXC_ARM_DA_ALIGN;
    codes[1] = get_saved_state_pc(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
    if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[0] = EXC_ARM_SP_ALIGN;
    codes[1] = get_saved_state_sp(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_wf_trap(arm_saved_state_t *state)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

    exc = EXC_BAD_INSTRUCTION;
    codes[0] = EXC_ARM_UNDEFINED;
    codes[1] = instr;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t exc = EXC_ARITHMETIC;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic_with_thread_kernel_state("Floating point exception from kernel", state);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    /* The floating point trap flags are only valid if TFV is set. */
    if (!fp_exceptions_enabled) {
        exc = EXC_BAD_INSTRUCTION;
        codes[0] = EXC_ARM_UNDEFINED;
    } else if (!(esr & ISS_FP_TFV)) {
        codes[0] = EXC_ARM_FP_UNDEFINED;
    } else if (esr & ISS_FP_UFF) {
        codes[0] = EXC_ARM_FP_UF;
    } else if (esr & ISS_FP_OFF) {
        codes[0] = EXC_ARM_FP_OF;
    } else if (esr & ISS_FP_IOF) {
        codes[0] = EXC_ARM_FP_IO;
    } else if (esr & ISS_FP_DZF) {
        codes[0] = EXC_ARM_FP_DZ;
    } else if (esr & ISS_FP_IDF) {
        codes[0] = EXC_ARM_FP_ID;
    } else if (esr & ISS_FP_IXF) {
        codes[0] = EXC_ARM_FP_IX;
    } else {
        panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
    }

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}



/*
 * handle_alignment_fault_from_user:
 *   state: Saved state
 *
 * Attempts to deal with an alignment fault from userspace (possibly by
 * emulating the faulting instruction). If emulation fails due to an
 * unserviceable fault, the exception code stores the ESR for that fault in
 * the recovery_esr field of the thread.
 *
 * Returns:
 *   -1:     Emulation failed (emulation of state/instr not supported)
 *   0:      Successfully emulated the instruction
 *   EFAULT: Emulation failed (probably due to permissions)
 *   EINVAL: Emulation failed (probably due to a bad address)
 */


static int
handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
{
    int ret = -1;

#pragma unused (state)
#pragma unused (vmfr)

    return ret;
}
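
/*
 * In this configuration no emulation is attempted (note the unused
 * parameters above): the stub always returns -1, which makes
 * handle_user_abort() below report the abort as EXC_ARM_DA_ALIGN.
 */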

static void
handle_sw_step_debug(arm_saved_state_t *state)
{
    thread_t thread = current_thread();
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
    }

    // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
    if (thread->machine.DebugData != NULL) {
        thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
    } else {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
    }

    mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_ALL);

    // Special encoding for gdb single step event on ARM
    exc = EXC_BREAKPOINT;
    codes[0] = 1;
    codes[1] = 0;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, expected_fault_handler_t expected_fault_handler)
{
    exception_type_t exc = EXC_BAD_ACCESS;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    thread_t thread = current_thread();

    (void)esr;
    (void)expected_fault_handler;

    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
    }

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

    if (!is_vm_fault(fault_code) &&
        thread->t_rr_state.trr_fault_state != TRR_FAULT_NONE) {
        thread_reset_pcs_done_faulting(thread);
    }

    if (is_vm_fault(fault_code)) {
        vm_map_t map = thread->map;
        vm_offset_t vm_fault_addr = fault_addr;
        kern_return_t result = KERN_FAILURE;

        assert(map != kernel_map);

        if (!(fault_type & VM_PROT_EXECUTE)) {
            vm_fault_addr = tbi_clear(fault_addr);
        }

        /* check to see if it is just a pmap ref/modify fault */
        if (!is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap,
                vm_fault_addr,
                fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
        }
        if (result != KERN_SUCCESS) {

            {
                /* We have to fault the page in */
                result = vm_fault(map, vm_fault_addr, fault_type,
                    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
                    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
            }
        }
        if (thread->t_rr_state.trr_fault_state != TRR_FAULT_NONE) {
            thread_reset_pcs_done_faulting(thread);
        }
        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
            return;
        }

        /*
         * vm_fault() should never return KERN_FAILURE for page faults from user space.
         * If it does, we're leaking preemption disables somewhere in the kernel.
         */
        if (__improbable(result == KERN_FAILURE)) {
            panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
        }

        codes[0] = result;
    } else if (is_alignment_fault(fault_code)) {
        kern_return_t vmfkr = KERN_SUCCESS;
        thread->machine.recover_esr = 0;
        thread->machine.recover_far = 0;
        int result = handle_alignment_fault_from_user(state, &vmfkr);
        if (result == 0) {
            /* Successfully emulated, or instruction
             * copyin() for decode/emulation failed.
             * Continue, or redrive instruction.
             */
            thread_exception_return();
        } else if (((result == EFAULT) || (result == EINVAL)) &&
            (thread->machine.recover_esr == 0)) {
            /*
             * If we didn't actually take a fault, but got one of
             * these errors, then we failed basic sanity checks of
             * the fault address. Treat this as an invalid
             * address.
             */
            codes[0] = KERN_INVALID_ADDRESS;
        } else if ((result == EFAULT) &&
            (thread->machine.recover_esr)) {
            /*
             * Since alignment aborts are prioritized
             * ahead of translation aborts, the misaligned
             * atomic emulation flow may have triggered a
             * VM pagefault, which the VM could not resolve.
             * Report the VM fault error in codes[]
             */

            codes[0] = vmfkr;
            assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
            /* Cause ESR_EC to reflect an EL0 abort */
            thread->machine.recover_esr &= ~ESR_EC_MASK;
            thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
            set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
            set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
            fault_addr = thread->machine.recover_far;
        } else {
            /* This was just an unsupported alignment
             * exception. Misaligned atomic emulation
             * timeouts fall in this category.
             */
            codes[0] = EXC_ARM_DA_ALIGN;
        }
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        /*
         * Platform errors are handled in sleh_sync before interrupts are enabled.
         */
#else
        panic("User parity error.");
#endif
    } else {
        codes[0] = KERN_FAILURE;
    }

    codes[1] = fault_addr;
#if __has_feature(ptrauth_calls)
    bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
    if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */
    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_kernel_abort_recover(
    arm_saved_state_t *state,
    uint32_t esr,
    vm_offset_t fault_addr,
    thread_t thread)
{
    thread->machine.recover_esr = esr;
    thread->machine.recover_far = fault_addr;
#if defined(HAS_APPLE_PAC)
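    /*
     * With PAC enabled, the saved PC is part of the signed portion of the
     * thread state, so it must be updated via MANIPULATE_SIGNED_THREAD_STATE
     * (which re-signs the state) rather than stored directly.
     */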
    MANIPULATE_SIGNED_THREAD_STATE(state,
        "mov x1, %[pc] \n"
        "str x1, [x0, %[SS64_PC]] \n",
        [pc] "r"(copyio_recovery_get_recover_addr(state))
        );
#else
    saved_state64(state)->pc = copyio_recovery_get_recover_addr(state);
#endif
}

static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, expected_fault_handler_t expected_fault_handler)
{
    thread_t thread = current_thread();
    bool recover = find_copyio_recovery_entry(state) != NULL;

#ifndef CONFIG_XNUPOST
    (void)expected_fault_handler;
#endif /* CONFIG_XNUPOST */

#if CONFIG_DTRACE
    if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
        if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
            /*
             * Point to next instruction, or recovery handler if set.
             */
            if (recover) {
                handle_kernel_abort_recover(state, esr, fault_addr, thread);
            } else {
                add_saved_state_pc(state, 4);
            }
            return;
        } else {
            panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
        }
    }
#endif

    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
    }

    if (is_vm_fault(fault_code)) {
        kern_return_t result = KERN_FAILURE;
        vm_map_t map;
        int interruptible;

        /*
         * Ensure no faults in the physical aperture. This could happen if
         * a page table is incorrectly allocated from the read only region
         * when running with KTRR.
         */

#ifdef CONFIG_XNUPOST
        if (expected_fault_handler && expected_fault_handler(state)) {
            return;
        }
#endif /* CONFIG_XNUPOST */

        if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
            panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
        }

        if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL || !recover) {
            /*
             * If no recovery handler is supplied, always drive the fault against
             * the kernel map. If the fault was taken against a userspace VA, indicating
             * an unprotected access to user address space, vm_fault() should fail and
             * ultimately lead to a panic here.
             */
            map = kernel_map;
            interruptible = THREAD_UNINT;
        } else {
            map = thread->map;

            /**
             * In the case that the recovery handler is set (e.g., during copyio
             * and dtrace probes), we don't want the vm_fault() operation to be
             * aborted early. Those code paths can't handle restarting the
             * vm_fault() operation so don't allow it to return early without
             * creating the wanted mapping.
             */
            interruptible = (recover) ? THREAD_UNINT : THREAD_ABORTSAFE;
        }

        /* check to see if it is just a pmap ref/modify fault */
        if (!is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap,
                fault_addr,
                fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
            if (result == KERN_SUCCESS) {
                return;
            }
        }

        if (result != KERN_PROTECTION_FAILURE) {
            /*
             * We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr, fault_type,
                /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
                /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
        }

        if (result == KERN_SUCCESS) {
            return;
        }

        /*
         * If we have a recover handler, invoke it now.
         */
        if (recover) {
            handle_kernel_abort_recover(state, esr, fault_addr, thread);
            return;
        }

        panic_fault_address = fault_addr;
    } else if (is_alignment_fault(fault_code)) {
        if (recover) {
            handle_kernel_abort_recover(state, esr, fault_addr, thread);
            return;
        }
        panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        /*
         * Platform errors are handled in sleh_sync before interrupts are enabled.
         */
#else
        panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
    } else {
        kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
    }

    panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

static void
handle_svc(arm_saved_state_t *state)
{
    int trap_no = get_saved_state_svc_number(state);
    thread_t thread = current_thread();
    struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
    syscall_trace(state);
#endif

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

    if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
        platform_syscall(state);
        panic("Returned from platform_syscall()?");
    }

    mach_kauth_cred_thread_update();

    if (trap_no < 0) {
        switch (trap_no) {
        case MACH_ARM_TRAP_ABSTIME:
            handle_mach_absolute_time_trap(state);
            return;
        case MACH_ARM_TRAP_CONTTIME:
            handle_mach_continuous_time_trap(state);
            return;
        }

        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_mach++;
        mach_syscall(state);
    } else {
        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_unix++;
        p = get_bsdthreadtask_info(thread);

        assert(p);

        unix_syscall(state, thread, p);
    }
}
1685
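/*
 * Fast traps for mach_absolute_time()/mach_continuous_time(): the result is
 * written straight into x0 of the saved state, so no Mach trap table entry
 * is consulted. A userspace caller reaches this path roughly as follows
 * (a sketch of the calling convention, not the exact libsyscall code):
 *
 *	mov	x16, #-3	// MACH_ARM_TRAP_ABSTIME
 *	svc	#0x80
 *	// result now in x0
 */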
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}

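/*
 * A trapped MSR/MRS is fatal if it somehow arrived from the kernel or from a
 * 32-bit saved state; otherwise it is raised to userspace as
 * EXC_BAD_INSTRUCTION, with the faulting instruction word as the second
 * exception code.
 */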
__attribute__((noreturn))
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (ESR 0x%x) from 32-bit state", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (ESR 0x%x) from kernel", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
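/*
 * Decode the Rd field of a faulting AUT* instruction into an ABI register
 * name (x29 -> "fp", x30 -> "lr", 31 -> "xzr") for the panic message.
 */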
static void
autxx_instruction_extract_reg(uint32_t instr, char reg[4])
{
	unsigned int rd = ARM64_INSTR_AUTxx_RD_GET(instr);
	switch (rd) {
	case 29:
		strncpy(reg, "fp", 4);
		return;

	case 30:
		strncpy(reg, "lr", 4);
		return;

	case 31:
		strncpy(reg, "xzr", 4);
		return;

	default:
		snprintf(reg, 4, "x%u", rd);
		return;
	}
}

static const char *
autix_system_instruction_extract_reg(uint32_t instr)
{
	unsigned int crm_op2 = ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_GET(instr);
	if (crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIA1716 ||
	    crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIB1716) {
		return "x17";
	} else {
		return "lr";
	}
}

static void
handle_pac_fail(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_ACCESS | EXC_PTRAUTH_BIT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_PAC_FAIL};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (!is_saved_state64(state)) {
		panic("PAC failure (ESR 0x%x) from 32-bit state", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
#define GENERIC_PAC_FAILURE_MSG_FMT "PAC failure from kernel with %s key"
#define AUTXX_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT " while authing %s"
#define GENERIC_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT

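		/*
		 * Size the message buffer at compile time for the worst case:
		 * the AUTXX format with its "%s" specifiers replaced by the
		 * longest key name (two chars, e.g. "IA") and the longest
		 * register name ("xzr"), plus the NUL terminator.
		 */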
		char msg[strlen(AUTXX_MSG_FMT)
		- strlen("%s") + strlen("IA")
		- strlen("%s") + strlen("xzr")
		+ 1];
		ptrauth_key key = (ptrauth_key)(esr & 0x3);
		const char *key_str = ptrauth_key_to_string(key);

		if (ARM64_INSTR_IS_AUTxx(instr)) {
			char reg[4];
			autxx_instruction_extract_reg(instr, reg);
			snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
		} else if (ARM64_INSTR_IS_AUTIx_SYSTEM(instr)) {
			const char *reg = autix_system_instruction_extract_reg(instr);
			snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
		} else {
			snprintf(msg, sizeof(msg), GENERIC_MSG_FMT, key_str);
		}
		panic_with_thread_kernel_state(msg, state);
	}

	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
#endif /* __has_feature(ptrauth_calls) */

static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

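/*
 * Second-level handler for IRQs. Wraps the platform interrupt dispatch with
 * tracing, entropy collection, and (on MACH_ASSERT kernels) a check that the
 * handler did not leak a preemption-level change.
 */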
void
sleh_irq(arm_saved_state_t *state)
{
	cpu_data_t * cdp __unused = getCpuDatap();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif


	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

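	/*
	 * On USE_APPLEARMSMP configurations the platform expert dispatches
	 * external interrupts; otherwise invoke the handler registered in
	 * this CPU's cpu_data.
	 */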
#if USE_APPLEARMSMP
	PE_handle_ext_interrupt();
#else
	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);
#endif

	entropy_collect();


	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, sleh_get_preemption_level());
	}
#endif
}

void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
	boolean_t is_ipi = FALSE;
	uint64_t ipi_sr = 0;

	if (gFastIPI) {
		MRS(ipi_sr, "S3_5_C15_C1_1");

		if (ipi_sr & ARM64_IPISR_IPI_PENDING) {
			is_ipi = TRUE;
		}
	}

	if (is_ipi) {
		type = DBG_INTR_TYPE_IPI;
	} else
#endif /* defined(HAS_IPI) */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}
#if MONOTONIC_FIQ
	/*
	 * Consult the PMI sysregs last, after IPI/timer classification.
	 */
	else if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	}
#endif /* MONOTONIC_FIQ */

	sleh_interrupt_handler_prologue(state, type);

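	/*
	 * On APPLEVIRTUALPLATFORM the FIQ arrives via a GICv3: reading
	 * ICC_IAR0_EL1 acknowledges the interrupt, and the matching
	 * end-of-interrupt write to ICC_EOIR0_EL1 is issued after the
	 * handler runs, below.
	 */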
#if APPLEVIRTUALPLATFORM
	uint64_t iar = __builtin_arm_rsr64("ICC_IAR0_EL1");
#endif

#if defined(HAS_IPI)
	if (type == DBG_INTR_TYPE_IPI) {
		/*
		 * Order is important here: we must ack the IPI by writing IPI_SR
		 * before we call cpu_signal_handler(). Otherwise, there will be
		 * a window between the completion of pending-signal processing in
		 * cpu_signal_handler() and the ack during which a newly-issued
		 * IPI to this CPU may be lost. ISB is required to ensure the msr
		 * is retired before execution of cpu_signal_handler().
		 */
		MSR("S3_5_C15_C1_1", ARM64_IPISR_IPI_PENDING);
		__builtin_arm_isb(ISB_SY);
		cpu_signal_handler();
	} else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
		INTERRUPT_MASKED_DEBUG_END();
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
		rtclock_intr(TRUE);
		INTERRUPT_MASKED_DEBUG_END();
	}

#if APPLEVIRTUALPLATFORM
	if (iar != GIC_SPURIOUS_IRQ) {
		__builtin_arm_wsr64("ICC_EOIR0_EL1", iar);
		__builtin_arm_isb(ISB_SY);
	}
#endif

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, sleh_get_preemption_level());
	}
#endif
}

void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	task_vtimer_check(current_thread());

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
	    esr, VM_KERNEL_ADDRHIDE(far));
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif


	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far, PLAT_ERR_SRC_ASYNC);
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
	    esr, VM_KERNEL_ADDRHIDE(far));
}

void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
	    DBG_FUNC_END, retval, 0, 0, 0, 0);
}

__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}

void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}

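/*
 * Emit the DBG_MACH_EXCP_INTR start tracepoint for an interrupt. Kernel PCs
 * are unslid before being traced so the record does not leak the KASLR
 * slide; user PCs are recorded as-is.
 */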
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	boolean_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	task_vtimer_check(current_thread());

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread(is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}

static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

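	/*
	 * If SP landed within one page below the bottom of the kernel stack,
	 * it most likely ran off the end of the stack into the guard page;
	 * anything else points at corruption of the stack pointer itself.
	 */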
	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}
