/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>
#include <arm64/instructions.h>

#include <kern/debug.h>
#include <kern/restartable.h>
#include <kern/socd_client.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/arm/traps.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <prng/entropy.h>

#if CONFIG_KERNEL_TBI && KASAN_TBI
#include <san/kasan.h>
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
#include <san/ubsan_minimal.h>
#endif /* CONFIG_UBSAN_MINIMAL */

#ifndef __arm64__
#error Should only be compiling for arm64.
#endif

#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))


#define COPYIN(src, dst, size) \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyin_kern(src, dst, size) : \
	copyin(src, dst, size)

#define COPYOUT(src, dst, size) \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyout_kern(src, dst, size) : \
	copyout(src, dst, size)
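
/*
 * Usage sketch (mirrors the call sites below): a handler fetches the faulting
 * instruction with COPYIN so that an exception taken at EL1 goes through
 * copyin_kern() while an EL0 exception goes through the regular copyin() path:
 *
 *	uint32_t instr = 0;
 *	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
 *
 * Both macros expect a local named `state` pointing at the saved register
 * state, since they expand get_saved_state_cpsr(state) inline.
 */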

// Below is for concatenating a string param to a string literal
#define STR1(x) #x
#define STR(x) STR1(x)
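
/*
 * Example: STR() expands its argument before stringizing, so with
 * `#define FOO 42`, STR(FOO) yields "42" while STR1(FOO) would yield "FOO".
 */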

#define ARM64_KDBG_CODE_KERNEL (0 << 8)
#define ARM64_KDBG_CODE_USER   (1 << 8)
#define ARM64_KDBG_CODE_GUEST  (2 << 8)

_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);

void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);
#ifdef __ARM_ARCH_8_6__
static void handle_pac_fail(arm_saved_state_t *state, uint32_t esr) __dead2;
#endif

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);

/*
 * For UBSan trap and continue handling, we must be able to recover
 * from handle_kernel_breakpoint().
 */
#if !CONFIG_UBSAN_MINIMAL
__dead2
#endif /* CONFIG_UBSAN_MINIMAL */
static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t);

static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_thread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct proc;

typedef uint32_t arm64_instr_t;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so
 * copy and paste the trap instructions over from that file. Need to keep
 * these in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

extern void arm64_thread_exception_return(void) __dead2;

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif

#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

static arm_saved_state64_t *original_faulting_state = NULL;

TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);

extern vm_offset_t static_memory_end;

/*
 * Fault recovery entries for the copyin/copyout routines.
 *
 * Offsets are expressed in bytes from &copyio_recover_table
 */
struct copyio_recovery_entry {
	ptrdiff_t cre_start;
	ptrdiff_t cre_end;
	ptrdiff_t cre_recovery;
};

extern struct copyio_recovery_entry copyio_recover_table[];
extern struct copyio_recovery_entry copyio_recover_table_end[];

static inline ptrdiff_t
copyio_recovery_offset(uintptr_t addr)
{
	return (ptrdiff_t)(addr - (uintptr_t)copyio_recover_table);
}

static inline uintptr_t
copyio_recovery_addr(ptrdiff_t offset)
{
	return (uintptr_t)copyio_recover_table + (uintptr_t)offset;
}
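
/*
 * Illustrative round trip (offsets are hypothetical): if a copyio routine
 * faults at copyio_recover_table + 0x40, copyio_recovery_offset() returns
 * 0x40; the matching entry's cre_recovery offset is then rebased with
 * copyio_recovery_addr() to obtain the absolute PC at which to resume.
 */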

static inline struct copyio_recovery_entry *
find_copyio_recovery_entry(arm_saved_state_t *state)
{
	ptrdiff_t offset = copyio_recovery_offset(get_saved_state_pc(state));
	struct copyio_recovery_entry *e;

	for (e = copyio_recover_table; e < copyio_recover_table_end; e++) {
		if (offset >= e->cre_start && offset < e->cre_end) {
			return e;
		}
	}

	return NULL;
}

static inline uintptr_t
copyio_recovery_get_recover_addr(arm_saved_state_t *state)
{
	struct copyio_recovery_entry *e = find_copyio_recovery_entry(state);
	if (e == NULL) {
		panic("copyio recovery: couldn't find a range for %p",
		    (void *)get_saved_state_pc(state));
	}
	return copyio_recovery_addr(e->cre_recovery);
}

static inline int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_translation_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_alignment_fault(fault_status_t status)
{
	return status == FSC_ALIGNMENT_FAULT;
}

static inline int
is_parity_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_PARITY:
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

__dead2 __unused
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n");
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
			    "walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
			    far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
			    (unsigned) far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
#pragma clang diagnostic pop
#endif

static void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if CONFIG_KERNEL_INTEGRITY
	kernel_integrity_error_handler(esr, far);
#endif

	cpu_data_t *cdp = getCpuDatap();

	if (PE_handle_platform_error(far)) {
		return;
	} else if (cdp->platform_error_handler != NULL) {
		cdp->platform_error_handler(cdp->cpu_id, far);
	} else {
		arm64_implementation_specific_error(state, esr, far);
	}
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);

	// rdar://80659177
	// Read SoCD tracepoints up to twice: once the first time we call panic and
	// another time if we encounter a nested panic after that.
	static int twice = 2;
	if (twice > 0) {
		twice--;
		SOCD_TRACE_XNU(KERNEL_STATE_PANIC, ADDR(state->pc),
		    PACK_LSB(VALUE(state->lr), VALUE(ss_valid)),
		    PACK_2X32(VALUE(state->esr), VALUE(state->cpsr)),
		    VALUE(state->far));
	}

	panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
	    "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
	    "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
	    "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
	    "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
	    "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
	    "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
	    "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
	    "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
	    "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
	    msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
	    state->x[0], state->x[1], state->x[2], state->x[3],
	    state->x[4], state->x[5], state->x[6], state->x[7],
	    state->x[8], state->x[9], state->x[10], state->x[11],
	    state->x[12], state->x[13], state->x[14], state->x[15],
	    state->x[16], state->x[17], state->x[18], state->x[19],
	    state->x[20], state->x[21], state->x[22], state->x[23],
	    state->x[24], state->x[25], state->x[26], state->x[27],
	    state->x[28], state->fp, state->lr, state->sp,
	    state->pc, state->cpsr, state->esr, state->far);
}

void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t *state = &context->ss;

	switch (class) {
	case ESR_EC_UNCATEGORIZED:
	{
		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
		if (IS_ARM_GDB_TRAP(instr)) {
			DebuggerCall(EXC_BREAKPOINT, state);
		}
	}
		OS_FALLTHROUGH; // panic if we return from the debugger
	default:
		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
	}
}

__attribute__((noreturn))
void
thread_exception_return()
{
	thread_t thread = current_thread();
	if (thread->machine.exception_trace_code != 0) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
		thread->machine.exception_trace_code = 0;
	}

	arm64_thread_exception_return();
	__builtin_unreachable();
}

/*
 * check whether task vtimers are running and set thread and CPU BSD AST
 *
 * must be called with interrupts masked so updates of fields are atomic
 * must be emitted inline to avoid generating an FBT probe on the exception path
 *
 */
__attribute__((__always_inline__))
static inline void
task_vtimer_check(thread_t thread)
{
	task_t task = get_threadtask_early(thread);

	if (__improbable(task != NULL && task->vtimers)) {
		thread_ast_set(thread, AST_BSD);
		thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
	}
}

#if MACH_ASSERT
/**
 * A version of get_preemption_level() that works in early boot.
 *
 * If an exception is raised in early boot before the initial thread has been
 * set up, then calling get_preemption_level() in the SLEH will trigger an
 * infinitely-recursing exception. This function handles this edge case.
 */
static inline int
sleh_get_preemption_level(void)
{
	if (__improbable(current_thread() == NULL)) {
		return 0;
	}
	return get_preemption_level();
}
#endif // MACH_ASSERT

void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t *state = &context->ss;
	thread_t thread = current_thread();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif
	expected_fault_handler_t expected_fault_handler = NULL;
#ifdef CONFIG_XNUPOST
	expected_fault_handler_t saved_expected_fault_handler = NULL;
	uintptr_t saved_expected_fault_addr = 0;
#endif /* CONFIG_XNUPOST */

	ASSERT_CONTEXT_SANITY(context);

	task_vtimer_check(thread);

#if CONFIG_DTRACE
	/*
	 * Handle kernel DTrace probes as early as possible to minimize the likelihood
	 * that this path will itself trigger a DTrace probe, which would lead to infinite
	 * probe recursion.
	 */
	if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
	    (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
		return;
	}
#endif
	bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	/*
	 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
	 * that would disclose the behavior of PT_DENY_ATTACH processes.
	 */
	if (is_user) {
		thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
		    esr, far, get_saved_state_pc(state), 0, 0);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}

	if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
		/*
		 * We no longer support 32-bit, which means no 2-byte
		 * instructions.
		 */
		if (is_user) {
			panic("Exception on 2-byte instruction, "
			    "context=%p, esr=%#x, far=%p",
			    context, esr, (void *)far);
		} else {
			panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
		}
	}

#ifdef CONFIG_XNUPOST
	if (thread->machine.expected_fault_handler != NULL) {
		saved_expected_fault_handler = thread->machine.expected_fault_handler;
		saved_expected_fault_addr = thread->machine.expected_fault_addr;

		thread->machine.expected_fault_handler = NULL;
		thread->machine.expected_fault_addr = 0;

		if (saved_expected_fault_addr == far) {
			expected_fault_handler = saved_expected_fault_handler;
		}
	}
#endif /* CONFIG_XNUPOST */

	if (is_user && class == ESR_EC_DABORT_EL0) {
		thread_reset_pcs_will_fault(thread);
	}

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
		ml_set_interrupts_enabled(TRUE);
	}

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !is_user) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, inspect_data_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, esr);
		break;

#ifdef __ARM_ARCH_8_6__
	case ESR_EC_PAC_FAIL:
		handle_pac_fail(state, esr);
		__builtin_unreachable();

#endif /* __ARM_ARCH_8_6__ */

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_IABORT_EL1:
#ifdef CONFIG_XNUPOST
		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */

		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		__builtin_unreachable();

	case ESR_EC_DABORT_EL1:
		handle_abort(state, esr, far, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

		handle_uncategorized(&context->ss);
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		__builtin_unreachable();

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state, esr);
		__builtin_unreachable();

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
			handle_kernel_breakpoint(state, esr);
#if CONFIG_UBSAN_MINIMAL
			/* UBSan breakpoints are recoverable */
			break;
#endif /* CONFIG_UBSAN_MINIMAL */
		} else {
			handle_breakpoint(state, esr);
			__builtin_unreachable();
		}

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state, esr);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL1:
		panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL1:
		panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, it was probably in a
		 * copyin/copyout that we don't want to abort. Turn off
		 * watchpoints and keep going; we'll turn them back on in
		 * return_from_exception.
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		__builtin_unreachable();

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
		    ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
			    state, class, esr, (void *)far, get_saved_state_cpsr(state));
		}
		// must clear this fault in PSR to re-run
		mask_saved_state_cpsr(state, 0, PSR64_IL);
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		__builtin_unreachable();

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		__builtin_unreachable();

	case ESR_EC_FLOATING_POINT_64:
		handle_fp_trap(state, esr);
		__builtin_unreachable();

	default:
		handle_uncategorized(state);
	}

#ifdef CONFIG_XNUPOST
	if (saved_expected_fault_handler != NULL) {
		thread->machine.expected_fault_handler = saved_expected_fault_handler;
		thread->machine.expected_fault_addr = saved_expected_fault_addr;
	}
#endif /* CONFIG_XNUPOST */

	if (is_user) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
		    esr, far, get_saved_state_pc(state), 0, 0);
		thread->machine.exception_trace_code = 0;
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("synchronous exception changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instr.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS) {
				return;
			}
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32bit user process, we check for thumb mode, in
		 * which case we only care about a 2 byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}

#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			current_thread()->machine.kpcb = state;

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request).
			 */
			DebuggerCall(exception, state);

			current_thread()->machine.kpcb = NULL;
			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (IS_ARM_GDB_TRAP(instr)) {
		exception = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_BREAKPOINT;
		codes[1] = instr;
	} else {
		codes[1] = instr;
	}

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
static const uint16_t ptrauth_brk_comment_base = 0xc470;

static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
	return comment >= ptrauth_brk_comment_base &&
	       comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
}
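
/*
 * The comment field encodes the failing key as an offset from the base,
 * mirroring the ptrauth_key enumeration: for example, a comment of
 * ptrauth_brk_comment_base + ptrauth_key_asda (0xc472) denotes a DA-key
 * authentication failure.
 */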

static inline const char *
ptrauth_key_to_string(ptrauth_key key)
{
	switch (key) {
	case ptrauth_key_asia:
		return "IA";
	case ptrauth_key_asib:
		return "IB";
	case ptrauth_key_asda:
		return "DA";
	case ptrauth_key_asdb:
		return "DB";
	default:
		__builtin_unreachable();
	}
}
#endif /* __has_feature(ptrauth_calls) */

#if CONFIG_KERNEL_TBI && KASAN_TBI
static inline bool
brk_comment_is_kasan_failure(uint16_t comment)
{
	return comment >= KASAN_TBI_ESR_BASE &&
	       comment <= KASAN_TBI_ESR_TOP;
}
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
static inline bool
brk_comment_is_ubsan(uint16_t comment)
{
	return comment >= UBSAN_MINIMAL_TRAPS_START &&
	       comment < UBSAN_MINIMAL_TRAPS_END;
}
#endif /* CONFIG_UBSAN_MINIMAL */

static void
handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
{
	uint16_t comment = ISS_BRK_COMMENT(esr);

#if __has_feature(ptrauth_calls)
	if (brk_comment_is_ptrauth(comment)) {
#define MSG_FMT "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx"
		char msg[strlen(MSG_FMT)
		    - strlen("0x%04X") + strlen("0xFFFF")
		    - strlen("%s") + strlen("IA")
		    - strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
		    + 1];
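		/*
		 * The buffer is sized at compile time: the compiler
		 * constant-folds strlen() of a string literal, so each format
		 * specifier's length is replaced by that of its worst-case
		 * expansion, plus one byte for the NUL terminator.
		 */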
		ptrauth_key key = (ptrauth_key)(comment - ptrauth_brk_comment_base);
		const char *key_str = ptrauth_key_to_string(key);
		snprintf(msg, sizeof(msg), MSG_FMT, comment, key_str, saved_state64(state)->x[16]);

		panic_with_thread_kernel_state(msg, state);
		__builtin_unreachable();
#undef MSG_FMT
	}
#endif /* __has_feature(ptrauth_calls) */

#if CONFIG_KERNEL_TBI && KASAN_TBI
	if (brk_comment_is_kasan_failure(comment)) {
		kasan_handle_brk_failure(saved_state64(state)->x[0], comment);
		__builtin_unreachable();
	}
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
	if (brk_comment_is_ubsan(comment)) {
		ubsan_handle_brk_trap(comment, get_saved_state_pc(state),
		    get_saved_state_fp(state));
		add_saved_state_pc(state, 4);
		return;
	}
#endif /* CONFIG_UBSAN_MINIMAL */

#define MSG_FMT "Break 0x%04X instruction exception from kernel. Panic (by design)"
	char msg[strlen(MSG_FMT) - strlen("0x%04X") + strlen("0xFFFF") + 1];
	snprintf(msg, sizeof(msg), MSG_FMT, comment);
#undef MSG_FMT

	panic_with_thread_kernel_state(msg, state);
	__builtin_unreachable();
}

static void
handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
{
	exception_type_t exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
	mach_msg_type_number_t numcodes = 2;

#if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
	if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
	    brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
		exception |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */

	codes[1] = get_saved_state_pc(state);
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_watchpoint(vm_offset_t fault_addr)
{
	exception_type_t exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
	mach_msg_type_number_t numcodes = 2;

	codes[1] = fault_addr;
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
{
	fault_status_t fault_code;
	vm_prot_t fault_type;

	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
	handler(state, esr, fault_addr, fault_code, fault_type, expected_fault_handler);
}

static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.instr_ex_cnt++;
	*fault_code = ISS_IA_FSC(iss);
	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.data_ex_cnt++;
	*fault_code = ISS_DA_FSC(iss);

	/*
	 * Cache maintenance operations always report faults as write access.
	 * Change these to read access, unless they report a permission fault.
	 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
	 * access to the mapping, but if a cache maintenance operation that only requires
	 * read access generates a permission fault, then we will not be able to handle
	 * the fault regardless of whether we treat it as a read or write fault.
	 */
	if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
	} else {
		*fault_type = (VM_PROT_READ);
	}
}

#if __has_feature(ptrauth_calls)
#ifdef __ARM_ARCH_8_6__
static inline uint64_t
fault_addr_bitmask(unsigned int bit_from, unsigned int bit_to)
{
	return ((1ULL << (bit_to - bit_from + 1)) - 1) << bit_from;
}
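
/*
 * Example: fault_addr_bitmask(56, 63) == 0xFF00000000000000, i.e. the top
 * byte of the address.
 */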
#else
static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
	return (bool)((fault_addr >> bit) & 1);
}
#endif /* __ARM_ARCH_8_6__ */

/**
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
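/*
 * Worked example for the pre-ARMv8.6 scheme below (data key, TBI enabled):
 * the poison bits sit at bits 53 and 54, so an address such as
 * 0x0020000012345678 (bit 53 set, bit 54 clear) reads as a PAC error code,
 * while a canonical user address (both bits clear) does not.
 */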
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
	bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
	bool tbi = data_key || __improbable(instruction_tbi);
#ifdef __ARM_ARCH_8_6__
	/*
	 * EnhancedPAC2 CPUs don't encode error codes at fixed positions, so
	 * treat all non-canonical address bits like potential poison bits.
	 */
	uint64_t mask = fault_addr_bitmask(T0SZ_BOOT, 54);
	if (!tbi) {
		mask |= fault_addr_bitmask(56, 63);
	}
	return (fault_addr & mask) != 0;
#else /* !__ARM_ARCH_8_6__ */
	unsigned int poison_shift;
	if (tbi) {
		poison_shift = 53;
	} else {
		poison_shift = 61;
	}

	/* PAC error codes are always in the form key_number:NOT(key_number) */
	bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
	bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
	return poison_bit_1 != poison_bit_2;
#endif /* __ARM_ARCH_8_6__ */
}
#endif /* __has_feature(ptrauth_calls) */

static void
handle_pc_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_DA_ALIGN;
	codes[1] = get_saved_state_pc(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_SP_ALIGN;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_wf_trap(arm_saved_state_t *state)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	exc = EXC_BAD_INSTRUCTION;
	codes[0] = EXC_ARM_UNDEFINED;
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exc = EXC_ARITHMETIC;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("Floating point exception from kernel", state);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	/* The floating point trap flags are only valid if TFV is set. */
	if (!fp_exceptions_enabled) {
		exc = EXC_BAD_INSTRUCTION;
		codes[0] = EXC_ARM_UNDEFINED;
	} else if (!(esr & ISS_FP_TFV)) {
		codes[0] = EXC_ARM_FP_UNDEFINED;
	} else if (esr & ISS_FP_UFF) {
		codes[0] = EXC_ARM_FP_UF;
	} else if (esr & ISS_FP_OFF) {
		codes[0] = EXC_ARM_FP_OF;
	} else if (esr & ISS_FP_IOF) {
		codes[0] = EXC_ARM_FP_IO;
	} else if (esr & ISS_FP_DZF) {
		codes[0] = EXC_ARM_FP_DZ;
	} else if (esr & ISS_FP_IDF) {
		codes[0] = EXC_ARM_FP_ID;
	} else if (esr & ISS_FP_IXF) {
		codes[0] = EXC_ARM_FP_IX;
	} else {
		panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
	}

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

/*
 * handle_alignment_fault_from_user:
 *   state: Saved state
 *
 * Attempts to deal with an alignment fault from userspace (possibly by
 * emulating the faulting instruction). If emulation failed due to an
 * unserviceable fault, the ESR for that fault will be stored in the
 * recovery_esr field of the thread by the exception code.
 *
 * Returns:
 *   -1:     Emulation failed (emulation of state/instr not supported)
 *   0:      Successfully emulated the instruction
 *   EFAULT: Emulation failed (probably due to permissions)
 *   EINVAL: Emulation failed (probably due to a bad address)
 */

static int
handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
{
	int ret = -1;

	/*
	 * This configuration provides no alignment-fault emulation; the
	 * caller maps the -1 return onto EXC_ARM_DA_ALIGN.
	 */
#pragma unused (state)
#pragma unused (vmfr)

	return ret;
}

static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t thread = current_thread();
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
	}

	mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_ALL);

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, expected_fault_handler_t expected_fault_handler)
{
	exception_type_t exc = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	thread_t thread = current_thread();

	(void)esr;
	(void)expected_fault_handler;

	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
	}

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

	if (!is_vm_fault(fault_code) &&
	    thread->t_rr_state.trr_fault_state != TRR_FAULT_NONE) {
		thread_reset_pcs_done_faulting(thread);
	}

	if (is_vm_fault(fault_code)) {
		vm_map_t map = thread->map;
		vm_offset_t vm_fault_addr = fault_addr;
		kern_return_t result = KERN_FAILURE;

		assert(map != kernel_map);

		if (!(fault_type & VM_PROT_EXECUTE)) {
			vm_fault_addr = tbi_clear(fault_addr);
		}

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap,
			    vm_fault_addr,
			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
		}
		if (result != KERN_SUCCESS) {
			/* We have to fault the page in */
			result = vm_fault(map, vm_fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}
		if (thread->t_rr_state.trr_fault_state != TRR_FAULT_NONE) {
			thread_reset_pcs_done_faulting(thread);
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			return;
		}

		/*
		 * vm_fault() should never return KERN_FAILURE for page faults from user space.
		 * If it does, we're leaking preemption disables somewhere in the kernel.
		 */
		if (__improbable(result == KERN_FAILURE)) {
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		codes[0] = result;
	} else if (is_alignment_fault(fault_code)) {
		kern_return_t vmfkr = KERN_SUCCESS;
		thread->machine.recover_esr = 0;
		thread->machine.recover_far = 0;
		int result = handle_alignment_fault_from_user(state, &vmfkr);
		if (result == 0) {
			/* Successfully emulated, or instruction
			 * copyin() for decode/emulation failed.
			 * Continue, or redrive instruction.
			 */
			thread_exception_return();
		} else if (((result == EFAULT) || (result == EINVAL)) &&
		    (thread->machine.recover_esr == 0)) {
			/*
			 * If we didn't actually take a fault, but got one of
			 * these errors, then we failed basic sanity checks of
			 * the fault address. Treat this as an invalid
			 * address.
			 */
			codes[0] = KERN_INVALID_ADDRESS;
		} else if ((result == EFAULT) &&
		    (thread->machine.recover_esr)) {
			/*
			 * Since alignment aborts are prioritized
			 * ahead of translation aborts, the misaligned
			 * atomic emulation flow may have triggered a
			 * VM pagefault, which the VM could not resolve.
			 * Report the VM fault error in codes[]
			 */

			codes[0] = vmfkr;
			assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
			/* Cause ESR_EC to reflect an EL0 abort */
			thread->machine.recover_esr &= ~ESR_EC_MASK;
			thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
			set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
			set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
			fault_addr = thread->machine.recover_far;
		} else {
			/* This was just an unsupported alignment
			 * exception. Misaligned atomic emulation
			 * timeouts fall in this category.
			 */
			codes[0] = EXC_ARM_DA_ALIGN;
		}
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic("User parity error.");
#endif
	} else {
		codes[0] = KERN_FAILURE;
	}

	codes[1] = fault_addr;
#if __has_feature(ptrauth_calls)
	bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
	if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */
	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	// PAN (Privileged Access Never) fault occurs for data read/write in EL1 to
	// virtual address that is readable/writeable from both EL1 and EL0

	// To check for PAN fault, we evaluate if the following conditions are true:
	// 1. This is a permission fault
	// 2. PAN is enabled
	// 3. AT instruction (on which PAN has no effect) on the same faulting address
	//    succeeds

	vm_offset_t pa;

	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
		return FALSE;
	}

	if (esr & ISS_DA_WNR) {
		pa = mmu_kvtop_wpreflight(fault_addr);
	} else {
		pa = mmu_kvtop(fault_addr);
	}
	return (pa) ? TRUE : FALSE;
}
#endif

static void
handle_kernel_abort_recover(
	arm_saved_state_t *state,
	uint32_t esr,
	vm_offset_t fault_addr,
	thread_t thread)
{
	thread->machine.recover_esr = esr;
	thread->machine.recover_far = fault_addr;
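	/*
	 * With pointer authentication, the saved thread state is signed, so
	 * the recovery PC has to be rewritten through the signing helper;
	 * otherwise a plain store of the new pc suffices.
	 */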
#if defined(HAS_APPLE_PAC)
	MANIPULATE_SIGNED_THREAD_STATE(state,
	    "mov x1, %[pc] \n"
	    "str x1, [x0, %[SS64_PC]] \n",
	    [pc] "r"(copyio_recovery_get_recover_addr(state))
	    );
#else
	saved_state64(state)->pc = copyio_recovery_get_recover_addr(state);
#endif
}

static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, expected_fault_handler_t expected_fault_handler)
{
	thread_t thread = current_thread();
	bool recover = find_copyio_recovery_entry(state) != NULL;

#ifndef CONFIG_XNUPOST
	(void)expected_fault_handler;
#endif /* CONFIG_XNUPOST */

#if CONFIG_DTRACE
	if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
			/*
			 * Point to next instruction, or recovery handler if set.
			 */
			if (recover) {
				handle_kernel_abort_recover(state, esr, fault_addr, thread);
			} else {
				add_saved_state_pc(state, 4);
			}
			return;
		} else {
			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
		}
	}
#endif

	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
	}

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t map;
		int interruptible;

		/*
		 * Ensure no faults in the physical aperture. This could happen if
		 * a page table is incorrectly allocated from the read only region
		 * when running with KTRR.
		 */

#ifdef CONFIG_XNUPOST
		if (expected_fault_handler && expected_fault_handler(state)) {
			return;
		}
#endif /* CONFIG_XNUPOST */

		if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
		}

		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;

			/**
			 * In the case that the recovery handler is set (e.g., during copyio
			 * and dtrace probes), we don't want the vm_fault() operation to be
			 * aborted early. Those code paths can't handle restarting the
			 * vm_fault() operation so don't allow it to return early without
			 * creating the wanted mapping.
			 */
			interruptible = (recover) ? THREAD_UNINT : THREAD_ABORTSAFE;
		}

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap,
			    fault_addr,
			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
			if (result == KERN_SUCCESS) {
				return;
			}
		}

		if (result != KERN_PROTECTION_FAILURE) {
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) {
			return;
		}

		/*
		 * If we have a recover handler, invoke it now.
		 */
		if (recover) {
			handle_kernel_abort_recover(state, esr, fault_addr, thread);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
			panic_with_thread_kernel_state("Privileged access never abort.", state);
		}
#endif
	} else if (is_alignment_fault(fault_code)) {
		if (recover) {
			handle_kernel_abort_recover(state, esr, fault_addr, thread);
			return;
		}
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

static void
handle_svc(arm_saved_state_t *state)
{
	int trap_no = get_saved_state_svc_number(state);
	thread_t thread = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?");
	}

	mach_kauth_cred_thread_update();

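	/* Negative trap numbers are Mach traps; non-negative numbers are BSD syscalls. */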
	if (trap_no < 0) {
		switch (trap_no) {
		case MACH_ARM_TRAP_ABSTIME:
			handle_mach_absolute_time_trap(state);
			return;
		case MACH_ARM_TRAP_CONTTIME:
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		assert(p);

		unix_syscall(state, thread, p);
	}
}
1663
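/*
 * Fast-path Mach traps: return the current absolute/continuous timebase
 * value directly in x0, skipping the full mach_syscall() dispatch.
 */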
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}

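/*
 * Handle a trapped MSR/MRS system-register access.  This is only expected
 * from 64-bit userspace; taking it from the kernel or from a 32-bit state
 * indicates a bug, so panic.  Otherwise deliver EXC_BAD_INSTRUCTION with
 * the faulting opcode as codes[1].
 */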
__attribute__((noreturn))
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (ESR 0x%x) from 32-bit state", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (ESR 0x%x) from kernel", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#ifdef __ARM_ARCH_8_6__
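/*
 * Helpers that decode the register operand of a failed AUT* instruction so
 * the panic message can name what was being authenticated.
 */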
static void
autxx_instruction_extract_reg(uint32_t instr, char reg[4])
{
	unsigned int rd = ARM64_INSTR_AUTxx_RD_GET(instr);
	switch (rd) {
	case 29:
		strncpy(reg, "fp", 4);
		return;

	case 30:
		strncpy(reg, "lr", 4);
		return;

	case 31:
		strncpy(reg, "xzr", 4);
		return;

	default:
		snprintf(reg, 4, "x%u", rd);
		return;
	}
}

static const char *
autix_system_instruction_extract_reg(uint32_t instr)
{
	unsigned int crm_op2 = ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_GET(instr);
	if (crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIA1716 ||
	    crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIB1716) {
		return "x17";
	} else {
		return "lr";
	}
}

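/*
 * Handle a pointer-authentication check failure (with FEAT_FPAC, a failed
 * AUT* faults rather than producing a poisoned pointer).  Kernel-mode
 * failures panic with a message naming the key and, when the opcode is
 * decodable, the register being authenticated; user-mode failures are
 * delivered as EXC_BAD_ACCESS with EXC_PTRAUTH_BIT set and the faulting
 * opcode as codes[1].
 */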
static void
handle_pac_fail(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_ACCESS | EXC_PTRAUTH_BIT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_PAC_FAIL};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (!is_saved_state64(state)) {
		panic("PAC failure (ESR 0x%x) from 32-bit state", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
#define GENERIC_PAC_FAILURE_MSG_FMT "PAC failure from kernel with %s key"
#define AUTXX_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT " while authing %s"
#define GENERIC_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT

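		/*
		 * Size the message buffer at compile time for the worst-case
		 * expansion of the format string: every key name is two
		 * characters ("IA", "IB", "DA", "DB"), no decoded register
		 * name is longer than "xzr", plus the NUL terminator.
		 */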
		char msg[strlen(AUTXX_MSG_FMT)
		- strlen("%s") + strlen("IA")
		- strlen("%s") + strlen("xzr")
		+ 1];
		ptrauth_key key = (ptrauth_key)(esr & 0x3);
		const char *key_str = ptrauth_key_to_string(key);

		if (ARM64_INSTR_IS_AUTxx(instr)) {
			char reg[4];
			autxx_instruction_extract_reg(instr, reg);
			snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
		} else if (ARM64_INSTR_IS_AUTIx_SYSTEM(instr)) {
			const char *reg = autix_system_instruction_extract_reg(instr);
			snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
		} else {
			snprintf(msg, sizeof(msg), GENERIC_MSG_FMT, key_str);
		}
		panic_with_thread_kernel_state(msg, state);
	}

	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
#endif /* __ARM_ARCH_8_6__ */

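/*
 * Handle an instruction trapped while executing in AArch32 userspace.
 * Delivered to the task as EXC_BAD_INSTRUCTION with the opcode in codes[1].
 */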
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

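/*
 * Handle a trapped SIMD/floating-point instruction from userspace; a SIMD
 * trap taken while in the kernel is fatal.
 */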
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

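/*
 * Top-level IRQ handler: brackets the platform interrupt handler with
 * tracing and entropy collection, and (on MACH_ASSERT kernels) verifies
 * that the handler did not leak a preemption-level change.
 */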
void
sleh_irq(arm_saved_state_t *state)
{
	cpu_data_t * cdp __unused = getCpuDatap();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

#if USE_APPLEARMSMP
	PE_handle_ext_interrupt();
#else
	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);
#endif

	entropy_collect();

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, sleh_get_preemption_level());
	}
#endif
}

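/*
 * Top-level FIQ handler.  The FIQ is classified as an IPI, a timer expiry,
 * or a PMI before dispatch; anything left unclassified is handled as a
 * timer, since we have no further insight into the sources on this path.
 */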
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
	boolean_t is_ipi = FALSE;
	uint64_t ipi_sr = 0;

	if (gFastIPI) {
		MRS(ipi_sr, "S3_5_C15_C1_1");

		if (ipi_sr & 1) {
			is_ipi = TRUE;
		}
	}

	if (is_ipi) {
		type = DBG_INTR_TYPE_IPI;
	} else
#endif /* defined(HAS_IPI) */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}
#if MONOTONIC_FIQ
	/* Consult the PMI sysregs last, after IPI/timer classification. */
	else if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	}
#endif /* MONOTONIC_FIQ */

	sleh_interrupt_handler_prologue(state, type);

#if APPLEVIRTUALPLATFORM
	uint64_t iar = __builtin_arm_rsr64("ICC_IAR0_EL1");
#endif

#if defined(HAS_IPI)
	if (is_ipi) {
		/*
		 * Order is important here: we must ack the IPI by writing IPI_SR
		 * before we call cpu_signal_handler(). Otherwise, there will be
		 * a window between the completion of pending-signal processing in
		 * cpu_signal_handler() and the ack during which a newly-issued
		 * IPI to this CPU may be lost. ISB is required to ensure the msr
		 * is retired before execution of cpu_signal_handler().
		 */
		MSR("S3_5_C15_C1_1", ipi_sr);
		__builtin_arm_isb(ISB_SY);
		cpu_signal_handler();
	} else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
		INTERRUPT_MASKED_DEBUG_END();
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
		rtclock_intr(TRUE);
		INTERRUPT_MASKED_DEBUG_END();
	}

#if APPLEVIRTUALPLATFORM
	if (iar != GIC_SPURIOUS_IRQ) {
		__builtin_arm_wsr64("ICC_EOIR0_EL1", iar);
		__builtin_arm_isb(ISB_SY);
	}
#endif

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, sleh_get_preemption_level());
	}
#endif
}

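/*
 * Handle an SError (asynchronous external abort) by forwarding it to the
 * platform error handler, bracketed by kdebug tracing.
 */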
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	task_vtimer_check(current_thread());

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
	    esr, VM_KERNEL_ADDRHIDE(far));
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
	    esr, VM_KERNEL_ADDRHIDE(far));
}

void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
	    DBG_FUNC_END, retval, 0, 0, 0, 0);
}

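/*
 * Complete a Mach syscall: store the return value in the saved x0 and
 * resume userspace via thread_exception_return().  Never returns to the
 * caller.
 */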
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}

void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}

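/*
 * Common interrupt-entry bookkeeping: vtimer check, kdebug interrupt-start
 * tracing (with the PC unslid when the interrupt came from the kernel), and
 * telemetry sampling when a record has been requested.
 */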
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	boolean_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	task_vtimer_check(current_thread());

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread(is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}

static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

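/*
 * Taken when an exception finds SP_EL1 outside the thread's kernel stack.
 * An SP within one page below the stack bottom suggests the stack was
 * overflowed into the guard region; anything else is treated as corruption.
 */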
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}