/*
 * Copyright (c) 2012-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap_internal.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>
#include <arm64/instructions.h>

#include <kern/debug.h>
#include <kern/restartable.h>
#include <kern/socd_client.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <mach/exception.h>
#include <mach/arm/traps.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>
#include <pexpert/arm64/apple_arm64_cpu.h>
#include <pexpert/arm64/apple_arm64_regs.h>
#include <pexpert/arm64/board_config.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>
#include <vm/vm_map_xnu.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <sys/code_signing.h>
#include <sys/reason.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <prng/entropy.h>




#include <arm64/platform_error_handler.h>

#if KASAN_TBI
#include <san/kasan.h>
#endif /* KASAN_TBI */

#if CONFIG_UBSAN_MINIMAL
#include <san/ubsan_minimal.h>
#endif


#ifdef CONFIG_BTI_TELEMETRY
#include <arm64/bti_telemetry.h>
#endif /* CONFIG_BTI_TELEMETRY */

#ifndef __arm64__
#error Should only be compiling for arm64.
#endif

#if DEBUG || DEVELOPMENT
#define HAS_TELEMETRY_KERNEL_BRK 1
#endif


#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))

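/*
 * COPYIN/COPYOUT dispatch on the saved CPSR of the faulting context: an
 * exception taken from EL1 uses the kernel copy routines (the data lives in
 * the kernel's address space), while an exception taken from EL0 uses the
 * regular user copyin/copyout path. Both macros expect a local `state`
 * pointer to be in scope at the expansion site.
 */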
#define COPYIN(src, dst, size)                           \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyin_kern(src, dst, size) :                    \
	copyin(src, dst, size)

#define COPYOUT(src, dst, size)                          \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyout_kern(src, dst, size) :                   \
	copyout(src, dst, size)

// Helpers to stringize a macro parameter so it can be concatenated with string literals
#define STR1(x) #x
#define STR(x) STR1(x)

#define ARM64_KDBG_CODE_KERNEL (0 << 8)
#define ARM64_KDBG_CODE_USER   (1 << 8)
#define ARM64_KDBG_CODE_GUEST  (2 << 8)

_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint64_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint64_t, vm_offset_t, bool);



void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint64_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint64_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint64_t esr);
#if __has_feature(ptrauth_calls)
static void handle_pac_fail(arm_saved_state_t *state, uint64_t esr) __dead2;
static inline uint64_t fault_addr_bitmask(unsigned int bit_from, unsigned int bit_to);
#endif
static void handle_bti_fail(arm_saved_state_t *state, uint64_t esr);
extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);

static void handle_kernel_breakpoint(arm_saved_state_t *, uint64_t);

static void handle_breakpoint(arm_saved_state_t *, uint64_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint64_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);
static void handle_user_abort(arm_saved_state_t *, uint64_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);
static void handle_kernel_abort(arm_saved_state_t *, uint64_t, vm_offset_t, fault_status_t, vm_prot_t, expected_fault_handler_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint64_t esr) __dead2;
#if HAS_ARM_FEAT_SME
static void handle_sme_trap(arm_saved_state_t *state, uint64_t esr);
#endif /* HAS_ARM_FEAT_SME */

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint64_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint64_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint64_t esr) __dead2;

extern void current_cached_proc_cred_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct proc;

typedef uint32_t arm64_instr_t;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h here,
 * so the trap instruction encodings are copied over from that file.
 * Need to keep these in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif



extern void arm64_thread_exception_return(void) __dead2;

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#elif defined(APPLEEVEREST)
#define CPU_NAME "Everest"
#elif defined(APPLEH16)
#define CPU_NAME "AppleH16"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif

#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

static arm_saved_state64_t *original_faulting_state = NULL;


TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);

extern vm_offset_t static_memory_end;

/*
 * Fault recovery entries for the copyin/copyout routines.
 *
 * Offsets are expressed in bytes from &copyio_recover_table.
 */
struct copyio_recovery_entry {
	ptrdiff_t cre_start;
	ptrdiff_t cre_end;
	ptrdiff_t cre_recovery;
};

extern struct copyio_recovery_entry copyio_recover_table[];
extern struct copyio_recovery_entry copyio_recover_table_end[];

static inline ptrdiff_t
copyio_recovery_offset(uintptr_t addr)
{
	return (ptrdiff_t)(addr - (uintptr_t)copyio_recover_table);
}

#if !HAS_APPLE_PAC
static inline uintptr_t
copyio_recovery_addr(ptrdiff_t offset)
{
	return (uintptr_t)copyio_recover_table + (uintptr_t)offset;
}
#endif

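/*
 * Look up the recovery entry, if any, whose code range covers the PC in the
 * given saved state. Returns NULL when the fault did not occur inside a
 * copyio routine.
 */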
static inline struct copyio_recovery_entry *
find_copyio_recovery_entry(arm_saved_state_t *state)
{
	ptrdiff_t offset = copyio_recovery_offset(get_saved_state_pc(state));
	struct copyio_recovery_entry *e;

	for (e = copyio_recover_table; e < copyio_recover_table_end; e++) {
		if (offset >= e->cre_start && offset < e->cre_end) {
			return e;
		}
	}

	return NULL;
}

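/*
 * Classifiers for the fault status code (FSC) extracted from ESR_ELx. The
 * abort handlers below use these to decide whether a fault can be serviced
 * by the VM layer or must be treated as a hardware error.
 */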
static inline int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_translation_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_alignment_fault(fault_status_t status)
{
	return status == FSC_ALIGNMENT_FAULT;
}

static inline int
is_parity_error(fault_status_t status)
{
	switch (status) {
#if defined(ARM64_BOARD_CONFIG_T6020)
	/*
	 * H14 Erratum (rdar://61553243): Despite having FEAT_RAS implemented,
	 * FSC_SYNC_PARITY_x can be reported for data and instruction aborts
	 * and should be interpreted as FSC_SYNC_EXT_ABORT_x
	 */
#else
	/*
	 * TODO: According to the ARM ARM, Async Parity (0b011001) is a DFSC that is
	 * only applicable to the AArch32 HSR register. Can this be removed?
	 */
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
#endif
	default:
		return FALSE;
	}
}

static inline int
is_sync_external_abort(fault_status_t status)
{
	switch (status) {
#if defined(ARM64_BOARD_CONFIG_T6020)
	/*
	 * H14 Erratum (rdar://61553243): Despite having FEAT_RAS implemented,
	 * FSC_SYNC_PARITY_x can be reported for data and instruction aborts
	 * and should be interpreted as FSC_SYNC_EXT_ABORT_x
	 */
	case FSC_SYNC_PARITY:
#endif /* defined(ARM64_BOARD_CONFIG_T6020) */
	case FSC_SYNC_EXT_ABORT:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_table_walk_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_EXT_ABORT_TT_L1:
	case FSC_SYNC_EXT_ABORT_TT_L2:
	case FSC_SYNC_EXT_ABORT_TT_L3:
#if defined(ARM64_BOARD_CONFIG_T6020)
	/*
	 * H14 Erratum (rdar://61553243): Despite having FEAT_RAS implemented,
	 * FSC_SYNC_PARITY_x can be reported for data and instruction aborts
	 * and should be interpreted as FSC_SYNC_EXT_ABORT_x
	 */
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
#endif /* defined(ARM64_BOARD_CONFIG_T6020) */
		return TRUE;
	default:
		return FALSE;
	}
}



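/*
 * Returns TRUE if the fault is one the fault handlers can service without
 * panicking; currently this is just the set of VM faults. The esr argument
 * is unused here but kept for configurations that need it.
 */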
static inline int
is_servicible_fault(fault_status_t status, uint64_t esr)
{
#pragma unused(esr)
	return is_vm_fault(status);
}

__dead2 __unused
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint64_t esr, vm_offset_t far)
{
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n");
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint64_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
			    "walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
			    far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
			    (unsigned) far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08llx).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08llx far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
#pragma clang diagnostic pop
#endif


static void
arm64_platform_error(arm_saved_state_t *state, uint64_t esr, vm_offset_t far, platform_error_source_t source)
{
#if CONFIG_KERNEL_INTEGRITY
	kernel_integrity_error_handler(esr, far);
#endif

	(void)source;
	cpu_data_t *cdp = getCpuDatap();

	if (PE_handle_platform_error(far)) {
		return;
	} else if (cdp->platform_error_handler != NULL) {
		cdp->platform_error_handler(cdp->cpu_id, far);
	} else {
		arm64_implementation_specific_error(state, esr, far);
	}
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);

	// rdar://80659177
	// Read SoCD tracepoints up to twice: once the first time we call panic and
	// another time if we encounter a nested panic after that.
	static int twice = 2;
	if (twice > 0) {
		twice--;
		SOCD_TRACE_XNU(KERNEL_STATE_PANIC, ADDR(state->pc),
		    PACK_LSB(VALUE(state->lr), VALUE(ss_valid)),
		    PACK_2X32(VALUE(state->esr), VALUE(state->cpsr)),
		    VALUE(state->far));
	}


	panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
	    "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
	    "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
	    "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
	    "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
	    "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
	    "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
	    "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
	    "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
	    "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%016llx far: 0x%016llx\n",
	    msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
	    state->x[0], state->x[1], state->x[2], state->x[3],
	    state->x[4], state->x[5], state->x[6], state->x[7],
	    state->x[8], state->x[9], state->x[10], state->x[11],
	    state->x[12], state->x[13], state->x[14], state->x[15],
	    state->x[16], state->x[17], state->x[18], state->x[19],
	    state->x[20], state->x[21], state->x[22], state->x[23],
	    state->x[24], state->x[25], state->x[26], state->x[27],
	    state->x[28], state->fp, state->lr, state->sp,
	    state->pc, state->cpsr, state->esr, state->far);
}

void
sleh_synchronous_sp1(arm_context_t *context, uint64_t esr, vm_offset_t far __unused)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t * state = &context->ss;

	switch (class) {
	case ESR_EC_UNCATEGORIZED:
	{
#if (DEVELOPMENT || DEBUG)
		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
		if (IS_ARM_GDB_TRAP(instr)) {
			DebuggerCall(EXC_BREAKPOINT, state);
		}
		OS_FALLTHROUGH; // panic if we return from the debugger
#else
		panic_with_thread_kernel_state("Unexpected debugger trap while SP1 selected", state);
#endif /* (DEVELOPMENT || DEBUG) */
	}
	default:
		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
	}
}


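/*
 * Common exit path back to userspace: closes out any open exception
 * tracepoint for the thread and (with KASAN_TBI) unpoisons the kernel stack
 * before handing control to the assembly return sequence.
 */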
__attribute__((noreturn))
void
thread_exception_return()
{
	thread_t thread = current_thread();
	if (thread->machine.exception_trace_code != 0) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
		thread->machine.exception_trace_code = 0;
	}

#if KASAN_TBI
	kasan_unpoison_curstack(true);
#endif /* KASAN_TBI */
	arm64_thread_exception_return();
	__builtin_unreachable();
}

/*
 * Check whether task vtimers are running and set the thread and CPU BSD ASTs.
 *
 * Must be called with interrupts masked so updates of fields are atomic.
 * Must be emitted inline to avoid generating an FBT probe on the exception path.
 */
__attribute__((__always_inline__))
static inline void
task_vtimer_check(thread_t thread)
{
	task_t task = get_threadtask_early(thread);

	if (__improbable(task != NULL && task->vtimers)) {
		thread_ast_set(thread, AST_BSD);
		thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
	}
}

#if MACH_ASSERT
/**
 * A version of get_preemption_level() that works in early boot.
 *
 * If an exception is raised in early boot before the initial thread has been
 * set up, then calling get_preemption_level() in the SLEH will trigger an
 * infinitely-recursing exception. This function handles this edge case.
 */
static inline int
sleh_get_preemption_level(void)
{
	if (__improbable(current_thread() == NULL)) {
		return 0;
	}
	return get_preemption_level();
}
#endif // MACH_ASSERT

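/*
 * Returns true if the ESR describes a data or instruction abort whose fault
 * status code indicates a hardware error (parity, sync external abort, or
 * table walk error) rather than an ordinary VM fault.
 */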
static inline bool
is_platform_error(uint64_t esr)
{
	esr_exception_class_t class = ESR_EC(esr);
	uint32_t iss = ESR_ISS(esr);
	fault_status_t fault_code;

	if (class == ESR_EC_DABORT_EL0 || class == ESR_EC_DABORT_EL1) {
		fault_code = ISS_DA_FSC(iss);
	} else if (class == ESR_EC_IABORT_EL0 || class == ESR_EC_IABORT_EL1) {
		fault_code = ISS_IA_FSC(iss);
	} else {
		return false;
	}

	return is_parity_error(fault_code) || is_sync_external_abort(fault_code) ||
	       is_table_walk_error(fault_code);
}

void
sleh_synchronous(arm_context_t *context, uint64_t esr, vm_offset_t far, __unused bool did_initiate_panic_lockdown)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t * state = &context->ss;
	thread_t thread = current_thread();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif
	expected_fault_handler_t expected_fault_handler = NULL;
#ifdef CONFIG_XNUPOST
	expected_fault_handler_t saved_expected_fault_handler = NULL;
	uintptr_t saved_expected_fault_addr = 0;
	uintptr_t saved_expected_fault_pc = 0;
#endif /* CONFIG_XNUPOST */

	ASSERT_CONTEXT_SANITY(context);

	task_vtimer_check(thread);

#if CONFIG_DTRACE
	/*
	 * Handle kernel DTrace probes as early as possible to minimize the likelihood
	 * that this path will itself trigger a DTrace probe, which would lead to infinite
	 * probe recursion.
	 */
	if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
	    (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
#if CONFIG_SPTM
		if (__improbable(did_initiate_panic_lockdown)) {
			panic("Unexpectedly initiated lockdown for DTrace probe?");
		}
#endif
		return;
	}
#endif
	bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

#if CONFIG_SPTM
	// Lockdown should only be initiated for kernel exceptions
	assert(!(is_user && did_initiate_panic_lockdown));
#endif /* CONFIG_SPTM */

	/*
	 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
	 * that would disclose the behavior of PT_DENY_ATTACH processes.
	 */
	if (is_user) {
		/* Sanitize FAR (but only if the exception was taken from userspace) */
		switch (class) {
		case ESR_EC_IABORT_EL1:
		case ESR_EC_IABORT_EL0:
			/* If this is a SEA, since we can't trust FnV, just clear FAR from the save area. */
			if (ISS_IA_FSC(ESR_ISS(esr)) == FSC_SYNC_EXT_ABORT) {
				saved_state64(state)->far = 0;
			}
			break;
		case ESR_EC_DABORT_EL1:
		case ESR_EC_DABORT_EL0:
			/* If this is a SEA, since we can't trust FnV, just clear FAR from the save area. */
			if (ISS_DA_FSC(ESR_ISS(esr)) == FSC_SYNC_EXT_ABORT) {
				saved_state64(state)->far = 0;
			}
			break;
		case ESR_EC_WATCHPT_MATCH_EL1:
		case ESR_EC_WATCHPT_MATCH_EL0:
		case ESR_EC_PC_ALIGN:
			break; /* FAR_ELx is valid */
		default:
			saved_state64(state)->far = 0;
			break;
		}

		thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
		    esr, far, get_saved_state_pc(state), 0, 0);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}

	if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
		/*
		 * We no longer support 32-bit, which means no 2-byte
		 * instructions.
		 */
		if (is_user) {
			panic("Exception on 2-byte instruction, "
			    "context=%p, esr=%#llx, far=%p",
			    context, esr, (void *)far);
		} else {
			panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
		}
	}

#ifdef CONFIG_XNUPOST
	if (thread->machine.expected_fault_handler != NULL) {
		bool matching_fault_pc = false;
		saved_expected_fault_handler = thread->machine.expected_fault_handler;
		saved_expected_fault_addr = thread->machine.expected_fault_addr;
		saved_expected_fault_pc = thread->machine.expected_fault_pc;

		thread->machine.expected_fault_handler = NULL;
		thread->machine.expected_fault_addr = 0;
		thread->machine.expected_fault_pc = 0;

#if __has_feature(ptrauth_calls)
		/*
		 * Compare only the bits of PC which make up the virtual address.
		 * This ignores the upper bits, which may have been corrupted by HW in
		 * platform dependent ways to signal pointer authentication fault.
		 */
		uint64_t fault_addr_mask = fault_addr_bitmask(0, 64 - T1SZ_BOOT - 1);
		uint64_t masked_expected_pc = saved_expected_fault_pc & fault_addr_mask;
		uint64_t masked_saved_pc = get_saved_state_pc(state) & fault_addr_mask;
		matching_fault_pc = masked_expected_pc == masked_saved_pc;
#else
		matching_fault_pc =
		    (saved_expected_fault_pc == get_saved_state_pc(state));
#endif /* ptrauth_calls */
		if (saved_expected_fault_addr == far ||
		    matching_fault_pc) {
			expected_fault_handler = saved_expected_fault_handler;
		}
	}
#endif /* CONFIG_XNUPOST */

	if (__improbable(is_platform_error(esr))) {
		/*
		 * Must gather error info in the platform error handler before the
		 * thread is preempted to another core/cluster, to guarantee
		 * accurate error details.
		 */

		arm64_platform_error(state, esr, far, PLAT_ERR_SRC_SYNC);
#if CONFIG_SPTM
		if (__improbable(did_initiate_panic_lockdown)) {
			panic("Panic lockdown initiated for platform error");
		}
#endif
		return;
	}

	if (is_user && class == ESR_EC_DABORT_EL0) {
		thread_reset_pcs_will_fault(thread);
	}

#if CONFIG_SPTM
	if (__improbable(did_initiate_panic_lockdown && current_thread() != NULL)) {
		/*
		 * If we initiated panic lockdown, we must disable preemption before
		 * enabling interrupts. While unlikely, preempting the panicked thread
		 * after lockdown has occurred may hang the system if all cores end up
		 * blocked while attempting to return to user space.
		 */
		disable_preemption();
	}
#endif /* CONFIG_SPTM */

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
		ml_set_interrupts_enabled(TRUE);
	}

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !is_user) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, inspect_data_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, esr);
		break;
	/**
	 * Some APPLEVIRTUALPLATFORM targets do not specify armv8.6, but it's still
	 * possible for them to run under a host that implements ARM_FPAC. There is
	 * no way for such a host to disable FPAC or trap it without a substantial
	 * performance penalty, so the FPAC handler here needs to be built into
	 * guest kernels to prevent the exception from falling through unhandled.
	 */
#if __has_feature(ptrauth_calls)
	case ESR_EC_PAC_FAIL:
#ifdef CONFIG_XNUPOST
		if (expected_fault_handler != NULL && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */
		handle_pac_fail(state, esr);
		__builtin_unreachable();

#endif /* __has_feature(ptrauth_calls) */

#if HAS_ARM_FEAT_SME
	case ESR_EC_SME:
		handle_sme_trap(state, esr);
		break;
#endif /* HAS_ARM_FEAT_SME */

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_IABORT_EL1:
#ifdef CONFIG_XNUPOST
		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */

		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		__builtin_unreachable();

	case ESR_EC_DABORT_EL1:
		handle_abort(state, esr, far, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

#if CONFIG_XNUPOST
		if (!is_user && (expected_fault_handler != NULL) && expected_fault_handler(state)) {
			/*
			 * The fault handler accepted the exception and handled it on its
			 * own. Don't trap to the debugger/panic.
			 */
			break;
		}
#endif /* CONFIG_XNUPOST */
		handle_uncategorized(&context->ss);
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		__builtin_unreachable();

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state, esr);
		__builtin_unreachable();

	case ESR_EC_BRK_AARCH64:
#ifdef CONFIG_XNUPOST
		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
			handle_kernel_breakpoint(state, esr);
			break;
		} else {
			handle_breakpoint(state, esr);
			__builtin_unreachable();
		}

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state, esr);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%llu far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL1:
		panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%llu far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL1:
		panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%llu far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, it was probably in a
		 * copyin/copyout that we don't want to abort. Turn off watchpoints
		 * and keep going; we'll turn them back on in return_from_exception.
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%llu far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		__builtin_unreachable();

	case ESR_EC_ILLEGAL_INSTR_SET:
		panic("Illegal instruction set exception. state=%p class=%u esr=%llu far=%p spsr=0x%x",
		    state, class, esr, (void *)far, get_saved_state_cpsr(state));
		__builtin_unreachable();

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		__builtin_unreachable();

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		__builtin_unreachable();

	case ESR_EC_FLOATING_POINT_64:
		handle_fp_trap(state, esr);
		__builtin_unreachable();
	case ESR_EC_BTI_FAIL:
#ifdef CONFIG_XNUPOST
		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */
#ifdef CONFIG_BTI_TELEMETRY
		if (bti_telemetry_handle_exception(state)) {
			/* Telemetry has accepted and corrected the exception, continue */
			break;
		}
#endif /* CONFIG_BTI_TELEMETRY */
		handle_bti_fail(state, esr);
		__builtin_unreachable();

	default:
		handle_uncategorized(state);
	}

#ifdef CONFIG_XNUPOST
	if (saved_expected_fault_handler != NULL) {
		thread->machine.expected_fault_handler = saved_expected_fault_handler;
		thread->machine.expected_fault_addr = saved_expected_fault_addr;
		thread->machine.expected_fault_pc = saved_expected_fault_pc;
	}
#endif /* CONFIG_XNUPOST */

	if (is_user) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
		    esr, far, get_saved_state_pc(state), 0, 0);
		thread->machine.exception_trace_code = 0;
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}

#if CONFIG_SPTM
	if (__improbable(did_initiate_panic_lockdown)) {
#if CONFIG_XNUPOST
		bool can_recover = !!(expected_fault_handler);
#else
		bool can_recover = false;
#endif /* CONFIG_XNUPOST */

		if (can_recover) {
			/*
			 * If we matched an exception handler, this was a simulated lockdown
			 * and so we can recover. Re-enable preemption if we disabled it.
			 */
			if (current_thread() != NULL) {
				enable_preemption();
			}
		} else {
			/*
			 * fleh already triggered a lockdown but we, for whatever reason,
			 * didn't end up finding a reason to panic. Catch-all panic in this
			 * case.
			 * Note that the panic here has no security benefit as the system is
			 * already hosed; this is merely for telemetry.
			 */
			panic_with_thread_kernel_state("Panic lockdown initiated", state);
		}
	}
#endif /* CONFIG_SPTM */

#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("synchronous exception changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instr.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS) {
				return;
			}
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32bit user process, we check for thumb mode, in
		 * which case we only care about a 2 byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}

#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/*
			 * Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			current_thread()->machine.kpcb = state;

			/*
			 * Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request).
			 */
			DebuggerCall(exception, state);

			current_thread()->machine.kpcb = NULL;
			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (IS_ARM_GDB_TRAP(instr)) {
		exception = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_BREAKPOINT;
		codes[1] = instr;
	} else {
		codes[1] = instr;
	}

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
static const uint16_t PTRAUTH_TRAP_START = 0xC470;
static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
	return comment >= PTRAUTH_TRAP_START &&
	       comment <= PTRAUTH_TRAP_START + ptrauth_key_asdb;
}

static inline const char *
ptrauth_key_to_string(ptrauth_key key)
{
	switch (key) {
	case ptrauth_key_asia:
		return "IA";
	case ptrauth_key_asib:
		return "IB";
	case ptrauth_key_asda:
		return "DA";
	case ptrauth_key_asdb:
		return "DB";
	default:
		__builtin_unreachable();
	}
}

static void __attribute__((noreturn))
ptrauth_handle_brk_trap(void *tstate, uint16_t comment)
{
	arm_saved_state_t *state = (arm_saved_state_t *)tstate;
#define MSG_FMT "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx"
	char msg[strlen(MSG_FMT)
	- strlen("0x%04X") + strlen("0xFFFF")
	- strlen("%s") + strlen("IA")
	- strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
	+ 1];
	ptrauth_key key = (ptrauth_key)(comment - PTRAUTH_TRAP_START);
	const char *key_str = ptrauth_key_to_string(key);
	snprintf(msg, sizeof(msg), MSG_FMT, comment, key_str, saved_state64(state)->x[16]);
#undef MSG_FMT

	panic_with_thread_kernel_state(msg, state);
	__builtin_unreachable();
}
#endif /* __has_feature(ptrauth_calls) */

#if HAS_TELEMETRY_KERNEL_BRK
static uint32_t bound_chk_violations_event;

static void
xnu_soft_trap_handle_breakpoint(
	void     *tstate,
	uint16_t comment)
{
#if CONFIG_UBSAN_MINIMAL
	if (comment == UBSAN_SOFT_TRAP_SIGNED_OF) {
		ubsan_handle_brk_trap(tstate, comment);
	}
#else
	(void)tstate;
#endif

	if (comment == CLANG_SOFT_TRAP_BOUND_CHK) {
		os_atomic_inc(&bound_chk_violations_event, relaxed);
	}
}
#endif /* HAS_TELEMETRY_KERNEL_BRK */

static void
xnu_hard_trap_handle_breakpoint(void *tstate, uint16_t comment)
{
	switch (comment) {
	case XNU_HARD_TRAP_SAFE_UNLINK: {
#define MSG_FMT "panic: corrupt list around element %p"
		char msg[strlen(MSG_FMT) - strlen("%p") + 18 + 1];
		arm_saved_state64_t *state = saved_state64(tstate);

		snprintf(msg, sizeof(msg), MSG_FMT, (void *)state->x[8]);
		panic_with_thread_kernel_state(msg, tstate);
#undef MSG_FMT
	}
	case XNU_HARD_TRAP_STRING_CHK:
		panic_with_thread_kernel_state("panic: string operation caused an overflow", tstate);
	default:
		break;
	}
}

#if __has_feature(ptrauth_calls)
KERNEL_BRK_DESCRIPTOR_DEFINE(ptrauth_desc,
    .type = KERNEL_BRK_TYPE_PTRAUTH,
    .base = PTRAUTH_TRAP_START,
    .max = PTRAUTH_TRAP_START + ptrauth_key_asdb,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = ptrauth_handle_brk_trap);
#endif

KERNEL_BRK_DESCRIPTOR_DEFINE(clang_desc,
    .type = KERNEL_BRK_TYPE_CLANG,
    .base = CLANG_ARM_TRAP_START,
    .max = CLANG_ARM_TRAP_END,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = NULL);

KERNEL_BRK_DESCRIPTOR_DEFINE(libcxx_desc,
    .type = KERNEL_BRK_TYPE_LIBCXX,
    .base = LIBCXX_TRAP_START,
    .max = LIBCXX_TRAP_END,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = NULL);

#if HAS_TELEMETRY_KERNEL_BRK
KERNEL_BRK_DESCRIPTOR_DEFINE(xnu_soft_traps_desc,
    .type = KERNEL_BRK_TYPE_TELEMETRY,
    .base = XNU_SOFT_TRAP_START,
    .max = XNU_SOFT_TRAP_END,
    .options = KERNEL_BRK_RECOVERABLE | KERNEL_BRK_CORE_ANALYTICS,
    .handle_breakpoint = xnu_soft_trap_handle_breakpoint);
#endif /* HAS_TELEMETRY_KERNEL_BRK */

KERNEL_BRK_DESCRIPTOR_DEFINE(xnu_hard_traps_desc,
    .type = KERNEL_BRK_TYPE_XNU,
    .base = XNU_HARD_TRAP_START,
    .max = XNU_HARD_TRAP_END,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = xnu_hard_trap_handle_breakpoint);

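/*
 * Handle a BRK taken from kernel mode: look up the registered descriptor for
 * the BRK comment, emit telemetry if the descriptor asks for it, run its
 * breakpoint handler (which may itself panic), and either step past the
 * 4-byte BRK instruction for recoverable traps or panic by design.
 */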
static void
#if !HAS_TELEMETRY_KERNEL_BRK
__attribute__((noreturn))
#endif
handle_kernel_breakpoint(arm_saved_state_t *state, uint64_t esr)
{
	uint16_t comment = ISS_BRK_COMMENT(esr);
	const struct kernel_brk_descriptor *desc;

#define MSG_FMT "Break 0x%04X instruction exception from kernel. Panic (by design)"
	char msg[strlen(MSG_FMT) - strlen("0x%04X") + strlen("0xFFFF") + 1];

	desc = find_brk_descriptor_by_comment(comment);

	if (!desc) {
		goto brk_out;
	}

#if HAS_TELEMETRY_KERNEL_BRK
	if (desc->options & KERNEL_BRK_TELEMETRY_OPTIONS) {
		telemetry_kernel_brk(desc->type, desc->options, (void *)state, comment);
	}
#endif

	if (desc->handle_breakpoint) {
		desc->handle_breakpoint(state, comment); /* May trigger panic */
	}

#if HAS_TELEMETRY_KERNEL_BRK
	/* Still alive? Check if we should recover. */
	if (desc->options & KERNEL_BRK_RECOVERABLE) {
		add_saved_state_pc(state, 4);
		return;
	}
#endif

brk_out:
	snprintf(msg, sizeof(msg), MSG_FMT, comment);

	panic_with_thread_kernel_state(msg, state);
	__builtin_unreachable();
#undef MSG_FMT
}

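/*
 * Deliver a breakpoint taken from user mode to the task's exception ports as
 * EXC_BREAKPOINT, flagging it as a pointer authentication failure when the
 * BRK comment falls in the ptrauth range.
 */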
static void
handle_breakpoint(arm_saved_state_t *state, uint64_t esr __unused)
{
	exception_type_t exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
	mach_msg_type_number_t numcodes = 2;

#if __has_feature(ptrauth_calls)
	if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
	    brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
		exception |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[1] = get_saved_state_pc(state);
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

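/*
 * Deliver a debug watchpoint hit from user mode as an EXC_BREAKPOINT Mach
 * exception carrying the faulting data address.
 */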
static void
handle_watchpoint(vm_offset_t fault_addr)
{
	exception_type_t exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
	mach_msg_type_number_t numcodes = 2;

	codes[1] = fault_addr;
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint64_t esr, vm_offset_t fault_addr,
    abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
{
	fault_status_t fault_code;
	vm_prot_t fault_type;

	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
	handler(state, esr, fault_addr, fault_code, fault_type, expected_fault_handler);
}

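/*
 * Abort inspectors: decode the ISS field of the ESR into a fault status code
 * and the access type that triggered the abort, bumping the per-CPU
 * instruction/data exception counters as a side effect.
 */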
static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.instr_ex_cnt++;
	*fault_code = ISS_IA_FSC(iss);
	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.data_ex_cnt++;
	*fault_code = ISS_DA_FSC(iss);

	/*
	 * Cache maintenance operations always report faults as write access.
	 * Change these to read access, unless they report a permission fault.
	 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
	 * access to the mapping, but if a cache maintenance operation that only requires
	 * read access generates a permission fault, then we will not be able to handle
	 * the fault regardless of whether we treat it as a read or write fault.
	 */
	if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
	} else {
		*fault_type = (VM_PROT_READ);
	}
}

#if __has_feature(ptrauth_calls)
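/* Returns a mask with bits [bit_from, bit_to] (inclusive) set. */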
static inline uint64_t
fault_addr_bitmask(unsigned int bit_from, unsigned int bit_to)
{
	return ((1ULL << (bit_to - bit_from + 1)) - 1) << bit_from;
}

static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
	return (bool)((fault_addr >> bit) & 1);
}

extern int gARM_FEAT_PAuth2;

/**
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
	bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
	bool tbi = data_key || __improbable(instruction_tbi);

	if (gARM_FEAT_PAuth2) {
		/*
		 * EnhancedPAC2 CPUs don't encode error codes at fixed positions, so
		 * treat all non-canonical address bits like potential poison bits.
		 */
		uint64_t mask = fault_addr_bitmask(64 - T0SZ_BOOT, 54);
		if (!tbi) {
			mask |= fault_addr_bitmask(56, 63);
		}
		return (fault_addr & mask) != 0;
	} else {
		unsigned int poison_shift;
		if (tbi) {
			poison_shift = 53;
		} else {
			poison_shift = 61;
		}

		/* PAC error codes are always in the form key_number:NOT(key_number) */
		bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
		bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
		return poison_bit_1 != poison_bit_2;
	}
}
#endif /* __has_feature(ptrauth_calls) */

/**
 * Determines whether the userland thread has a JIT region in RW mode, TPRO
 * in RW mode, or JCTL_EL0 in pointer signing mode. A fault in any of these
 * trusted code paths may indicate an attack on WebKit. Rather than letting a
 * potentially-compromised process try to handle the exception, it will be
 * killed by the kernel and a crash report will be generated.
 */
static bool
user_fault_in_self_restrict_mode(thread_t thread __unused)
{

	return false;
}

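/*
 * A PC alignment fault from user mode becomes an EXC_BAD_ACCESS exception
 * (tagged as a ptrauth failure when the PC carries a PAC error code); from
 * kernel mode it is always fatal.
 */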
static void
handle_pc_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_DA_ALIGN;
	codes[1] = get_saved_state_pc(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_SP_ALIGN;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

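/*
 * A WFI/WFE executed at EL0 while disabled for EL0 traps here; it is
 * reported to the process as an undefined-instruction exception carrying
 * the faulting opcode.
 */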
static void
handle_wf_trap(arm_saved_state_t *state)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	exc = EXC_BAD_INSTRUCTION;
	codes[0] = EXC_ARM_UNDEFINED;
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint64_t esr)
{
	exception_type_t exc = EXC_ARITHMETIC;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("Floating point exception from kernel", state);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	/* The floating point trap flags are only valid if TFV is set. */
	if (!fp_exceptions_enabled) {
		exc = EXC_BAD_INSTRUCTION;
		codes[0] = EXC_ARM_UNDEFINED;
	} else if (!(esr & ISS_FP_TFV)) {
		codes[0] = EXC_ARM_FP_UNDEFINED;
	} else if (esr & ISS_FP_UFF) {
		codes[0] = EXC_ARM_FP_UF;
	} else if (esr & ISS_FP_OFF) {
		codes[0] = EXC_ARM_FP_OF;
	} else if (esr & ISS_FP_IOF) {
		codes[0] = EXC_ARM_FP_IO;
	} else if (esr & ISS_FP_DZF) {
		codes[0] = EXC_ARM_FP_DZ;
	} else if (esr & ISS_FP_IDF) {
		codes[0] = EXC_ARM_FP_ID;
	} else if (esr & ISS_FP_IXF) {
		codes[0] = EXC_ARM_FP_IX;
	} else {
		panic("Unrecognized floating point exception, state=%p, esr=%#llx", state, esr);
	}

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}



/*
 * handle_alignment_fault_from_user:
 *   state: Saved state
 *
 * Attempts to deal with an alignment fault from userspace (possibly by
 * emulating the faulting instruction). If emulation failed due to an
 * unserviceable fault, the ESR for that fault will be stored in the
 * recovery_esr field of the thread by the exception code.
 *
 * Returns:
 *   -1:     Emulation failed (emulation of state/instr not supported)
 *   0:      Successfully emulated the instruction
 *   EFAULT: Emulation failed (probably due to permissions)
 *   EINVAL: Emulation failed (probably due to a bad address)
 */


static int
handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
{
	int ret = -1;

#pragma unused (state)
#pragma unused (vmfr)

	return ret;
}



#if HAS_ARM_FEAT_SME
static void
handle_sme_trap(arm_saved_state_t *state, uint64_t esr)
{
	exception_type_t exc = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic("SME exception from kernel, state=%p, esr=%#llx", state, esr);
	}
	if (!arm_sme_version()) {
		/*
		 * If SME is disabled in software but userspace executes an SME
		 * instruction anyway, then the CPU will still raise an
		 * SME-specific trap. Triage it as if the CPU raised an
		 * undefined-instruction trap.
		 */
		exception_triage(exc, codes, numcodes);
		__builtin_unreachable();
	}

	if (ISS_SME_SMTC(ESR_ISS(esr)) == ISS_SME_SMTC_CAPCR) {
		thread_t thread = current_thread();
		switch (machine_thread_sme_state_alloc(thread)) {
		case KERN_SUCCESS:
			return;


		default:
			panic("Failed to allocate SME state for thread %p", thread);
		}
	}

	uint32_t instr;
	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
#endif /* HAS_ARM_FEAT_SME */

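/*
 * Handle a hardware single-step debug exception from user mode: clear the
 * MDSCR_EL1 single-step bit in the thread's debug state, clear the
 * single-step and interrupt-mask bits in the user saved CPSR for the next
 * exception return, and report the canonical gdb single-step event
 * (EXC_BREAKPOINT with codes {1, 0}).
 */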
1662 static void
handle_sw_step_debug(arm_saved_state_t * state)1663 handle_sw_step_debug(arm_saved_state_t *state)
1664 {
1665 thread_t thread = current_thread();
1666 exception_type_t exc;
1667 mach_exception_data_type_t codes[2];
1668 mach_msg_type_number_t numcodes = 2;
1669
1670 if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
1671 panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
1672 }
1673
1674 // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
1675 if (thread->machine.DebugData != NULL) {
1676 thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
1677 } else {
1678 panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
1679 }
1680
1681 mask_user_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_ALL);
1682
1683 // Special encoding for gdb single step event on ARM
1684 exc = EXC_BREAKPOINT;
1685 codes[0] = 1;
1686 codes[1] = 0;
1687
1688 exception_triage(exc, codes, numcodes);
1689 __builtin_unreachable();
1690 }
1691
1692 #if MACH_ASSERT
1693 TUNABLE_WRITEABLE(int, panic_on_jit_guard, "panic_on_jit_guard", 0);
1694 #endif /* MACH_ASSERT */
1695
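/*
 * handle_user_abort:
 *   Top-level handler for data/instruction aborts taken from userspace. VM
 *   faults are first tried as pmap ref/modify fast faults and then driven
 *   through vm_fault(); alignment faults go through the emulation path
 *   above; anything left unresolved is delivered to the task as a Mach
 *   exception.
 */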
1696 static void
1697 handle_user_abort(arm_saved_state_t *state, uint64_t esr, vm_offset_t fault_addr,
1698 fault_status_t fault_code, vm_prot_t fault_type, expected_fault_handler_t expected_fault_handler)
1699 {
1700 exception_type_t exc = EXC_BAD_ACCESS;
1701 mach_exception_data_type_t codes[2];
1702 mach_msg_type_number_t numcodes = 2;
1703 thread_t thread = current_thread();
1704
1705 (void)expected_fault_handler;
1706
1707 if (__improbable(!SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state)))) {
1708 panic_with_thread_kernel_state("User abort from non-interruptible context", state);
1709 }
1710
1711 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */
1712
1713 if (!is_servicible_fault(fault_code, esr) &&
1714 thread->t_rr_state.trr_fault_state != TRR_FAULT_NONE) {
1715 thread_reset_pcs_done_faulting(thread);
1716 }
1717
1718 if (is_vm_fault(fault_code)) {
1719 vm_map_t map = thread->map;
1720 vm_offset_t vm_fault_addr = fault_addr;
1721 kern_return_t result = KERN_FAILURE;
1722
1723 assert(map != kernel_map);
1724
1725 if (!(fault_type & VM_PROT_EXECUTE)) {
1726 vm_fault_addr = VM_USER_STRIP_TBI(fault_addr);
1727 }
1728
1729 /* check to see if it is just a pmap ref/modify fault */
1730 if (!is_translation_fault(fault_code)) {
1731 result = arm_fast_fault(map->pmap,
1732 vm_fault_addr,
1733 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
1734 }
1735 if (result != KERN_SUCCESS) {
1736
1737 {
1738 /* We have to fault the page in */
1739 result = vm_fault(map, vm_fault_addr, fault_type,
1740 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
1741 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1742 }
1743 }
1744 if (thread->t_rr_state.trr_fault_state != TRR_FAULT_NONE) {
1745 thread_reset_pcs_done_faulting(thread);
1746 }
1747 if (result == KERN_SUCCESS || result == KERN_ABORTED) {
1748 return;
1749 }
1750
1751 /*
1752 * vm_fault() should never return KERN_FAILURE for page faults from user space.
1753 * If it does, we're leaking preemption disables somewhere in the kernel.
1754 */
1755 if (__improbable(result == KERN_FAILURE)) {
1756 panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1757 }
1758
1759 codes[0] = result;
1760 } else if (is_alignment_fault(fault_code)) {
1761 kern_return_t vmfkr = KERN_SUCCESS;
1762 thread->machine.recover_esr = 0;
1763 thread->machine.recover_far = 0;
1764 int result = handle_alignment_fault_from_user(state, &vmfkr);
1765 if (result == 0) {
1766 /* Either the instruction was successfully emulated, or the
1767 * instruction copyin() for decode/emulation failed.
1768 * Continue, or redrive the instruction.
1769 */
1770 thread_exception_return();
1771 } else if (((result == EFAULT) || (result == EINVAL)) &&
1772 (thread->machine.recover_esr == 0)) {
1773 /*
1774 * If we didn't actually take a fault, but got one of
1775 * these errors, then we failed basic sanity checks of
1776 * the fault address. Treat this as an invalid
1777 * address.
1778 */
1779 codes[0] = KERN_INVALID_ADDRESS;
1780 } else if ((result == EFAULT) &&
1781 (thread->machine.recover_esr)) {
1782 /*
1783 * Since alignment aborts are prioritized
1784 * ahead of translation aborts, the misaligned
1785 * atomic emulation flow may have triggered a
1786 * VM pagefault, which the VM could not resolve.
1787 * Report the VM fault error in codes[]
1788 */
1789
1790 codes[0] = vmfkr;
1791 assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
1792 /* Cause ESR_EC to reflect an EL0 abort */
1793 thread->machine.recover_esr &= ~ESR_EC_MASK;
1794 thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
1795 set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
1796 set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
1797 fault_addr = thread->machine.recover_far;
1798 } else {
1799 /* This was just an unsupported alignment
1800 * exception. Misaligned atomic emulation
1801 * timeouts fall in this category.
1802 */
1803 codes[0] = EXC_ARM_DA_ALIGN;
1804 }
1805 } else if (is_parity_error(fault_code)) {
1806 #if defined(APPLE_ARM64_ARCH_FAMILY)
1807 /*
1808 * Platform errors are handled in sleh_sync before interrupts are enabled.
1809 */
1810 #else
1811 panic("User parity error.");
1812 #endif
1813 } else {
1814 codes[0] = KERN_FAILURE;
1815 }
1816
1817 #if CODE_SIGNING_MONITOR
1818 /*
1819 * If the code reaches here, it means we weren't able to resolve the fault and we're
1820 * going to be sending the task an exception. On systems which have the code signing
1821 * monitor enabled, an execute fault which cannot be handled must result in sending
1822 * a SIGKILL to the task.
1823 */
1824 if (is_vm_fault(fault_code) && (fault_type & VM_PROT_EXECUTE)) {
1825 csm_code_signing_violation(current_proc(), fault_addr);
1826 }
1827 #endif
1828
1829 codes[1] = fault_addr;
1830 #if __has_feature(ptrauth_calls)
1831 bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
1832 if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
1833 exc |= EXC_PTRAUTH_BIT;
1834 }
1835 #endif /* __has_feature(ptrauth_calls) */
1836
1837 if (user_fault_in_self_restrict_mode(thread) &&
1838 task_is_jit_exception_fatal(get_threadtask(thread))) {
1839 int flags = PX_KTRIAGE;
1840 exception_info_t info = {
1841 .os_reason = OS_REASON_SELF_RESTRICT,
1842 .exception_type = exc,
1843 .mx_code = codes[0],
1844 .mx_subcode = codes[1]
1845 };
1846
1847 #if MACH_ASSERT
1848 printf("\nGUARD_REASON_JIT exc %d codes=<0x%llx,0x%llx> syscalls %d task %p thread %p va 0x%lx code 0x%x type 0x%x esr 0x%llx\n",
1849 exc, codes[0], codes[1], thread->syscalls_unix, current_task(), thread, fault_addr, fault_code, fault_type, esr);
1850 if (panic_on_jit_guard &&
1851 current_task()->thread_count == 1 &&
1852 thread->syscalls_unix < 24) {
1853 panic("GUARD_REASON_JIT exc %d codes=<0x%llx,0x%llx> syscalls %d task %p thread %p va 0x%lx code 0x%x type 0x%x esr 0x%llx state %p j %d t %d s user 0x%llx (0x%llx) jb 0x%llx (0x%llx)",
1854 exc, codes[0], codes[1], thread->syscalls_unix, current_task(), thread, fault_addr, fault_code, fault_type, esr, state,
1855 0, 0, 0ull, 0ull,
1856 0ull, 0ull
1857 );
1858 }
1859 #endif /* MACH_ASSERT */
1860
1861 exit_with_mach_exception(current_proc(), info, flags);
1862 }
1863
1864 exception_triage(exc, codes, numcodes);
1865 __builtin_unreachable();
1866 }
1867
1868 /**
1869 * Panic because the kernel abort handler tried to apply a recovery handler that
1870 * isn't inside copyio_recover_table[].
1871 *
1872 * @param state original saved-state
1873 * @param recover invalid recovery handler
1874 */
1875 __attribute__((noreturn, used))
1876 static void
1877 panic_on_invalid_recovery_handler(arm_saved_state_t *state, struct copyio_recovery_entry *recover)
1878 {
1879 panic("attempt to set invalid recovery handler %p on kernel saved-state %p", recover, state);
1880 }
1881
1882 static void
1883 handle_kernel_abort_recover(
1884 arm_saved_state_t *state,
1885 uint64_t esr,
1886 vm_offset_t fault_addr,
1887 thread_t thread,
1888 struct copyio_recovery_entry *_Nonnull recover)
1889 {
1890 thread->machine.recover_esr = esr;
1891 thread->machine.recover_far = fault_addr;
1892 #if defined(HAS_APPLE_PAC)
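/*
 * The saved PC lives in signed thread state, so the bounds check and PC
 * update are performed together under MANIPULATE_SIGNED_THREAD_STATE: the
 * asm panics unless `recover` lies within
 * [copyio_recover_table, copyio_recover_table_end), then rebases
 * cre_recovery against the table start and stores it as the new PC.
 */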
1893 MANIPULATE_SIGNED_THREAD_STATE(state,
1894 "adrp x6, _copyio_recover_table_end@page \n"
1895 "add x6, x6, _copyio_recover_table_end@pageoff \n"
1896 "cmp %[recover], x6 \n"
1897 "b.lt 1f \n"
1898 "bl _panic_on_invalid_recovery_handler \n"
1899 "brk #0 \n"
1900 "1: \n"
1901 "adrp x6, _copyio_recover_table@page \n"
1902 "add x6, x6, _copyio_recover_table@pageoff \n"
1903 "cmp %[recover], x6 \n"
1904 "b.ge 1f \n"
1905 "bl _panic_on_invalid_recovery_handler \n"
1906 "brk #0 \n"
1907 "1: \n"
1908 "ldr x1, [%[recover], %[CRE_RECOVERY]] \n"
1909 "add x1, x1, x6 \n"
1910 "str x1, [x0, %[SS64_PC]] \n",
1911 [recover] "r"(recover),
1912 [CRE_RECOVERY] "i"(offsetof(struct copyio_recovery_entry, cre_recovery))
1913 );
1914 #else
1915 if ((uintptr_t)recover < (uintptr_t)copyio_recover_table ||
1916 (uintptr_t)recover >= (uintptr_t)copyio_recover_table_end) {
1917 panic_on_invalid_recovery_handler(state, recover);
1918 }
1919 saved_state64(state)->pc = copyio_recovery_addr(recover->cre_recovery);
1920 #endif
1921 }
1922
1923 static void
1924 handle_kernel_abort(arm_saved_state_t *state, uint64_t esr, vm_offset_t fault_addr,
1925 fault_status_t fault_code, vm_prot_t fault_type, expected_fault_handler_t expected_fault_handler)
1926 {
1927 thread_t thread = current_thread();
1928 struct copyio_recovery_entry *recover = find_copyio_recovery_entry(state);
1929
1930 #ifndef CONFIG_XNUPOST
1931 (void)expected_fault_handler;
1932 #endif /* CONFIG_XNUPOST */
1933
1934 #if CONFIG_DTRACE
1935 if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1936 if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
1937 /*
1938 * Point to next instruction, or recovery handler if set.
1939 */
1940 if (recover) {
1941 handle_kernel_abort_recover(state, esr, VM_USER_STRIP_PTR(fault_addr), thread, recover);
1942 } else {
1943 add_saved_state_pc(state, 4);
1944 }
1945 return;
1946 } else {
1947 panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
1948 }
1949 }
1950 #endif
1951
1952 if (is_vm_fault(fault_code)) {
1953 kern_return_t result = KERN_FAILURE;
1954 vm_map_t map;
1955 int interruptible;
1956
1957 /*
1958 * Ensure there are no faults in the physical aperture. This could
1959 * happen if a page table is incorrectly allocated from the read-only
1960 * region when running with KTRR.
1961 */
1962
1963 #ifdef CONFIG_XNUPOST
1964 if (expected_fault_handler && expected_fault_handler(state)) {
1965 return;
1966 }
1967 #endif /* CONFIG_XNUPOST */
1968
1969 if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
1970 panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
1971 }
1972
1973 if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL || recover == 0) {
1974 /*
1975 * If no recovery handler is supplied, always drive the fault against
1976 * the kernel map. If the fault was taken against a userspace VA, indicating
1977 * an unprotected access to user address space, vm_fault() should fail and
1978 * ultimately lead to a panic here.
1979 */
1980 map = kernel_map;
1981 interruptible = THREAD_UNINT;
1982
1983 #if CONFIG_KERNEL_TAGGING
1984 /*
1985 * If kernel tagging is enabled, canonicalize the address here, so that we have a
1986 * chance to find it in the VM ranges. Do not mess with exec fault cases.
1987 */
1988 if (!((fault_type) & VM_PROT_EXECUTE)) {
1989 fault_addr = vm_memtag_canonicalize_address(fault_addr);
1990 }
1991 #endif /* CONFIG_KERNEL_TAGGING */
1992 } else {
1993 map = thread->map;
1994
1995 /**
1996 * In the case that the recovery handler is set (e.g., during copyio
1997 * and dtrace probes), we don't want the vm_fault() operation to be
1998 * aborted early. Those code paths can't handle restarting the
1999 * vm_fault() operation so don't allow it to return early without
2000 * creating the wanted mapping.
2001 */
2002 interruptible = (recover) ? THREAD_UNINT : THREAD_ABORTSAFE;
2003
2004 }
2005
2006 if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
2007 panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
2008 }
2009
2010 /* check to see if it is just a pmap ref/modify fault */
2011 if (!is_translation_fault(fault_code)) {
2012 result = arm_fast_fault(map->pmap,
2013 fault_addr,
2014 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
2015 if (result == KERN_SUCCESS) {
2016 return;
2017 }
2018 }
2019
2020 /**
2021 * vm_fault() can be called with preemption disabled (and indeed this is expected for
2022 * certain copyio() scenarios), but can't safely be called with interrupts disabled once
2023 * the system has gone multi-threaded. Other than some early-boot situations such as
2024 * startup kext loading, kernel paging operations should never be triggered by
2025 * non-interruptible code in the first place, so a fault from such a context will
2026 * ultimately produce a kernel data abort panic anyway. In these cases, skip calling
2027 * vm_fault() to avoid masking the real kernel panic with a failed VM locking assertion.
2028 */
2029 if (__probable(SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state)) ||
2030 startup_phase < STARTUP_SUB_EARLY_BOOT ||
2031 current_cpu_datap()->cpu_hibernate)) {
2032 if (result != KERN_PROTECTION_FAILURE) {
2033 // VM will query this property when deciding whether to throttle this fault; we don't
2034 // want kernel faults taken during copyio to be throttled. The presence of a recovery
2035 // entry is used as a proxy for being in copyio code.
2036 bool const was_recover = thread->recover;
2037 thread->recover = was_recover || recover;
2038
2039 /*
2040 * We have to "fault" the page in.
2041 */
2042 result = vm_fault(map, fault_addr, fault_type,
2043 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
2044 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
2045
2046 thread->recover = was_recover;
2047 }
2048
2049 if (result == KERN_SUCCESS) {
2050 return;
2051 }
2052 }
2053
2054 /*
2055 * If we have a recovery handler, invoke it now.
2056 */
2057 if (recover) {
2058 handle_kernel_abort_recover(state, esr, fault_addr, thread, recover);
2059 return;
2060 }
2061
2062 panic_fault_address = fault_addr;
2063 } else if (is_alignment_fault(fault_code)) {
2064 if (recover) {
2065 handle_kernel_abort_recover(state, esr, fault_addr, thread, recover);
2066 return;
2067 }
2068 panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
2069 } else if (is_parity_error(fault_code)) {
2070 #if defined(APPLE_ARM64_ARCH_FAMILY)
2071 /*
2072 * Platform errors are handled in sleh_sync before interrupts are enabled.
2073 */
2074 #else
2075 panic_with_thread_kernel_state("Kernel parity error.", state);
2076 #endif
2077 } else {
2078 kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
2079 }
2080
2081 panic_with_thread_kernel_state("Kernel data abort.", state);
2082 }
2083
2084 extern void syscall_trace(struct arm_saved_state * regs);
2085
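/*
 * handle_svc:
 *   Dispatches an SVC taken from userspace. The platform syscall trap number
 *   is checked first; otherwise, negative trap numbers select Mach traps
 *   (with fast paths for the absolute/continuous time traps below) and
 *   non-negative trap numbers select BSD/Unix syscalls.
 */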
2086 static void
2087 handle_svc(arm_saved_state_t *state)
2088 {
2089 int trap_no = get_saved_state_svc_number(state);
2090 thread_t thread = current_thread();
2091 struct proc *p;
2092
2093 #define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */
2094
2095 #define TRACE_SYSCALL 1
2096 #if TRACE_SYSCALL
2097 syscall_trace(state);
2098 #endif
2099
2100 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */
2101
2102 if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
2103 platform_syscall(state);
2104 panic("Returned from platform_syscall()?");
2105 }
2106
2107 current_cached_proc_cred_update();
2108
2109 if (trap_no < 0) {
2110 switch (trap_no) {
2111 case MACH_ARM_TRAP_ABSTIME:
2112 handle_mach_absolute_time_trap(state);
2113 return;
2114 case MACH_ARM_TRAP_CONTTIME:
2115 handle_mach_continuous_time_trap(state);
2116 return;
2117 }
2118
2119 /* Counting is perhaps better done in the handler, but this is how it has been done */
2120 thread->syscalls_mach++;
2121 mach_syscall(state);
2122 } else {
2123 /* Counting is perhaps better done in the handler, but this is how it has been done */
2124 thread->syscalls_unix++;
2125 p = get_bsdthreadtask_info(thread);
2126
2127 assert(p);
2128
2129 unix_syscall(state, thread, p);
2130 }
2131 }
2132
2133 static void
2134 handle_mach_absolute_time_trap(arm_saved_state_t *state)
2135 {
2136 uint64_t now = mach_absolute_time();
2137 saved_state64(state)->x[0] = now;
2138 }
2139
2140 static void
2141 handle_mach_continuous_time_trap(arm_saved_state_t *state)
2142 {
2143 uint64_t now = mach_continuous_time();
2144 saved_state64(state)->x[0] = now;
2145 }
2146
2147
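/*
 * handle_msr_trap:
 *   A userspace MSR/MRS access has trapped to the kernel. Deliver it as an
 *   EXC_BAD_INSTRUCTION (EXC_ARM_UNDEFINED) Mach exception with the faulting
 *   opcode in codes[1].
 */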
2148 __attribute__((noreturn))
2149 static void
2150 handle_msr_trap(arm_saved_state_t *state, uint64_t esr)
2151 {
2152 exception_type_t exception = EXC_BAD_INSTRUCTION;
2153 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
2154 mach_msg_type_number_t numcodes = 2;
2155 uint32_t instr = 0;
2156
2157 if (!is_saved_state64(state)) {
2158 panic("MSR/MRS trap (ESR 0x%llx) from 32-bit state", esr);
2159 }
2160
2161 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
2162 panic("MSR/MRS trap (ESR 0x%llx) from kernel", esr);
2163 }
2164
2165 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
2166 codes[1] = instr;
2167
2168 exception_triage(exception, codes, numcodes);
2169 __builtin_unreachable();
2170 }
2171
2172 #if __has_feature(ptrauth_calls)
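/*
 * Helpers that decode the faulting AUTxx/BxRAx/RETAx opcode into a
 * human-readable register name for the PAC-failure panic messages below.
 */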
2173 static void
2174 stringify_gpr(unsigned int r, char reg[4])
2175 {
2176 switch (r) {
2177 case 29:
2178 strncpy(reg, "fp", 4);
2179 return;
2180
2181 case 30:
2182 strncpy(reg, "lr", 4);
2183 return;
2184
2185 case 31:
2186 strncpy(reg, "xzr", 4);
2187 return;
2188
2189 default:
2190 snprintf(reg, 4, "x%u", r);
2191 return;
2192 }
2193 }
2194
2195 static void
2196 autxx_instruction_extract_reg(uint32_t instr, char reg[4])
2197 {
2198 unsigned int rd = ARM64_INSTR_AUTxx_RD_GET(instr);
2199 stringify_gpr(rd, reg);
2200 }
2201
2202 static const char *
2203 autix_system_instruction_extract_reg(uint32_t instr)
2204 {
2205 unsigned int crm_op2 = ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_GET(instr);
2206 if (crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIA1716 ||
2207 crm_op2 == ARM64_INSTR_AUTIx_SYSTEM_CRM_OP2_AUTIB1716) {
2208 return "x17";
2209 } else {
2210 return "lr";
2211 }
2212 }
2213
2214 static void
2215 bxrax_instruction_extract_reg(uint32_t instr, char reg[4])
2216 {
2217 unsigned int rn = ARM64_INSTR_BxRAx_RN_GET(instr);
2218 stringify_gpr(rn, reg);
2219 }
2220
2221 static void
2222 handle_pac_fail(arm_saved_state_t *state, uint64_t esr)
2223 {
2224 exception_type_t exception = EXC_BAD_ACCESS | EXC_PTRAUTH_BIT;
2225 mach_exception_data_type_t codes[2] = {EXC_ARM_PAC_FAIL};
2226 mach_msg_type_number_t numcodes = 2;
2227 uint32_t instr = 0;
2228
2229 if (!is_saved_state64(state)) {
2230 panic("PAC failure (ESR 0x%llx) from 32-bit state", esr);
2231 }
2232
2233 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
2234
2235 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
2236 #define GENERIC_PAC_FAILURE_MSG_FMT "PAC failure from kernel with %s key"
2237 #define AUTXX_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT " while authing %s"
2238 #define BXRAX_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT " while branching to %s"
2239 #define RETAX_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT " while returning"
2240 #define GENERIC_MSG_FMT GENERIC_PAC_FAILURE_MSG_FMT
2241 #define MAX_PAC_MSG_FMT BXRAX_MSG_FMT
2242
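/*
 * Size the buffer for the longest format above, with each %s replaced
 * by its longest substitution (a two-character key name such as "IA",
 * a register name up to "xzr") plus a NUL terminator.
 */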
2243 char msg[strlen(MAX_PAC_MSG_FMT)
2244 - strlen("%s") + strlen("IA")
2245 - strlen("%s") + strlen("xzr")
2246 + 1];
2247 ptrauth_key key = (ptrauth_key)(esr & 0x3);
2248 const char *key_str = ptrauth_key_to_string(key);
2249
2250 if (ARM64_INSTR_IS_AUTxx(instr)) {
2251 char reg[4];
2252 autxx_instruction_extract_reg(instr, reg);
2253 snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
2254 } else if (ARM64_INSTR_IS_AUTIx_SYSTEM(instr)) {
2255 const char *reg = autix_system_instruction_extract_reg(instr);
2256 snprintf(msg, sizeof(msg), AUTXX_MSG_FMT, key_str, reg);
2257 } else if (ARM64_INSTR_IS_BxRAx(instr)) {
2258 char reg[4];
2259 bxrax_instruction_extract_reg(instr, reg);
2260 snprintf(msg, sizeof(msg), BXRAX_MSG_FMT, key_str, reg);
2261 } else if (ARM64_INSTR_IS_RETAx(instr)) {
2262 snprintf(msg, sizeof(msg), RETAX_MSG_FMT, key_str);
2263 } else {
2264 snprintf(msg, sizeof(msg), GENERIC_MSG_FMT, key_str);
2265 }
2266 panic_with_thread_kernel_state(msg, state);
2267 }
2268
2269 codes[1] = instr;
2270
2271 exception_triage(exception, codes, numcodes);
2272 __builtin_unreachable();
2273 }
2274 #endif /* __has_feature(ptrauth_calls) */
2275
2276 __attribute__((noreturn))
2277 static void
2278 handle_bti_fail(arm_saved_state_t *state, uint64_t esr)
2279 {
2280 uint32_t btype = (uint32_t) esr & ISS_BTI_BTYPE_MASK;
2281
2282 if (!is_saved_state64(state)) {
2283 /* BTI is an AArch64-only feature, so this should not be possible */
2284 panic("BTI failure for 32-bit state? (ESR=0x%llx)", esr);
2285 }
2286
2287 /*
2288 * We currently only expect BTI to be enabled for kernel pages, so panic if
2289 * we detect otherwise.
2290 */
2291 if (!PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
2292 panic("Unexpected non-kernel BTI failure? (ESR=0x%llx)", esr);
2293 }
2294
2295 #define BTI_FAIL_PTR_FMT "%04x"
2296 #define BTI_FAIL_MSG_FMT "Kernel BTI failure (BTYPE=0x" BTI_FAIL_PTR_FMT ")"
2297 /* Size the buffer with the format specifier replaced by up to 8 hex digits, plus a NUL terminator */
2298 char msg[strlen(BTI_FAIL_MSG_FMT) - strlen(BTI_FAIL_PTR_FMT) + 8 + 1];
2299 snprintf(msg, sizeof(msg), BTI_FAIL_MSG_FMT, btype);
2300 panic_with_thread_kernel_state(msg, state);
2301 __builtin_unreachable();
2302 }
2303
2304 static void
2305 handle_user_trapped_instruction32(arm_saved_state_t *state, uint64_t esr)
2306 {
2307 exception_type_t exception = EXC_BAD_INSTRUCTION;
2308 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
2309 mach_msg_type_number_t numcodes = 2;
2310 uint32_t instr;
2311
2312 if (is_saved_state64(state)) {
2313 panic("ESR (0x%llx) for instruction trapped from U32, but saved state is 64-bit.", esr);
2314 }
2315
2316 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
2317 panic("ESR (0x%llx) for instruction trapped from U32, actually came from kernel?", esr);
2318 }
2319
2320 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
2321 codes[1] = instr;
2322
2323 exception_triage(exception, codes, numcodes);
2324 __builtin_unreachable();
2325 }
2326
2327 static void
2328 handle_simd_trap(arm_saved_state_t *state, uint64_t esr)
2329 {
2330 exception_type_t exception = EXC_BAD_INSTRUCTION;
2331 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
2332 mach_msg_type_number_t numcodes = 2;
2333 uint32_t instr = 0;
2334
2335 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
2336 panic("ESR (0x%llx) for SIMD trap from userland, actually came from kernel?", esr);
2337 }
2338
2339 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
2340 codes[1] = instr;
2341
2342 exception_triage(exception, codes, numcodes);
2343 __builtin_unreachable();
2344 }
2345
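/*
 * sleh_irq:
 *   Top-level IRQ handler. Brackets the platform interrupt handler with the
 *   tracing prologue/epilogue, calls entropy_collect(), and (on MACH_ASSERT
 *   kernels) verifies that the handler preserved the preemption level.
 */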
2346 void
2347 sleh_irq(arm_saved_state_t *state)
2348 {
2349 cpu_data_t * cdp __unused = getCpuDatap();
2350 #if MACH_ASSERT
2351 int preemption_level = sleh_get_preemption_level();
2352 #endif
2353
2354
2355 sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);
2356
2357 #if USE_APPLEARMSMP
2358 PE_handle_ext_interrupt();
2359 #else
2360 /* Run the registered interrupt handler. */
2361 cdp->interrupt_handler(cdp->interrupt_target,
2362 cdp->interrupt_refCon,
2363 cdp->interrupt_nub,
2364 cdp->interrupt_source);
2365 #endif
2366
2367 entropy_collect();
2368
2369
2370 sleh_interrupt_handler_epilogue();
2371 #if MACH_ASSERT
2372 if (preemption_level != sleh_get_preemption_level()) {
2373 panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, sleh_get_preemption_level());
2374 }
2375 #endif
2376 }
2377
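/*
 * sleh_fiq:
 *   Top-level FIQ handler. Classifies the FIQ source (fast IPI, timer, or
 *   PMI, consulted in that order), dispatches the matching handler, and
 *   brackets the work with the tracing prologue/epilogue.
 */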
2378 void
2379 sleh_fiq(arm_saved_state_t *state)
2380 {
2381 unsigned int type = DBG_INTR_TYPE_UNKNOWN;
2382 #if MACH_ASSERT
2383 int preemption_level = sleh_get_preemption_level();
2384 #endif
2385
2386 #if MONOTONIC_FIQ
2387 uint64_t pmcr0 = 0, upmsr = 0;
2388 #endif /* MONOTONIC_FIQ */
2389
2390 #if defined(HAS_IPI)
2391 boolean_t is_ipi = FALSE;
2392 uint64_t ipi_sr = 0;
2393
2394 if (gFastIPI) {
2395 MRS(ipi_sr, "S3_5_C15_C1_1");
2396
2397 if (ipi_sr & ARM64_IPISR_IPI_PENDING) {
2398 is_ipi = TRUE;
2399 }
2400 }
2401
2402 if (is_ipi) {
2403 type = DBG_INTR_TYPE_IPI;
2404 } else
2405 #endif /* defined(HAS_IPI) */
2406 if (ml_get_timer_pending()) {
2407 type = DBG_INTR_TYPE_TIMER;
2408 }
2409 #if MONOTONIC_FIQ
2410 /* Consult the PMI sysregs last, after IPI/timer
2411 * classification.
2412 */
2413 else if (mt_pmi_pending(&pmcr0, &upmsr)) {
2414 type = DBG_INTR_TYPE_PMI;
2415 }
2416 #endif /* MONOTONIC_FIQ */
2417
2418 sleh_interrupt_handler_prologue(state, type);
2419
2420 #if APPLEVIRTUALPLATFORM
2421 uint64_t iar = __builtin_arm_rsr64("ICC_IAR0_EL1");
2422 #endif
2423
2424 #if defined(HAS_IPI)
2425 if (type == DBG_INTR_TYPE_IPI) {
2426 /*
2427 * Order is important here: we must ack the IPI by writing IPI_SR
2428 * before we call cpu_signal_handler(). Otherwise, there will be
2429 * a window between the completion of pending-signal processing in
2430 * cpu_signal_handler() and the ack during which a newly-issued
2431 * IPI to this CPU may be lost. ISB is required to ensure the msr
2432 * is retired before execution of cpu_signal_handler().
2433 */
2434 MSR("S3_5_C15_C1_1", ARM64_IPISR_IPI_PENDING);
2435 __builtin_arm_isb(ISB_SY);
2436 cpu_signal_handler();
2437 } else
2438 #endif /* defined(HAS_IPI) */
2439 #if MONOTONIC_FIQ
2440 if (type == DBG_INTR_TYPE_PMI) {
2441 INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
2442 mt_fiq(getCpuDatap(), pmcr0, upmsr);
2443 INTERRUPT_MASKED_DEBUG_END();
2444 } else
2445 #endif /* MONOTONIC_FIQ */
2446 {
2447 /*
2448 * We don't know that this is a timer, but we don't have insight into
2449 * the other interrupts that go down this path.
2450 */
2451
2452 cpu_data_t *cdp = getCpuDatap();
2453
2454 cdp->cpu_decrementer = -1; /* Large */
2455
2456 /*
2457 * ARM64_TODO: whether we're coming from userland is ignored right now.
2458 * We can easily thread it through, but not bothering for the
2459 * moment (AArch32 doesn't either).
2460 */
2461 INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
2462 rtclock_intr(TRUE);
2463 INTERRUPT_MASKED_DEBUG_END();
2464 }
2465
2466 #if APPLEVIRTUALPLATFORM
2467 if (iar != GIC_SPURIOUS_IRQ) {
2468 __builtin_arm_wsr64("ICC_EOIR0_EL1", iar);
2469 __builtin_arm_isb(ISB_SY);
2470 }
2471 #endif
2472
2473 sleh_interrupt_handler_epilogue();
2474 #if MACH_ASSERT
2475 if (preemption_level != sleh_get_preemption_level()) {
2476 panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, sleh_get_preemption_level());
2477 }
2478 #endif
2479 }
2480
2481 void
2482 sleh_serror(arm_context_t *context, uint64_t esr, vm_offset_t far)
2483 {
2484 task_vtimer_check(current_thread());
2485
2486 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
2487 esr, VM_KERNEL_ADDRHIDE(far));
2488 arm_saved_state_t *state = &context->ss;
2489 #if MACH_ASSERT
2490 int preemption_level = sleh_get_preemption_level();
2491 #endif
2492
2493 if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
2494 /* Sanitize FAR (only if we came from userspace) */
2495 saved_state64(state)->far = 0;
2496 }
2497
2498 ASSERT_CONTEXT_SANITY(context);
2499 arm64_platform_error(state, esr, far, PLAT_ERR_SRC_ASYNC);
2500 #if MACH_ASSERT
2501 if (preemption_level != sleh_get_preemption_level()) {
2502 panic("serror changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
2503 }
2504 #endif
2505 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
2506 esr, VM_KERNEL_ADDRHIDE(far));
2507 }
2508
2509 void
2510 mach_syscall_trace_exit(unsigned int retval,
2511 unsigned int call_number)
2512 {
2513 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2514 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
2515 DBG_FUNC_END, retval, 0, 0, 0, 0);
2516 }
2517
2518 __attribute__((noreturn))
2519 void
2520 thread_syscall_return(kern_return_t error)
2521 {
2522 thread_t thread;
2523 struct arm_saved_state *state;
2524
2525 thread = current_thread();
2526 state = get_user_regs(thread);
2527
2528 assert(is_saved_state64(state));
2529 saved_state64(state)->x[0] = error;
2530
2531 #if MACH_ASSERT
2532 kern_allocation_name_t
2533 prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
2534 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
2535 #endif /* MACH_ASSERT */
2536
2537 if (kdebug_enable) {
2538 /* Invert syscall number (negative for a mach syscall) */
2539 mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
2540 }
2541
2542 thread_exception_return();
2543 }
2544
2545 void
2546 syscall_trace(
2547 struct arm_saved_state * regs __unused)
2548 {
2549 /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
2550 }
2551
2552 static void
2553 sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
2554 {
2555 const bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
2556
2557 if (is_user) {
2558 /* Sanitize FAR (only if the interrupt occurred while the CPU was in usermode) */
2559 saved_state64(state)->far = 0;
2560 }
2561
2562 recount_enter_interrupt();
2563
2564 task_vtimer_check(current_thread());
2565
2566 uint64_t pc = is_user ? get_saved_state_pc(state) :
2567 VM_KERNEL_UNSLIDE(get_saved_state_pc(state));
2568
2569 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
2570 0, pc, is_user, type);
2571
2572 #if CONFIG_TELEMETRY
2573 if (telemetry_needs_record) {
2574 telemetry_mark_curthread(is_user, FALSE);
2575 }
2576 #endif /* CONFIG_TELEMETRY */
2577 }
2578
2579 static void
2580 sleh_interrupt_handler_epilogue(void)
2581 {
2582 #if KPERF
2583 kperf_interrupt();
2584 #endif /* KPERF */
2585 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
2586 recount_leave_interrupt();
2587 }
2588
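/*
 * sleh_invalid_stack:
 *   The kernel stack pointer was found to be bad. If SP lies within one page
 *   below the stack bottom (i.e. in the guard page), report a probable
 *   overflow; otherwise report probable corruption.
 */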
2589 void
2590 sleh_invalid_stack(arm_context_t *context, uint64_t esr __unused, vm_offset_t far __unused)
2591 {
2592 thread_t thread = current_thread();
2593 vm_offset_t kernel_stack_bottom, sp;
2594
2595 sp = get_saved_state_sp(&context->ss);
2596 vm_offset_t kstackptr = (vm_offset_t)thread->machine.kstackptr;
2597 kernel_stack_bottom = round_page(kstackptr) - KERNEL_STACK_SIZE;
2598
2599 if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
2600 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
2601 }
2602
2603 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
2604 }
2605
2606
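/*
 * Self-test for recoverable kernel breakpoints: register a descriptor for
 * TEST_RECOVERABLE_SOFT_TRAP, fire the trap via ml_recoverable_trap(), and
 * report through the sysctl test whether the handler ran.
 */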
2607 #if MACH_ASSERT
2608 static int trap_handled;
2609 static void
2610 handle_recoverable_kernel_trap(
2611 __unused void *tstate,
2612 uint16_t comment)
2613 {
2614 assert(comment == TEST_RECOVERABLE_SOFT_TRAP);
2615
2616 printf("Recoverable trap handled.\n");
2617 trap_handled = 1;
2618 }
2619
2620 KERNEL_BRK_DESCRIPTOR_DEFINE(test_desc,
2621 .type = KERNEL_BRK_TYPE_TEST,
2622 .base = TEST_RECOVERABLE_SOFT_TRAP,
2623 .max = TEST_RECOVERABLE_SOFT_TRAP,
2624 .options = KERNEL_BRK_RECOVERABLE,
2625 .handle_breakpoint = handle_recoverable_kernel_trap);
2626
2627 static int
2628 recoverable_kernel_trap_test(__unused int64_t in, int64_t *out)
2629 {
2630 ml_recoverable_trap(TEST_RECOVERABLE_SOFT_TRAP);
2631
2632 *out = trap_handled;
2633 return 0;
2634 }
2635
2636 SYSCTL_TEST_REGISTER(recoverable_kernel_trap, recoverable_kernel_trap_test);
2637
2638 #endif
2639