xref: /xnu-8019.80.24/osfmk/arm64/sleh.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <arm/caches_internal.h>
30 #include <arm/cpu_data.h>
31 #include <arm/cpu_data_internal.h>
32 #include <arm/misc_protos.h>
33 #include <arm/thread.h>
34 #include <arm/rtclock.h>
35 #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
36 #include <arm64/proc_reg.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/monotonic.h>
39 #include <arm64/instructions.h>
40 
41 #include <kern/debug.h>
42 #include <kern/socd_client.h>
43 #include <kern/thread.h>
44 #include <mach/exception.h>
45 #include <mach/arm/traps.h>
46 #include <mach/vm_types.h>
47 #include <mach/machine/thread_status.h>
48 
49 #include <machine/atomic.h>
50 #include <machine/limits.h>
51 
52 #include <pexpert/arm/protos.h>
53 
54 #include <vm/vm_page.h>
55 #include <vm/pmap.h>
56 #include <vm/vm_fault.h>
57 #include <vm/vm_kern.h>
58 
59 #include <sys/errno.h>
60 #include <sys/kdebug.h>
61 #include <kperf/kperf.h>
62 
63 #include <kern/policy_internal.h>
64 #if CONFIG_TELEMETRY
65 #include <kern/telemetry.h>
66 #endif
67 
68 #include <prng/entropy.h>
69 
70 
71 
72 
73 
74 #if CONFIG_KERNEL_TBI && KASAN_TBI
75 #include <san/kasan.h>
76 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
77 
78 #if CONFIG_UBSAN_MINIMAL
79 #include <san/ubsan_minimal.h>
80 #endif /* CONFIG_UBSAN_MINIMAL */
81 
82 #ifndef __arm64__
83 #error Should only be compiling for arm64.
84 #endif
85 
86 #define TEST_CONTEXT32_SANITY(context) \
87 	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
88 	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)
89 
90 #define TEST_CONTEXT64_SANITY(context) \
91 	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
92 	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)
93 
94 #define ASSERT_CONTEXT_SANITY(context) \
95 	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
96 
97 
98 #define COPYIN(src, dst, size)                           \
99 	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
100 	copyin_kern(src, dst, size) :                    \
101 	copyin(src, dst, size)
102 
103 #define COPYOUT(src, dst, size)                          \
104 	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
105 	copyout_kern(src, dst, size)                   : \
106 	copyout(src, dst, size)
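/*
 * Note: both macros assume a variable named `state` (the saved state of the
 * trapping context) is in scope at the expansion site.  If the exception was
 * taken from EL1, the address being copied from/to is a kernel address
 * (e.g. COPYIN of the instruction word at a kernel PC), so the
 * kernel-to-kernel copy variants are selected.
 */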
107 
108 // Below is for stringifying a macro param so it can be concatenated with a string literal
109 #define STR1(x) #x
110 #define STR(x) STR1(x)
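/*
 * Example (illustrative): given "#define COMMENT 0xF", STR(COMMENT) expands
 * to "0xF", whereas STR1(COMMENT) would yield "COMMENT"; the extra level of
 * indirection forces macro expansion before stringization.
 */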
111 
112 #define ARM64_KDBG_CODE_KERNEL (0 << 8)
113 #define ARM64_KDBG_CODE_USER   (1 << 8)
114 #define ARM64_KDBG_CODE_GUEST  (2 << 8)
115 
116 _Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
117 _Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");
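/*
 * The tracepoints emitted below use (ARM64_KDBG_CODE_* | ESR exception
 * class), so the class sits in the low byte and the source (kernel=0,
 * user=1, guest=2) in the next; the asserts above keep that packing within
 * kdebug's 16-bit code field.
 */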
118 
119 void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;
120 
121 void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
122 void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
123 
124 
125 
126 void sleh_irq(arm_saved_state_t *);
127 void sleh_fiq(arm_saved_state_t *);
128 void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
129 void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;
130 
131 static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
132 static void sleh_interrupt_handler_epilogue(void);
133 
134 static void handle_svc(arm_saved_state_t *);
135 static void handle_mach_absolute_time_trap(arm_saved_state_t *);
136 static void handle_mach_continuous_time_trap(arm_saved_state_t *);
137 
138 static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);
139 
140 extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);
141 
142 static void handle_uncategorized(arm_saved_state_t *);
143 
144 /*
145  * For UBSan trap and continue handling, we must be able to recover
146  * from handle_kernel_breakpoint().
147  */
148 #if !CONFIG_UBSAN_MINIMAL
149 __dead2
150 #endif /* CONFIG_UBSAN_MINIMAL */
151 static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t);
152 
153 static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
154 
155 typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
156 static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
157 static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);
158 
159 static int is_vm_fault(fault_status_t);
160 static int is_translation_fault(fault_status_t);
161 static int is_alignment_fault(fault_status_t);
162 
163 typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
164 static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
165 static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
166 
167 static void handle_pc_align(arm_saved_state_t *ss) __dead2;
168 static void handle_sp_align(arm_saved_state_t *ss) __dead2;
169 static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
170 static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
171 static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;
172 
173 static void handle_watchpoint(vm_offset_t fault_addr) __dead2;
174 
175 static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);
176 
177 static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;
178 
179 static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;
180 
181 extern void mach_kauth_cred_thread_update(void);
182 void   mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
183 
184 struct proc;
185 
186 typedef uint32_t arm64_instr_t;
187 
188 extern void
189 unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct proc * proc);
190 
191 extern void
192 mach_syscall(struct arm_saved_state*);
193 
194 #if CONFIG_DTRACE
195 extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
196 extern boolean_t dtrace_tally_fault(user_addr_t);
197 
198 /*
199  * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so
200  * the trap instructions are copied and pasted over from that file. Need to
201  * keep these in sync!
202  */
203 #define FASTTRAP_ARM32_INSTR 0xe7ffdefc
204 #define FASTTRAP_THUMB32_INSTR 0xdefc
205 #define FASTTRAP_ARM64_INSTR 0xe7eeee7e
206 
207 #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
208 #define FASTTRAP_THUMB32_RET_INSTR 0xdefb
209 #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d
210 
211 /* See <rdar://problem/4613924> */
212 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
213 #endif
214 
215 
216 
217 extern void arm64_thread_exception_return(void) __dead2;
218 
219 #if defined(APPLETYPHOON)
220 #define CPU_NAME "Typhoon"
221 #elif defined(APPLETWISTER)
222 #define CPU_NAME "Twister"
223 #elif defined(APPLEHURRICANE)
224 #define CPU_NAME "Hurricane"
225 #elif defined(APPLELIGHTNING)
226 #define CPU_NAME "Lightning"
227 #else
228 #define CPU_NAME "Unknown"
229 #endif
230 
231 #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
232 #define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
233 #define ESR_WT_REASON(esr) ((esr) & 0xff)
234 
235 #define WT_REASON_NONE           0
236 #define WT_REASON_INTEGRITY_FAIL 1
237 #define WT_REASON_BAD_SYSCALL    2
238 #define WT_REASON_NOT_LOCKED     3
239 #define WT_REASON_ALREADY_LOCKED 4
240 #define WT_REASON_SW_REQ         5
241 #define WT_REASON_PT_INVALID     6
242 #define WT_REASON_PT_VIOLATION   7
243 #define WT_REASON_REG_VIOLATION  8
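
/*
 * Worked example: esr == 0xbf575401 matches ESR_WT_SERROR() (top 24 bits are
 * 0xbf5754) and ESR_WT_REASON() extracts the low byte, 0x01, i.e.
 * WT_REASON_INTEGRITY_FAIL.
 */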
244 #endif
245 
246 #if defined(HAS_IPI)
247 void cpu_signal_handler(void);
248 extern unsigned int gFastIPI;
249 #endif /* defined(HAS_IPI) */
250 
251 static arm_saved_state64_t *original_faulting_state = NULL;
252 
253 TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);
254 
255 extern vm_offset_t static_memory_end;
256 
257 static inline int
258 is_vm_fault(fault_status_t status)
259 {
260 	switch (status) {
261 	case FSC_TRANSLATION_FAULT_L0:
262 	case FSC_TRANSLATION_FAULT_L1:
263 	case FSC_TRANSLATION_FAULT_L2:
264 	case FSC_TRANSLATION_FAULT_L3:
265 	case FSC_ACCESS_FLAG_FAULT_L1:
266 	case FSC_ACCESS_FLAG_FAULT_L2:
267 	case FSC_ACCESS_FLAG_FAULT_L3:
268 	case FSC_PERMISSION_FAULT_L1:
269 	case FSC_PERMISSION_FAULT_L2:
270 	case FSC_PERMISSION_FAULT_L3:
271 		return TRUE;
272 	default:
273 		return FALSE;
274 	}
275 }
276 
277 static inline int
278 is_translation_fault(fault_status_t status)
279 {
280 	switch (status) {
281 	case FSC_TRANSLATION_FAULT_L0:
282 	case FSC_TRANSLATION_FAULT_L1:
283 	case FSC_TRANSLATION_FAULT_L2:
284 	case FSC_TRANSLATION_FAULT_L3:
285 		return TRUE;
286 	default:
287 		return FALSE;
288 	}
289 }
290 
291 static inline int
292 is_permission_fault(fault_status_t status)
293 {
294 	switch (status) {
295 	case FSC_PERMISSION_FAULT_L1:
296 	case FSC_PERMISSION_FAULT_L2:
297 	case FSC_PERMISSION_FAULT_L3:
298 		return TRUE;
299 	default:
300 		return FALSE;
301 	}
302 }
303 
304 static inline int
305 is_alignment_fault(fault_status_t status)
306 {
307 	return status == FSC_ALIGNMENT_FAULT;
308 }
309 
310 static inline int
311 is_parity_error(fault_status_t status)
312 {
313 	switch (status) {
314 	case FSC_SYNC_PARITY:
315 	case FSC_ASYNC_PARITY:
316 	case FSC_SYNC_PARITY_TT_L1:
317 	case FSC_SYNC_PARITY_TT_L2:
318 	case FSC_SYNC_PARITY_TT_L3:
319 		return TRUE;
320 	default:
321 		return FALSE;
322 	}
323 }
324 
325 __dead2 __unused
326 static void
327 arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
328 {
329 #pragma unused (state, esr, far)
330 	panic_plain("Unhandled implementation specific error\n");
331 }
332 
333 #if CONFIG_KERNEL_INTEGRITY
334 #pragma clang diagnostic push
335 #pragma clang diagnostic ignored "-Wunused-parameter"
336 static void
337 kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
338 {
339 #if defined(KERNEL_INTEGRITY_WT)
340 #if (DEVELOPMENT || DEBUG)
341 	if (ESR_WT_SERROR(esr)) {
342 		switch (ESR_WT_REASON(esr)) {
343 		case WT_REASON_INTEGRITY_FAIL:
344 			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
345 		case WT_REASON_BAD_SYSCALL:
346 			panic_plain("Kernel integrity, bad syscall.");
347 		case WT_REASON_NOT_LOCKED:
348 			panic_plain("Kernel integrity, not locked.");
349 		case WT_REASON_ALREADY_LOCKED:
350 			panic_plain("Kernel integrity, already locked.");
351 		case WT_REASON_SW_REQ:
352 			panic_plain("Kernel integrity, software request.");
353 		case WT_REASON_PT_INVALID:
354 			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
355 			    "walking 0x%016lx.", far);
356 		case WT_REASON_PT_VIOLATION:
357 			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
358 			    far);
359 		case WT_REASON_REG_VIOLATION:
360 			panic_plain("Kernel integrity, violation in system register %d.",
361 			    (unsigned) far);
362 		default:
363 			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
364 		}
365 	}
366 #else
367 	if (ESR_WT_SERROR(esr)) {
368 		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
369 	}
370 #endif
371 #endif
372 }
373 #pragma clang diagnostic pop
374 #endif
375 
376 static void
377 arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
378 {
379 #if CONFIG_KERNEL_INTEGRITY
380 	kernel_integrity_error_handler(esr, far);
381 #endif
382 
383 	cpu_data_t *cdp = getCpuDatap();
384 
385 	if (PE_handle_platform_error(far)) {
386 		return;
387 	} else if (cdp->platform_error_handler != NULL) {
388 		cdp->platform_error_handler(cdp->cpu_id, far);
389 	} else {
390 		arm64_implementation_specific_error(state, esr, far);
391 	}
392 }
393 
394 void
395 panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
396 {
397 	boolean_t ss_valid;
398 
399 	ss_valid = is_saved_state64(ss);
400 	arm_saved_state64_t *state = saved_state64(ss);
401 
402 	os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);
403 
404 	static int twice = 2;
405 	if (twice > 0) {
406 		twice--;
407 		SOCD_TRACE_XNU(PANIC_ASYNC, ADDR(state->pc), VALUE(state->esr), PACK_2X32(VALUE(state->cpsr), VALUE(ss_valid)), VALUE(state->far));
408 	}
409 
410 	panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
411 	    "\t  x0:  0x%016llx x1:  0x%016llx  x2:  0x%016llx  x3:  0x%016llx\n"
412 	    "\t  x4:  0x%016llx x5:  0x%016llx  x6:  0x%016llx  x7:  0x%016llx\n"
413 	    "\t  x8:  0x%016llx x9:  0x%016llx  x10: 0x%016llx  x11: 0x%016llx\n"
414 	    "\t  x12: 0x%016llx x13: 0x%016llx  x14: 0x%016llx  x15: 0x%016llx\n"
415 	    "\t  x16: 0x%016llx x17: 0x%016llx  x18: 0x%016llx  x19: 0x%016llx\n"
416 	    "\t  x20: 0x%016llx x21: 0x%016llx  x22: 0x%016llx  x23: 0x%016llx\n"
417 	    "\t  x24: 0x%016llx x25: 0x%016llx  x26: 0x%016llx  x27: 0x%016llx\n"
418 	    "\t  x28: 0x%016llx fp:  0x%016llx  lr:  0x%016llx  sp:  0x%016llx\n"
419 	    "\t  pc:  0x%016llx cpsr: 0x%08x         esr: 0x%08x          far: 0x%016llx\n",
420 	    msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
421 	    state->x[0], state->x[1], state->x[2], state->x[3],
422 	    state->x[4], state->x[5], state->x[6], state->x[7],
423 	    state->x[8], state->x[9], state->x[10], state->x[11],
424 	    state->x[12], state->x[13], state->x[14], state->x[15],
425 	    state->x[16], state->x[17], state->x[18], state->x[19],
426 	    state->x[20], state->x[21], state->x[22], state->x[23],
427 	    state->x[24], state->x[25], state->x[26], state->x[27],
428 	    state->x[28], state->fp, state->lr, state->sp,
429 	    state->pc, state->cpsr, state->esr, state->far);
430 }
431 
432 void
433 sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
434 {
435 	esr_exception_class_t  class = ESR_EC(esr);
436 	arm_saved_state_t    * state = &context->ss;
437 
438 	switch (class) {
439 	case ESR_EC_UNCATEGORIZED:
440 	{
441 		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
442 		if (IS_ARM_GDB_TRAP(instr)) {
443 			DebuggerCall(EXC_BREAKPOINT, state);
444 		}
445 	}
446 		OS_FALLTHROUGH; // panic if we return from the debugger
447 	default:
448 		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
449 	}
450 }
451 
452 
453 __attribute__((noreturn))
454 void
455 thread_exception_return()
456 {
457 	thread_t thread = current_thread();
458 	if (thread->machine.exception_trace_code != 0) {
459 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
460 		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
461 		thread->machine.exception_trace_code = 0;
462 	}
463 
464 	arm64_thread_exception_return();
465 	__builtin_unreachable();
466 }
467 
468 /*
469  * check whether task vtimers are running and set thread and CPU BSD AST
470  *
471  * must be called with interrupts masked so updates of fields are atomic
472  * must be emitted inline to avoid generating an FBT probe on the exception path
473  *
474  */
475 __attribute__((__always_inline__))
476 static inline void
477 task_vtimer_check(thread_t thread)
478 {
479 	task_t task = get_threadtask_early(thread);
480 
481 	if (__improbable(task != NULL && task->vtimers)) {
482 		thread->ast |= AST_BSD;
483 		thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
484 	}
485 }
486 
487 #if MACH_ASSERT
488 /**
489  * A version of get_preemption_level() that works in early boot.
490  *
491  * If an exception is raised in early boot before the initial thread has been
492  * set up, then calling get_preemption_level() in the SLEH will trigger an
493  * infinitely-recursing exception. This function handles this edge case.
494  */
495 static inline int
496 sleh_get_preemption_level(void)
497 {
498 	if (__improbable(current_thread() == NULL)) {
499 		return 0;
500 	}
501 	return get_preemption_level();
502 }
503 #endif // MACH_ASSERT
504 
505 void
506 sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
507 {
508 	esr_exception_class_t  class   = ESR_EC(esr);
509 	arm_saved_state_t    * state   = &context->ss;
510 	vm_offset_t            recover = 0;
511 	thread_t               thread  = current_thread();
512 #if MACH_ASSERT
513 	int                    preemption_level = sleh_get_preemption_level();
514 #endif
515 	expected_fault_handler_t expected_fault_handler = NULL;
516 #ifdef CONFIG_XNUPOST
517 	expected_fault_handler_t saved_expected_fault_handler = NULL;
518 	uintptr_t saved_expected_fault_addr = 0;
519 #endif /* CONFIG_XNUPOST */
520 
521 	ASSERT_CONTEXT_SANITY(context);
522 
523 	task_vtimer_check(thread);
524 
525 #if CONFIG_DTRACE
526 	/*
527 	 * Handle kernel DTrace probes as early as possible to minimize the likelihood
528 	 * that this path will itself trigger a DTrace probe, which would lead to infinite
529 	 * probe recursion.
530 	 */
531 	if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
532 	    (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
533 		return;
534 	}
535 #endif
536 	bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
537 
538 	/*
539 	 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
540 	 * that would disclose the behavior of PT_DENY_ATTACH processes.
541 	 */
542 	if (is_user) {
543 		thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
544 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
545 		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
546 		    esr, far, get_saved_state_pc(state), 0, 0);
547 	} else {
548 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
549 		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
550 		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
551 	}
552 
553 	if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
554 		/*
555 		 * We no longer support 32-bit, which means no 2-byte
556 		 * instructions.
557 		 */
558 		if (is_user) {
559 			panic("Exception on 2-byte instruction, "
560 			    "context=%p, esr=%#x, far=%p",
561 			    context, esr, (void *)far);
562 		} else {
563 			panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
564 		}
565 	}
566 
567 	/* Don't run exception handler with recover handler set in case of double fault */
568 	if (thread->recover) {
569 		recover = thread->recover;
570 		thread->recover = (vm_offset_t)NULL;
571 	}
572 
573 #ifdef CONFIG_XNUPOST
574 	if (thread->machine.expected_fault_handler != NULL) {
575 		saved_expected_fault_handler = thread->machine.expected_fault_handler;
576 		saved_expected_fault_addr = thread->machine.expected_fault_addr;
577 
578 		thread->machine.expected_fault_handler = NULL;
579 		thread->machine.expected_fault_addr = 0;
580 
581 		if (saved_expected_fault_addr == far) {
582 			expected_fault_handler = saved_expected_fault_handler;
583 		}
584 	}
585 #endif /* CONFIG_XNUPOST */
586 
587 	/* Inherit the interrupt masks from previous context */
588 	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
589 		ml_set_interrupts_enabled(TRUE);
590 	}
591 
592 	switch (class) {
593 	case ESR_EC_SVC_64:
594 		if (!is_saved_state64(state) || !is_user) {
595 			panic("Invalid SVC_64 context");
596 		}
597 
598 		handle_svc(state);
599 		break;
600 
601 	case ESR_EC_DABORT_EL0:
602 		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler);
603 		break;
604 
605 	case ESR_EC_MSR_TRAP:
606 		handle_msr_trap(state, esr);
607 		break;
608 
609 
610 	case ESR_EC_IABORT_EL0:
611 		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
612 		break;
613 
614 	case ESR_EC_IABORT_EL1:
615 #ifdef CONFIG_XNUPOST
616 		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
617 			break;
618 		}
619 #endif /* CONFIG_XNUPOST */
620 
621 		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);
622 
623 	case ESR_EC_PC_ALIGN:
624 		handle_pc_align(state);
625 		__builtin_unreachable();
626 
627 	case ESR_EC_DABORT_EL1:
628 		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
629 		break;
630 
631 	case ESR_EC_UNCATEGORIZED:
632 		assert(!ESR_ISS(esr));
633 
634 		handle_uncategorized(&context->ss);
635 		break;
636 
637 	case ESR_EC_SP_ALIGN:
638 		handle_sp_align(state);
639 		__builtin_unreachable();
640 
641 	case ESR_EC_BKPT_AARCH32:
642 		handle_breakpoint(state, esr);
643 		__builtin_unreachable();
644 
645 	case ESR_EC_BRK_AARCH64:
646 		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
647 			handle_kernel_breakpoint(state, esr);
648 #if CONFIG_UBSAN_MINIMAL
649 			/* UBSan breakpoints are recoverable */
650 			break;
651 #endif /* CONFIG_UBSAN_MINIMAL */
652 		} else {
653 			handle_breakpoint(state, esr);
654 			__builtin_unreachable();
655 		}
656 
657 	case ESR_EC_BKPT_REG_MATCH_EL0:
658 		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
659 			handle_breakpoint(state, esr);
660 		}
661 		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
662 		    class, state, class, esr, (void *)far);
663 		__builtin_unreachable();
664 
665 	case ESR_EC_BKPT_REG_MATCH_EL1:
666 		panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
667 		__builtin_unreachable();
668 
669 	case ESR_EC_SW_STEP_DEBUG_EL0:
670 		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
671 			handle_sw_step_debug(state);
672 		}
673 		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
674 		    class, state, class, esr, (void *)far);
675 		__builtin_unreachable();
676 
677 	case ESR_EC_SW_STEP_DEBUG_EL1:
678 		panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
679 		__builtin_unreachable();
680 
681 	case ESR_EC_WATCHPT_MATCH_EL0:
682 		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
683 			handle_watchpoint(far);
684 		}
685 		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
686 		    class, state, class, esr, (void *)far);
687 		__builtin_unreachable();
688 
689 	case ESR_EC_WATCHPT_MATCH_EL1:
690 		/*
691 		 * If we hit a watchpoint in kernel mode, it was probably in a copyin/copyout path, which we don't
692 		 * want to abort.  Turn off watchpoints and keep going; we'll turn them back on in return_from_exception.
693 		 */
694 		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
695 			arm_debug_set(NULL);
696 			break; /* return to first level handler */
697 		}
698 		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
699 		    class, state, class, esr, (void *)far);
700 		__builtin_unreachable();
701 
702 	case ESR_EC_TRAP_SIMD_FP:
703 		handle_simd_trap(state, esr);
704 		__builtin_unreachable();
705 
706 	case ESR_EC_ILLEGAL_INSTR_SET:
707 		if (EXCB_ACTION_RERUN !=
708 		    ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
709 			// instruction is not re-executed
710 			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
711 			    state, class, esr, (void *)far, get_saved_state_cpsr(state));
712 		}
713 		// must clear this fault in PSR to re-run
714 		mask_saved_state_cpsr(state, 0, PSR64_IL);
715 		break;
716 
717 	case ESR_EC_MCR_MRC_CP15_TRAP:
718 	case ESR_EC_MCRR_MRRC_CP15_TRAP:
719 	case ESR_EC_MCR_MRC_CP14_TRAP:
720 	case ESR_EC_LDC_STC_CP14_TRAP:
721 	case ESR_EC_MCRR_MRRC_CP14_TRAP:
722 		handle_user_trapped_instruction32(state, esr);
723 		__builtin_unreachable();
724 
725 	case ESR_EC_WFI_WFE:
726 		// Use of WFI or WFE instruction when they have been disabled for EL0
727 		handle_wf_trap(state);
728 		__builtin_unreachable();
729 
730 	case ESR_EC_FLOATING_POINT_64:
731 		handle_fp_trap(state, esr);
732 		__builtin_unreachable();
733 
734 	default:
735 		handle_uncategorized(state);
736 	}
737 
738 #ifdef CONFIG_XNUPOST
739 	if (saved_expected_fault_handler != NULL) {
740 		thread->machine.expected_fault_handler = saved_expected_fault_handler;
741 		thread->machine.expected_fault_addr = saved_expected_fault_addr;
742 	}
743 #endif /* CONFIG_XNUPOST */
744 
745 	if (recover) {
746 		thread->recover = recover;
747 	}
748 	if (is_user) {
749 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
750 		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
751 		    esr, far, get_saved_state_pc(state), 0, 0);
752 		thread->machine.exception_trace_code = 0;
753 	} else {
754 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
755 		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
756 		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
757 	}
758 #if MACH_ASSERT
759 	if (preemption_level != sleh_get_preemption_level()) {
760 		panic("synchronous exception changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
761 	}
762 #endif
763 }
764 
765 /*
766  * Uncategorized exceptions are a catch-all for general execution errors.
767  * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
768  */
769 static void
770 handle_uncategorized(arm_saved_state_t *state)
771 {
772 	exception_type_t           exception = EXC_BAD_INSTRUCTION;
773 	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
774 	mach_msg_type_number_t     numcodes  = 2;
775 	uint32_t                   instr     = 0;
776 
777 	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
778 
779 #if CONFIG_DTRACE
780 
781 	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
782 		/*
783 		 * For a 64-bit user process, we care about all 4 bytes of the
784 		 * instr.
785 		 */
786 		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
787 			if (dtrace_user_probe(state) == KERN_SUCCESS) {
788 				return;
789 			}
790 		}
791 	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
792 		/*
793 		 * For a 32-bit user process, we check for Thumb mode, in
794 		 * which case we only care about a 2-byte instruction length.
795 		 * For non-Thumb mode, we care about all 4 bytes of the instruction.
796 		 */
797 		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
798 			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
799 			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
800 				if (dtrace_user_probe(state) == KERN_SUCCESS) {
801 					return;
802 				}
803 			}
804 		} else {
805 			if ((instr == FASTTRAP_ARM32_INSTR) ||
806 			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
807 				if (dtrace_user_probe(state) == KERN_SUCCESS) {
808 					return;
809 				}
810 			}
811 		}
812 	}
813 
814 #endif /* CONFIG_DTRACE */
815 
816 	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
817 		if (IS_ARM_GDB_TRAP(instr)) {
818 			boolean_t interrupt_state;
819 			exception = EXC_BREAKPOINT;
820 
821 			interrupt_state = ml_set_interrupts_enabled(FALSE);
822 
823 			/* Save off the context here (so that the debug logic
824 			 * can see the original state of this thread).
825 			 */
826 			current_thread()->machine.kpcb = state;
827 
828 			/* Hop into the debugger (typically either due to a
829 			 * fatal exception, an explicit panic, or a stackshot
830 			 * request).
831 			 */
832 			DebuggerCall(exception, state);
833 
834 			current_thread()->machine.kpcb = NULL;
835 			(void) ml_set_interrupts_enabled(interrupt_state);
836 			return;
837 		} else {
838 			panic("Undefined kernel instruction: pc=%p instr=%x", (void*)get_saved_state_pc(state), instr);
839 		}
840 	}
841 
842 	/*
843 	 * Check for GDB breakpoint via illegal opcode.
844 	 */
845 	if (IS_ARM_GDB_TRAP(instr)) {
846 		exception = EXC_BREAKPOINT;
847 		codes[0] = EXC_ARM_BREAKPOINT;
848 		codes[1] = instr;
849 	} else {
850 		codes[1] = instr;
851 	}
852 
853 	exception_triage(exception, codes, numcodes);
854 	__builtin_unreachable();
855 }
856 
857 #if __has_feature(ptrauth_calls)
858 static const uint16_t ptrauth_brk_comment_base = 0xc470;
859 
860 static inline bool
861 brk_comment_is_ptrauth(uint16_t comment)
862 {
863 	return comment >= ptrauth_brk_comment_base &&
864 	       comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
865 }
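
/*
 * i.e. ptrauth failure BRK comments are ptrauth_brk_comment_base + key
 * number, so 0xc470..0xc473 map to the IA, IB, DA and DB keys respectively
 * (see ptrauth_key_to_string() below).
 */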
866 
867 static inline const char *
868 ptrauth_key_to_string(ptrauth_key key)
869 {
870 	switch (key) {
871 	case ptrauth_key_asia:
872 		return "IA";
873 	case ptrauth_key_asib:
874 		return "IB";
875 	case ptrauth_key_asda:
876 		return "DA";
877 	case ptrauth_key_asdb:
878 		return "DB";
879 	default:
880 		__builtin_unreachable();
881 	}
882 }
883 #endif /* __has_feature(ptrauth_calls) */
884 
885 #if CONFIG_KERNEL_TBI && KASAN_TBI
886 static inline bool
887 brk_comment_is_kasan_failure(uint16_t comment)
888 {
889 	return comment >= KASAN_TBI_ESR_BASE &&
890 	       comment <= KASAN_TBI_ESR_TOP;
891 }
892 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
893 
894 #if CONFIG_UBSAN_MINIMAL
895 static inline bool
896 brk_comment_is_ubsan(uint16_t comment)
897 {
898 	return comment >= UBSAN_MINIMAL_TRAPS_START &&
899 	       comment < UBSAN_MINIMAL_TRAPS_END;
900 }
901 #endif /* CONFIG_UBSAN_MINIMAL */
902 
903 static void
904 handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
905 {
906 	uint16_t comment = ISS_BRK_COMMENT(esr);
907 
908 #if __has_feature(ptrauth_calls)
909 	if (brk_comment_is_ptrauth(comment)) {
910 		const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx";
911 		char msg[strlen(msg_fmt)
912 		- strlen("0x%04X") + strlen("0xFFFF")
913 		- strlen("%s") + strlen("IA")
914 		- strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
915 		+ 1];
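		/*
		 * msg[] is sized at compile time: each format specifier's
		 * length in msg_fmt is swapped for its worst-case expansion
		 * ("0xFFFF" for the comment, a two-character key name, a full
		 * 64-bit hex value), plus the NUL terminator.
		 */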
916 		ptrauth_key key = (ptrauth_key)(comment - ptrauth_brk_comment_base);
917 		const char *key_str = ptrauth_key_to_string(key);
918 		snprintf(msg, sizeof(msg), msg_fmt, comment, key_str, saved_state64(state)->x[16]);
919 
920 		panic_with_thread_kernel_state(msg, state);
921 		__builtin_unreachable();
922 	}
923 #endif /* __has_feature(ptrauth_calls) */
924 
925 #if CONFIG_KERNEL_TBI && KASAN_TBI
926 	if (brk_comment_is_kasan_failure(comment)) {
927 		kasan_handle_brk_failure(saved_state64(state)->x[0], comment);
928 		__builtin_unreachable();
929 	}
930 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
931 
932 #if CONFIG_UBSAN_MINIMAL
933 	if (brk_comment_is_ubsan(comment)) {
934 		ubsan_handle_brk_trap(comment, get_saved_state_pc(state),
935 		    get_saved_state_fp(state));
936 		add_saved_state_pc(state, 4);
937 		return;
938 	}
939 #endif /* CONFIG_UBSAN_MINIMAL */
940 
941 	const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Panic (by design)";
942 	char msg[strlen(msg_fmt) - strlen("0x%04X") + strlen("0xFFFF") + 1];
943 	snprintf(msg, sizeof(msg), msg_fmt, comment);
944 
945 	panic_with_thread_kernel_state(msg, state);
946 	__builtin_unreachable();
947 }
948 
949 static void
950 handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
951 {
952 	exception_type_t           exception = EXC_BREAKPOINT;
953 	mach_exception_data_type_t codes[2]  = {EXC_ARM_BREAKPOINT};
954 	mach_msg_type_number_t     numcodes  = 2;
955 
956 #if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
957 	if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
958 	    brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
959 		exception |= EXC_PTRAUTH_BIT;
960 	}
961 #endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */
962 
963 	codes[1] = get_saved_state_pc(state);
964 	exception_triage(exception, codes, numcodes);
965 	__builtin_unreachable();
966 }
967 
968 static void
969 handle_watchpoint(vm_offset_t fault_addr)
970 {
971 	exception_type_t           exception = EXC_BREAKPOINT;
972 	mach_exception_data_type_t codes[2]  = {EXC_ARM_DA_DEBUG};
973 	mach_msg_type_number_t     numcodes  = 2;
974 
975 	codes[1] = fault_addr;
976 	exception_triage(exception, codes, numcodes);
977 	__builtin_unreachable();
978 }
979 
980 static void
981 handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
982     abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
983 {
984 	fault_status_t fault_code;
985 	vm_prot_t      fault_type;
986 
987 	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
988 	handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler);
989 }
990 
991 static void
992 inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
993 {
994 	getCpuDatap()->cpu_stat.instr_ex_cnt++;
995 	*fault_code = ISS_IA_FSC(iss);
996 	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
997 }
998 
999 static void
1000 inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
1001 {
1002 	getCpuDatap()->cpu_stat.data_ex_cnt++;
1003 	*fault_code = ISS_DA_FSC(iss);
1004 
1005 	/*
1006 	 * Cache maintenance operations always report faults as write access.
1007 	 * Change these to read access, unless they report a permission fault.
1008 	 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
1009 	 * access to the mapping, but if a cache maintenance operation that only requires
1010 	 * read access generates a permission fault, then we will not be able to handle
1011 	 * the fault regardless of whether we treat it as a read or write fault.
1012 	 */
1013 	if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
1014 		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
1015 	} else {
1016 		*fault_type = (VM_PROT_READ);
1017 	}
1018 }
1019 
1020 #if __has_feature(ptrauth_calls)
1021 static inline bool
1022 fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
1023 {
1024 	return (bool)((fault_addr >> bit) & 1);
1025 }
1026 
1027 /**
1028  * Determines whether a fault address taken at EL0 contains a PAC error code
1029  * corresponding to the specified kind of ptrauth key.
1030  */
1031 static bool
1032 user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
1033 {
1034 	bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
1035 	bool tbi = data_key || __improbable(instruction_tbi);
1036 	unsigned int poison_shift;
1037 	if (tbi) {
1038 		poison_shift = 53;
1039 	} else {
1040 		poison_shift = 61;
1041 	}
1042 
1043 	/* PAC error codes are always in the form key_number:NOT(key_number) */
1044 	bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
1045 	bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
1046 	return poison_bit_1 != poison_bit_2;
1047 }
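
/*
 * Worked example (assuming TBI, so poison_shift == 53): a canonical EL0
 * pointer has bits [54:53] both clear, while a failed authentication writes
 * key_number:NOT(key_number) into those bits, so they always differ; the
 * inequality test above catches exactly that case.
 */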
1048 #endif /* __has_feature(ptrauth_calls) */
1049 
1050 static void
1051 handle_pc_align(arm_saved_state_t *ss)
1052 {
1053 	exception_type_t exc;
1054 	mach_exception_data_type_t codes[2];
1055 	mach_msg_type_number_t numcodes = 2;
1056 
1057 	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
1058 		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
1059 	}
1060 
1061 	exc = EXC_BAD_ACCESS;
1062 #if __has_feature(ptrauth_calls)
1063 	if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
1064 		exc |= EXC_PTRAUTH_BIT;
1065 	}
1066 #endif /* __has_feature(ptrauth_calls) */
1067 
1068 	codes[0] = EXC_ARM_DA_ALIGN;
1069 	codes[1] = get_saved_state_pc(ss);
1070 
1071 	exception_triage(exc, codes, numcodes);
1072 	__builtin_unreachable();
1073 }
1074 
1075 static void
1076 handle_sp_align(arm_saved_state_t *ss)
1077 {
1078 	exception_type_t exc;
1079 	mach_exception_data_type_t codes[2];
1080 	mach_msg_type_number_t numcodes = 2;
1081 
1082 	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
1083 		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
1084 	}
1085 
1086 	exc = EXC_BAD_ACCESS;
1087 #if __has_feature(ptrauth_calls)
1088 	if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
1089 		exc |= EXC_PTRAUTH_BIT;
1090 	}
1091 #endif /* __has_feature(ptrauth_calls) */
1092 
1093 	codes[0] = EXC_ARM_SP_ALIGN;
1094 	codes[1] = get_saved_state_sp(ss);
1095 
1096 	exception_triage(exc, codes, numcodes);
1097 	__builtin_unreachable();
1098 }
1099 
1100 static void
1101 handle_wf_trap(arm_saved_state_t *state)
1102 {
1103 	exception_type_t exc;
1104 	mach_exception_data_type_t codes[2];
1105 	mach_msg_type_number_t numcodes = 2;
1106 	uint32_t instr = 0;
1107 
1108 	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1109 
1110 	exc = EXC_BAD_INSTRUCTION;
1111 	codes[0] = EXC_ARM_UNDEFINED;
1112 	codes[1] = instr;
1113 
1114 	exception_triage(exc, codes, numcodes);
1115 	__builtin_unreachable();
1116 }
1117 
1118 static void
1119 handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
1120 {
1121 	exception_type_t exc = EXC_ARITHMETIC;
1122 	mach_exception_data_type_t codes[2];
1123 	mach_msg_type_number_t numcodes = 2;
1124 	uint32_t instr = 0;
1125 
1126 	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1127 		panic_with_thread_kernel_state("Floating point exception from kernel", state);
1128 	}
1129 
1130 	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1131 	codes[1] = instr;
1132 
1133 	/* The floating point trap flags are only valid if TFV is set. */
1134 	if (!fp_exceptions_enabled) {
1135 		exc = EXC_BAD_INSTRUCTION;
1136 		codes[0] = EXC_ARM_UNDEFINED;
1137 	} else if (!(esr & ISS_FP_TFV)) {
1138 		codes[0] = EXC_ARM_FP_UNDEFINED;
1139 	} else if (esr & ISS_FP_UFF) {
1140 		codes[0] = EXC_ARM_FP_UF;
1141 	} else if (esr & ISS_FP_OFF) {
1142 		codes[0] = EXC_ARM_FP_OF;
1143 	} else if (esr & ISS_FP_IOF) {
1144 		codes[0] = EXC_ARM_FP_IO;
1145 	} else if (esr & ISS_FP_DZF) {
1146 		codes[0] = EXC_ARM_FP_DZ;
1147 	} else if (esr & ISS_FP_IDF) {
1148 		codes[0] = EXC_ARM_FP_ID;
1149 	} else if (esr & ISS_FP_IXF) {
1150 		codes[0] = EXC_ARM_FP_IX;
1151 	} else {
1152 		panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
1153 	}
1154 
1155 	exception_triage(exc, codes, numcodes);
1156 	__builtin_unreachable();
1157 }
1158 
1159 
1160 
1161 /*
1162  * handle_alignment_fault_from_user:
1163  *   state: Saved state
1164  *
1165  * Attempts to deal with an alignment fault from userspace (possibly by
1166  * emulating the faulting instruction).  If emulation failed due to an
1167  * unserviceable fault, the ESR for that fault will be stored in the
1168  * recovery_esr field of the thread by the exception code.
1169  *
1170  * Returns:
1171  *   -1:     Emulation failed (emulation of state/instr not supported)
1172  *   0:      Successfully emulated the instruction
1173  *   EFAULT: Emulation failed (probably due to permissions)
1174  *   EINVAL: Emulation failed (probably due to a bad address)
1175  */
1176 
1177 
1178 static int
1179 handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
1180 {
1181 	int ret = -1;
1182 
1183 #pragma unused (state)
1184 #pragma unused (vmfr)
1185 
1186 	return ret;
1187 }
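
/*
 * Note: the emulation body appears to be elided from this source release;
 * as published, the stub always returns -1 ("emulation not supported"),
 * which handle_user_abort() below reports as EXC_ARM_DA_ALIGN.
 */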
1188 
1189 
1190 static void
1191 handle_sw_step_debug(arm_saved_state_t *state)
1192 {
1193 	thread_t thread = current_thread();
1194 	exception_type_t exc;
1195 	mach_exception_data_type_t codes[2];
1196 	mach_msg_type_number_t numcodes = 2;
1197 
1198 	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
1199 		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
1200 	}
1201 
1202 	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
1203 	if (thread->machine.DebugData != NULL) {
1204 		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
1205 	} else {
1206 		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
1207 	}
1208 
1209 	mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_ALL);
1210 
1211 	// Special encoding for gdb single step event on ARM
1212 	exc = EXC_BREAKPOINT;
1213 	codes[0] = 1;
1214 	codes[1] = 0;
1215 
1216 	exception_triage(exc, codes, numcodes);
1217 	__builtin_unreachable();
1218 }
1219 
1220 static void
1221 set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
1222 {
1223 #if defined(HAS_APPLE_PAC)
1224 	thread_t thread = current_thread();
1225 	const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
1226 	const char *panic_msg = "Illegal thread->recover value %p";
1227 
1228 	MANIPULATE_SIGNED_THREAD_STATE(iss,
1229 	    // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
1230 	    //     ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
1231 	    "mov	x1, %[recover]		\n"
1232 	    "mov	x6, %[disc]		\n"
1233 	    "autia	x1, x6			\n"
1234 	    // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
1235 	    "mov	x6, x1			\n"
1236 	    "xpaci	x6			\n"
1237 	    "cmp	x1, x6			\n"
1238 	    "beq	1f			\n"
1239 	    //         panic("Illegal thread->recover value %p", (void *)recover);
1240 	    "mov	x0, %[panic_msg]	\n"
1241 	    "bl		_panic			\n"
1242 	    // }
1243 	    "1:					\n"
1244 	    "str	x1, [x0, %[SS64_PC]]	\n",
1245 	    [recover]     "r"(recover),
1246 	    [disc]        "r"(disc),
1247 	    [panic_msg]   "r"(panic_msg)
1248 	    );
1249 #else
1250 	set_saved_state_pc(iss, recover);
1251 #endif
1252 }
1253 
1254 static void
1255 handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1256     fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
1257 {
1258 	exception_type_t           exc      = EXC_BAD_ACCESS;
1259 	mach_exception_data_type_t codes[2];
1260 	mach_msg_type_number_t     numcodes = 2;
1261 	thread_t                   thread   = current_thread();
1262 
1263 	(void)esr;
1264 	(void)expected_fault_handler;
1265 
1266 	if (ml_at_interrupt_context()) {
1267 		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
1268 	}
1269 
1270 	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */
1271 
1272 	if (is_vm_fault(fault_code)) {
1273 		vm_map_t        map = thread->map;
1274 		vm_offset_t     vm_fault_addr = fault_addr;
1275 		kern_return_t   result = KERN_FAILURE;
1276 
1277 		assert(map != kernel_map);
1278 
1279 		if (!(fault_type & VM_PROT_EXECUTE)) {
1280 			vm_fault_addr = tbi_clear(fault_addr);
1281 		}
1282 
1283 #if CONFIG_DTRACE
1284 		if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1285 			if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
1286 				if (recover) {
1287 					thread->machine.recover_esr = esr;
1288 					thread->machine.recover_far = vm_fault_addr;
1289 					set_saved_state_pc_to_recovery_handler(state, recover);
1290 				} else {
1291 					panic_with_thread_kernel_state("copyin/out has no recovery point", state);
1292 				}
1293 				return;
1294 			} else {
1295 				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
1296 			}
1297 		}
1298 #else
1299 		(void)recover;
1300 #endif
1301 
1302 		/* check to see if it is just a pmap ref/modify fault */
1303 		if (!is_translation_fault(fault_code)) {
1304 			result = arm_fast_fault(map->pmap,
1305 			    vm_fault_addr,
1306 			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
1307 		}
1308 		if (result == KERN_SUCCESS) {
1309 			return;
1310 		}
1311 
1312 		{
1313 			/* We have to fault the page in */
1314 			result = vm_fault(map, vm_fault_addr, fault_type,
1315 			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
1316 			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1317 		}
1318 		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
1319 			return;
1320 		}
1321 
1322 		/*
1323 		 * vm_fault() should never return KERN_FAILURE for page faults from user space.
1324 		 * If it does, we're leaking preemption disables somewhere in the kernel.
1325 		 */
1326 		if (__improbable(result == KERN_FAILURE)) {
1327 			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1328 		}
1329 
1330 		codes[0] = result;
1331 	} else if (is_alignment_fault(fault_code)) {
1332 		kern_return_t vmfkr = KERN_SUCCESS;
1333 		thread->machine.recover_esr = 0;
1334 		thread->machine.recover_far = 0;
1335 		int result = handle_alignment_fault_from_user(state, &vmfkr);
1336 		if (result == 0) {
1337 			/* Successfully emulated, or instruction
1338 			 * copyin() for decode/emulation failed.
1339 			 * Continue, or redrive instruction.
1340 			 */
1341 			thread_exception_return();
1342 		} else if (((result == EFAULT) || (result == EINVAL)) &&
1343 		    (thread->machine.recover_esr == 0)) {
1344 			/*
1345 			 * If we didn't actually take a fault, but got one of
1346 			 * these errors, then we failed basic sanity checks of
1347 			 * the fault address.  Treat this as an invalid
1348 			 * address.
1349 			 */
1350 			codes[0] = KERN_INVALID_ADDRESS;
1351 		} else if ((result == EFAULT) &&
1352 		    (thread->machine.recover_esr)) {
1353 			/*
1354 			 * Since alignment aborts are prioritized
1355 			 * ahead of translation aborts, the misaligned
1356 			 * atomic emulation flow may have triggered a
1357 			 * VM pagefault, which the VM could not resolve.
1358 			 * Report the VM fault error in codes[]
1359 			 */
1360 
1361 			codes[0] = vmfkr;
1362 			assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
1363 			/* Cause ESR_EC to reflect an EL0 abort */
1364 			thread->machine.recover_esr &= ~ESR_EC_MASK;
1365 			thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
1366 			set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
1367 			set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
1368 			fault_addr = thread->machine.recover_far;
1369 		} else {
1370 			/* This was just an unsupported alignment
1371 			 * exception. Misaligned atomic emulation
1372 			 * timeouts fall in this category.
1373 			 */
1374 			codes[0] = EXC_ARM_DA_ALIGN;
1375 		}
1376 	} else if (is_parity_error(fault_code)) {
1377 #if defined(APPLE_ARM64_ARCH_FAMILY)
1378 		if (fault_code == FSC_SYNC_PARITY) {
1379 			arm64_platform_error(state, esr, fault_addr);
1380 			return;
1381 		}
1382 #else
1383 		panic("User parity error.");
1384 #endif
1385 	} else {
1386 		codes[0] = KERN_FAILURE;
1387 	}
1388 
1389 	codes[1] = fault_addr;
1390 #if __has_feature(ptrauth_calls)
1391 	bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
1392 	if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
1393 		exc |= EXC_PTRAUTH_BIT;
1394 	}
1395 #endif /* __has_feature(ptrauth_calls) */
1396 	exception_triage(exc, codes, numcodes);
1397 	__builtin_unreachable();
1398 }
1399 
1400 #if __ARM_PAN_AVAILABLE__
1401 static int
1402 is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
1403 {
1404 	// PAN (Privileged Access Never) fault occurs for data read/write in EL1 to
1405 	// virtual address that is readable/writeable from both EL1 and EL0
1406 
1407 	// To check for PAN fault, we evaluate if the following conditions are true:
1408 	// 1. This is a permission fault
1409 	// 2. PAN is enabled
1410 	// 3. AT instruction (on which PAN has no effect) on the same faulting address
1411 	// succeeds
1412 
1413 	vm_offset_t pa;
1414 
1415 	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
1416 		return FALSE;
1417 	}
1418 
1419 	if (esr & ISS_DA_WNR) {
1420 		pa = mmu_kvtop_wpreflight(fault_addr);
1421 	} else {
1422 		pa = mmu_kvtop(fault_addr);
1423 	}
1424 	return (pa)? TRUE: FALSE;
1425 }
1426 #endif
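
/*
 * In other words: PAN has no effect on AT-style translation, so if
 * mmu_kvtop()/mmu_kvtop_wpreflight() can translate an address that just took
 * a permission fault while PSR.PAN was set, the fault must have been caused
 * by PAN itself rather than by the page permissions.
 */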
1427 
1428 static void
1429 handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1430     fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
1431 {
1432 	thread_t thread = current_thread();
1433 	(void)esr;
1434 
1435 #ifndef CONFIG_XNUPOST
1436 	(void)expected_fault_handler;
1437 #endif /* CONFIG_XNUPOST */
1438 
1439 #if CONFIG_DTRACE
1440 	if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1441 		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
1442 			/*
1443 			 * Point to next instruction, or recovery handler if set.
1444 			 */
1445 			if (recover) {
1446 				thread->machine.recover_esr = esr;
1447 				thread->machine.recover_far = fault_addr;
1448 				set_saved_state_pc_to_recovery_handler(state, recover);
1449 			} else {
1450 				add_saved_state_pc(state, 4);
1451 			}
1452 			return;
1453 		} else {
1454 			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
1455 		}
1456 	}
1457 #endif
1458 
	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
	}

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t      map;
		int           interruptible;

		/*
		 * Ensure no faults in the physical aperture. This could happen if
		 * a page table is incorrectly allocated from the read-only region
		 * when running with KTRR.
		 */

#ifdef CONFIG_XNUPOST
		if (expected_fault_handler && expected_fault_handler(state)) {
			return;
		}
#endif /* CONFIG_XNUPOST */

		if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
		}

		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;

			/**
			 * In the case that the recovery handler is set (e.g., during copyio
			 * and dtrace probes), we don't want the vm_fault() operation to be
			 * aborted early. Those code paths can't handle restarting the
			 * vm_fault() operation, so don't allow it to return early without
			 * creating the wanted mapping.
			 */
			interruptible = (recover) ? THREAD_UNINT : THREAD_ABORTSAFE;
		}

		/* Check to see if it is just a pmap ref/modify fault. */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap,
			    fault_addr,
			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
			if (result == KERN_SUCCESS) {
				return;
			}
		}

		if (result != KERN_PROTECTION_FAILURE) {
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) {
			return;
		}

		/*
		 * If we have a recover handler, invoke it now.
		 */
		if (recover) {
			thread->machine.recover_esr = esr;
			thread->machine.recover_far = fault_addr;
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
			panic_with_thread_kernel_state("Privileged access never abort.", state);
		}
#endif
	} else if (is_alignment_fault(fault_code)) {
		if (recover) {
			thread->machine.recover_esr = esr;
			thread->machine.recover_far = fault_addr;
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

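/*
 * handle_svc: entry point for SVC (syscall) exceptions taken from userspace.
 * The SVC number is recovered from the saved state (held in x16 by
 * convention; see the commented-out trace in syscall_trace() below).
 * Negative trap numbers are Mach traps, with the two fast time-query traps
 * handled inline; non-negative numbers are BSD/Unix syscalls.
 */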
1564 static void
handle_svc(arm_saved_state_t * state)1565 handle_svc(arm_saved_state_t *state)
1566 {
1567 	int      trap_no = get_saved_state_svc_number(state);
1568 	thread_t thread  = current_thread();
1569 	struct   proc *p;
1570 
1571 #define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */
1572 
1573 #define TRACE_SYSCALL 1
1574 #if TRACE_SYSCALL
1575 	syscall_trace(state);
1576 #endif
1577 
1578 	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */
1579 
1580 	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
1581 		platform_syscall(state);
1582 		panic("Returned from platform_syscall()?");
1583 	}
1584 
1585 	mach_kauth_cred_thread_update();
1586 
1587 	if (trap_no < 0) {
1588 		switch (trap_no) {
1589 		case MACH_ARM_TRAP_ABSTIME:
1590 			handle_mach_absolute_time_trap(state);
1591 			return;
1592 		case MACH_ARM_TRAP_CONTTIME:
1593 			handle_mach_continuous_time_trap(state);
1594 			return;
1595 		}
1596 
1597 		/* Counting perhaps better in the handler, but this is how it's been done */
1598 		thread->syscalls_mach++;
1599 		mach_syscall(state);
1600 	} else {
1601 		/* Counting perhaps better in the handler, but this is how it's been done */
1602 		thread->syscalls_unix++;
1603 		p = get_bsdthreadtask_info(thread);
1604 
1605 		assert(p);
1606 
1607 		unix_syscall(state, thread, p);
1608 	}
1609 }
1610 
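/*
 * Fast time-query traps: rather than taking the full Mach syscall path,
 * these read the timebase directly and return it in x0. A userspace caller
 * reaches this via an SVC with the (negative) trap number in x16 --
 * roughly the following sketch, though the exact immediate and the
 * libsyscall stub are assumptions here, not taken from this file:
 *
 *	mov	x16, #MACH_ARM_TRAP_ABSTIME	// negative Mach trap number
 *	svc	#0x80				// conventional SVC immediate
 *	// mach_absolute_time() result is now in x0
 */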
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}


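/*
 * handle_msr_trap: an MSR/MRS instruction from userspace touched a system
 * register it may not access. Kernel-originated or 32-bit-state traps are
 * fatal; otherwise the faulting instruction is copied in and the thread is
 * delivered an EXC_BAD_INSTRUCTION exception.
 */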
__attribute__((noreturn))
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (ESR 0x%x) from 32-bit state", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (ESR 0x%x) from kernel", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}


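/*
 * handle_user_trapped_instruction32: a 32-bit (AArch32) user instruction
 * was trapped; fetch it and deliver EXC_BAD_INSTRUCTION to the thread.
 */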
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

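/*
 * handle_simd_trap: SIMD/FP access trapped from userland; deliver
 * EXC_BAD_INSTRUCTION with the faulting instruction as the second code.
 */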
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

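/*
 * sleh_irq: second-level handler for IRQs. The prologue/epilogue pair
 * brackets the handler with kdebug tracing (plus kperf/telemetry hooks);
 * dispatch goes either to the platform expert or to the interrupt handler
 * registered in per-CPU data, and the interrupt is fed to the entropy
 * pool. With MACH_ASSERT, a handler that leaks a preemption-level change
 * panics here.
 */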
void
sleh_irq(arm_saved_state_t *state)
{
	cpu_data_t * cdp __unused             = getCpuDatap();
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif


	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

#if USE_APPLEARMSMP
	PE_handle_ext_interrupt();
#else
	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);
#endif

	entropy_collect();


	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, sleh_get_preemption_level());
	}
#endif
}

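/*
 * sleh_fiq: second-level handler for FIQs. The FIQ is classified before
 * the prologue so the trace event carries the right type: fast IPI (when
 * HAS_IPI), pending timer, or PMI (when MONOTONIC_FIQ), falling back to
 * "unknown". Anything not recognized as an IPI or PMI is handled as a
 * timer expiry.
 */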
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type   = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
	boolean_t    is_ipi = FALSE;
	uint64_t     ipi_sr = 0;

	if (gFastIPI) {
		MRS(ipi_sr, "S3_5_C15_C1_1");

		if (ipi_sr & 1) {
			is_ipi = TRUE;
		}
	}

	if (is_ipi) {
		type = DBG_INTR_TYPE_IPI;
	} else
#endif /* defined(HAS_IPI) */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}
#if MONOTONIC_FIQ
	/*
	 * Consult the PMI sysregs last, after IPI/timer classification.
	 */
	else if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	}
#endif /* MONOTONIC_FIQ */

	sleh_interrupt_handler_prologue(state, type);

#if APPLEVIRTUALPLATFORM
	uint64_t iar = __builtin_arm_rsr64("ICC_IAR0_EL1");
#endif

#if defined(HAS_IPI)
	if (is_ipi) {
		/*
		 * Order is important here: we must ack the IPI by writing IPI_SR
		 * before we call cpu_signal_handler().  Otherwise, there will be
		 * a window between the completion of pending-signal processing in
		 * cpu_signal_handler() and the ack during which a newly-issued
		 * IPI to this CPU may be lost.  ISB is required to ensure the msr
		 * is retired before execution of cpu_signal_handler().
		 */
		MSR("S3_5_C15_C1_1", ipi_sr);
		__builtin_arm_isb(ISB_SY);
		cpu_signal_handler();
	} else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
		INTERRUPT_MASKED_DEBUG_END();
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but we're not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
		rtclock_intr(TRUE);
		INTERRUPT_MASKED_DEBUG_END();
	}

#if APPLEVIRTUALPLATFORM
	if (iar != GIC_SPURIOUS_IRQ) {
		__builtin_arm_wsr64("ICC_EOIR0_EL1", iar);
		__builtin_arm_isb(ISB_SY);
	}
#endif

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, sleh_get_preemption_level());
	}
#endif
}

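/*
 * sleh_serror: second-level handler for SErrors (asynchronous external
 * aborts). The event is bracketed with kdebug trace records and handed to
 * the platform error handler.
 */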
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	task_vtimer_check(current_thread());

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
	    esr, VM_KERNEL_ADDRHIDE(far));
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = sleh_get_preemption_level();
#endif


	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if MACH_ASSERT
	if (preemption_level != sleh_get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, sleh_get_preemption_level());
	}
#endif
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
	    esr, VM_KERNEL_ADDRHIDE(far));
}

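/*
 * Emit the kdebug end event for a Mach syscall, matching the
 * corresponding start event.
 */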
void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
	    DBG_FUNC_END, retval, 0, 0, 0, 0);
}

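/*
 * thread_syscall_return: complete a Mach syscall on behalf of the current
 * thread by writing the return value into x0 of its saved user state,
 * then return to userspace via thread_exception_return(). The SVC number
 * is negated for tracing because Mach trap numbers are negative.
 */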
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}

void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]);  */
}

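/*
 * sleh_interrupt_handler_prologue: common entry work for IRQ/FIQ handling.
 * Emits the DBG_MACH_EXCP_INTR start event (unsliding the pc for kernel
 * interrupts) and marks the current thread for telemetry if a record is
 * pending.
 */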
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	boolean_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	task_vtimer_check(current_thread());

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread(is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}

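/*
 * sleh_interrupt_handler_epilogue: common exit work -- take a kperf
 * interrupt sample if configured, then emit the matching end event.
 */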
static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

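/*
 * sleh_invalid_stack: taken when an exception finds the kernel stack
 * pointer out of bounds. The kernel stack occupies
 * [kernel_stack_bottom, kernel_stack_bottom + KERNEL_STACK_SIZE); a sp
 * that has slipped less than one page below the bottom landed in the
 * guard page, which is the signature of a stack overflow. Anything else
 * is treated as corruption. For example (with illustrative sizes only),
 * given a 16 KiB stack and 16 KiB pages, sp == kernel_stack_bottom - 8
 * panics as an overflow, while sp == 0 panics as corruption.
 */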
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}