xref: /xnu-10063.141.1/osfmk/arm64/locore.s (revision d8b80295118ef25ac3a784134bcf95cd8e88109f)
1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_machdep.h>
31#include <arm64/machine_routines_asm.h>
32#include <arm64/proc_reg.h>
33#include <pexpert/arm64/board_config.h>
34#include <mach/exception_types.h>
35#include <mach_kdp.h>
36#include <config_dtrace.h>
37#include "assym.s"
38#include <arm64/exception_asm.h>
39#include "dwarf_unwind.h"
40
41#if __ARM_KERNEL_PROTECT__
42#include <arm/pmap.h>
43#endif
44
45#if XNU_MONITOR && !CONFIG_SPTM
46/*
47 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
48 *
49 * Checks if an exception was taken from the PPL, and if so, trampolines back
50 * into the PPL.
51 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
52 *         exception was taken while in the PPL.
53 */
54.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
55	cmp		x26, xzr
56	b.eq		1f
57
58	/* Return to the PPL. */
59	mov		x15, #0
60	mov		w10, #PPL_STATE_EXCEPTION
61#error "XPRR configuration error"
621:
63.endmacro
64
65
66#endif /* XNU_MONITOR && !CONFIG_SPTM */
67
68#if CONFIG_SPTM
69#include <sptm/sptm_xnu.h>
70#include <sptm/sptm_common.h>
71/*
72 * Panic lockdown is a security enhancement which makes certain types of
73 * exceptions (generally, PAC failures and sync exceptions taken with async
74 * exceptions masked) and panics fatal against attackers with kernel R/W. It
75 * does this through a trapdoor panic bit protected by the SPTM.
76 * When this bit is set, TXM will refuse to authorize new code mappings which,
77 * ideally, renders the system unusable even if the attacker gains control over
78 * XNU. Additionally, when this bit is set XNU will refuse to handle any sync
79 * exceptions originating from user space. This makes implementing further stages
80 * of an exploit challenging as it prevents user space from driving the kernel.
81 */
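/*
 * The panic bit is consulted below in fleh_synchronous (see
 * Lblocked_user_sync_exception) before dispatching synchronous exceptions
 * taken from EL0.
 */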
82
83/*
84 * Inform the SPTM that XNU has panicked (or, rather, must panic). This is provided as a
85 * macro rather than a function since it's just one instruction on release and
86 * it avoids the need to spill a return address unless the macro caller
87 * explicitly needs to preserve LR.
88 *
89 * This macro preserves callee saved registers but clobbers all others.
90 */
91.macro BEGIN_PANIC_LOCKDOWN
92#if CONFIG_XNUPOST
93	mrs		x8, TPIDR_EL1
94	/*
95	 * If hitting this with a null TPIDR, it's likely that this was an unexpected
96	 * exception in early boot rather than an expected one as a part of a test.
97	 * Trigger lockdown.
98	 */
99	cbz		x8, 0f
100	ldr		x8, [x8, TH_EXPECTED_FAULT_HANDLER]
101	cbnz 	x8, 1f
1020:
103#endif /* CONFIG_XNUPOST */
104	/*
105	 * The sptm_xnu_panic_begin routine is guaranteed to unavoidably lead to
106	 * the panic bit being set.
107	 */
108	bl EXT(sptm_xnu_panic_begin)
109#if CONFIG_XNUPOST
110	b		2f
1111:
112	/*
113	 * We hit lockdown while an exception handler was installed.
114	 * This was likely an expected exception. Skip setting the panic bit (since
115	 * this will kill the system) and instead set a bit in the test handler.
116	 */
117	mov		w9, #1
118	adrp	x8, EXT(xnu_post_panic_lockdown_did_fire)@page
119	add		x8, x8, EXT(xnu_post_panic_lockdown_did_fire)@pageoff
120	strb	w9, [x8]
121	mov		lr, xzr // trash LR to ensure callers don't rely on it
1222:
123#endif /* CONFIG_XNUPOST */
124.endmacro
125#endif /* CONFIG_SPTM */
126
127/*
128 * MAP_KERNEL
129 *
130 * Restores the kernel EL1 mappings, if necessary.
131 *
132 * This may mutate x18.
133 */
134.macro MAP_KERNEL
135#if __ARM_KERNEL_PROTECT__
136	/* Switch to the kernel ASID (low bit set) for the task. */
137	mrs		x18, TTBR0_EL1
138	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
139	msr		TTBR0_EL1, x18
140
141	/*
142	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
143	 * to the TTBRs and writes to the TCR should be ensured by the
144	 * microarchitecture.
145	 */
146#if !defined(APPLE_ARM64_ARCH_FAMILY)
147	isb		sy
148#endif
149
150	/*
151	 * Update the TCR to map the kernel now that we are using the kernel
152	 * ASID.
153	 */
154	MOV64		x18, TCR_EL1_BOOT
155	msr		TCR_EL1, x18
156	isb		sy
157#endif /* __ARM_KERNEL_PROTECT__ */
158.endmacro
159
160/*
161 * BRANCH_TO_KVA_VECTOR
162 *
163 * Branches to the requested long exception vector in the kernelcache.
164 *   arg0 - The label to branch to
165 *   arg1 - The index of the label in exc_vectors_table
166 *
167 * This may mutate x18.
168 */
169.macro BRANCH_TO_KVA_VECTOR
170#if __ARM_KERNEL_PROTECT__
171	/*
172	 * Find the kernelcache table for the exception vectors by accessing
173	 * the per-CPU data.
174	 */
175	mrs		x18, TPIDR_EL1
176	ldr		x18, [x18, ACT_CPUDATAP]
177	ldr		x18, [x18, CPU_EXC_VECTORS]
178
179	/*
180	 * Get the handler for this exception and jump to it.
181	 */
182	ldr		x18, [x18, #($1 << 3)]
183	br		x18
184#else
185	b		$0
186#endif /* __ARM_KERNEL_PROTECT__ */
187.endmacro
188
189/*
190 * CHECK_KERNEL_STACK
191 *
192 * Verifies that the kernel stack is aligned and mapped within an expected
193 * stack address range. Note: happens before saving registers (in case we can't
194 * save to kernel stack).
195 *
196 * Expects:
197 *	{x0, x1} - saved
198 *	x1 - Exception syndrome
199 *	sp - Saved state
200 *
201 * Seems like we need an unused argument to the macro for the \@ syntax to work
202 *
203 */
204.macro CHECK_KERNEL_STACK unused
205	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
206	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
207	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
208	cmp		x1, x2								// If we have a stack alignment exception
209	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
210	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
211	cmp		x1, x2								// If we have a data abort, we need to
212	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
213	mrs		x0, SP_EL0					// Get SP_EL0
214	mrs		x1, TPIDR_EL1						// Get thread pointer
215Ltest_kstack_\@:
216	LOAD_KERN_STACK_TOP	dst=x2, src=x1, tmp=x3	// Get top of kernel stack
217	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
218	cmp		x0, x2								// if (SP_EL0 >= kstack top)
219	b.ge	Ltest_istack_\@						//    jump to istack test
220	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
221	b.gt	Lvalid_stack_\@						//    stack pointer valid
222Ltest_istack_\@:
223	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
224	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
225	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
226	cmp		x0, x2								// if (SP_EL0 >= istack top)
227	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
228	cmp		x0, x3								// if (SP_EL0 > istack bottom)
229	b.gt	Lvalid_stack_\@						//    stack pointer valid
230Lcorrupt_stack_\@:
231	ldp		x2, x3, [sp], #16
232	ldp		x0, x1, [sp], #16
233	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
234	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
235	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
236	mrs		x0, SP_EL0					// Get SP_EL0
237	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
238	INIT_SAVED_STATE_FLAVORS sp, w0, w1
239	mov		x0, sp								// Copy exception frame pointer to x0
240	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
241	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
242	b		fleh_dispatch64_noreturn
243Lvalid_stack_\@:
244	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
245.endmacro
246
247
248#if __ARM_KERNEL_PROTECT__
249	.section __DATA_CONST,__const
250	.align 3
251	.globl EXT(exc_vectors_table)
252LEXT(exc_vectors_table)
253	/* Table of exception handlers.
254         * These handlers sometimes contain deadloops.
255         * It's nice to have symbols for them when debugging. */
256	.quad el1_sp0_synchronous_vector_long
257	.quad el1_sp0_irq_vector_long
258	.quad el1_sp0_fiq_vector_long
259	.quad el1_sp0_serror_vector_long
260	.quad el1_sp1_synchronous_vector_long
261	.quad el1_sp1_irq_vector_long
262	.quad el1_sp1_fiq_vector_long
263	.quad el1_sp1_serror_vector_long
264	.quad el0_synchronous_vector_64_long
265	.quad el0_irq_vector_64_long
266	.quad el0_fiq_vector_64_long
267	.quad el0_serror_vector_64_long
268#endif /* __ARM_KERNEL_PROTECT__ */
269
270	.text
271#if __ARM_KERNEL_PROTECT__
272	/*
273	 * We need this to be on a page boundary so that we can avoid mapping
274	 * other text along with it.  As this must be on the VM page boundary
275	 * (due to how the coredumping code currently works), this will be a
276	 * 16KB page boundary.
277	 */
278	.align 14
279#else
280	.align 12
281#endif /* __ARM_KERNEL_PROTECT__ */
282	.globl EXT(ExceptionVectorsBase)
283LEXT(ExceptionVectorsBase)
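	/*
	 * Each vector entry below occupies a 0x80-byte slot (.align 7), matching
	 * the AArch64 VBAR_EL1 vector table layout.
	 */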
284Lel1_sp0_synchronous_vector:
285	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
286
287	.text
288	.align 7
289Lel1_sp0_irq_vector:
290	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
291
292	.text
293	.align 7
294Lel1_sp0_fiq_vector:
295	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
296
297	.text
298	.align 7
299Lel1_sp0_serror_vector:
300	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
301
302	.text
303	.align 7
304Lel1_sp1_synchronous_vector:
305	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
306
307	.text
308	.align 7
309Lel1_sp1_irq_vector:
310	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
311
312	.text
313	.align 7
314Lel1_sp1_fiq_vector:
315	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
316
317	.text
318	.align 7
319Lel1_sp1_serror_vector:
320	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
321
322	.text
323	.align 7
324Lel0_synchronous_vector_64:
325	MAP_KERNEL
326	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
327
328	.text
329	.align 7
330Lel0_irq_vector_64:
331	MAP_KERNEL
332	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
333
334	.text
335	.align 7
336Lel0_fiq_vector_64:
337	MAP_KERNEL
338	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
339
340	.text
341	.align 7
342Lel0_serror_vector_64:
343	MAP_KERNEL
344	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
345
346	/* Fill out the rest of the page */
347	.align 12
348
349/*********************************
350 * END OF EXCEPTION VECTORS PAGE *
351 *********************************/
352
353
354
355.macro EL1_SP0_VECTOR
356	msr		SPSel, #0							// Switch to SP0
357	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
358	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
359	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
360	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
361	INIT_SAVED_STATE_FLAVORS sp, w0, w1
362	mov		x0, sp								// Copy saved state pointer to x0
363.endmacro
364
365.macro EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
366	// SWITCH_TO_INT_STACK requires a clobberable tmp register, but at this
367	// point in the exception vector we can't spare the extra GPR.  Instead note
368	// that EL1_SP0_VECTOR ends with x0 == sp and use this to unclobber x0.
369	mrs		x1, TPIDR_EL1
370	LOAD_INT_STACK	dst=x1, src=x1, tmp=x0
371	mov		x0, sp
372	mov		sp, x1
373.endmacro
374
375el1_sp0_synchronous_vector_long:
376	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
377	mrs		x1, ESR_EL1							// Get the exception syndrome
378	/* If the stack pointer is corrupt, it will manifest either as a data abort
379	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
380	 * these quickly by testing bit 5 of the exception class.
381	 */
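	/* Other exception classes also have bit 5 of the EC set; CHECK_KERNEL_STACK
	 * re-checks the exact class and treats anything that is neither a data
	 * abort nor an SP alignment fault as having a valid stack. */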
382	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
383	CHECK_KERNEL_STACK
384Lkernel_stack_valid:
385	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
386	EL1_SP0_VECTOR
387	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
388	add		x1, x1, EXT(fleh_synchronous)@pageoff
389	b		fleh_dispatch64
390
391el1_sp0_irq_vector_long:
392	EL1_SP0_VECTOR
393	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
394	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
395	add		x1, x1, EXT(fleh_irq)@pageoff
396	b		fleh_dispatch64
397
398el1_sp0_fiq_vector_long:
399	// ARM64_TODO write optimized decrementer
400	EL1_SP0_VECTOR
401	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
402	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
403	add		x1, x1, EXT(fleh_fiq)@pageoff
404	b		fleh_dispatch64
405
406el1_sp0_serror_vector_long:
407	EL1_SP0_VECTOR
408	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
409	add		x1, x1, EXT(fleh_serror)@pageoff
410	b		fleh_dispatch64
411
412.macro EL1_SP1_VECTOR
413	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
414	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
415	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
416	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
417	INIT_SAVED_STATE_FLAVORS sp, w0, w1
418	mov		x0, sp								// Copy saved state pointer to x0
419.endmacro
420
421el1_sp1_synchronous_vector_long:
422	b		check_exception_stack
423Lel1_sp1_synchronous_valid_stack:
424#if defined(KERNEL_INTEGRITY_KTRR)
425	b		check_ktrr_sctlr_trap
426Lel1_sp1_synchronous_vector_continue:
427#endif
428	EL1_SP1_VECTOR
429	adrp	x1, fleh_synchronous_sp1@page
430	add		x1, x1, fleh_synchronous_sp1@pageoff
431	b		fleh_dispatch64_noreturn
432
433el1_sp1_irq_vector_long:
434	EL1_SP1_VECTOR
435	adrp	x1, fleh_irq_sp1@page
436	add		x1, x1, fleh_irq_sp1@pageoff
437	b		fleh_dispatch64_noreturn
438
439el1_sp1_fiq_vector_long:
440	EL1_SP1_VECTOR
441	adrp	x1, fleh_fiq_sp1@page
442	add		x1, x1, fleh_fiq_sp1@pageoff
443	b		fleh_dispatch64_noreturn
444
445el1_sp1_serror_vector_long:
446	EL1_SP1_VECTOR
447	adrp	x1, fleh_serror_sp1@page
448	add		x1, x1, fleh_serror_sp1@pageoff
449	b		fleh_dispatch64_noreturn
450
451
452.macro EL0_64_VECTOR guest_label
453	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
454#if __ARM_KERNEL_PROTECT__
455	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
456#endif
457	mrs		x0, TPIDR_EL1						// Load the thread register
458	LOAD_USER_PCB	dst=x0, src=x0, tmp=x1		// Load the user context pointer
459	mrs		x1, SP_EL0							// Load the user stack pointer
460	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
461	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
462	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
463	msr		SPSel, #0							// Switch to SP0
464	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
465	mrs		x1, TPIDR_EL1						// Load the thread register
466
467
468
469	mov		x0, sp								// Copy the user PCB pointer to x0
470												// x1 contains thread register
471.endmacro
472
473.macro EL0_64_VECTOR_SWITCH_TO_INT_STACK
474	// Similarly to EL1_SP0_VECTOR_SWITCH_TO_INT_STACK, we need to take
475	// advantage of EL0_64_VECTOR ending with x0 == sp.  EL0_64_VECTOR also
476	// populates x1 with the thread state, so we can skip reloading it.
477	LOAD_INT_STACK	dst=x1, src=x1, tmp=x0
478	mov		x0, sp
479	mov		sp, x1
480.endmacro
481
482.macro EL0_64_VECTOR_SWITCH_TO_KERN_STACK
483	LOAD_KERN_STACK_TOP	dst=x1, src=x1, tmp=x0
484	mov		x0, sp
485	mov		sp, x1
486.endmacro
487
488el0_synchronous_vector_64_long:
489	EL0_64_VECTOR	sync
490	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
491	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
492	add		x1, x1, EXT(fleh_synchronous)@pageoff
493	b		fleh_dispatch64
494
495el0_irq_vector_64_long:
496	EL0_64_VECTOR	irq
497	EL0_64_VECTOR_SWITCH_TO_INT_STACK
498	adrp	x1, EXT(fleh_irq)@page					// load address for fleh
499	add		x1, x1, EXT(fleh_irq)@pageoff
500	b		fleh_dispatch64
501
502el0_fiq_vector_64_long:
503	EL0_64_VECTOR	fiq
504	EL0_64_VECTOR_SWITCH_TO_INT_STACK
505	adrp	x1, EXT(fleh_fiq)@page					// load address for fleh
506	add		x1, x1, EXT(fleh_fiq)@pageoff
507	b		fleh_dispatch64
508
509el0_serror_vector_64_long:
510	EL0_64_VECTOR	serror
511	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
512	adrp	x1, EXT(fleh_serror)@page				// load address for fleh
513	add		x1, x1, EXT(fleh_serror)@pageoff
514	b		fleh_dispatch64
515
516
517/*
518 * check_exception_stack
519 *
520 * Verifies that the stack pointer at SP1 is within the exception stack.
521 * If not, it will simply hang, as we have no more stack to fall back on.
522 */
523
524	.text
525	.align 2
526check_exception_stack:
527	mrs		x18, TPIDR_EL1					// Get thread pointer
528	cbz		x18, Lvalid_exception_stack			// Thread context may not be set early in boot
529	ldr		x18, [x18, ACT_CPUDATAP]
530	cbz		x18, Lcheck_exception_stack_fail	// If thread context is set, cpu data should be too
531	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
532	cmp		sp, x18
533	b.gt	Lcheck_exception_stack_fail	// Hang if above exception stack top
534	sub		x18, x18, EXCEPSTACK_SIZE_NUM			// Find bottom of exception stack
535	cmp		sp, x18
536	b.lt	Lcheck_exception_stack_fail	// Hang if below exception stack bottom
537Lvalid_exception_stack:
538	mov		x18, #0
539	b		Lel1_sp1_synchronous_valid_stack
540
541Lcheck_exception_stack_fail:
542#if CONFIG_SPTM
543	/*
544	 * To prevent exceptions delivered on SP1 from being delayed indefinitely,
545	 * make receiving an exception on an invalid exception stack fatal.
546	 */
547	BEGIN_PANIC_LOCKDOWN
548#endif /* CONFIG_SPTM */
5491:
550	wfi
551	b		1b		// Spin for debugger/watchdog
552
553#if defined(KERNEL_INTEGRITY_KTRR)
554	.text
555	.align 2
556check_ktrr_sctlr_trap:
557/* We may abort on an instruction fetch on reset when enabling the MMU by
558 * writing SCTLR_EL1 because the page containing the privileged instruction is
559 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
560 * would otherwise panic unconditionally. Check for the condition and return
561 * safe execution to the caller on behalf of the faulting function.
562 *
563 * Expected register state:
564 *  x22 - Kernel virtual base
565 *  x23 - Kernel physical base
566 */
567	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
568	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
569	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
570	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
571	movz	w1, #0x8600, lsl #16
572	movk	w1, #0x0000
573	cmp		x0, x1
574	mrs		x0, ELR_EL1					// Check for expected abort address
575	adrp	x1, _pinst_set_sctlr_trap_addr@page
576	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
577	sub		x1, x1, x22					// Convert to physical address
578	add		x1, x1, x23
579	ccmp	x0, x1, #0, eq
580	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
581	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
582	b.ne	Lel1_sp1_synchronous_vector_continue
583	msr		ELR_EL1, lr					// Return to caller
584	ERET_CONTEXT_SYNCHRONIZING
585#endif /* defined(KERNEL_INTEGRITY_KTRR) */
586
587/* 64-bit first level exception handler dispatcher.
588 * Completes register context saving and branches to a non-returning FLEH.
589 * FLEH can inspect the spilled thread state, but it contains an invalid
590 * thread signature.
591 *
592 * Expects:
593 *  {x0, x1, sp} - saved
594 *  x0 - arm_context_t
595 *  x1 - address of FLEH
596 *  fp - previous stack frame if EL1
597 *  lr - unused
598 *  sp - kernel stack
599 */
600	.text
601	.align 2
602fleh_dispatch64_noreturn:
603#if HAS_APPLE_PAC
604	pacia	x1, sp
605	/* Save arm_saved_state64 with invalid signature */
606	SPILL_REGISTERS KERNEL_MODE, POISON_THREAD_SIGNATURE
607	b	fleh_dispatch64_common
608#else
609	// Fall through to fleh_dispatch64
610#endif
611
612/* 64-bit first level exception handler dispatcher.
613 * Completes register context saving and branches to FLEH.
614 * Expects:
615 *  {x0, x1, sp} - saved
616 *  x0 - arm_context_t
617 *  x1 - address of FLEH
618 *  fp - previous stack frame if EL1
619 *  lr - unused
620 *  sp - kernel stack
621 */
622	.text
623	.align 2
624fleh_dispatch64:
625#if HAS_APPLE_PAC
626	pacia	x1, sp
627#endif
628
629	/* Save arm_saved_state64 */
630	SPILL_REGISTERS KERNEL_MODE
631
632fleh_dispatch64_common:
633	/* If exception is from userspace, zero unused registers */
634	and		x23, x23, #(PSR64_MODE_EL_MASK)
635	cmp		x23, #(PSR64_MODE_EL0)
636	bne		1f									// Skip register zeroing if not from EL0
637
638	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
6392:
640	mov		x2, #0
641	mov		x3, #0
642	mov		x4, #0
643	mov		x5, #0
644	mov		x6, #0
645	mov		x7, #0
646	mov		x8, #0
647	mov		x9, #0
648	mov		x10, #0
649	mov		x11, #0
650	mov		x12, #0
651	mov		x13, #0
652	mov		x14, #0
653	mov		x15, #0
654	mov		x16, #0
655	mov		x17, #0
656	mov		x18, #0
657	mov		x19, #0
658	mov		x20, #0
659	/* x21, x22 are overwritten below on all paths, so no need to zero them here */
660	mov		x23, #0
661	mov		x24, #0
662	mov		x25, #0
663#if !XNU_MONITOR
664	mov		x26, #0
665#endif
666	mov		x27, #0
667	mov		x28, #0
668	mov		fp, #0
669	mov		lr, #0
6701:
671
672	mov		x21, x0								// Copy arm_context_t pointer to x21
673	mov		x22, x1								// Copy handler routine to x22
674
675#if XNU_MONITOR
676	/* Zero x26 to indicate that this should not return to the PPL. */
677	mov		x26, #0
678#endif
679
680#if PRECISE_USER_KERNEL_TIME
681	tst		x23, PSR64_MODE_EL_KERNEL			// If interrupting the kernel, skip
682	b.ne	1f                                  // precise time update.
683	PUSH_FRAME
684	bl		EXT(recount_leave_user)
685	POP_FRAME_WITHOUT_LR
686	mov		x0, x21								// Reload arm_context_t pointer
6871:
688#endif /* PRECISE_USER_KERNEL_TIME */
689
690	/* Dispatch to FLEH */
691
692#if HAS_APPLE_PAC
693	braa	x22,sp
694#else
695	br		x22
696#endif
697
698
699	.text
700	.align 2
701	.global EXT(fleh_synchronous)
702LEXT(fleh_synchronous)
703
704UNWIND_PROLOGUE
705UNWIND_DIRECTIVES
706
707	mrs		x1, ESR_EL1							// Load exception syndrome
708	mrs		x2, FAR_EL1							// Load fault address
709
710	/* At this point, the LR contains the value of ELR_EL1. In the case of an
711	 * instruction prefetch abort, this will be the faulting pc, which we know
712	 * to be invalid. This will prevent us from backtracing through the
713	 * exception if we put it in our stack frame, so we load the LR from the
714	 * exception saved state instead.
715	 */
716	and		w6, w1, #(ESR_EC_MASK)
717	lsr		w6, w6, #(ESR_EC_SHIFT)
718	mov		w4, #(ESR_EC_IABORT_EL1)
719	cmp		w6, w4
720	b.eq	Lfleh_sync_load_lr
721Lvalid_link_register:
722
723#if CONFIG_SPTM
724	/*
725	 * Sync exceptions in the kernel are rare, so check that first.
726	 * This check should be trivially predicted not-taken (NT). We also take
727	 * the check out of line so, on the hot path, we don't add a
728	 * frontend redirect.
729	 */
730	mov		x3, #0 // by default, do not signal panic lockdown to sleh
731	mrs		x4, SPSR_EL1
732	tst		x4, #(PSR64_MODE_EL_MASK)
733	b.ne	Lfleh_synchronous_ool_check_exception_el1 /* Run ELn checks if we're EL!=0 (!Z) */
734	/* EL0 -- check if we're blocking sync exceptions due to lockdown */
735	adrp	x4, EXT(sptm_xnu_triggered_panic_ptr)@page
736	ldr		x4, [x4, EXT(sptm_xnu_triggered_panic_ptr)@pageoff]
737	ldrb	w4, [x4]
738	cbnz	w4, Lblocked_user_sync_exception
739
740Lfleh_synchronous_continue:
741#endif /* CONFIG_SPTM */
742
743	PUSH_FRAME
744	bl		EXT(sleh_synchronous)
745	POP_FRAME_WITHOUT_LR
746
747#if XNU_MONITOR && !CONFIG_SPTM
748	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
749#endif
750
751	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
752	b		exception_return_dispatch
753
754Lfleh_sync_load_lr:
755	ldr		lr, [x0, SS64_LR]
756	b Lvalid_link_register
757
758#if CONFIG_SPTM
759Lfleh_synchronous_ool_check_exception_el1:
760	/*
761	 * We're in the kernel!
762	 *
763	 * An SP0 sync exception is forced to be fatal if:
764	 * (AND
765	 * 	(SPSR.M[3:2] > 0) // Originates from kernel
766	 * 	(OR
767	 * 		(ESR.EC == ESR_EC_PAC_FAIL)    // FPAC failure
768	 * 		(AND  // PAC BRK instruction (compiler generated traps)
769	 * 			ESR.EC == ESR_EC_BRK_AARCH64
770	 * 			ESR.ISS is in PTRAUTH_TRAPS
771	 * 		)
772	 *		(AND // Potential dPAC failure (poisoned VA)
773	 *			(ESR.EC == ESR_EC_DABORT_EL1)
774	 *			(XPACD(FAR) != FAR)
775	 *			(NAND //outside copyio region
776	 *				ELR >= copyio_fault_region_begin
777	 *				ELR < copyio_fault_region_end
778	 * 			)
779	 *		)
780	 *		(ESR.EC == ESR_EC_IABORT_EL1)  // Potential iPAC failure (poisoned PC)
781	 * 		(AND
782	 * 			(AND SPSR.A SPSR.I SPSR.F) // Async exceptions were masked
783	 * 			(ESR.EC != ESR_EC_UNCATEGORIZED) // Not an undefined instruction (GDBTRAP for stackshots, etc.)
784	 *			(NAND // brks other than PAC traps are permitted for non-fatal telemetry
785	 *				(ESR.EC == ESR_EC_BRK_AARCH64)
786	 *				(ESR.ISS is in PTRAUTH_TRAPS)
787	 *			)
788	 * 			(startup_phase < STARTUP_SUB_LOCKDOWN) // Not in early-boot
789	 * 			(OR !CONFIG_XNUPOST (saved_expected_fault_handler == NULL)) // Not an expected, test exception
790	 *			(NAND // copyio data aborts are permitted while exceptions are masked
791	 *				ESR.EC == ESR_EC_DABORT_EL1
792	 *				ELR >= copyio_fault_region_begin
793	 *				ELR < copyio_fault_region_end
794	 *			)
795	 * 		)
796	 * 	)
797	 * )
798	 */
799
800	/*
801	 * Pre-compute some sub-expressions which will be used later
802	 */
803	mrs		x10, ELR_EL1
804	adrp	x11, EXT(copyio_fault_region_begin)@page
805	add		x11, x11, EXT(copyio_fault_region_begin)@pageoff
806	adrp	x12, EXT(copyio_fault_region_end)@page
807	add		x12, x12, EXT(copyio_fault_region_end)@pageoff
808
809	/* in-copyio-region sub-expression */
810	/* Are we after the start of the copyio region? */
811	cmp		x10, x11
812	/*
813	 * If after the start (HS), test upper bounds.
814	 * Otherwise (LO), fail forward (HS)
815	 */
816	ccmp	x10, x12, #0b0010 /* C/HS */, HS
817	/*
818	 * Spill "in-copyio-region" flag for later reuse
819	 * x10=1 if ELR was in copyio region, 0 otherwise
820	 */
821	cset	x10, LO
822
823#if __has_feature(ptrauth_calls)
824	/* is-dPAC-poisoned-DABORT sub-expression */
825	mrs		x5, FAR_EL1
826	/*
827	 * Is XPACD(FAR) == FAR?
828	 * XPAC converts an arbitrary pointer like value into the canonical form
829	 * that would be produced if the pointer were to successfully pass AUTx.
830	 * If the pointer is canonical, this has no effect.
831	 * If the pointer is non-canonical (such as due to PAC poisoning), the value
832	 * will not match FAR.
833	 */
834	mov		x11, x5
835	xpacd	x11
836	/*
837	 * If we're outside the copyio region (HS), set flags for whether FAR is
838	 * clean (EQ) or has PAC poisoning (NE).
839	 * Otherwise (LO), set EQ
840	 */
841	ccmp    x5, x11, #0b0100 /* Z/EQ */, HS
842	/*
843	 * If we were poisoned (NE), was this a data abort?
844	 * Otherwise (EQ), pass NE
845	 */
846	mov		w5, #(ESR_EC_DABORT_EL1)
847	ccmp	w6, w5, #0b0000 /* !Z/NE */, NE
848	/* x11=1 when we had a DABORT with a poisoned VA outside the copyio region */
849	cset	x11, EQ
850#endif /* ptrauth_calls */
851
852	/*
853	 * Now let's check the rare but fast conditions that apply only to kernel
854	 * sync exceptions.
855	 */
856
857	/*
858	 * if ((ESR.EC == ESR_EC_BRK_AARCH64 && IS_PTRAUTH(ESR.ISS)) ||
859	 * 		ESR.EC == ESR_EC_PAC_FAIL ||
860	 *		ESR.EC == ESR_IABORT_EL1 ||
861	 *		poisoned_dabort)
862	 * 	goto Lfleh_synchronous_panic_lockdown
863	 */
864	cmp		w6, #(ESR_EC_BRK_AARCH64) // eq if this is a BRK instruction
865	/*
866	 * Is this a PAC breakpoint? ESR.ISS in [0xC470, 0xC473], which is true when
867	 * {ESR.ISS[24:2], 2'b00} == 0xC470
868	 */
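	/*
	 * BRK immediates 0xC470-0xC473 are the pointer authentication traps emitted
	 * by the compiler, one per PAC key (IA, IB, DA, DB).
	 */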
869	mov		w5, #(0xC470)
870	and		w7, w1, #0xfffc
871	/*
872	 * If we're not BRK, NE
873	 * If we're BRK, set flags for ISS=PAC breakpoint
874	 */
875	ccmp	w7, w5, #0, EQ
876
877	/*
878	 * If we aren't a PAC BRK (NE), set flags for ESR.EC==PAC_FAIL
879	 * If we are a PAC BRK (EQ), pass EQ through.
880	*/
881	ccmp	w6, #(ESR_EC_PAC_FAIL), #0b0100 /* Z */, NE
882
883	/*
884	 * If !(PAC BRK || EC == PAC_FAIL) (NE), set flags for ESR.EC==IABORT
885	 * If (PAC BRK || EC == PAC_FAIL) (EQ), pass lockdown request (EQ)
886	 */
887	mov		w5, #(ESR_EC_IABORT_EL1)
888	ccmp	w6, w5, #0b0100 /* Z/EQ */, NE
889
890#if __has_feature(ptrauth_calls)
891	/*
892	 * If !(PAC BRK || EC == PAC_FAIL || EC == IABORT) (NE), set flags for
893	 * whether this was a poisoned DABORT (previously computed in x11).
894	 * If (PAC BRK || EC == PAC_FAIL || EC == IABORT) (EQ), pass lockdown
895	 * request (EQ)
896	 */
897	ccmp	x11, #1, #0b0100 /* Z/EQ */, NE
898#endif /* ptrauth_calls */
899
900	b.eq	Lfleh_synchronous_panic_lockdown
901
902	/*
903	 * Most kernel exceptions won't be taken with exceptions masked, but if they
904	 * are, they'll be stackshot traps or telemetry breakpoints. Check these
905	 * first since they're cheap.
906	 *
907	 * if (!((PSTATE & DAIF_STANDARD_DISABLE) == DAIF_STANDARD_DISABLE
908	 * 		&& ESR.EC != ESR_EC_UNCATEGORIZED
909	 * 		&& ESR.EC != ESR_EC_BRK_AARCH64))
910	 * 		goto Lfleh_synchronous_continue
911	 */
912	mov		w5, #(DAIF_STANDARD_DISABLE)
913	bics	wzr, w5, w4 // (DAIF_STANDARD_DISABLE & (~PSTATE)). If !Z/NE, AIF wasn't (fully) masked.
914	/*
915	 * If AIF was masked (EQ), test EC =? fasttrap
916	 * If AIF wasn't masked (NE), pass lockdown skip (EQ)
917	 */
918	ccmp	w6, #(ESR_EC_UNCATEGORIZED), #0b0100 /* Z/EQ */, EQ
919	/*
920	 * If AIF was masked AND EC != fasttrap (NE), test EC =? BRK
921	 * If AIF wasn't masked OR EC == fasttrap (EQ), pass lockdown skip (EQ)
922	 */
923	mov		w5, #(ESR_EC_BRK_AARCH64)
924	ccmp	w6, w5, #0b0100 /* Z/EQ */, NE
925	/*
926	 * AIF was masked AND EC != fasttrap AND EC != BRK (NE)
927	 * AIF wasn't masked OR EC == fasttrap OR EC == BRK (EQ) -> skip lockdown!
928	 */
929	b.eq	Lfleh_synchronous_continue
930
931	/*
932	 * Non-PAC/BRK/fasttrap exception taken with exceptions disabled.
933	 * We're going down if the system IS NOT:
934	 * 1) in early boot OR
935	 * 2) handling an expected XNUPOST exception (handled in lockdown macro) OR
936	 * 3) taking a copyio data abort
937	 */
938	adrp	x7, EXT(startup_phase)@page
939	add		x7, x7, EXT(startup_phase)@pageoff
940	ldr		w7, [x7]
941	cmp		w7, #-1 // STARTUP_SUB_LOCKDOWN
942	b.lo	Lfleh_synchronous_continue			// Still in early boot; skip lockdown
943
944	/* Was this a copyio data abort taken while exceptions were masked? */
945	cmp		w6, #ESR_EC_DABORT_EL1
946	/* x10=1 when ELR was in copyio range */
947	ccmp	x10, #1, #0b0000 /* !Z/NE */, EQ
948	b.eq	Lfleh_synchronous_continue
949
950	/* FALLTHROUGH */
951Lfleh_synchronous_panic_lockdown:
952	/* Save off arguments for sleh as SPTM may clobber */
953	mov x26, x0
954	mov x27, x1
955	mov x28, x2
956	BEGIN_PANIC_LOCKDOWN
957	mov x0, x26
958	mov x1, x27
959	mov x2, x28
960	/*
961	 * A captain goes down with her ship; the system is sunk, but for telemetry
962	 * try to handle the crash normally.
963	 */
964	mov		x3, #1 // signal to sleh that we completed panic lockdown
965	b		Lfleh_synchronous_continue
966#endif /* CONFIG_SPTM */
967UNWIND_EPILOGUE
968
969#if CONFIG_SPTM
970	.text
971	.align 2
972	/* Make a global symbol so it's easier to pick out in backtraces */
973	.global EXT(blocked_user_sync_exception)
974LEXT(blocked_user_sync_exception)
975Lblocked_user_sync_exception:
976	UNWIND_PROLOGUE
977	UNWIND_DIRECTIVES
978	/*
979	 * User space took a sync exception after panic lockdown had been initiated.
980	 * The system is going to panic soon, so let's just re-enable FIQs and wait
981	 * for debugger sync.
982	 */
983	msr		DAIFClr, #DAIFSC_FIQF
9840:
985	wfi
986	b		0b
987	UNWIND_EPILOGUE
988#endif /* CONFIG_SPTM */
989
990/* Shared prologue code for fleh_irq and fleh_fiq.
991 * Does any interrupt bookkeeping we may want to do
992 * before invoking the handler proper.
993 * Expects:
994 *  x0 - arm_context_t
995 * x23 - CPSR
996 *  fp - Undefined live value (we may push a frame)
997 *  lr - Undefined live value (we may push a frame)
998 *  sp - Interrupt stack for the current CPU
999 */
1000.macro BEGIN_INTERRUPT_HANDLER
1001	mrs		x22, TPIDR_EL1
1002	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
1003	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence  */
1004	ldr		w1, [x23, CPU_STAT_IRQ]
1005	add		w1, w1, #1							// Increment count
1006	str		w1, [x23, CPU_STAT_IRQ]				// Update  IRQ count
1007	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
1008	add		w1, w1, #1					// Increment count
1009	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
1010	/* Increment preempt count */
1011	ldr		w1, [x22, ACT_PREEMPT_CNT]
1012	add		w1, w1, #1
1013	str		w1, [x22, ACT_PREEMPT_CNT]
1014	/* Store context in int state */
1015	str		x0, [x23, CPU_INT_STATE] 			// Saved context in cpu_int_state
1016.endmacro
1017
1018/* Shared epilogue code for fleh_irq and fleh_fiq.
1019 * Cleans up after the prologue, and may do a bit more
1020 * bookkeeping (kdebug related).
1021 * Expects:
1022 * x22 - Live TPIDR_EL1 value (thread address)
1023 * x23 - Address of the current CPU data structure
1024 * w24 - 0 if kdebug is disabled, nonzero otherwise
1025 *  fp - Undefined live value (we may push a frame)
1026 *  lr - Undefined live value (we may push a frame)
1027 *  sp - Interrupt stack for the current CPU
1028 */
1029.macro END_INTERRUPT_HANDLER
1030	/* Clear int context */
1031	str		xzr, [x23, CPU_INT_STATE]
1032	/* Decrement preempt count */
1033	ldr		w0, [x22, ACT_PREEMPT_CNT]
1034	cbnz	w0, 1f								// Detect underflow
1035	b		preempt_underflow
10361:
1037	sub		w0, w0, #1
1038	str		w0, [x22, ACT_PREEMPT_CNT]
1039	/* Switch back to kernel stack */
1040	LOAD_KERN_STACK_TOP	dst=x0, src=x22, tmp=x28
1041	mov		sp, x0
1042	/* Generate a CPU-local event to terminate a post-IRQ WFE */
1043	sevl
1044.endmacro
1045
1046	.text
1047	.align 2
1048	.global EXT(fleh_irq)
1049LEXT(fleh_irq)
1050UNWIND_PROLOGUE
1051UNWIND_DIRECTIVES
1052	BEGIN_INTERRUPT_HANDLER
1053	PUSH_FRAME
1054	bl		EXT(sleh_irq)
1055	POP_FRAME_WITHOUT_LR
1056	END_INTERRUPT_HANDLER
1057
1058#if XNU_MONITOR && !CONFIG_SPTM
1059	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1060#endif
1061
1062	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
1063	b		exception_return_dispatch
1064UNWIND_EPILOGUE
1065
1066	.text
1067	.align 2
1068	.global EXT(fleh_fiq_generic)
1069LEXT(fleh_fiq_generic)
1070	PANIC_UNIMPLEMENTED
1071
1072	.text
1073	.align 2
1074	.global EXT(fleh_fiq)
1075LEXT(fleh_fiq)
1076UNWIND_PROLOGUE
1077UNWIND_DIRECTIVES
1078	BEGIN_INTERRUPT_HANDLER
1079	PUSH_FRAME
1080	bl		EXT(sleh_fiq)
1081	POP_FRAME_WITHOUT_LR
1082	END_INTERRUPT_HANDLER
1083
1084#if XNU_MONITOR && !CONFIG_SPTM
1085	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1086#endif
1087
1088	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
1089	b		exception_return_dispatch
1090UNWIND_EPILOGUE
1091
1092	.text
1093	.align 2
1094	.global EXT(fleh_serror)
1095LEXT(fleh_serror)
1096UNWIND_PROLOGUE
1097UNWIND_DIRECTIVES
1098	mrs		x1, ESR_EL1							// Load exception syndrome
1099	mrs		x2, FAR_EL1							// Load fault address
1100
1101	PUSH_FRAME
1102	bl		EXT(sleh_serror)
1103	POP_FRAME_WITHOUT_LR
1104
1105#if XNU_MONITOR && !CONFIG_SPTM
1106	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1107#endif
1108
1109	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
1110	b		exception_return_dispatch
1111UNWIND_EPILOGUE
1112
1113/*
1114 * Register state saved before we get here.
1115 */
1116	.text
1117	.align 2
1118fleh_invalid_stack:
1119#if CONFIG_SPTM
1120	/*
1121	 * Taking a synchronous exception with an invalid kernel stack pointer is unrecoverable.
1122	 * Initiate lockdown.
1123	 */
1124	/* Save off x0 as SPTM may clobber */
1125	mov		x26, x0
1126	BEGIN_PANIC_LOCKDOWN
1127	mov		x0, x26
1128#endif /* CONFIG_SPTM */
1129	mrs		x1, ESR_EL1							// Load exception syndrome
1130	str		x1, [x0, SS64_ESR]
1131	mrs		x2, FAR_EL1							// Load fault address
1132	str		x2, [x0, SS64_FAR]
1133	PUSH_FRAME
1134	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
1135	b 		.
1136
1137	.text
1138	.align 2
1139fleh_synchronous_sp1:
1140	mrs		x1, ESR_EL1							// Load exception syndrome
1141	str		x1, [x0, SS64_ESR]
1142	mrs		x2, FAR_EL1							// Load fault address
1143	str		x2, [x0, SS64_FAR]
1144
1145#if CONFIG_SPTM
1146	/*
1147	 * Without debugger intervention, all exceptions on SP1 (including debug
1148	 * trap instructions) are intended to be fatal. In order to not break
1149	 * self-hosted kernel debug, do not trigger lockdown for debug traps
1150	 * (unknown instructions/uncategorized exceptions). On release kernels, we
1151	 * don't support self-hosted kernel debug, so we unconditionally lock down.
1152	 */
1153#if (DEVELOPMENT || DEBUG)
1154	tst		w1, #(ESR_EC_MASK)
1155	b.eq	Lfleh_synchronous_sp1_skip_panic_lockdown // ESR_EC_UNCATEGORIZED is 0, so skip lockdown if Z
1156#endif /* DEVELOPMENT || DEBUG */
1157	/* Save off arguments for sleh as SPTM may clobber */
1158	mov x26, x0
1159	mov x27, x1
1160	mov x28, x2
1161	BEGIN_PANIC_LOCKDOWN
1162	mov x0, x26
1163	mov x1, x27
1164	mov x2, x28
1165Lfleh_synchronous_sp1_skip_panic_lockdown:
1166#endif /* CONFIG_SPTM */
1167
1168	PUSH_FRAME
1169	bl		EXT(sleh_synchronous_sp1)
1170	b 		.
1171
1172	.text
1173	.align 2
1174fleh_irq_sp1:
1175	mov		x1, x0
1176	adr		x0, Lsp1_irq_str
1177	b		EXT(panic_with_thread_kernel_state)
1178Lsp1_irq_str:
1179	.asciz "IRQ exception taken while SP1 selected"
1180
1181	.text
1182	.align 2
1183fleh_fiq_sp1:
1184	mov		x1, x0
1185	adr		x0, Lsp1_fiq_str
1186	b		EXT(panic_with_thread_kernel_state)
1187Lsp1_fiq_str:
1188	.asciz "FIQ exception taken while SP1 selected"
1189
1190	.text
1191	.align 2
1192fleh_serror_sp1:
1193	mov		x1, x0
1194	adr		x0, Lsp1_serror_str
1195	b		EXT(panic_with_thread_kernel_state)
1196Lsp1_serror_str:
1197	.asciz "Asynchronous exception taken while SP1 selected"
1198
1199	.text
1200	.align 2
1201exception_return_dispatch:
1202	ldr		w0, [x21, SS64_CPSR]
1203	tst		w0, PSR64_MODE_EL_MASK
1204	b.ne		EXT(return_to_kernel) // return to kernel if M[3:2] > 0
1205	b		return_to_user
1206
1207#if CONFIG_SPTM
1208/**
1209 * XNU returns to this symbol whenever handling an interrupt that occurred
1210 * during SPTM, TXM or SK runtime. This code determines which domain the
1211 * XNU thread was executing in when the interrupt occurred and tells SPTM
1212 * which domain to resume.
1213 */
1214	.text
1215	.align 2
1216	.global EXT(xnu_return_to_gl2)
1217LEXT(xnu_return_to_gl2)
1218	/**
1219	 * If thread->txm_thread_stack is set, we need to tell SPTM dispatch to
1220	 * resume the TXM thread in x0.
1221	 */
1222	mrs		x8, TPIDR_EL1
1223	ldr		x8, [x8, TH_TXM_THREAD_STACK]
1224	cbz		x8, 1f
1225	mov		x0, x8
1226	b		EXT(txm_resume)
1227	/* Unreachable */
1228	b .
1229
1230#if CONFIG_EXCLAVES
1231	/**
1232	 * If thread->th_exclaves_intstate flag TH_EXCLAVES_EXECUTION is set
1233	 * we need to tell SPTM dispatch to resume the SK thread.
1234	 */
12351:
1236	mrs		x8, TPIDR_EL1
1237	ldr		x9, [x8, TH_EXCLAVES_INTSTATE]
1238	and		x9, x9, TH_EXCLAVES_EXECUTION
1239	cbz		x9, 1f
1240	b		EXT(sk_resume)
1241	/* Unreachable */
1242	b .
1243#endif /* CONFIG_EXCLAVES */
1244
1245	/**
1246	 * If neither the above checks succeeded, this must be a thread
1247	 * that was interrupted while running in SPTM. Tell SPTM to resume
1248	 * the interrupted SPTM call.
1249	 */
12501:
1251	b		EXT(sptm_resume_from_exception)
1252	/* Unreachable */
1253	b .
1254#endif /* CONFIG_SPTM */
1255
1256	.text
1257	.align 2
1258	.global EXT(return_to_kernel)
1259LEXT(return_to_kernel)
1260	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
1261	mrs		x3, TPIDR_EL1                           // Load thread pointer
1262	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
1263	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
1264	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
1265	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
1266	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
1267	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
1268	b.eq	exception_return_unint_tpidr_x3
1269	mov		sp, x21                                 // Switch to thread stack for preemption
1270	PUSH_FRAME
1271	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
1272	POP_FRAME_WITHOUT_LR
1273	b		exception_return
1274
1275	.text
1276	.globl EXT(thread_bootstrap_return)
1277LEXT(thread_bootstrap_return)
1278#if CONFIG_DTRACE
1279	bl		EXT(dtrace_thread_bootstrap)
1280#endif
1281#if KASAN_TBI
1282	PUSH_FRAME
1283	bl		EXT(__asan_handle_no_return)
1284	POP_FRAME_WITHOUT_LR
1285#endif /* KASAN_TBI */
1286	b		EXT(arm64_thread_exception_return)
1287
1288	.text
1289	.globl EXT(arm64_thread_exception_return)
1290LEXT(arm64_thread_exception_return)
1291	mrs		x0, TPIDR_EL1
1292	LOAD_USER_PCB	dst=x21, src=x0, tmp=x28
1293	mov		x28, xzr
1294
1295	//
1296	// Fall Through to return_to_user from arm64_thread_exception_return.
1297	// Note that if we move return_to_user or insert a new routine
1298	// below arm64_thread_exception_return, the latter will need to change.
1299	//
1300	.text
1301/* x21 is always the machine context pointer when we get here
1302 * x28 is a bit indicating whether or not we should check if pc is in pfz */
1303return_to_user:
1304check_user_asts:
1305#if KASAN_TBI
1306	PUSH_FRAME
1307	bl		EXT(__asan_handle_no_return)
1308	POP_FRAME_WITHOUT_LR
1309#endif /* KASAN_TBI */
1310	mrs		x3, TPIDR_EL1					// Load thread pointer
1311
1312	movn		w2, #0
1313	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user
1314
1315#if MACH_ASSERT
1316	ldr		w0, [x3, ACT_PREEMPT_CNT]
1317	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
1318#endif
1319
1320	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1321	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
1322	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
1323	cbz		w0, no_asts							// If no asts, skip ahead
1324
1325	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle asts
1326
1327	/* At this point, we have ASTs and we need to check whether we are running in the
1328	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
1329	 * the PFZ since we don't want to handle getting a signal or getting suspended
1330	 * while holding a spinlock in userspace.
1331	 *
1332	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
1333	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
1334	 * to use it to indicate to userspace to come back to take a delayed
1335	 * preemption, at which point the ASTs will be handled. */
1336	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
1337	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64
1338
1339	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
1340	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
1341	cbz		x0, restore_and_check_ast			// No, deal with other asts
1342
1343	mov		x0, #1
1344	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
1345	mov		x0, x19								// restore x0 to asts
1346	b		no_asts								// pretend we have no asts
1347
1348restore_and_check_ast:
1349	mov		x0, x19								// restore x0
1350	b	user_take_ast							// Service pending asts
1351no_asts:
1352
1353
1354#if PRECISE_USER_KERNEL_TIME
1355	mov		x19, x3						// Preserve thread pointer across function call
1356	PUSH_FRAME
1357	bl		EXT(recount_enter_user)
1358	POP_FRAME_WITHOUT_LR
1359	mov		x3, x19
1360#endif /* PRECISE_USER_KERNEL_TIME */
1361
1362#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
1363	/* Watchtower
1364	 *
1365	 * Here we attempt to enable NEON access for EL0. If the last entry into the
1366	 * kernel from user-space was due to an IRQ, the monitor will have disabled
1367	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
1368	 * check in with the monitor in order to reenable NEON for EL0 in exchange
1369	 * for routing IRQs through the monitor (2). This way the monitor will
1370	 * always 'own' either IRQs or EL0 NEON.
1371	 *
1372	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
1373	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
1374	 * here.
1375	 *
1376	 * EL0 user ________ IRQ                                            ______
1377	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
1378	 * EL3 monitor           \_/                                \___/
1379	 *
1380	 *                       (1)                                 (2)
1381	 */
1382
1383	mov		x0, #(CPACR_FPEN_ENABLE)
1384	msr		CPACR_EL1, x0
1385#endif
1386
1387	/* Establish this thread's debug state as the live state on the selected CPU. */
1388	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
1389	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
1390	ldr		x0, [x3, ACT_DEBUGDATA]
1391	cmp		x0, x1
1392	beq		L_skip_user_set_debug_state			// Skip if the live CPU debug state already matches this thread's debug state
1393
1394
1395	PUSH_FRAME
1396	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
1397	POP_FRAME_WITHOUT_LR
1398	mrs		x3, TPIDR_EL1						// Reload thread pointer
1399	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
1400L_skip_user_set_debug_state:
1401
1402
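	/* Install this CPU's TPIDR_EL0 value (loaded from the per-CPU data) so
	 * EL0 observes it after the return. */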
1403	ldrsh	x0, [x4, CPU_TPIDR_EL0]
1404	msr		TPIDR_EL0, x0
1405
1406
1407	b		exception_return_unint_tpidr_x3
1408
1409exception_return:
1410	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1411exception_return_unint:
1412	mrs		x3, TPIDR_EL1					// Load thread pointer
1413exception_return_unint_tpidr_x3:
1414	mov		sp, x21						// Reload the pcb pointer
1415
1416#if !__ARM_KERNEL_PROTECT__
1417	/*
1418	 * Restore x18 only if the task has the entitlement that allows
1419	 * usage. Those are very few, and can move to something else
1420	 * once we use x18 for something more global.
1421	 *
1422	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
1423	 * that uses x18 as one of the global use cases (and will reset
1424	 * x18 later down below).
1425	 *
1426	 * It's also unconditionally skipped for translated threads,
1427	 * as those are another use case, one where x18 must be preserved.
1428	 */
1429	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
1430	mov		x18, #0
1431	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers
1432
1433exception_return_unint_tpidr_x3_restore_x18:
1434	ldr		x18, [sp, SS64_X18]
1435
1436#else /* !__ARM_KERNEL_PROTECT__ */
1437	/*
1438	 * If we are going to eret to userspace, we must return through the EL0
1439	 * eret mapping.
1440	 */
1441	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
1442	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1
1443
1444	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
1445	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
1446	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
1447	add		x1, x1, Lexception_return_restore_registers@pageoff
1448	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
1449	sub		x1, x1, x0											// Calculate delta
1450	add		x0, x2, x1											// Convert KVA to EL0 vector address
1451	br		x0
1452
1453Lskip_el0_eret_mapping:
1454#endif /* !__ARM_KERNEL_PROTECT__ */
1455
1456Lexception_return_restore_registers:
1457	mov 	x0, sp								// x0 = &pcb
1458	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1459	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, x25, el0_state_allowed=1
1460
1461	msr		ELR_EL1, x1							// Load the return address into ELR
1462	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
1463
1464/* Restore special register state */
1465	ldr		w3, [sp, NS64_FPSR]
1466	ldr		w4, [sp, NS64_FPCR]
1467
1468	msr		FPSR, x3
1469	mrs		x5, FPCR
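	/* CMSR writes FPCR only if the restored value differs from the live value. */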
1470	CMSR FPCR, x5, x4, 1
14711:
1472
1473
1474
1475	/* Restore arm_neon_saved_state64 */
1476	ldp		q0, q1, [x0, NS64_Q0]
1477	ldp		q2, q3, [x0, NS64_Q2]
1478	ldp		q4, q5, [x0, NS64_Q4]
1479	ldp		q6, q7, [x0, NS64_Q6]
1480	ldp		q8, q9, [x0, NS64_Q8]
1481	ldp		q10, q11, [x0, NS64_Q10]
1482	ldp		q12, q13, [x0, NS64_Q12]
1483	ldp		q14, q15, [x0, NS64_Q14]
1484	ldp		q16, q17, [x0, NS64_Q16]
1485	ldp		q18, q19, [x0, NS64_Q18]
1486	ldp		q20, q21, [x0, NS64_Q20]
1487	ldp		q22, q23, [x0, NS64_Q22]
1488	ldp		q24, q25, [x0, NS64_Q24]
1489	ldp		q26, q27, [x0, NS64_Q26]
1490	ldp		q28, q29, [x0, NS64_Q28]
1491	ldp		q30, q31, [x0, NS64_Q30]
1492
1493	/* Restore arm_saved_state64 */
1494
1495	// Skip x0, x1 - we're using them
1496	ldp		x2, x3, [x0, SS64_X2]
1497	ldp		x4, x5, [x0, SS64_X4]
1498	ldp		x6, x7, [x0, SS64_X6]
1499	ldp		x8, x9, [x0, SS64_X8]
1500	ldp		x10, x11, [x0, SS64_X10]
1501	ldp		x12, x13, [x0, SS64_X12]
1502	ldp		x14, x15, [x0, SS64_X14]
1503	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1504	// Skip x18 - already restored or trashed above (below with __ARM_KERNEL_PROTECT__)
1505	ldr		x19, [x0, SS64_X19]
1506	ldp		x20, x21, [x0, SS64_X20]
1507	ldp		x22, x23, [x0, SS64_X22]
1508	ldp		x24, x25, [x0, SS64_X24]
1509	ldp		x26, x27, [x0, SS64_X26]
1510	ldr		x28, [x0, SS64_X28]
1511	ldr		fp, [x0, SS64_FP]
1512	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1513
1514	// Restore stack pointer and our last two GPRs
1515	ldr		x1, [x0, SS64_SP]
1516	mov		sp, x1
1517
1518#if __ARM_KERNEL_PROTECT__
1519	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1520#endif /* __ARM_KERNEL_PROTECT__ */
1521
1522	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1523
1524#if __ARM_KERNEL_PROTECT__
1525	/* If we are going to eret to userspace, we must unmap the kernel. */
1526	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1527
1528	/* Update TCR to unmap the kernel. */
1529	MOV64		x18, TCR_EL1_USER
1530	msr		TCR_EL1, x18
1531
1532	/*
1533	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1534	 * each other due to the microarchitecture.
1535	 */
1536#if !defined(APPLE_ARM64_ARCH_FAMILY)
1537	isb		sy
1538#endif
1539
1540	/* Switch to the user ASID (low bit clear) for the task. */
1541	mrs		x18, TTBR0_EL1
1542	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
1543	msr		TTBR0_EL1, x18
1544	mov		x18, #0
1545
1546	/* We don't need an ISB here, as the eret is synchronizing. */
1547Lskip_ttbr1_switch:
1548#endif /* __ARM_KERNEL_PROTECT__ */
1549
1550	ERET_CONTEXT_SYNCHRONIZING
1551
1552user_take_ast:
1553	PUSH_FRAME
1554	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
1555	POP_FRAME_WITHOUT_LR
1556	b		check_user_asts								// Now try again
1557
1558	.text
1559	.align 2
1560preempt_underflow:
1561	mrs		x0, TPIDR_EL1
1562	str		x0, [sp, #-16]!						// We'll print thread pointer
1563	adr		x0, L_underflow_str					// Format string
1564	CALL_EXTERN panic							// Game over
1565
1566L_underflow_str:
1567	.asciz "Preemption count negative on thread %p"
1568.align 2
1569
1570#if MACH_ASSERT
1571	.text
1572	.align 2
1573preempt_count_notzero:
1574	mrs		x0, TPIDR_EL1
1575	str		x0, [sp, #-16]!						// We'll print thread pointer
1576	ldr		w0, [x0, ACT_PREEMPT_CNT]
1577	str		w0, [sp, #8]
1578	adr		x0, L_preempt_count_notzero_str				// Format string
1579	CALL_EXTERN panic							// Game over
1580
1581L_preempt_count_notzero_str:
1582	.asciz "preemption count not 0 on thread %p (%u)"
1583#endif /* MACH_ASSERT */
1584
1585#if __ARM_KERNEL_PROTECT__
1586	/*
1587	 * This symbol denotes the end of the exception vector/eret range; we page
1588	 * align it so that we can avoid mapping other text in the EL0 exception
1589	 * vector mapping.
1590	 */
1591	.text
1592	.align 14
1593	.globl EXT(ExceptionVectorsEnd)
1594LEXT(ExceptionVectorsEnd)
1595#endif /* __ARM_KERNEL_PROTECT__ */
1596
1597#if XNU_MONITOR && !CONFIG_SPTM
1598
1599/*
1600 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1601 * mostly concerned with setting up state for the normal fleh code.
1602 */
1603	.text
1604	.align 2
1605fleh_synchronous_from_ppl:
1606	/* Save x0. */
1607	mov		x15, x0
1608
1609	/* Grab the ESR. */
1610	mrs		x1, ESR_EL1							// Get the exception syndrome
1611
1612	/* If the stack pointer is corrupt, it will manifest either as a data abort
1613	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1614	 * these quickly by testing bit 5 of the exception class.
1615	 */
1616	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1617	mrs		x0, SP_EL0							// Get SP_EL0
1618
1619	/* Perform high level checks for stack corruption. */
1620	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
1621	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1622	cmp		x1, x2								// If we have a stack alignment exception
1623	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
1624	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1625	cmp		x1, x2								// If we have a data abort, we need to
1626	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer
1627
1628Ltest_pstack:
1629	/* Bounds check the PPL stack. */
1630	adrp	x10, EXT(pmap_stacks_start)@page
1631	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1632	adrp	x11, EXT(pmap_stacks_end)@page
1633	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1634	cmp		x0, x10
1635	b.lo	Lcorrupt_ppl_stack
1636	cmp		x0, x11
1637	b.hi	Lcorrupt_ppl_stack
1638
1639Lvalid_ppl_stack:
1640	/* Restore x0. */
1641	mov		x0, x15
1642
1643	/* Switch back to the kernel stack. */
1644	msr		SPSel, #0
1645	GET_PMAP_CPU_DATA x5, x6, x7
1646	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1647	mov		sp, x6
1648
1649	/* Hand off to the synch handler. */
1650	b		EXT(fleh_synchronous)
1651
1652Lcorrupt_ppl_stack:
1653	/* Restore x0. */
1654	mov		x0, x15
1655
1656	/* Hand off to the invalid stack handler. */
1657	b		fleh_invalid_stack
1658
1659fleh_fiq_from_ppl:
1660	SWITCH_TO_INT_STACK	tmp=x25
1661	b		EXT(fleh_fiq)
1662
1663fleh_irq_from_ppl:
1664	SWITCH_TO_INT_STACK	tmp=x25
1665	b		EXT(fleh_irq)
1666
1667fleh_serror_from_ppl:
1668	GET_PMAP_CPU_DATA x5, x6, x7
1669	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1670	mov		sp, x6
1671	b		EXT(fleh_serror)
1672
1673
1674
1675
1676	// x15: ppl call number
1677	// w10: ppl_state
1678	// x20: gxf_enter caller's DAIF
1679	.globl EXT(ppl_trampoline_start)
1680LEXT(ppl_trampoline_start)
1681
1682
1683#error "XPRR configuration error"
1684	cmp		x14, x21
1685	b.ne	Lppl_fail_dispatch
1686
1687	/* Verify the request ID. */
1688	cmp		x15, PMAP_COUNT
1689	b.hs	Lppl_fail_dispatch
1690
1691	GET_PMAP_CPU_DATA	x12, x13, x14
1692
1693	/* Mark this CPU as being in the PPL. */
1694	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1695
1696	cmp		w9, #PPL_STATE_KERNEL
1697	b.eq		Lppl_mark_cpu_as_dispatching
1698
1699	/* Check to see if we are trying to trap from within the PPL. */
1700	cmp		w9, #PPL_STATE_DISPATCH
1701	b.eq		Lppl_fail_dispatch_ppl
1702
1703
1704	/* Ensure that we are returning from an exception. */
1705	cmp		w9, #PPL_STATE_EXCEPTION
1706	b.ne		Lppl_fail_dispatch
1707
1708	// where is w10 set?
1709	// in CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1710	cmp		w10, #PPL_STATE_EXCEPTION
1711	b.ne		Lppl_fail_dispatch
1712
1713	/* This is an exception return; set the CPU to the dispatching state. */
1714	mov		w9, #PPL_STATE_DISPATCH
1715	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1716
1717	/* Find the save area, and return to the saved PPL context. */
1718	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1719	mov		sp, x0
1720	b		EXT(return_to_ppl)
1721
1722Lppl_mark_cpu_as_dispatching:
1723	cmp		w10, #PPL_STATE_KERNEL
1724	b.ne		Lppl_fail_dispatch
1725
1726	/* Mark the CPU as dispatching. */
1727	mov		w13, #PPL_STATE_DISPATCH
1728	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1729
1730	/* Switch to the regular PPL stack. */
1731	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1732	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1733
1734	// SP0 is thread stack here
1735	mov		x21, sp
1736	// SP0 is now PPL stack
1737	mov		sp, x9
1738
1739	/* Save the old stack pointer off in case we need it. */
1740	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1741
1742	/* Get the handler for the request */
1743	adrp	x9, EXT(ppl_handler_table)@page
1744	add		x9, x9, EXT(ppl_handler_table)@pageoff
1745	add		x9, x9, x15, lsl #3
1746	ldr		x10, [x9]
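	/*
	 * In effect, x10 = ppl_handler_table[x15]: the table holds 8-byte
	 * handler pointers indexed by the PPL request number, which was already
	 * range-checked against PMAP_COUNT above.
	 */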
1747
1748	/* Branch to the code that will invoke the PPL request. */
1749	b		EXT(ppl_dispatch)
1750
1751Lppl_fail_dispatch_ppl:
1752	/* Switch back to the kernel stack. */
1753	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1754	mov		sp, x10
1755
1756Lppl_fail_dispatch:
1757	/* Indicate that we failed. */
1758	mov		x15, #PPL_EXIT_BAD_CALL
1759
1760	/* Move the DAIF bits into the expected register. */
1761	mov		x10, x20
1762
1763	/* Return to kernel mode. */
1764	b		ppl_return_to_kernel_mode
1765
1766Lppl_dispatch_exit:
1767
1768	/* Indicate that we are cleanly exiting the PPL. */
1769	mov		x15, #PPL_EXIT_DISPATCH
1770
1771	/* Switch back to the original (kernel thread) stack. */
1772	mov		sp, x21
1773
1774	/* Move the saved DAIF bits. */
1775	mov		x10, x20
1776
1777	/* Clear the in-flight pmap pointer */
1778	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
1779	stlr		xzr, [x13]
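	/*
	 * stlr is a store-release, so the PPL's earlier memory accesses are
	 * ordered before other observers can see the in-flight pmap pointer as
	 * cleared.
	 */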
1780
1781	/* Clear the old stack pointer. */
1782	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1783
1784	/*
1785	 * Mark the CPU as no longer being in the PPL.  We spin if our state
1786	 * machine is broken.
1787	 */
1788	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1789	cmp		w9, #PPL_STATE_DISPATCH
1790	b.ne		.
1791	mov		w9, #PPL_STATE_KERNEL
1792	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1793
1794	/* Return to the kernel. */
1795	b ppl_return_to_kernel_mode
1796
1797
1798
1799	.text
1800ppl_exit:
1801	/*
1802	 * If we are dealing with an exception, hand off to the first level
1803	 * exception handler.
1804	 */
1805	cmp		x15, #PPL_EXIT_EXCEPTION
1806	b.eq	Ljump_to_fleh_handler
1807
1808	/* If this was a panic call from the PPL, reinvoke panic. */
1809	cmp		x15, #PPL_EXIT_PANIC_CALL
1810	b.eq	Ljump_to_panic_trap_to_debugger
1811
1812	/*
1813	 * Stash off the original DAIF in the high bits of the exit code register.
1814	 * We could keep this in a dedicated register, but that would require us to copy it to
1815	 * an additional callee-save register below (e.g. x22), which in turn would require that
1816	 * register to be saved/restored at PPL entry/exit.
1817	 */
1818	add		x15, x15, x10, lsl #32
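	/*
	 * x15 now packs two values: the PPL exit code in bits [31:0] and the
	 * caller's DAIF in bits [63:32].  They are split apart again by the
	 * ubfx/bfc pair just before DAIF is restored below.
	 */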
1819
1820	/* Load the preemption count. */
1821	mrs		x10, TPIDR_EL1
1822	ldr		w12, [x10, ACT_PREEMPT_CNT]
1823
1824	/* Detect underflow */
1825	cbnz	w12, Lno_preempt_underflow
1826	b		preempt_underflow
1827Lno_preempt_underflow:
1828
1829	/* Lower the preemption count. */
1830	sub		w12, w12, #1
1831
1832#if SCHED_HYGIENE_DEBUG
1833	/* Collect preemption disable measurement if necessary. */
1834
1835	/*
1836	 * Only collect measurement if this reenabled preemption,
1837	 * and SCHED_HYGIENE_MARKER is set.
1838	 */
1839	mov		x20, #SCHED_HYGIENE_MARKER
1840	cmp		w12, w20
1841	b.ne	Lskip_collect_measurement
1842
1843	/* Stash our return value and return reason. */
1844	mov		x20, x0
1845	mov		x21, x15
1846
1847	/* Collect measurement. */
1848	bl		EXT(_collect_preemption_disable_measurement)
1849
1850	/* Restore the return value and the return reason. */
1851	mov		x0, x20
1852	mov		x15, x21
1853	/* ... and w12, which is now 0. */
1854	mov		w12, #0
1855
1856	/* Restore the thread pointer into x10. */
1857	mrs		x10, TPIDR_EL1
1858
1859Lskip_collect_measurement:
1860#endif /* SCHED_HYGIENE_DEBUG */
1861
1862	/* Save the lowered preemption count. */
1863	str		w12, [x10, ACT_PREEMPT_CNT]
1864
1865	/* Skip ASTs if the preemption count is not zero. */
1866	cbnz	x12, Lppl_skip_ast_taken
1867
1868	/*
1869	 * Skip the AST check if interrupts were originally disabled.
1870	 * The original DAIF state prior to PPL entry is stored in the upper
1871	 * 32 bits of x15.
1872	 */
1873	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken
1874
1875	/* If there is no urgent AST, skip the AST. */
1876	ldr		x12, [x10, ACT_CPUDATAP]
1877	ldr		w14, [x12, CPU_PENDING_AST]
1878	tst		w14, AST_URGENT
1879	b.eq	Lppl_skip_ast_taken
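	/* i.e. ast_taken_kernel() is only invoked when AST_URGENT is pending. */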
1880
1881	/* Stash our return value and return reason. */
1882	mov		x20, x0
1883	mov		x21, x15
1884
1885	/* Handle the AST. */
1886	bl		EXT(ast_taken_kernel)
1887
1888	/* Restore the return value and the return reason. */
1889	mov		x15, x21
1890	mov		x0, x20
1891
1892Lppl_skip_ast_taken:
1893
1894	/* Extract caller DAIF from high-order bits of exit code */
1895	ubfx	x10, x15, #32, #32
1896	bfc		x15, #32, #32
1897	msr		DAIF, x10
1898
1899	/* Pop the stack frame. */
1900	ldp		x29, x30, [sp, #0x10]
1901	ldp		x20, x21, [sp], #0x20
1902
1903	/* Check to see if this was a bad request. */
1904	cmp		x15, #PPL_EXIT_BAD_CALL
1905	b.eq	Lppl_bad_call
1906
1907	/* Return. */
1908	ARM64_STACK_EPILOG
1909
1910	.align 2
1911Ljump_to_fleh_handler:
1912	br	x25
1913
1914	.align 2
1915Ljump_to_panic_trap_to_debugger:
1916	b		EXT(panic_trap_to_debugger)
1917
1918Lppl_bad_call:
1919	/* Panic. */
1920	adrp	x0, Lppl_bad_call_panic_str@page
1921	add		x0, x0, Lppl_bad_call_panic_str@pageoff
1922	b		EXT(panic)
1923
1924	.text
1925	.align 2
1926	.globl EXT(ppl_dispatch)
1927LEXT(ppl_dispatch)
1928	/*
1929	 * Save a couple of important registers (implementation detail; x12 has
1930	 * the PPL per-CPU data address; x13 is not actually interesting).
1931	 */
1932	stp		x12, x13, [sp, #-0x10]!
1933
1934	/*
1935	 * Restore the original AIF state, but keep D set so that debug
1936	 * exceptions remain masked while PPL code runs.
1937	 */
1938	orr		x8, x20, DAIF_DEBUGF
1939	msr		DAIF, x8
1940
1941	/*
1942	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
1943	 * but the exception vectors will deal with this properly.
1944	 */
1945
1946	/* Invoke the PPL method. */
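	/*
	 * With pointer authentication, blraa authenticates the handler pointer
	 * in x10 using the IA key, with the address of its ppl_handler_table
	 * slot (x9) as the modifier, before branching; otherwise a plain blr is
	 * used.
	 */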
1947#ifdef HAS_APPLE_PAC
1948	blraa		x10, x9
1949#else
1950	blr		x10
1951#endif
1952
1953	/* Disable DAIF. */
1954	msr		DAIFSet, #(DAIFSC_ALL)
1955
1956	/* Restore those important registers. */
1957	ldp		x12, x13, [sp], #0x10
1958
1959	/* Mark this as a regular return, and hand off to the return path. */
1960	b		Lppl_dispatch_exit
1961
1962	.text
1963	.align 2
1964	.globl EXT(ppl_bootstrap_dispatch)
1965LEXT(ppl_bootstrap_dispatch)
1966	/* Verify the PPL request. */
1967	cmp		x15, PMAP_COUNT
1968	b.hs	Lppl_fail_bootstrap_dispatch
1969
1970	/* Get the requested PPL routine. */
1971	adrp	x9, EXT(ppl_handler_table)@page
1972	add		x9, x9, EXT(ppl_handler_table)@pageoff
1973	add		x9, x9, x15, lsl #3
1974	ldr		x10, [x9]
1975
1976	/* Invoke the requested PPL routine. */
1977#ifdef HAS_APPLE_PAC
1978	blraa		x10, x9
1979#else
1980	blr		x10
1981#endif
1982	LOAD_PMAP_CPU_DATA	x9, x10, x11
1983
1984	/* Clear the in-flight pmap pointer */
1985	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
1986	stlr		xzr, [x9]
1987
1988	/* Stash off the return value */
1989	mov		x20, x0
1990	/* Drop the preemption count */
1991	bl		EXT(_enable_preemption)
1992	mov		x0, x20
1993
1994	/* Pop the stack frame. */
1995	ldp		x29, x30, [sp, #0x10]
1996	ldp		x20, x21, [sp], #0x20
1997#if __has_feature(ptrauth_returns)
1998	retab
1999#else
2000	ret
2001#endif
2002
2003Lppl_fail_bootstrap_dispatch:
2004	/* Pop our stack frame and panic. */
2005	ldp		x29, x30, [sp, #0x10]
2006	ldp		x20, x21, [sp], #0x20
2007#if __has_feature(ptrauth_returns)
2008	autibsp
2009#endif
2010	adrp	x0, Lppl_bad_call_panic_str@page
2011	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2012	b		EXT(panic)
2013
2014	.text
2015	.align 2
2016	.globl EXT(ml_panic_trap_to_debugger)
2017LEXT(ml_panic_trap_to_debugger)
2018	mrs		x10, DAIF
2019	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
2020
2021	adrp		x12, EXT(pmap_ppl_locked_down)@page
2022	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
2023	cbz		w12, Lnot_in_ppl_dispatch
2024
2025	LOAD_PMAP_CPU_DATA	x11, x12, x13
2026
2027	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
2028	cmp		w12, #PPL_STATE_DISPATCH
2029	b.ne		Lnot_in_ppl_dispatch
2030
2031	/* Indicate (for the PPL->kernel transition) that we are panicking. */
2032	mov		x15, #PPL_EXIT_PANIC_CALL
2033
2034	/* Restore the old stack pointer, as we can't push onto the PPL stack after we exit the PPL. */
2035	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
2036	mov		sp, x12
2037
2038	mrs		x10, DAIF
2039	mov		w13, #PPL_STATE_PANIC
2040	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
2041
2042	/**
2043	 * When we panic in PPL, we might have un-synced PTE updates. Shoot down
2044	 * all the TLB entries.
2045	 *
2046	 * A check must be done here against CurrentEL because the alle1is flavor
2047	 * of tlbi is not available to EL1, but the vmalle1is flavor is. When PPL
2048	 * runs at GL2, we can issue an alle2is and an alle1is tlbi to kill all
2049	 * the TLB entries. When PPL runs at GL1, as a guest or on an pre-H13
2050	 * the TLB entries. When PPL runs at GL1, as a guest or on a pre-H13
2051	 *
2052	 * Note that we only do this after passing the `PPL_STATE_DISPATCH` check
2053	 * because if we did this for every panic, including the ones triggered
2054	 * by fabric problems, we may be stuck at the DSB below and trigger an AP
2055	 * watchdog.
2056	 */
2057	mrs		x12, CurrentEL
2058	cmp		x12, PSR64_MODE_EL2
2059	b.ne		Lnot_in_gl2
2060	tlbi		alle2is
2061	tlbi		alle1is
2062	b		Ltlb_invalidate_all_done
2063Lnot_in_gl2:
2064	tlbi		vmalle1is
2065Ltlb_invalidate_all_done:
2066	dsb		ish
2067	isb
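	/*
	 * dsb ish waits for the broadcast TLB invalidations above to complete
	 * across the inner-shareable domain; the isb then resynchronizes this
	 * CPU's instruction stream.
	 */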
2068
2069	/* Now we are ready to exit the PPL. */
2070	b		ppl_return_to_kernel_mode
2071Lnot_in_ppl_dispatch:
2072	msr		DAIF, x10
2073	ret
2074
2075	.data
2076Lppl_bad_call_panic_str:
2077	.asciz "ppl_dispatch: failed due to bad arguments/state"
2078#else /* XNU_MONITOR && !CONFIG_SPTM */
2079	.text
2080	.align 2
2081	.globl EXT(ml_panic_trap_to_debugger)
2082LEXT(ml_panic_trap_to_debugger)
2083	ret
2084#endif /* XNU_MONITOR && !CONFIG_SPTM */
2085
2086#if CONFIG_SPTM
2087	.text
2088	.align 2
2089
2090	.globl EXT(_sptm_pre_entry_hook)
2091LEXT(_sptm_pre_entry_hook)
2092	/* Push a frame. */
2093	ARM64_STACK_PROLOG
2094	PUSH_FRAME
2095	stp		x20, x21, [sp, #-0x10]!
2096
2097	/* Increase the preemption count. */
2098	mrs		x9, TPIDR_EL1
2099	cbz		x9, Lskip_preemption_check_sptmhook
2100	ldr		w10, [x9, ACT_PREEMPT_CNT]
2101	add		w10, w10, #1
2102	str		w10, [x9, ACT_PREEMPT_CNT]
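	/*
	 * This open-codes a preemption-disable (bumping the thread's preemption
	 * count); the matching decrement happens in _sptm_post_exit_hook once
	 * the SPTM call returns.
	 */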
2103
2104#if SCHED_HYGIENE_DEBUG
2105	/* Prepare preemption disable measurement, if necessary. */
2106
2107	/* Only prepare if we actually disabled preemption. */
2108	cmp		w10, #1
2109	b.ne	Lskip_prepare_measurement_sptmhook
2110
2111	/* Don't prepare if measuring is off completely. */
2112	adrp	x10, _sched_preemption_disable_debug_mode@page
2113	add		x10, x10, _sched_preemption_disable_debug_mode@pageoff
2114	ldr		w10, [x10]
2115	cmp		w10, #0
2116	b.eq	Lskip_prepare_measurement_sptmhook
2117
2118	/* Save arguments to SPTM function and SPTM function id. */
2119	mov		x20, x16
2120	stp		x0, x1, [sp, #-0x40]!
2121	stp		x2, x3, [sp, #0x10]
2122	stp		x4, x5, [sp, #0x20]
2123	stp		x6, x7, [sp, #0x30]
2124
2125	/* Call prepare function with thread pointer as first arg. */
2126	bl		EXT(_prepare_preemption_disable_measurement)
2127
2128	/* Restore arguments to SPTM function and SPTM function id. */
2129	ldp		x6, x7, [sp, #0x30]
2130	ldp		x4, x5, [sp, #0x20]
2131	ldp		x2, x3, [sp, #0x10]
2132	ldp		x0, x1, [sp]
2133	add		sp, sp, #0x40
2134	mov		x16, x20
2135
2136Lskip_prepare_measurement_sptmhook:
2137#endif /* SCHED_HYGIENE_DEBUG */
2138Lskip_preemption_check_sptmhook:
2139	/* assert we're not calling from guarded mode */
2140	mrs		x14, CurrentG
2141	cmp		x14, #0
2142	b.ne	.
2143
2144	ldp		x20, x21, [sp], #0x10
2145	POP_FRAME
2146	ARM64_STACK_EPILOG
2147
2148	.align 2
2149	.globl EXT(_sptm_post_exit_hook)
2150LEXT(_sptm_post_exit_hook)
2151	ARM64_STACK_PROLOG
2152	PUSH_FRAME
2153	stp		x20, x21, [sp, #-0x10]!
2154
2155	/* Save SPTM return value(s) */
2156	stp		x0, x1, [sp, #-0x40]!
2157	stp		x2, x3, [sp, #0x10]
2158	stp		x4, x5, [sp, #0x20]
2159	stp		x6, x7, [sp, #0x30]
2160
2161	/* Load the preemption count. */
2162	mrs		x0, TPIDR_EL1
2163	cbz		x0, Lsptm_skip_ast_taken_sptmhook
2164	ldr		w12, [x0, ACT_PREEMPT_CNT]
2165
2166	/* Detect underflow */
2167	cbnz	w12, Lno_preempt_underflow_sptmhook
2168	/* No need to clean up the stack, as preempt_underflow calls panic */
2169	b		preempt_underflow
2170Lno_preempt_underflow_sptmhook:
2171
2172	/* Lower the preemption count. */
2173	sub		w12, w12, #1
2174
2175#if SCHED_HYGIENE_DEBUG
2176	/* Collect preemption disable measurement if necessary. */
2177
2178	/*
2179	 * Only collect measurement if this reenabled preemption,
2180	 * and SCHED_HYGIENE_MARKER is set.
2181	 */
2182	mov		x20, #SCHED_HYGIENE_MARKER
2183	cmp		w12, w20
2184	b.ne	Lskip_collect_measurement_sptmhook
2185
2186	/* Collect measurement. */
2187	bl		EXT(_collect_preemption_disable_measurement)
2188
2189	/* Restore w12, which is now 0. */
2190	mov		w12, #0
2191
2192	/* Restore x0 as the thread pointer */
2193	mrs		x0, TPIDR_EL1
2194
2195Lskip_collect_measurement_sptmhook:
2196#endif /* SCHED_HYGIENE_DEBUG */
2197
2198	/* Save the lowered preemption count. */
2199	str		w12, [x0, ACT_PREEMPT_CNT]
2200
2201	/* Skip ASTs if the preemption count is not zero. */
2202	cbnz	w12, Lsptm_skip_ast_taken_sptmhook
2203
2204	/**
2205	 * Skip the AST check if interrupts were originally disabled. The original
2206	 * DAIF value needs to be placed into a callee-saved register so that the
2207	 * value is preserved across the ast_taken_kernel() call.
2208	 */
2209	mrs		x20, DAIF
2210	tbnz	x20, #(DAIF_IRQF_SHIFT), Lsptm_skip_ast_taken_sptmhook
2211
2212	/* If there is no urgent AST, skip the AST. */
2213	ldr		x12, [x0, ACT_CPUDATAP]
2214	ldr		x14, [x12, CPU_PENDING_AST]
2215	tst		x14, AST_URGENT
2216	b.eq	Lsptm_skip_ast_taken_sptmhook
2217
2218	/* Handle the AST. This call requires interrupts to be disabled. */
2219	msr		DAIFSet, #(DAIFSC_ALL)
2220	bl		EXT(ast_taken_kernel)
2221	msr		DAIF, x20
2222
2223Lsptm_skip_ast_taken_sptmhook:
2224
2225	/* Restore SPTM return value(s) */
2226	ldp		x6, x7, [sp, #0x30]
2227	ldp		x4, x5, [sp, #0x20]
2228	ldp		x2, x3, [sp, #0x10]
2229	ldp		x0, x1, [sp]
2230	add		sp, sp, #0x40
2231
2232	/* Return. */
2233	ldp		x20, x21, [sp], #0x10
2234	POP_FRAME
2235	ARM64_STACK_EPILOG
2236#endif /* CONFIG_SPTM */
2237
2238/* ARM64_TODO Is globals_asm.h needed? */
2239//#include	"globals_asm.h"
2240
2241/* vim: set ts=4: */
2242