xref: /xnu-11417.140.69/osfmk/arm64/locore.s (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
#include "dwarf_unwind.h"

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif

#if XNU_MONITOR && !CONFIG_SPTM
/*
 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
 *
 * Checks if an exception was taken from the PPL, and if so, trampolines back
 * into the PPL.
 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
 *         exception was taken while in the PPL.
 */
.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp		x26, xzr
	b.eq		1f

	/* Return to the PPL. */
	mov		x15, #0
	mov		w10, #PPL_STATE_EXCEPTION
#error "XPRR configuration error"
1:
.endmacro


#endif /* XNU_MONITOR && !CONFIG_SPTM */

#if CONFIG_SPTM
#include <sptm/sptm_xnu.h>
#include <sptm/sptm_common.h>
/*
 * Panic lockdown is a security enhancement which makes certain types of
 * exceptions (generally, PAC failures and sync exceptions taken with async
 * exceptions masked) and panics fatal against attackers with kernel R/W. It
 * does this through a trapdoor panic bit protected by the SPTM.
 * When this bit is set, TXM will refuse to authorize new code mappings which,
 * ideally, renders the system unusable even if the attacker gains control over
 * XNU. Additionally, when this bit is set XNU will refuse to handle any sync
 * exceptions originating from user space. This makes implementing further stages
 * of an exploit challenging as it prevents user space from driving the kernel.
 */
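
/*
 * As a rough C sketch of the policy described above (all names here are
 * illustrative, not the actual SPTM/TXM interfaces):
 *
 *	if (sptm_panic_lockdown_bit_is_set()) {
 *		txm_deny_new_code_mappings();	// no new executable mappings
 *		deny_el0_sync_exceptions();	// user space can no longer drive XNU
 *	}
 */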

/*
 * Inform the SPTM that XNU has panicked (or, rather, must panic). This is
 * provided as a macro rather than a function since it's just one instruction
 * on release builds, and it avoids the need to spill a return address unless
 * the macro's caller explicitly needs to preserve LR.
 *
 * On CONFIG_XNUPOST, this macro returns 1 in x0 if a simulated lockdown
 * was performed, and 0 otherwise.
 *
 * This macro preserves callee-saved registers but clobbers all others.
 */
.macro BEGIN_PANIC_LOCKDOWN unused
#if DEVELOPMENT || DEBUG
	/*
	 * Forcefully clobber all caller-saved GPRs on DEVELOPMENT || DEBUG
	 * kernels so we don't accidentally violate our contract with SPTM.
	 */
	mov		x0, #0
	mov		x1, #0
	mov		x2, #0
	mov		x3, #0
	mov		x4, #0
	mov		x5, #0
	mov		x6, #0
	mov		x7, #0
	mov		x8, #0
	mov		x9, #0
	mov		x10, #0
	mov		x11, #0
	mov		x12, #0
	mov		x13, #0
	mov		x14, #0
	mov		x15, #0
	mov		x16, #0
	mov		x17, #0
	mov		x18, #0

	/* Attempt to record the debug trace */
	bl		EXT(panic_lockdown_record_debug_data)

#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_XNUPOST
	mrs		x0, TPIDR_EL1
	/*
	 * If hitting this with a null TPIDR, it's likely that this was an unexpected
	 * exception in early boot rather than an expected one as a part of a test.
	 * Trigger lockdown.
	 */
	cbz		x0, Lbegin_panic_lockdown_real_\@
	ldr		x1, [x0, TH_EXPECTED_FAULT_HANDLER]
	/* Is a fault handler installed? */
	cbz		x1, Lbegin_panic_lockdown_real_\@

	/* Do the VA bits of ELR match the expected fault PC? */
	ldr		x1, [x0, TH_EXPECTED_FAULT_PC]
	mrs		x2, ELR_EL1
	mov		x3, #((1 << (64 - T1SZ_BOOT - 1)) - 1)
	and		x4, x1, x3
	and		x5, x2, x3
	cmp		x4, x5
	b.eq	Lbegin_panic_lockdown_simulated_\@
	/* If we had an expected PC but didn't hit it, fail out */
	cbnz	x1, Lbegin_panic_lockdown_real_\@

	/* Alternatively, do the FAR VA bits match the expected fault address? */
	ldr		x1, [x0, TH_EXPECTED_FAULT_ADDR]
	mrs		x2, FAR_EL1
	and		x4, x1, x3
	and		x5, x2, x3
	cmp		x4, x5
	b.eq	Lbegin_panic_lockdown_simulated_\@

Lbegin_panic_lockdown_real_\@:
#endif /* CONFIG_XNUPOST */
	/*
	 * Once called, the sptm_xnu_panic_begin routine unavoidably leads to the
	 * panic bit being set.
	 */
	bl		EXT(sptm_xnu_panic_begin)
#if CONFIG_XNUPOST
	mov		x0, #0 // not a simulated lockdown
	b		Lbegin_panic_lockdown_continue_\@
Lbegin_panic_lockdown_simulated_\@:
	/*
	 * We hit lockdown with a matching exception handler installed.
	 * Since this is an expected test exception, skip setting the panic bit
	 * (since this will kill the system) and instead set a bit in the test
	 * handler.
	 */
	mov		x0, #1 // this is a simulated lockdown!
	adrp	x1, EXT(xnu_post_panic_lockdown_did_fire)@page
	strb	w0, [x1, EXT(xnu_post_panic_lockdown_did_fire)@pageoff]
	mov		lr, xzr // trash LR to ensure callers don't rely on it
Lbegin_panic_lockdown_continue_\@:
#endif /* CONFIG_XNUPOST */
.endmacro
#endif /* CONFIG_SPTM */

/*
 * MAP_KERNEL
 *
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
.macro MAP_KERNEL
#if __ARM_KERNEL_PROTECT__
	/* Switch to the kernel ASID (low bit set) for the task. */
	mrs		x18, TTBR0_EL1
	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr		TTBR0_EL1, x18

	/*
	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
	 * to the TTBRs and writes to the TCR should be ensured by the
	 * microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
	isb		sy
#endif

	/*
	 * Update the TCR to map the kernel now that we are using the kernel
	 * ASID.
	 */
	MOV64		x18, TCR_EL1_BOOT
	msr		TCR_EL1, x18
	isb		sy
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro
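
/*
 * A rough C model of what MAP_KERNEL does (the register accessors are
 * illustrative; TTBR_ASID_SHIFT and TCR_EL1_BOOT are the real constants from
 * proc_reg.h):
 *
 *	uint64_t ttbr0 = read_ttbr0_el1();
 *	write_ttbr0_el1(ttbr0 | (1ULL << TTBR_ASID_SHIFT));	// kernel ASID
 *	write_tcr_el1(TCR_EL1_BOOT);				// re-map the kernel half
 *	isb();
 */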

/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR

#if __ARM_KERNEL_PROTECT__
	/*
	 * Find the kernelcache table for the exception vectors by accessing
	 * the per-CPU data.
	 */
	mrs		x18, TPIDR_EL1
	ldr		x18, [x18, ACT_CPUDATAP]
	ldr		x18, [x18, CPU_EXC_VECTORS]

	/*
	 * Get the handler for this exception and jump to it.
	 */
	ldr		x18, [x18, #($1 << 3)]
	br		x18
#else
	b		$0
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro

/*
 * CHECK_KERNEL_STACK
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: happens before saving registers (in case we can't
 * save to kernel stack).
 *
 * Expects:
 *	{x0, x1} - saved
 *	x1 - Exception syndrome
 *	sp - Saved state
 *
 * Seems like we need an unused argument to the macro for the \@ syntax to work
 *
 */
.macro CHECK_KERNEL_STACK unused
	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a stack alignment exception
	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a data abort, we need to
	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
	mrs		x0, SP_EL0					// Get SP_EL0
	mrs		x1, TPIDR_EL1						// Get thread pointer
	cbnz	x1, Ltest_kstack_\@					// Can only continue if TPIDR_EL1 is set
0:
	wfe
	b		0b									// Can't do much else but wait here for debugger.
Ltest_kstack_\@:
	LOAD_KERN_STACK_TOP	dst=x2, src=x1, tmp=x3	// Get top of kernel stack
	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
	cmp		x0, x2								// if (SP_EL0 >= kstack top)
	b.ge	Ltest_istack_\@						//    jump to istack test
	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
	b.gt	Lvalid_stack_\@						//    stack pointer valid
Ltest_istack_\@:
	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
	cmp		x0, x2								// if (SP_EL0 >= istack top)
	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
	cmp		x0, x3								// if (SP_EL0 > istack bottom)
	b.gt	Lvalid_stack_\@						//    stack pointer valid
Lcorrupt_stack_\@:
	ldp		x2, x3, [sp], #16
	ldp		x0, x1, [sp], #16
	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
	mrs		x0, SP_EL0					// Get SP_EL0
	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov		x0, sp								// Copy exception frame pointer to x0
	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
	b		fleh_dispatch64
Lvalid_stack_\@:
	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
.endmacro
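
/*
 * The bounds test above is roughly equivalent to this C sketch (the helper
 * name is hypothetical; the sizes are the real assym.s constants, and the
 * assembly uses signed comparisons where this sketch uses unsigned ones):
 *
 *	static bool
 *	sp_plausible(uintptr_t sp, uintptr_t kstack_top, uintptr_t istack_top)
 *	{
 *		return (sp > kstack_top - KERNEL_STACK_SIZE && sp < kstack_top) ||
 *		    (sp > istack_top - INTSTACK_SIZE_NUM && sp < istack_top);
 *	}
 */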

/*
 * CHECK_EXCEPTION_CRITICAL_REGION
 *
 * Checks if the exception occurred within range [VECTOR_BEGIN, VECTOR_END).
 * If so, jumps to \fail_label. Otherwise, continues.
 * This is useful for avoiding infinite exception loops.
 *
 * Clobbers x18, NZCV.
 */
.macro CHECK_EXCEPTION_CRITICAL_REGION vector_begin, vector_end, fail_label
	/*
	 * We need two registers to do a compare but only have x18 free without
	 * spilling. We can't safely spill to memory yet, however, because doing so
	 * may fault. It's evil, but since we're operating on ELR here we can
	 * temporarily spill into it to get another free register as long as we put
	 * everything back at the end.
	 */
	mrs		x18, ELR_EL1
	msr		ELR_EL1, x19

	adrp	x19, \vector_begin@PAGE
	add		x19, x19, \vector_begin@PAGEOFF
	cmp		x18, x19 /* HS if at or above (suspect), LO if below (safe) */
	adrp	x19, \vector_end@PAGE
	add		x19, x19, \vector_end@PAGEOFF
	/*
	 * If ELR >= \vector_begin (HS), set flags for ELR - \vector_end. LO here
	 * indicates we are in range.
	 * Otherwise, set HS (C)
	 */
	ccmp	x18, x19, #0b0010 /* C/HS */, HS
	/* Unspill x19/fixup ELR */
	mrs		x19, ELR_EL1
	msr		ELR_EL1, x18
	mov		x18, #0
	/* If we're in the range, fail out */
	b.lo	\fail_label
.endmacro
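
/*
 * Worked example of the ccmp trick above: after "cmp x18, x19" against
 * \vector_begin, HS means ELR >= begin. The ccmp then compares ELR against
 * \vector_end only if HS held; otherwise it loads NZCV with the immediate
 * #0b0010, which sets C (HS) so the final b.lo cannot fire. The b.lo
 * therefore branches exactly when begin <= ELR < end.
 */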

/*
 * CHECK_EXCEPTION_STACK
 *
 * Verifies that SP1 is within exception stack and continues if it is.
 * If not, jumps to \invalid_stack_label as we have nothing to fall back on.
 *
 * (out) x18: The unauthenticated CPU_EXCEPSTACK_TOP used for the comparison or
 *            zero if the check could not be performed (such as because the
 *            thread pointer was invalid).
 *
 * Clobbers NZCV.
 */
.macro CHECK_EXCEPTION_STACK invalid_stack_label
	mrs		x18, TPIDR_EL1					// Get thread pointer
	/*
	 * The thread pointer might be invalid during early boot.
	 * Return zero in x18 to indicate that we failed to execute the check.
	 */
	cbz		x18, Lskip_stack_check_\@
	ldr		x18, [x18, ACT_CPUDATAP]
	cbz		x18, \invalid_stack_label		// If thread context is set, cpu data should be too
	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
	cmp		sp, x18
	b.gt	\invalid_stack_label			// Fail if above exception stack top
	sub		x18, x18, EXCEPSTACK_SIZE_NUM	// Find bottom of exception stack
	cmp		sp, x18
	b.lt	\invalid_stack_label			// Fail if below exception stack bottom
	add		x18, x18, EXCEPSTACK_SIZE_NUM	// Return stack top in x18
Lskip_stack_check_\@:
	/* FALLTHROUGH */
.endmacro

#if __ARM_KERNEL_PROTECT__
	.section __DATA_CONST,__const
	.align 3
	.globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
	/* Table of exception handlers.
	 * These handlers sometimes contain deadloops.
	 * It's nice to have symbols for them when debugging. */
	.quad el1_sp0_synchronous_vector_long
	.quad el1_sp0_irq_vector_long
	.quad el1_sp0_fiq_vector_long
	.quad el1_sp0_serror_vector_long
	.quad el1_sp1_synchronous_vector_long
	.quad el1_sp1_irq_vector_long
	.quad el1_sp1_fiq_vector_long
	.quad el1_sp1_serror_vector_long
	.quad el0_synchronous_vector_64_long
	.quad el0_irq_vector_64_long
	.quad el0_fiq_vector_64_long
	.quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */

	.text
#if __ARM_KERNEL_PROTECT__
	/*
	 * We need this to be on a page boundary so that we may avoid mapping
	 * other text along with it.  As this must be on the VM page boundary
	 * (due to how the coredumping code currently works), this will be a
	 * 16KB page boundary.
	 */
	.align 14
#else
	.align 12
#endif /* __ARM_KERNEL_PROTECT__ */
	.globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

	.text
	.align 7
Lel1_sp0_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

	.text
	.align 7
Lel1_sp0_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

	.text
	.align 7
Lel1_sp0_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

	.text
	.align 7
Lel1_sp1_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

	.text
	.align 7
Lel1_sp1_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

	.text
	.align 7
Lel1_sp1_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

	.text
	.align 7
Lel1_sp1_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

	.text
	.align 7
Lel0_synchronous_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

	.text
	.align 7
Lel0_irq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

	.text
	.align 7
Lel0_fiq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

	.text
	.align 7
Lel0_serror_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

	/* Fill out the rest of the page */
	.align 12

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/



.macro EL1_SP0_VECTOR
	msr		SPSel, #0							// Switch to SP0
	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov		x0, sp								// Copy saved state pointer to x0
.endmacro

.macro EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
	// SWITCH_TO_INT_STACK requires a clobberable tmp register, but at this
	// point in the exception vector we can't spare the extra GPR.  Instead note
	// that EL1_SP0_VECTOR ends with x0 == sp and use this to unclobber x0.
	mrs		x1, TPIDR_EL1
	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
	mov		x0, sp
	mov		sp, x1
.endmacro

el1_sp0_synchronous_vector_long:
	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
	mrs		x1, ESR_EL1							// Get the exception syndrome
	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
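	/* Concretely: ESR_EC_DABORT_EL1 (0x25 = 0b100101) and ESR_EC_SP_ALIGN
	 * (0x26 = 0b100110) both have bit 5 of the EC set, while e.g. an SVC
	 * (0x15 = 0b010101) does not, so the tbz below cheaply filters out
	 * classes that cannot indicate a corrupt stack. Other classes that
	 * happen to have bit 5 set simply pass through CHECK_KERNEL_STACK's
	 * exact comparisons as valid.
	 */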
	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
	CHECK_KERNEL_STACK
Lkernel_stack_valid:
	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
	add		x1, x1, EXT(fleh_synchronous)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
	b		fleh_dispatch64

el1_sp0_irq_vector_long:
	EL1_SP0_VECTOR
	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
	add		x1, x1, EXT(fleh_irq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el1_sp0_fiq_vector_long:
	// ARM64_TODO write optimized decrementer
	EL1_SP0_VECTOR
	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
	add		x1, x1, EXT(fleh_fiq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el1_sp0_serror_vector_long:
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_serror)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

.macro EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
.if \set_x0_to_exception_frame_ptr
	mov		x0, sp								// Copy saved state pointer to x0
.endif
.endmacro

el1_sp1_synchronous_vector_long:
	/*
	 * Before making our first (potentially faulting) memory access, check if we
	 * previously tried and failed to execute this vector. If we did, it's not
	 * going to work this time either so let's just spin.
	 */
#ifdef CONFIG_SPTM
	/*
	 * This check is doubly important for devices which support panic lockdown,
	 * as we use it to ensure that we can take only a bounded number of
	 * exceptions on SP1 while trying to spill before we give up on spilling
	 * and lock down anyway.
	 *
	 * Note, however, that we only check if we took an exception inside this
	 * vector. Although an attacker could cause exceptions outside this routine,
	 * they can only do this a finite number of times before overflowing the
	 * exception stack (causing CHECK_EXCEPTION_STACK to fail) since we subtract
	 * from SP inside the checked region and do not reload SP from memory before
	 * we hit the post-spill lockdown point in fleh_synchronous_sp1.
	 */
#endif /* CONFIG_SPTM */
	CHECK_EXCEPTION_CRITICAL_REGION el1_sp1_synchronous_vector_long, Lel1_sp1_synchronous_vector_long_end, EXT(el1_sp1_synchronous_vector_long_spill_failed)
	CHECK_EXCEPTION_STACK EXT(el1_sp1_synchronous_vector_long_spill_failed)
#ifdef KERNEL_INTEGRITY_KTRR
	b		check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif /* KERNEL_INTEGRITY_KTRR */
#if CONFIG_SPTM
	/* Don't bother setting up x0 since we need it as a temporary */
	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=0

	/*
	 * Did we fail to execute the stack check (x18 == 0)?
	 * On devices which support panic lockdown, we cannot allow this check to be
	 * skipped after early-boot, as doing so may allow exception processing to
	 * be delayed indefinitely.
	 */
	adrp	x0, EXT(startup_phase)@page
	ldr		w0, [x0, EXT(startup_phase)@pageoff]
	/* Are we in early-boot? */
	cmp		w0, #-1 // STARTUP_SUB_LOCKDOWN
	/*
	 * If we're still in early-boot (LO), set flags for if we skipped the check
	 * If we're after early-boot (HS), pass NE
	 */
	ccmp	x18, xzr, #0b0000 /* !Z/NE */, LO
	/* Skip authentication if this was an early boot check fail */
	b.eq	1f
	/*
	 * If we're not in early boot but still couldn't execute the stack bounds
	 * check (x18=0), something is wrong (TPIDR is corrupted?).
	 * Trigger a lockdown.
	 */
	cbz		x18, EXT(el1_sp1_synchronous_vector_long_spill_failed)

	/*
	 * In CHECK_EXCEPTION_STACK, we didn't have enough registers to perform the
	 * signature verification on the exception stack top value and instead used
	 * the unauthenticated value (x18) for the stack pointer bounds check.
	 *
	 * Ensure that we actually performed the check on a legitimate value now.
	 */
	mrs		x0, TPIDR_EL1
	LOAD_EXCEP_STACK_THREAD dst=x0, src=x0, tmp=x1
	cmp		x0, x18
	/* If we aren't equal, something is very wrong and we should lockdown. */
	b.ne	EXT(el1_sp1_synchronous_vector_long_spill_failed)

1:
	mov		x0, sp	/* Set x0 to saved state pointer */
#else
	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
#endif /* CONFIG_SPTM */
	adrp	x1, fleh_synchronous_sp1@page
	add		x1, x1, fleh_synchronous_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
	b		fleh_dispatch64

	/*
	 * Global symbol to make it easy to pick out in backtraces.
	 * Do not call externally.
	 */
	.global EXT(el1_sp1_synchronous_vector_long_spill_failed)
LEXT(el1_sp1_synchronous_vector_long_spill_failed)
	TRAP_UNWIND_PROLOGUE
	TRAP_UNWIND_DIRECTIVES
	/*
	 * We couldn't process the exception due to either having an invalid
	 * exception stack or because we previously tried to process it and failed.
	 */
#if CONFIG_SPTM
	/*
	 * For SP1 exceptions, we usually delay initiating lockdown until after
	 * we've spilled in order to not lose register state. Since we have nowhere
	 * to safely spill, we have no choice but to initiate it now, clobbering
	 * some of our exception state in the process (RIP).
	 */
	BEGIN_PANIC_LOCKDOWN
#if CONFIG_XNUPOST
	/* Macro returns x0=1 if it performed a simulated lockdown */
	cbz		x0, 0f
	/* This was a test; return to fault handler so they can fixup the system. */
	mrs		x0, TPIDR_EL1
	ldr		x16, [x0, TH_EXPECTED_FAULT_HANDLER]
#if __has_feature(ptrauth_calls)
	movk	x17, #TH_EXPECTED_FAULT_HANDLER_DIVERSIFIER
	autia	x16, x17
#endif /* ptrauth_calls */
	msr		ELR_EL1, x16
	/* Pass a NULL saved state since we didn't actually save anything */
	mov		x0, #0
	ERET_NO_STRAIGHT_LINE_SPECULATION
#endif /* CONFIG_XNUPOST */
#endif /* CONFIG_SPTM */
0:
	wfe
	b		0b // Spin for watchdog
	UNWIND_EPILOGUE

#if CONFIG_SPTM
#if CONFIG_XNUPOST
	/**
	 * Test function which raises an exception from a location considered inside
	 * the vector. Does not return.
	 */
	.global EXT(el1_sp1_synchronous_raise_exception_in_vector)
LEXT(el1_sp1_synchronous_raise_exception_in_vector)
	ARM64_PROLOG
	brk		#0
	/* Unreachable */
	b		.
#endif /* CONFIG_XNUPOST */
#endif /* CONFIG_SPTM */
Lel1_sp1_synchronous_vector_long_end:

el1_sp1_irq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_irq_sp1@page
	add		x1, x1, fleh_irq_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
	b		fleh_dispatch64

el1_sp1_fiq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_fiq_sp1@page
	add		x1, x1, fleh_fiq_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
	b		fleh_dispatch64

el1_sp1_serror_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_serror_sp1@page
	add		x1, x1, fleh_serror_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
	b		fleh_dispatch64


.macro EL0_64_VECTOR guest_label
	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
#endif
	mrs		x0, TPIDR_EL1						// Load the thread register
	LOAD_USER_PCB	dst=x0, src=x0, tmp=x1		// Load the user context pointer
	mrs		x1, SP_EL0							// Load the user stack pointer
	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
	msr		SPSel, #0							// Switch to SP0
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the user PCB
	mrs		x1, TPIDR_EL1						// Load the thread register


#if HAS_ARM_FEAT_SME
	str		x2, [sp, SS64_X2]
	// current_thread()->machine.umatrix_hdr == NULL: this thread has never
	// executed smstart, so no SME state to save
	ldr		x2, [x1, ACT_UMATRIX_HDR]
	cbz		x2, 1f

	mrs		x0, SVCR
	str		x0, [x2, SME_SVCR]
	// SVCR.SM == 0: save SVCR only (ZA is handled during context-switch)
	tbz		x0, #SVCR_SM_SHIFT, 1f

	// SVCR.SM == 1: save SVCR, Z, and P; and exit streaming SVE mode
	ldrh	w0, [x2, SME_SVL_B]
	add		x2, x2, SME_Z_P_ZA
	LOAD_OR_STORE_Z_P_REGISTERS	str, svl_b=x0, ss=x2
	mrs		x2, FPSR
	smstop	sm
	msr		FPSR, x2
1:
	ldr		x2, [sp, SS64_X2]
#endif /* HAS_ARM_FEAT_SME */

	mov		x0, sp								// Copy the user PCB pointer to x0
												// x1 contains thread register
.endmacro
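
/*
 * A rough C model of the SME save policy in EL0_64_VECTOR (field and helper
 * names are illustrative; the offsets come from assym.s):
 *
 *	if (thread->machine.umatrix_hdr != NULL) {
 *		hdr->svcr = read_svcr();
 *		if (hdr->svcr & (1ULL << SVCR_SM_SHIFT)) {
 *			save_z_p_registers(hdr);	// streaming state lives here
 *			smstop_sm();			// FPSR preserved around smstop
 *		}
 *	}
 */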

.macro EL0_64_VECTOR_SWITCH_TO_INT_STACK
	// Similarly to EL1_SP0_VECTOR_SWITCH_TO_INT_STACK, we need to take
	// advantage of EL0_64_VECTOR ending with x0 == sp.  EL0_64_VECTOR also
	// populates x1 with the thread state, so we can skip reloading it.
	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
	mov		x0, sp
	mov		sp, x1
.endmacro

.macro EL0_64_VECTOR_SWITCH_TO_KERN_STACK
	LOAD_KERN_STACK_TOP	dst=x1, src=x1, tmp=x0
	mov		x0, sp
	mov		sp, x1
.endmacro

el0_synchronous_vector_64_long:
	EL0_64_VECTOR	sync
	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
	add		x1, x1, EXT(fleh_synchronous)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
	b		fleh_dispatch64

el0_irq_vector_64_long:
	EL0_64_VECTOR	irq
	EL0_64_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_irq)@page					// load address for fleh
	add		x1, x1, EXT(fleh_irq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el0_fiq_vector_64_long:
	EL0_64_VECTOR	fiq
	EL0_64_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_fiq)@page					// load address for fleh
	add		x1, x1, EXT(fleh_fiq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el0_serror_vector_64_long:
	EL0_64_VECTOR	serror
	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
	adrp	x1, EXT(fleh_serror)@page				// load address for fleh
	add		x1, x1, EXT(fleh_serror)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64


#if defined(KERNEL_INTEGRITY_KTRR)
	.text
	.align 2
check_ktrr_sctlr_trap:
/* We may abort on an instruction fetch on reset when enabling the MMU by
 * writing SCTLR_EL1 because the page containing the privileged instruction is
 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
 * would otherwise panic unconditionally. Check for the condition and return
 * safe execution to the caller on behalf of the faulting function.
 *
 * Expected register state:
 *  x22 - Kernel virtual base
 *  x23 - Kernel physical base
 */
	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
	movz	w1, #0x8600, lsl #16
	movk	w1, #0x0000
	cmp		x0, x1
	mrs		x0, ELR_EL1					// Check for expected abort address
	adrp	x1, _pinst_set_sctlr_trap_addr@page
	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
	sub		x1, x1, x22					// Convert to physical address
	add		x1, x1, x23
	ccmp	x0, x1, #0, eq
	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
	b.ne	Lel1_sp1_synchronous_vector_continue
	msr		ELR_EL1, lr					// Return to caller
	ERET_NO_STRAIGHT_LINE_SPECULATION
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 *  {x0, x1, sp} - saved
 *  x0 - arm_context_t
 *  x1 - address of FLEH
 *  x2 - bitfield of type FLEH_DISPATCH64_OPTION_xxx, clobbered
 *  x3 - unused
 *  fp - previous stack frame if EL1
 *  lr - unused
 *  sp - kernel stack
 */
	.text
	.align 2
fleh_dispatch64:
#if HAS_APPLE_PAC
	pacia	x1, sp
#endif

	/* Save arm_saved_state64 */
	SPILL_REGISTERS KERNEL_MODE, options_register=x2

	/* If exception is from userspace, zero unused registers */
	and		x23, x23, #(PSR64_MODE_EL_MASK)
	cmp		x23, #(PSR64_MODE_EL0)
	bne		1f

	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS


2:

	mov		x2, #0
	mov		x3, #0
	mov		x4, #0
	mov		x5, #0
	mov		x6, #0
	mov		x7, #0
	mov		x8, #0
	mov		x9, #0
	mov		x10, #0
	mov		x11, #0
	mov		x12, #0
	mov		x13, #0
	mov		x14, #0
	mov		x15, #0
	mov		x16, #0
	mov		x17, #0
	mov		x18, #0
	mov		x19, #0
	mov		x20, #0
	/* x21, x22 cleared in common case below */
	mov		x23, #0
	mov		x24, #0
	mov		x25, #0
#if !XNU_MONITOR
	mov		x26, #0
#endif
	mov		x27, #0
	mov		x28, #0
	mov		fp, #0
	mov		lr, #0
1:

	mov		x21, x0								// Copy arm_context_t pointer to x21
	mov		x22, x1								// Copy handler routine to x22

#if XNU_MONITOR
	/* Zero x26 to indicate that this should not return to the PPL. */
	mov		x26, #0
#endif

#if PRECISE_USER_KERNEL_TIME
	cmp		x23, #PSR64_MODE_EL0				// If we interrupted the kernel,
	b.gt	1f									// skip the precise time update.
	PUSH_FRAME
	bl		EXT(recount_leave_user)
	POP_FRAME_WITHOUT_LR
	mov		x0, x21								// Reload arm_context_t pointer
1:
#endif /* PRECISE_USER_KERNEL_TIME */

	/* Dispatch to FLEH */

#if HAS_APPLE_PAC
	braa	x22, sp
#else
	br		x22
#endif
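	/*
	 * Note on the PAC pairing above: the handler address was signed with
	 * "pacia x1, sp" on entry to fleh_dispatch64 and is authenticated here
	 * with the same SP-based modifier, so a signed handler pointer cannot
	 * be usefully substituted while registers were being spilled.
	 */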


	.text
	.align 2
	.global EXT(fleh_synchronous)
LEXT(fleh_synchronous)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address
	mrs		lr, ELR_EL1
	/* NB: lr might not be a valid address (e.g. instruction abort). */
	PUSH_FRAME

#if CONFIG_SPTM
	mrs		x25, ELR_EL1

	/*
	 * Sync exceptions in the kernel are rare, so check that first.
	 * This check should be trivially predicted NT. We also take
	 * the check out of line so, on the hot path, we don't add a
	 * frontend redirect.
	 */
	mov		x3, #0 // by default, do not signal panic lockdown to sleh
	mrs		x4, SPSR_EL1
	tst		x4, #(PSR64_MODE_EL_MASK)
	b.ne	Lfleh_synchronous_ool_check_exception_el1 /* Run ELn checks if we're EL!=0 (!Z) */
	/* EL0 -- check if we're blocking sync exceptions due to lockdown */
	adrp	x4, EXT(sptm_xnu_triggered_panic_ptr)@page
	ldr		x4, [x4, EXT(sptm_xnu_triggered_panic_ptr)@pageoff]
	ldrb	w4, [x4]
	cbnz	w4, Lblocked_user_sync_exception

Lfleh_synchronous_continue:
	/* We've had our chance to lockdown, release PC/FAR */
	str		x25, [x0, SS64_PC]
	str		x2,  [x0, SS64_FAR]
#endif /* CONFIG_SPTM */

	bl		EXT(sleh_synchronous)
	POP_FRAME_WITHOUT_LR

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
	b		exception_return_dispatch

#if CONFIG_SPTM
Lfleh_synchronous_ool_check_exception_el1:
	/* Save off arguments needed for sleh_sync as we may clobber */
	mov		x26, x0
	mov		x27, x1
	mov		x28, x2

	/*
	 * Evaluate the exception state to determine if we should initiate a
	 * lockdown. While this function is implemented in C, since it is guaranteed
	 * to not use the stack it should be immune from spill tampering and other
	 * attacks which may cause it to render the wrong ruling.
	 */
	mov		x0, x1  // ESR
	mov		x1, x25 // ELR
			        // FAR is already in x2
	mrs		x3, SPSR_EL1
	bl		EXT(sleh_panic_lockdown_should_initiate_el1_sp0_sync)

	/* sleh_synchronous needs the lockdown decision in x3 */
	mov		x3, x0
	/* Optimistically restore registers on the assumption we won't lockdown */
	mov		x0, x26
	mov		x1, x27
	mov		x2, x28

	cbz		x3, Lfleh_synchronous_continue

	BEGIN_PANIC_LOCKDOWN
	mov		x0, x26
	mov		x1, x27
	mov		x2, x28
	/*
	 * A captain goes down with her ship; the system is sunk, but for
	 * telemetry's sake try to handle the crash normally.
	 */
	mov		x3, #1 // signal to sleh that we completed panic lockdown
	b		Lfleh_synchronous_continue
#endif /* CONFIG_SPTM */
UNWIND_EPILOGUE

#if CONFIG_SPTM
	.text
	.align 2
	/* Make a global symbol so it's easier to pick out in backtraces */
	.global EXT(blocked_user_sync_exception)
LEXT(blocked_user_sync_exception)
Lblocked_user_sync_exception:
	TRAP_UNWIND_PROLOGUE
	TRAP_UNWIND_DIRECTIVES
	/*
	 * User space took a sync exception after panic lockdown had been initiated.
	 * The system is going to panic soon, so let's just re-enable FIQs and wait
	 * for debugger sync.
	 */
	msr		DAIFClr, #DAIFSC_FIQF
0:
	wfe
	b		0b
	UNWIND_EPILOGUE
#endif /* CONFIG_SPTM */

/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *  x0 - arm_context_t
 * x23 - CPSR
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
	mrs		x22, TPIDR_EL1
	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence */
	ldr		w1, [x23, CPU_STAT_IRQ]
	add		w1, w1, #1							// Increment count
	str		w1, [x23, CPU_STAT_IRQ]				// Update IRQ count
	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
	add		w1, w1, #1					// Increment count
	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
	/* Increment preempt count */
	ldr		w1, [x22, ACT_PREEMPT_CNT]
	add		w1, w1, #1
	str		w1, [x22, ACT_PREEMPT_CNT]
	/* Store context in int state */
	str		x0, [x23, CPU_INT_STATE]			// Saved context in cpu_int_state
.endmacro

/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 * x22 - Live TPIDR_EL1 value (thread address)
 * x23 - Address of the current CPU data structure
 * w24 - 0 if kdebug is disabled, nonzero otherwise
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
	/* Clear int context */
	str		xzr, [x23, CPU_INT_STATE]
	/* Decrement preempt count */
	ldr		w0, [x22, ACT_PREEMPT_CNT]
	cbnz	w0, 1f								// Detect underflow
	b		preempt_underflow
1:
	sub		w0, w0, #1
	str		w0, [x22, ACT_PREEMPT_CNT]
	/* Switch back to kernel stack */
	LOAD_KERN_STACK_TOP	dst=x0, src=x22, tmp=x28
	mov		sp, x0
	/* Generate a CPU-local event to terminate a post-IRQ WFE */
	sevl
.endmacro

	.text
	.align 2
	.global EXT(fleh_irq)
LEXT(fleh_irq)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl		EXT(sleh_irq)
	POP_FRAME_WITHOUT_LR
	END_INTERRUPT_HANDLER

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
	b		exception_return_dispatch
UNWIND_EPILOGUE

	.text
	.align 2
	.global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)
	/*
	 * This function is a placeholder which should never be invoked.
	 * We omit the landing pad here since there is no sensible choice.
	 */
	PANIC_UNIMPLEMENTED

	.text
	.align 2
	.global EXT(fleh_fiq)
LEXT(fleh_fiq)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl		EXT(sleh_fiq)
	POP_FRAME_WITHOUT_LR
	END_INTERRUPT_HANDLER

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
	b		exception_return_dispatch
UNWIND_EPILOGUE

	.text
	.align 2
	.global EXT(fleh_serror)
LEXT(fleh_serror)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address

	PUSH_FRAME
	bl		EXT(sleh_serror)
	POP_FRAME_WITHOUT_LR

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
	b		exception_return_dispatch
UNWIND_EPILOGUE

/*
 * Register state saved before we get here.
 */
	.text
	.align 2
fleh_invalid_stack:
	ARM64_JUMP_TARGET
#if CONFIG_SPTM
	/*
	 * Taking a data abort with an invalid kernel stack pointer is unrecoverable.
	 * Initiate lockdown.
	 */

	/* Save off temporaries (including exception SPRs) as SPTM can clobber */
	mov		x25, x0
	mrs		x26, ELR_EL1
	mrs		x27, ESR_EL1
	mrs		x28, FAR_EL1
	BEGIN_PANIC_LOCKDOWN
	mov		x0, x25
	mov		x1, x27
	mov		x2, x28
	/* We deferred storing PC/FAR until after lockdown, so do that now */
	str		x26, [x0, SS64_PC]
	str		x28, [x0, SS64_FAR]
#else
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address
#endif /* CONFIG_SPTM */
	PUSH_FRAME
	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
	b		.

	.text
	.align 2
fleh_synchronous_sp1:
	ARM64_JUMP_TARGET
#if CONFIG_SPTM
	/*
	 * Without debugger intervention, all exceptions on SP1 (including debug
	 * trap instructions) are intended to be fatal. In order to not break
	 * self-hosted kernel debug, do not trigger lockdown for debug traps
	 * (unknown instructions/uncategorized exceptions). On release kernels, we
	 * don't support self-hosted kernel debug so unconditionally lockdown.
	 */
#if (DEVELOPMENT || DEBUG)
	tst		w1, #(ESR_EC_MASK)
	b.eq	Lfleh_synchronous_sp1_skip_panic_lockdown // ESR_EC_UNCATEGORIZED is 0, so skip lockdown if Z
#endif /* DEVELOPMENT || DEBUG */
	/* Save off temporaries (including exception SPRs) as SPTM can clobber */
	mov		x25, x0
	mrs		x26, ELR_EL1
	mrs		x27, ESR_EL1
	mrs		x28, FAR_EL1
	BEGIN_PANIC_LOCKDOWN
	mov		x0, x25
	mov		x1, x27
	mov		x2, x28
	/* We deferred storing PC/FAR until after lockdown, so do that now */
	str		x26, [x0, SS64_PC]
	str		x28, [x0, SS64_FAR]
Lfleh_synchronous_sp1_skip_panic_lockdown:
#else
	mrs		x1, ESR_EL1
	mrs		x2, FAR_EL1
#endif /* CONFIG_SPTM */

	PUSH_FRAME
	bl		EXT(sleh_synchronous_sp1)
	b		.

	.text
	.align 2
fleh_irq_sp1:
	ARM64_JUMP_TARGET
	mov		x1, x0
	adr		x0, Lsp1_irq_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
	.asciz "IRQ exception taken while SP1 selected"

	.text
	.align 2
fleh_fiq_sp1:
	ARM64_JUMP_TARGET
	mov		x1, x0
	adr		x0, Lsp1_fiq_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
	.asciz "FIQ exception taken while SP1 selected"

	.text
	.align 2
fleh_serror_sp1:
	ARM64_JUMP_TARGET
	mov		x1, x0
	adr		x0, Lsp1_serror_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
	.asciz "Asynchronous exception taken while SP1 selected"

	.text
	.align 2
exception_return_dispatch:
	ldr		w0, [x21, SS64_CPSR]
	tst		w0, PSR64_MODE_EL_MASK
	b.ne		EXT(return_to_kernel) // return to kernel if M[3:2] > 0
	b		return_to_user

#if CONFIG_SPTM
/**
 * XNU returns to this symbol whenever handling an interrupt that occurred
 * during SPTM, TXM or SK runtime. This code determines which domain the
 * XNU thread was executing in when the interrupt occurred and tells SPTM
 * which domain to resume.
 */
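/*
 * A rough C model of the dispatch below (field names are illustrative; the
 * real offsets come from assym.s):
 *
 *	if (thread->txm_thread_stack != 0)
 *		txm_resume(thread->txm_thread_stack);
 *	else if (thread->th_exclaves_intstate & TH_EXCLAVES_EXECUTION)
 *		sk_resume();
 *	else
 *		sptm_resume_from_exception();
 */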
	.text
	.align 2
	.global EXT(xnu_return_to_gl2)
LEXT(xnu_return_to_gl2)
	/**
	 * If thread->txm_thread_stack is set, we need to tell SPTM dispatch to
	 * resume the TXM thread in x0.
	 */
	mrs		x8, TPIDR_EL1
	ldr		x8, [x8, TH_TXM_THREAD_STACK]
	cbz		x8, 1f
	mov		x0, x8
	b		EXT(txm_resume)
	/* Unreachable */
	b		.

#if CONFIG_EXCLAVES
	/**
	 * If thread->th_exclaves_intstate flag TH_EXCLAVES_EXECUTION is set
	 * we need to tell SPTM dispatch to resume the SK thread.
	 */
1:
	mrs		x8, TPIDR_EL1
	ldr		x9, [x8, TH_EXCLAVES_INTSTATE]
	and		x9, x9, TH_EXCLAVES_EXECUTION
	cbz		x9, 1f
	b		EXT(sk_resume)
	/* Unreachable */
	b		.
#endif /* CONFIG_EXCLAVES */

	/**
	 * If neither of the above checks succeeded, this must be a thread
	 * that was interrupted while running in SPTM. Tell SPTM to resume
	 * the interrupted SPTM call.
	 */
1:
	b		EXT(sptm_resume_from_exception)
	/* Unreachable */
	b		.
#endif /* CONFIG_SPTM */

	.text
	.align 2
	.global EXT(return_to_kernel)
LEXT(return_to_kernel)
	UNWIND_PROLOGUE
	RETURN_TO_KERNEL_UNWIND
	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
	mrs		x3, TPIDR_EL1                           // Load thread pointer
	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
	b.eq	exception_return_unint_tpidr_x3
	mov		sp, x21                                 // Switch to thread stack for preemption
	PUSH_FRAME
	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
	POP_FRAME_WITHOUT_LR
	b		exception_return
	UNWIND_EPILOGUE

	.text
	.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
	ARM64_PROLOG
#if CONFIG_DTRACE
	bl		EXT(dtrace_thread_bootstrap)
#endif
#if KASAN_TBI
	PUSH_FRAME
	bl		EXT(__asan_handle_no_return)
	POP_FRAME_WITHOUT_LR
#endif /* KASAN_TBI */
	b		EXT(arm64_thread_exception_return)

	.text
	.globl EXT(arm64_thread_exception_return)
LEXT(arm64_thread_exception_return)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1
	LOAD_USER_PCB	dst=x21, src=x0, tmp=x28
	mov		x28, xzr

	//
	// Fall through to return_to_user from arm64_thread_exception_return.
	// Note that if we move return_to_user or insert a new routine
	// below arm64_thread_exception_return, the latter will need to change.
	//
	.text
/* x21 is always the machine context pointer when we get here
 * x28 is a bit indicating whether or not we should check if pc is in pfz */
return_to_user:
check_user_asts:
#if KASAN_TBI
	PUSH_FRAME
	bl		EXT(__asan_handle_no_return)
	POP_FRAME_WITHOUT_LR
#endif /* KASAN_TBI */
	mrs		x3, TPIDR_EL1					// Load thread pointer

	movn		w2, #0
	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user

#if MACH_ASSERT
	ldr		w0, [x3, ACT_PREEMPT_CNT]
	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
#endif

	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
	cbz		w0, no_asts							// If no asts, skip ahead

	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle asts

	/* At this point, we have ASTs and we need to check whether we are running in the
	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
	 * the PFZ since we don't want to handle getting a signal or getting suspended
	 * while holding a spinlock in userspace.
	 *
	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
	 * to use it to indicate to userspace to come back to take a delayed
	 * preemption, at which point the ASTs will be handled. */
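	/* A rough C sketch of the deferral below (commpage_is_in_pfz64 is the
	 * real helper from commpage_asm.s; the rest is illustrative):
	 *
	 *	if (commpage_is_in_pfz64(state->pc)) {
	 *		state->x[15] = 1;	// ask userspace to re-enter for preemption
	 *		// return to user as if no ASTs were pending
	 *	} else {
	 *		ast_taken_user();	// service the pending ASTs now
	 *	}
	 */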
	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64

	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
	cbz		x0, restore_and_check_ast			// No, deal with other asts

	mov		x0, #1
	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
	mov		x0, x19								// restore x0 to asts
	b		no_asts								// pretend we have no asts

restore_and_check_ast:
	mov		x0, x19								// restore x0
	b		user_take_ast						// Service pending asts
no_asts:


#if PRECISE_USER_KERNEL_TIME
	mov		x19, x3						// Preserve thread pointer across function call
	PUSH_FRAME
	bl		EXT(recount_enter_user)
	POP_FRAME_WITHOUT_LR
	mov		x3, x19
#endif /* PRECISE_USER_KERNEL_TIME */

#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
	/* Watchtower
	 *
	 * Here we attempt to enable NEON access for EL0. If the last entry into the
	 * kernel from user-space was due to an IRQ, the monitor will have disabled
	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
	 * check in with the monitor in order to reenable NEON for EL0 in exchange
	 * for routing IRQs through the monitor (2). This way the monitor will
	 * always 'own' either IRQs or EL0 NEON.
	 *
	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
	 * here.
	 *
	 * EL0 user ________ IRQ                                            ______
	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
	 * EL3 monitor           \_/                                \___/
	 *
	 *                       (1)                                 (2)
	 */

	mov		x0, #(CPACR_FPEN_ENABLE)
	msr		CPACR_EL1, x0
#endif

	/* Establish this thread's debug state as the live state on the selected CPU. */
	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
	ldr		x0, [x3, ACT_DEBUGDATA]
	cmp		x0, x1
	beq		L_skip_user_set_debug_state			// Skip if the live CPU debug state already matches the thread's; otherwise apply thread state


	PUSH_FRAME
	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
	POP_FRAME_WITHOUT_LR
	mrs		x3, TPIDR_EL1						// Reload thread pointer
	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
L_skip_user_set_debug_state:


	ldrsh	x0, [x4, CPU_TPIDR_EL0]
	msr		TPIDR_EL0, x0


	b		exception_return_unint_tpidr_x3

exception_return:
	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
exception_return_unint:
	mrs		x3, TPIDR_EL1					// Load thread pointer
exception_return_unint_tpidr_x3:
	mov		sp, x21						// Reload the pcb pointer

#if !__ARM_KERNEL_PROTECT__
	/*
	 * Restore x18 only if the task has the entitlement that allows
	 * usage. Those are very few, and can move to something else
	 * once we use x18 for something more global.
	 *
	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
	 * that uses x18 as one of the global use cases (and will reset
	 * x18 later down below).
	 *
	 * It's also unconditionally skipped for translated threads,
	 * as those are another use case, one where x18 must be preserved.
	 */
	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
	mov		x18, #0
	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers

exception_return_unint_tpidr_x3_restore_x18:
	ldr		x18, [sp, SS64_X18]

#else /* !__ARM_KERNEL_PROTECT__ */
	/*
	 * If we are going to eret to userspace, we must return through the EL0
	 * eret mapping.
	 */
	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1

	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
	add		x1, x1, Lexception_return_restore_registers@pageoff
	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
	sub		x1, x1, x0											// Calculate delta
	add		x0, x2, x1											// Convert KVA to EL0 vector address
	br		x0
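	/*
	 * I.e. the EL0 alias of the return path is computed as
	 *   el0_pc = ARM_KERNEL_PROTECT_EXCEPTION_START
	 *          + (Lexception_return_restore_registers - ExceptionVectorsBase)
	 * which works because the vectors page is mapped at both addresses.
	 */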

Lskip_el0_eret_mapping:
#endif /* !__ARM_KERNEL_PROTECT__ */

Lexception_return_restore_registers:
	mov		x0, sp								// x0 = &pcb
	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, x25, el0_state_allowed=1

	msr		ELR_EL1, x1							// Load the return address into ELR
	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR

/* Restore special register state */
	ldr		w3, [sp, NS64_FPSR]
	ldr		w4, [sp, NS64_FPCR]

	msr		FPSR, x3
	mrs		x5, FPCR
	CMSR FPCR, x5, x4, 1
1:


#if HAS_ARM_FEAT_SME
	mrs		x2, SPSR_EL1
	and		x2, x2, #(PSR64_MODE_EL_MASK)
	cmp		x2, #(PSR64_MODE_EL0)
	// SPSR_EL1.M != EL0: no SME state to restore
	bne		Lno_sme_saved_state

	mrs		x3, TPIDR_EL1
	ldr		x2, [x3, ACT_UMATRIX_HDR]
	cbz		x2, Lno_sme_saved_state

	ldr		x3, [x2, SME_SVCR]
	msr		SVCR, x3
	// SVCR.SM == 0: restore SVCR only (ZA is handled during context-switch)
	tbz		x3, #SVCR_SM_SHIFT, Lno_sme_saved_state

	// SVCR.SM == 1: restore SVCR, Z, and P
	ldrh	w3, [x2, SME_SVL_B]
	add		x2, x2, SME_Z_P_ZA
	LOAD_OR_STORE_Z_P_REGISTERS	ldr, svl_b=x3, ss=x2

	// The FPSIMD register file acts like a view into the lower 128 bits of
	// Z0-Z31.  While there's no harm reading it out during exception entry,
	// writing it back would truncate the Z0-Z31 values we just restored.
	b		Lskip_restore_neon_saved_state
Lno_sme_saved_state:
#endif /* HAS_ARM_FEAT_SME */

	/* Restore arm_neon_saved_state64 */
	ldp		q0, q1, [x0, NS64_Q0]
	ldp		q2, q3, [x0, NS64_Q2]
	ldp		q4, q5, [x0, NS64_Q4]
	ldp		q6, q7, [x0, NS64_Q6]
	ldp		q8, q9, [x0, NS64_Q8]
	ldp		q10, q11, [x0, NS64_Q10]
	ldp		q12, q13, [x0, NS64_Q12]
	ldp		q14, q15, [x0, NS64_Q14]
	ldp		q16, q17, [x0, NS64_Q16]
	ldp		q18, q19, [x0, NS64_Q18]
	ldp		q20, q21, [x0, NS64_Q20]
	ldp		q22, q23, [x0, NS64_Q22]
	ldp		q24, q25, [x0, NS64_Q24]
	ldp		q26, q27, [x0, NS64_Q26]
	ldp		q28, q29, [x0, NS64_Q28]
	ldp		q30, q31, [x0, NS64_Q30]
#if HAS_ARM_FEAT_SME
Lskip_restore_neon_saved_state:
#endif
	// If sync_on_cswitch is set and ERET is not a context-synchronizing event,
	// issue an ISB now. Unconditionally clear the sync_on_cswitch flag.
1604	mrs		x1, TPIDR_EL1
1605	ldr		x1, [x1, ACT_CPUDATAP]
1606
1607	// Redefined for backporting.
1608#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__
1609	ldrb	w2, [x1, CPU_SYNC_ON_CSWITCH]
1610#if ERET_NEEDS_ISB
1611	// Set the bit, but don't sync, it will be synced shortly after this.
1612	orr		x5, x5, x2, lsl #(BIT_ISB_PENDING)
1613#else
1614	cbz		w2, 1f
1615	// Last chance, sync now.
1616	isb		sy
16171:
1618#endif  /* ERET_NEEDS_ISB */
1619#endif  /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__ */
1620	strb	wzr, [x1, CPU_SYNC_ON_CSWITCH]
1621
1622	/* Restore arm_saved_state64 */
1623
1624	// Skip x0, x1 - we're using them
1625	ldp		x2, x3, [x0, SS64_X2]
1626	ldp		x4, x5, [x0, SS64_X4]
1627	ldp		x6, x7, [x0, SS64_X6]
1628	ldp		x8, x9, [x0, SS64_X8]
1629	ldp		x10, x11, [x0, SS64_X10]
1630	ldp		x12, x13, [x0, SS64_X12]
1631	ldp		x14, x15, [x0, SS64_X14]
1632	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1633	// Skip x18 - already restored or trashed above (below with __ARM_KERNEL_PROTECT__)
1634	ldr		x19, [x0, SS64_X19]
1635	ldp		x20, x21, [x0, SS64_X20]
1636	ldp		x22, x23, [x0, SS64_X22]
1637	ldp		x24, x25, [x0, SS64_X24]
1638	ldp		x26, x27, [x0, SS64_X26]
1639	ldr		x28, [x0, SS64_X28]
1640	ldr		fp, [x0, SS64_FP]
1641	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1642
1643	// Restore stack pointer and our last two GPRs
1644	ldr		x1, [x0, SS64_SP]
1645	mov		sp, x1
1646
1647#if __ARM_KERNEL_PROTECT__
1648	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1649#endif /* __ARM_KERNEL_PROTECT__ */
1650
1651	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1652
1653#if __ARM_KERNEL_PROTECT__
1654	/* If we are going to eret to userspace, we must unmap the kernel. */
1655	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1656
1657	/* Update TCR to unmap the kernel. */
1658	MOV64		x18, TCR_EL1_USER
1659	msr		TCR_EL1, x18
1660
1661	/*
1662	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1663	 * each other due to the microarchitecture.
1664	 */
1665#if !defined(APPLE_ARM64_ARCH_FAMILY)
1666	isb		sy
1667#endif
1668
1669	/* Switch to the user ASID (low bit clear) for the task. */
	mrs		x18, TTBR0_EL1
	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr		TTBR0_EL1, x18
	mov		x18, #0

	/* We don't need an ISB here, as the eret is synchronizing. */
Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */

	ERET_NO_STRAIGHT_LINE_SPECULATION

user_take_ast:
	PUSH_FRAME
	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
	POP_FRAME_WITHOUT_LR
	b		check_user_asts								// Now try again

	.text
	.align 2
preempt_underflow:
	mrs		x0, TPIDR_EL1
	str		x0, [sp, #-16]!						// We'll print thread pointer
	adr		x0, L_underflow_str					// Format string
	CALL_EXTERN panic							// Game over

L_underflow_str:
	.asciz "Preemption count negative on thread %p"
.align 2

#if MACH_ASSERT
	.text
	.align 2
preempt_count_notzero:
	mrs		x0, TPIDR_EL1
	str		x0, [sp, #-16]!						// We'll print thread pointer
	ldr		w0, [x0, ACT_PREEMPT_CNT]
	str		w0, [sp, #8]
	adr		x0, L_preempt_count_notzero_str				// Format string
	CALL_EXTERN panic							// Game over

L_preempt_count_notzero_str:
	.asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */

#if __ARM_KERNEL_PROTECT__
	/*
	 * This symbol denotes the end of the exception vector/eret range; we page
	 * align it so that we can avoid mapping other text in the EL0 exception
	 * vector mapping.
	 */
	.text
	.align 14
	.globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */

#if XNU_MONITOR && !CONFIG_SPTM

/*
 * Functions to preflight the fleh handlers when the PPL has taken an exception;
 * mostly concerned with setting up state for the normal fleh code.
 */
	.text
	.align 2
fleh_synchronous_from_ppl:
	ARM64_JUMP_TARGET
	/* Save x0. */
	mov		x15, x0

	/* Grab the ESR. */
	mrs		x1, ESR_EL1							// Get the exception syndrome

	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (exception class 0x25) or a stack-pointer alignment fault (exception
	 * class 0x26). We can check for these quickly by testing bit 5 of the
	 * exception class.
	 */
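	/*
	 * Worked example: ESR_EC_DABORT_EL1 is 0x25 (0b100101) and ESR_EC_SP_ALIGN
	 * is 0x26 (0b100110); both have bit 5 of the EC set, while classes like
	 * SVC64 (0x15, 0b010101) do not. A single tbz on bit (ESR_EC_SHIFT + 5)
	 * therefore cheaply filters out most exception classes before the exact
	 * comparisons below.
	 */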
	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
	mrs		x0, SP_EL0							// Get SP_EL0

	/* Perform high level checks for stack corruption. */
	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a stack alignment exception
	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a data abort, we need to
	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer

Ltest_pstack:
	/* Bounds check the PPL stack. */
	adrp	x10, EXT(pmap_stacks_start)@page
	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
	adrp	x11, EXT(pmap_stacks_end)@page
	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
	cmp		x0, x10
	b.lo	Lcorrupt_ppl_stack
	cmp		x0, x11
	b.hi	Lcorrupt_ppl_stack
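	/*
	 * In C terms, the bounds check above is simply (sketch; x0 holds SP_EL0):
	 *
	 *   if (sp < pmap_stacks_start || sp > pmap_stacks_end) {
	 *       goto Lcorrupt_ppl_stack;
	 *   }
	 */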

Lvalid_ppl_stack:
	/* Restore x0. */
	mov		x0, x15

	/* Switch back to the kernel stack. */
	msr		SPSel, #0
	GET_PMAP_CPU_DATA x5, x6, x7
	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x6

	/* Hand off to the synchronous exception handler. */
	b		EXT(fleh_synchronous)

Lcorrupt_ppl_stack:
	/* Restore x0. */
	mov		x0, x15

	/* Hand off to the invalid stack handler. */
	b		fleh_invalid_stack

fleh_fiq_from_ppl:
	ARM64_JUMP_TARGET
	SWITCH_TO_INT_STACK	tmp=x25
	b		EXT(fleh_fiq)

fleh_irq_from_ppl:
	ARM64_JUMP_TARGET
	SWITCH_TO_INT_STACK	tmp=x25
	b		EXT(fleh_irq)

fleh_serror_from_ppl:
	ARM64_JUMP_TARGET
	GET_PMAP_CPU_DATA x5, x6, x7
	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x6
	b		EXT(fleh_serror)




	// x15: ppl call number
	// w10: ppl_state
	// x20: gxf_enter caller's DAIF
	.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)


#error "XPRR configuration error"
	cmp		x14, x21
	b.ne	Lppl_fail_dispatch

	/* Verify the request ID. */
	cmp		x15, PMAP_COUNT
	b.hs	Lppl_fail_dispatch

	GET_PMAP_CPU_DATA	x12, x13, x14

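	/*
	 * PPL entry state machine, as enforced by the checks below:
	 *
	 *   PPL_STATE_KERNEL    -> PPL_STATE_DISPATCH   (fresh PPL call)
	 *   PPL_STATE_EXCEPTION -> PPL_STATE_DISPATCH   (exception return; w10
	 *                                                must agree)
	 *   PPL_STATE_DISPATCH  -> fail                 (recursive trap into PPL)
	 *   anything else       -> fail
	 */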
	/* Examine this CPU's current PPL state. */
	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	cmp		w9, #PPL_STATE_KERNEL
	b.eq		Lppl_mark_cpu_as_dispatching

	/* Check to see if we are trying to trap from within the PPL. */
	cmp		w9, #PPL_STATE_DISPATCH
	b.eq		Lppl_fail_dispatch_ppl


	/* Ensure that we are returning from an exception. */
	cmp		w9, #PPL_STATE_EXCEPTION
	b.ne		Lppl_fail_dispatch

	/* w10 is set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL. */
	cmp		w10, #PPL_STATE_EXCEPTION
	b.ne		Lppl_fail_dispatch

	/* This is an exception return; set the CPU to the dispatching state. */
	mov		w9, #PPL_STATE_DISPATCH
	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Find the save area, and return to the saved PPL context. */
	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
	mov		sp, x0
	b		EXT(return_to_ppl)

Lppl_mark_cpu_as_dispatching:
	cmp		w10, #PPL_STATE_KERNEL
	b.ne		Lppl_fail_dispatch

	/* Mark the CPU as dispatching. */
	mov		w13, #PPL_STATE_DISPATCH
	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Switch to the regular PPL stack. */
	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]

	// SP0 is thread stack here
	mov		x21, sp
	// SP0 is now PPL stack
	mov		sp, x9

	/* Save the old stack pointer off in case we need it. */
	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/* Get the handler for the request */
	adrp	x9, EXT(ppl_handler_table)@page
	add		x9, x9, EXT(ppl_handler_table)@pageoff
	add		x9, x9, x15, lsl #3
	ldr		x10, [x9]
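	/*
	 * The address computation and load above are effectively:
	 *
	 *   handler = ppl_handler_table[x15];   // 8-byte slots, x15 = request ID
	 *
	 * x9 is left pointing at the table slot, so it can serve as the PAC
	 * diversifier for the blraa in ppl_dispatch.
	 */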

	/* Branch to the code that will invoke the PPL request. */
	b		EXT(ppl_dispatch)

Lppl_fail_dispatch_ppl:
	/* Switch back to the kernel stack. */
	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x10

Lppl_fail_dispatch:
	/* Indicate that we failed. */
	mov		x15, #PPL_EXIT_BAD_CALL

	/* Move the DAIF bits into the expected register. */
	mov		x10, x20

	/* Return to kernel mode. */
	b		ppl_return_to_kernel_mode

Lppl_dispatch_exit:

	/* Indicate that we are cleanly exiting the PPL. */
	mov		x15, #PPL_EXIT_DISPATCH

	/* Switch back to the original (kernel thread) stack. */
	mov		sp, x21

	/* Move the saved DAIF bits. */
	mov		x10, x20

	/* Clear the in-flight pmap pointer */
	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
	stlr		xzr, [x13]

	/* Clear the old stack pointer. */
	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/*
	 * Mark the CPU as no longer being in the PPL.  We spin if our state
	 * machine is broken.
	 */
	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
	cmp		w9, #PPL_STATE_DISPATCH
	b.ne		.
	mov		w9, #PPL_STATE_KERNEL
	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Return to the kernel. */
	b		ppl_return_to_kernel_mode



	.text
ppl_exit:
	ARM64_PROLOG
	/*
	 * If we are dealing with an exception, hand off to the first level
	 * exception handler.
	 */
	cmp		x15, #PPL_EXIT_EXCEPTION
	b.eq	Ljump_to_fleh_handler

	/* If this was a panic call from the PPL, reinvoke panic. */
	cmp		x15, #PPL_EXIT_PANIC_CALL
	b.eq	Ljump_to_panic_trap_to_debugger

	/*
	 * Stash off the original DAIF in the high bits of the exit code register.
	 * We could keep this in a dedicated register, but that would require us to copy it to
	 * an additional callee-save register below (e.g. x22), which in turn would require that
	 * register to be saved/restored at PPL entry/exit.
	 */
	add		x15, x15, x10, lsl #32
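	/*
	 * x15 now carries both values: bits [63:32] hold the original DAIF,
	 * bits [31:0] hold the PPL exit code.
	 */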

	/* Load the preemption count. */
	mrs		x10, TPIDR_EL1
	ldr		w12, [x10, ACT_PREEMPT_CNT]

	/* Detect underflow. */
	cbnz	w12, Lno_preempt_underflow
	b		preempt_underflow
Lno_preempt_underflow:

	/* Lower the preemption count. */
	sub		w12, w12, #1

#if SCHED_HYGIENE_DEBUG
	/* Collect preemption disable measurement if necessary. */

	/*
	 * Only collect a measurement if this reenabled preemption,
	 * and SCHED_HYGIENE_MARKER is set.
	 */
	mov		x20, #SCHED_HYGIENE_MARKER
	cmp		w12, w20
	b.ne	Lskip_collect_measurement

	/* Stash our return value and return reason. */
	mov		x20, x0
	mov		x21, x15

	/* Collect measurement. */
	bl		EXT(_collect_preemption_disable_measurement)

	/* Restore the return value and the return reason. */
	mov		x0, x20
	mov		x15, x21
	/* ... and w12, which is now 0. */
	mov		w12, #0

	/* Restore the thread pointer into x10. */
	mrs		x10, TPIDR_EL1

Lskip_collect_measurement:
#endif /* SCHED_HYGIENE_DEBUG */

	/* Save the lowered preemption count. */
	str		w12, [x10, ACT_PREEMPT_CNT]

	/* Skip ASTs if the preemption count is not zero. */
	cbnz	x12, Lppl_skip_ast_taken

	/*
	 * Skip the AST check if interrupts were originally disabled.
	 * The original DAIF state prior to PPL entry is stored in the upper
	 * 32 bits of x15.
	 */
	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken

	/* If there is no urgent AST, skip the AST. */
	ldr		x12, [x10, ACT_CPUDATAP]
	ldr		w14, [x12, CPU_PENDING_AST]
	tst		w14, AST_URGENT
	b.eq	Lppl_skip_ast_taken

	/* Stash our return value and return reason. */
	mov		x20, x0
	mov		x21, x15

	/* Handle the AST. */
	bl		EXT(ast_taken_kernel)

	/* Restore the return value and the return reason. */
	mov		x15, x21
	mov		x0, x20

Lppl_skip_ast_taken:

	/* Extract caller DAIF from high-order bits of exit code */
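	/* Roughly: x10 = x15 >> 32 (DAIF); x15 &= 0xffffffff (exit code). */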
	ubfx	x10, x15, #32, #32
	bfc		x15, #32, #32
	msr		DAIF, x10

	/* Pop the stack frame. */
	ldp		x29, x30, [sp, #0x10]
	ldp		x20, x21, [sp], #0x20

	/* Check to see if this was a bad request. */
	cmp		x15, #PPL_EXIT_BAD_CALL
	b.eq	Lppl_bad_call

	/* Return. */
	ARM64_STACK_EPILOG

	.align 2
Ljump_to_fleh_handler:
	br	x25

	.align 2
Ljump_to_panic_trap_to_debugger:
	b		EXT(panic_trap_to_debugger)

Lppl_bad_call:
	/* Panic. */
	adrp	x0, Lppl_bad_call_panic_str@page
	add		x0, x0, Lppl_bad_call_panic_str@pageoff
	b		EXT(panic)

	.text
	.align 2
	.globl EXT(ppl_dispatch)
LEXT(ppl_dispatch)
	/*
	 * Save a couple of important registers (x12 holds the PPL per-CPU data
	 * address; x13 is paired with it only to keep the stack 16-byte aligned).
	 */
	stp		x12, x13, [sp, #-0x10]!

	/*
	 * Restore the original AIF state, forcing D set to mask debug exceptions
	 * while PPL code runs.
	 */
	orr		x8, x20, DAIF_DEBUGF
	msr		DAIF, x8

	/*
	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
	 * but the exception vectors will deal with this properly.
	 */

	/* Invoke the PPL method. */
#ifdef HAS_APPLE_PAC
	blraa		x10, x9
#else
	blr		x10
#endif

	/* Mask all DAIF exceptions. */
	msr		DAIFSet, #(DAIFSC_ALL)

	/* Restore those important registers. */
	ldp		x12, x13, [sp], #0x10

	/* Mark this as a regular return, and hand off to the return path. */
	b		Lppl_dispatch_exit

	.text
	.align 2
	.globl EXT(ppl_bootstrap_dispatch)
LEXT(ppl_bootstrap_dispatch)
	/* Verify the PPL request. */
	cmp		x15, PMAP_COUNT
	b.hs	Lppl_fail_bootstrap_dispatch

	/* Get the requested PPL routine. */
	adrp	x9, EXT(ppl_handler_table)@page
	add		x9, x9, EXT(ppl_handler_table)@pageoff
	add		x9, x9, x15, lsl #3
	ldr		x10, [x9]

	/* Invoke the requested PPL routine. */
#ifdef HAS_APPLE_PAC
	blraa		x10, x9
#else
	blr		x10
#endif
	LOAD_PMAP_CPU_DATA	x9, x10, x11

	/* Clear the in-flight pmap pointer */
	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
	stlr		xzr, [x9]

	/* Stash off the return value */
	mov		x20, x0
	/* Drop the preemption count */
	bl		EXT(_enable_preemption)
	mov		x0, x20

	/* Pop the stack frame. */
	ldp		x29, x30, [sp, #0x10]
	ldp		x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)
	retab
#else
	ret
#endif

Lppl_fail_bootstrap_dispatch:
	/* Pop our stack frame and panic. */
	ldp		x29, x30, [sp, #0x10]
	ldp		x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)
	autibsp
#endif
	adrp	x0, Lppl_bad_call_panic_str@page
	add		x0, x0, Lppl_bad_call_panic_str@pageoff
	b		EXT(panic)

	.text
	.align 2
	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	ARM64_PROLOG
	mrs		x10, DAIF
	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)

	adrp		x12, EXT(pmap_ppl_locked_down)@page
	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
	cbz		w12, Lnot_in_ppl_dispatch

	LOAD_PMAP_CPU_DATA	x11, x12, x13

	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
	cmp		w12, #PPL_STATE_DISPATCH
	b.ne		Lnot_in_ppl_dispatch

	/* Indicate (for the PPL->kernel transition) that we are panicking. */
	mov		x15, #PPL_EXIT_PANIC_CALL

	/* Restore the old stack pointer, as we can't push onto the PPL stack after we exit the PPL. */
	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x12

	mrs		x10, DAIF
	mov		w13, #PPL_STATE_PANIC
	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]

	/**
	 * When we panic in the PPL, we might have un-synced PTE updates. Shoot
	 * down all the TLB entries.
	 *
	 * A check must be done here against CurrentEL because the alle1is flavor
	 * of tlbi is not available to EL1, but the vmalle1is flavor is. When PPL
	 * runs at GL2, we can issue an alle2is and an alle1is tlbi to kill all
	 * the TLB entries. When PPL runs at GL1, as a guest or on a pre-H13
	 * platform, we issue a vmalle1is tlbi instead.
	 *
	 * Note that we only do this after passing the `PPL_STATE_DISPATCH` check,
	 * because if we did this for every panic, including the ones triggered
	 * by fabric problems, we might get stuck at the DSB below and trigger an
	 * AP watchdog.
	 */
	mrs		x12, CurrentEL
	cmp		x12, PSR64_MODE_EL2
	b.ne		Lnot_in_gl2
	tlbi		alle2is
	tlbi		alle1is
	b		Ltlb_invalidate_all_done
Lnot_in_gl2:
	tlbi		vmalle1is
Ltlb_invalidate_all_done:
	dsb		ish
	isb

	/* Now we are ready to exit the PPL. */
	b		ppl_return_to_kernel_mode
Lnot_in_ppl_dispatch:
	msr		DAIF, x10
	ret

	.data
Lppl_bad_call_panic_str:
	.asciz "ppl_dispatch: failed due to bad arguments/state"
#else /* XNU_MONITOR && !CONFIG_SPTM */
	.text
	.align 2
	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	ARM64_PROLOG
	ret
#endif /* XNU_MONITOR && !CONFIG_SPTM */

#if CONFIG_SPTM
	.text
	.align 2

	.globl EXT(_sptm_pre_entry_hook)
LEXT(_sptm_pre_entry_hook)
	/* Push a frame. */
	ARM64_STACK_PROLOG
	PUSH_FRAME
	stp		x20, x21, [sp, #-0x10]!

	/* Save arguments to SPTM function and SPTM function id. */
	mov		x20, x16
	stp		x0, x1, [sp, #-0x40]!
	stp		x2, x3, [sp, #0x10]
	stp		x4, x5, [sp, #0x20]
	stp		x6, x7, [sp, #0x30]
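	/*
	 * Local frame layout at this point (0x50 bytes; the SPTM function id from
	 * x16 is kept in x20 rather than on the stack):
	 *
	 *   [sp, #0x40]  x20, x21   (callee-saved scratch)
	 *   [sp, #0x30]  x6,  x7
	 *   [sp, #0x20]  x4,  x5
	 *   [sp, #0x10]  x2,  x3
	 *   [sp, #0x00]  x0,  x1    (SPTM call arguments)
	 */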

	/* Increase the preemption count. */
	mrs		x9, TPIDR_EL1
	cbz		x9, Lskip_preemption_check_sptmhook
	ldr		w10, [x9, ACT_PREEMPT_CNT]
	add		w10, w10, #1
	str		w10, [x9, ACT_PREEMPT_CNT]

	/* Update SPTM trace state to see if trace entries were generated post-exit */

#if SCHED_HYGIENE_DEBUG
	/* Prepare preemption disable measurement, if necessary. */

	/* Only prepare if we actually disabled preemption. */
	cmp		w10, #1
	b.ne	Lskip_prepare_measurement_sptmhook

	/* Don't prepare if measuring is off completely. */
	adrp	x10, _sched_preemption_disable_debug_mode@page
	add		x10, x10, _sched_preemption_disable_debug_mode@pageoff
	ldr		w10, [x10]
	cmp		w10, #0
	b.eq	Lskip_prepare_measurement_sptmhook

	/* Call prepare function with thread pointer as first arg. */
	bl		EXT(_prepare_preemption_disable_measurement)

Lskip_prepare_measurement_sptmhook:
#endif /* SCHED_HYGIENE_DEBUG */
Lskip_preemption_check_sptmhook:
	/* Assert that we are not calling from guarded mode. */
	mrs		x14, CurrentG
	cmp		x14, #0
	b.ne	.

	/* Restore arguments to SPTM function and SPTM function id. */
	ldp		x6, x7, [sp, #0x30]
	ldp		x4, x5, [sp, #0x20]
	ldp		x2, x3, [sp, #0x10]
	ldp		x0, x1, [sp]
	add		sp, sp, #0x40
	mov		x16, x20

	ldp		x20, x21, [sp], #0x10
	POP_FRAME
	ARM64_STACK_EPILOG

	.align 2
	.globl EXT(_sptm_post_exit_hook)
LEXT(_sptm_post_exit_hook)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	stp		x20, x21, [sp, #-0x10]!

	/* Save SPTM return value(s) */
	stp		x0, x1, [sp, #-0x40]!
	stp		x2, x3, [sp, #0x10]
	stp		x4, x5, [sp, #0x20]
	stp		x6, x7, [sp, #0x30]


	/* Load the preemption count. */
	mrs		x0, TPIDR_EL1
	cbz		x0, Lsptm_skip_ast_taken_sptmhook
	ldr		w12, [x0, ACT_PREEMPT_CNT]

	/* Detect underflow */
	cbnz	w12, Lno_preempt_underflow_sptmhook
	/* No need to clean up the stack, as preempt_underflow calls panic */
	b		preempt_underflow
Lno_preempt_underflow_sptmhook:

	/* Lower the preemption count. */
	sub		w12, w12, #1

#if SCHED_HYGIENE_DEBUG
	/* Collect preemption disable measurement if necessary. */

	/*
	 * Only collect measurement if this reenabled preemption,
	 * and SCHED_HYGIENE_MARKER is set.
	 */
	mov		x20, #SCHED_HYGIENE_MARKER
	cmp		w12, w20
	b.ne	Lskip_collect_measurement_sptmhook

	/* Collect measurement. */
	bl		EXT(_collect_preemption_disable_measurement)

	/* Restore w12, which is now 0. */
	mov		w12, #0

	/* Restore x0 as the thread pointer */
	mrs		x0, TPIDR_EL1

Lskip_collect_measurement_sptmhook:
#endif /* SCHED_HYGIENE_DEBUG */

	/* Save the lowered preemption count. */
	str		w12, [x0, ACT_PREEMPT_CNT]

	/* Skip ASTs if the preemption count is not zero. */
	cbnz	w12, Lsptm_skip_ast_taken_sptmhook

	/**
	 * Skip the AST check if interrupts were originally disabled. The original
	 * DAIF value needs to be placed into a callee-saved register so that the
	 * value is preserved across the ast_taken_kernel() call.
	 */
	mrs		x20, DAIF
	tbnz	x20, #(DAIF_IRQF_SHIFT), Lsptm_skip_ast_taken_sptmhook

	/* If there is no urgent AST, skip the AST. */
	ldr		x12, [x0, ACT_CPUDATAP]
	ldr		x14, [x12, CPU_PENDING_AST]
	tst		x14, AST_URGENT
	b.eq	Lsptm_skip_ast_taken_sptmhook

	/* Handle the AST. This call requires interrupts to be disabled. */
	msr		DAIFSet, #(DAIFSC_ALL)
	bl		EXT(ast_taken_kernel)
	msr		DAIF, x20

Lsptm_skip_ast_taken_sptmhook:

	/* Restore SPTM return value(s) */
	ldp		x6, x7, [sp, #0x30]
	ldp		x4, x5, [sp, #0x20]
	ldp		x2, x3, [sp, #0x10]
	ldp		x0, x1, [sp]
	add		sp, sp, #0x40

	/* Return. */
	ldp		x20, x21, [sp], #0x10
	POP_FRAME
	ARM64_STACK_EPILOG
#endif /* CONFIG_SPTM */

#if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
/**
 * Record debug data for a panic lockdown event.
 * Clobbers x0, x1, x2.
 */
	.text
	.align 2
	.global EXT(panic_lockdown_record_debug_data)
LEXT(panic_lockdown_record_debug_data)
	adrp	x0, EXT(debug_panic_lockdown_initiator_state)@page
	add		x0, x0, EXT(debug_panic_lockdown_initiator_state)@pageoff

	/*
	 * To synchronize accesses to the debug state, we use the initiator PC as a
	 * "lock". It starts out at zero and we try to swap in our initiator's PC
	 * (which is trivially non-zero) to acquire the debug state and become the
	 * initiator of record.
	 *
	 * Note that other CPUs which are not the initiator of record may still
	 * initiate panic lockdown (potentially before the initiator of record does
	 * so), so this debug data should only be used as a hint about which CPU
	 * initiated lockdown rather than a guarantee of which CPU initiated it
	 * first.
	 */
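	/*
	 * Roughly: if (!atomic_compare_exchange(&state->initiator_pc, 0, lr))
	 * skip the record; only the first CPU to swap its LR into the
	 * zero-initialized initiator PC fills in the rest of the debug data.
	 */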
	mov		x1, #0
	add		x2, x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_PC
	cas		x1, lr, [x2]
	/* If there's a non-zero value there already, we aren't the first. Skip. */
	cbnz	x1, Lpanic_lockdown_record_debug_data_done

	/*
	 * We're the first and have exclusive access to the debug structure!
	 * Record all our data.
	 */
	mov		x1, sp
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_SP]

	mrs		x1, TPIDR_EL1
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_TPIDR]

	mrs		x1, MPIDR_EL1
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_MPIDR]

	mrs		x1, ESR_EL1
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ESR]

	mrs		x1, ELR_EL1
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ELR]

	mrs		x1, FAR_EL1
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_FAR]

	/* Sync and then read the timer */
	dsb		sy
	isb
	mrs		x1, CNTVCT_EL0
	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_TIMESTAMP]

Lpanic_lockdown_record_debug_data_done:
	ret
#endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */

/* ARM64_TODO Is globals_asm.h needed? */
//#include	"globals_asm.h"

/* vim: set ts=4: */