xref: /xnu-12377.1.9/osfmk/arm64/locore.s (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_machdep.h>
31#include <arm64/machine_routines_asm.h>
32#include <arm64/proc_reg.h>
33#include <pexpert/arm64/board_config.h>
34#include <mach/exception_types.h>
35#include <mach_kdp.h>
36#include <config_dtrace.h>
37#include "assym.s"
38#include <arm64/exception_asm.h>
39#include "dwarf_unwind.h"
40
41#if __ARM_KERNEL_PROTECT__
42#include <arm/pmap.h>
43#endif
44
45// If __ARM_KERNEL_PROTECT__, eret is preceded by an ISB before returning to userspace.
46// Otherwise, use the BIT_ISB_PENDING flag to track whether an ISB must be issued before eret.
47#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__
48#define ERET_NEEDS_ISB 1
49#define BIT_ISB_PENDING 0
50#endif /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__ */
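// Note: BIT_ISB_PENDING is consumed late in exception return; the flag is
// OR-ed into a scratch register and tested with tbz just before eret (see
// Lskip_eret_isb below).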
51
52#if XNU_MONITOR && !CONFIG_SPTM
53/*
54 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
55 *
56 * Checks if an exception was taken from the PPL, and if so, trampolines back
57 * into the PPL.
58 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
59 *         exception was taken while in the PPL.
60 */
61.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
62	cmp		x26, xzr
63	b.eq		1f
64
65	/* Return to the PPL. */
66	mov		x15, #0
67	mov		w10, #PPL_STATE_EXCEPTION
68#error "XPRR configuration error"
691:
70.endmacro
71
72
73#endif /* XNU_MONITOR && !CONFIG_SPTM */
74
75#if CONFIG_SPTM
76#include <sptm/sptm_xnu.h>
77#include <sptm/sptm_common.h>
78/*
79 * Panic lockdown is a security enhancement which makes certain types of
80 * exceptions (generally, PAC failures and sync exceptions taken with async
81 * exceptions masked) and panics fatal against attackers with kernel R/W. It
82 * does this through a trapdoor panic bit protected by the SPTM.
83 * When this bit is set, TXM will refuse to authorize new code mappings which,
84 * ideally, renders the system unusable even if the attacker gains control over
85 * XNU. Additionally, when this bit is set XNU will refuse to handle any sync
86 * exceptions originating from user space. This makes implementing further stages
87 * of an exploit challenging as it prevents user space from driving the kernel.
88 */
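/*
 * As a rough illustration of the EL0 gate described above (an illustrative
 * C-style sketch, not the authoritative logic; the real assembly is at
 * Lblocked_user_sync_exception below):
 *
 *	// If the SPTM-protected panic bit is set, user sync exceptions are
 *	// never serviced; the core just re-enables interrupts and waits
 *	// for debugger sync.
 *	if (*sptm_xnu_triggered_panic_ptr) {
 *		enable_interrupts();
 *		for (;;) wfe();
 *	}
 */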
89
90/*
91 * Inform the SPTM that XNU has panicked (or, rather, must panic). This is
92 * provided as a macro rather than a function since it's just one instruction
93 * on release and it avoids the need to spill a return address unless the
94 * macro caller explicitly needs to preserve LR.
95 *
96 * On CONFIG_XNUPOST, this macro returns 1 in x0 if a simulated lockdown
97 * was performed, 0 otherwise.
98 *
99 * This macro preserves callee-saved registers but clobbers all others.
100 */
101.macro BEGIN_PANIC_LOCKDOWN unused
102#if DEVELOPMENT || DEBUG
103	/*
104	 * Forcefully clobber all caller-saved GPRs on DEVELOPMENT/DEBUG kernels
105	 * so we don't accidentally violate our contract with the SPTM.
106	 */
107	mov		x0, #0
108	mov		x1, #0
109	mov		x2, #0
110	mov		x3, #0
111	mov		x4, #0
112	mov		x5, #0
113	mov		x6, #0
114	mov		x7, #0
115	mov		x8, #0
116	mov		x9, #0
117	mov		x10, #0
118	mov		x11, #0
119	mov		x12, #0
120	mov		x13, #0
121	mov		x14, #0
122	mov		x15, #0
123	mov		x16, #0
124	mov		x17, #0
125	mov		x18, #0
126
127	/* Attempt to record the debug trace */
128	bl		EXT(panic_lockdown_record_debug_data)
129
130#endif /* DEVELOPMENT || DEBUG */
131#if CONFIG_XNUPOST
132	mrs		x0, TPIDR_EL1
133	/*
134	 * If we hit this with a null TPIDR, it's likely that this was an unexpected
135	 * exception in early boot rather than an expected one as a part of a test.
136	 * Trigger lockdown.
137	 */
138	cbz		x0, Lbegin_panic_lockdown_real_\@
139	ldr		x1, [x0, TH_EXPECTED_FAULT_HANDLER]
140	/* Is a fault handler installed? */
141	cbz 	x1, Lbegin_panic_lockdown_real_\@
142
143	/* Do the VA bits of ELR match the expected fault PC? */
144	ldr		x1, [x0, TH_EXPECTED_FAULT_PC]
145	mrs		x2, ELR_EL1
146	mov		x3, #((1 << (64 - T1SZ_BOOT - 1)) - 1)
147	and		x4, x1, x3
148	and		x5, x2, x3
149	cmp		x4, x5
150	b.eq	Lbegin_panic_lockdown_simulated_\@
151	/* If we had an expected PC but didn't hit it, fail out */
152	cbnz	x1, Lbegin_panic_lockdown_real_\@
153
154	/* Alternatively, do the FAR VA bits match the expected fault address? */
155	ldr		x1, [x0, TH_EXPECTED_FAULT_ADDR]
156	mrs		x2, FAR_EL1
157	and		x4, x1, x3
158	and		x5, x2, x3
159	cmp		x4, x5
160	b.eq	Lbegin_panic_lockdown_simulated_\@
161
162Lbegin_panic_lockdown_real_\@:
163#endif /* CONFIG_XNUPOST */
164	/*
165	 * The sptm_xnu_panic_begin routine is guaranteed to lead, unavoidably,
166	 * to the panic bit being set.
167	 */
168	bl EXT(sptm_xnu_panic_begin)
169#if CONFIG_XNUPOST
170	mov		x0, #0 // not a simulated lockdown
171	b		Lbegin_panic_lockdown_continue_\@
172Lbegin_panic_lockdown_simulated_\@:
173	/*
174	 * We hit lockdown with a matching exception handler installed.
175	 * Since this is an expected test exception, skip setting the panic bit
176	 * (which would render the system unusable) and instead set a flag for
177	 * the test handler.
178	 */
179	mov		x0, #1 // this is a simulated lockdown!
180	adrp	x1, EXT(xnu_post_panic_lockdown_did_fire)@page
181	strb	w0, [x1, EXT(xnu_post_panic_lockdown_did_fire)@pageoff]
182	mov		lr, xzr // trash LR to ensure callers don't rely on it
183Lbegin_panic_lockdown_continue_\@:
184#endif /* CONFIG_XNUPOST */
185.endmacro
186#endif /* CONFIG_SPTM */
187
188/*
189 * MAP_KERNEL
190 *
191 * Restores the kernel EL1 mappings, if necessary.
192 *
193 * This may mutate x18.
194 */
195.macro MAP_KERNEL
196#if __ARM_KERNEL_PROTECT__
197	/* Switch to the kernel ASID (low bit set) for the task. */
198	mrs		x18, TTBR0_EL1
199	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
200	msr		TTBR0_EL1, x18
201
202	/*
203	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
204	 * to the TTBRs and writes to the TCR should be ensured by the
205	 * microarchitecture.
206	 */
207#if !defined(APPLE_ARM64_ARCH_FAMILY)
208	isb		sy
209#endif
210
211	/*
212	 * Update the TCR to map the kernel now that we are using the kernel
213	 * ASID.
214	 */
215	MOV64		x18, TCR_EL1_BOOT
216	msr		TCR_EL1, x18
217	isb		sy
218#endif /* __ARM_KERNEL_PROTECT__ */
219.endmacro
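/*
 * Illustrative sketch of the dual-ASID scheme used above (C-style, not
 * kernel code): each task's ASID pair differs only in the low bit, so
 * toggling that bit in TTBR0_EL1 switches between the user and kernel
 * views of the address space without TLB maintenance:
 *
 *	user_asid   = asid & ~1;	// low bit clear: kernel unmapped
 *	kernel_asid = asid | 1;		// low bit set: kernel mapped
 */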
220
221/*
222 * BRANCH_TO_KVA_VECTOR
223 *
224 * Branches to the requested long exception vector in the kernelcache.
225 *   arg0 - The label to branch to
226 *   arg1 - The index of the label in exc_vectors_table
227 *
228 * This may mutate x18.
229 */
230.macro BRANCH_TO_KVA_VECTOR
231
232#if __ARM_KERNEL_PROTECT__
233	/*
234	 * Find the kernelcache table for the exception vectors by accessing
235	 * the per-CPU data.
236	 */
237	mrs		x18, TPIDR_EL1
238	ldr		x18, [x18, ACT_CPUDATAP]
239	ldr		x18, [x18, CPU_EXC_VECTORS]
240
241	/*
242	 * Get the handler for this exception and jump to it.
243	 */
244	ldr		x18, [x18, #($1 << 3)]
245	br		x18
246#else
247	b		$0
248#endif /* __ARM_KERNEL_PROTECT__ */
249.endmacro
250
251/*
252 * CHECK_KERNEL_STACK
253 *
254 * Verifies that the kernel stack is aligned and mapped within an expected
255 * stack address range. Note: happens before saving registers (in case we can't
256 * save to kernel stack).
257 *
258 * Expects:
259 *	{x0, x1} - saved
260 *	x1 - Exception syndrome
261 *	sp - Saved state
262 *
263 * Note: it seems we need an unused argument to the macro for the \@ syntax to work.
264 *
265 */
266.macro CHECK_KERNEL_STACK unused
267	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
268	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
269	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
270	cmp		x1, x2								// If we have a stack alignment exception
271	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
272	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
273	cmp		x1, x2								// If we have a data abort, we need to
274	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
275	mrs		x0, SP_EL0					// Get SP_EL0
276	mrs		x1, TPIDR_EL1						// Get thread pointer
277	/*
278	 * Check for either a NULL TPIDR or a NULL kernel stack, both of which
279	 * are expected in early boot, but will cause recursive faults if not
280	 * handled specially.
281	 */
282	cbz		x1, Lcorrupt_stack_\@
283	ldr		x2, [x1, TH_KSTACKPTR]
284	cbz		x2, Lcorrupt_stack_\@
285Ltest_kstack_\@:
286	LOAD_KERN_STACK_TOP	dst=x2, src=x1, tmp=x3	// Get top of kernel stack
287	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
288	cmp		x0, x2								// if (SP_EL0 >= kstack top)
289	b.ge	Ltest_istack_\@						//    jump to istack test
290	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
291	b.gt	Lvalid_stack_\@						//    stack pointer valid
292Ltest_istack_\@:
293	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
294	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
295	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
296	cmp		x0, x2								// if (SP_EL0 >= istack top)
297	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
298	cmp		x0, x3								// if (SP_EL0 > istack bottom)
299	b.gt	Lvalid_stack_\@						//    stack pointer valid
300Lcorrupt_stack_\@:
301	ldp		x2, x3, [sp], #16
302	ldp		x0, x1, [sp], #16
303	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
304	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
305	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
306	mrs		x0, SP_EL0					// Get SP_EL0
307	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
308	INIT_SAVED_STATE_FLAVORS sp, w0, w1
309	mov		x0, sp								// Copy exception frame pointer to x0
310	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
311	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
312	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
313	b		fleh_dispatch64
314Lvalid_stack_\@:
315	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
316.endmacro
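/*
 * For reference, the bounds check above is approximately the following
 * (an illustrative C sketch, not kernel code; kstack_top/istack_top stand
 * in for the per-thread and per-CPU values loaded above):
 *
 *	bool sp_valid(uint64_t sp) {
 *		uint64_t ktop = kstack_top, kbot = ktop - KERNEL_STACK_SIZE;
 *		uint64_t itop = istack_top, ibot = itop - INTSTACK_SIZE_NUM;
 *		return (sp > kbot && sp < ktop) || (sp > ibot && sp < itop);
 *	}
 */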
317
318/*
319 * CHECK_EXCEPTION_CRITICAL_REGION
320 *
321 * Checks if the exception occurred within the range [\vector_begin, \vector_end).
322 * If so, jumps to \fail_label. Otherwise, continues.
323 * This is useful for avoiding infinite exception loops.
324 *
325 * Clobbers x18, NZCV.
326 */
327.macro CHECK_EXCEPTION_CRITICAL_REGION vector_begin, vector_end, fail_label
328	/*
329	 * We need two registers to do a compare but only have x18 free without
330	 * spilling. We can't safely spill to memory yet, however, because doing so
331	 * may fault. It's evil, but since we're operating on ELR here we can
332	 * temporarily spill into it to get another free register as long as we put
333	 * everything back at the end.
334	 */
335	mrs		x18, ELR_EL1
336	msr		ELR_EL1, x19
337
338	adrp	x19, \vector_begin@PAGE
339	add		x19, x19, \vector_begin@PAGEOFF
340	cmp		x18, x19 /* HS if at or above (suspect), LO if below (safe) */
341	adrp	x19, \vector_end@PAGE
342	add		x19, x19, \vector_end@PAGEOFF
343	/*
344	 * If ELR >= \vector_begin (HS), set flags for ELR - \vector_end. LO here
345	 * indicates we are in range.
346	 * Otherwise, set HS (C)
347	 */
348	ccmp	x18, x19, #0b0010 /* C/HS */, HS
349	/* Unspill x19/fixup ELR */
350	mrs		x19, ELR_EL1
351	msr		ELR_EL1, x18
352	mov		x18, #0
353	/* If we're in the range, fail out */
354	b.lo	\fail_label
355.endmacro
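/*
 * The ccmp above implements a branchless range check. In C terms (a sketch
 * for illustration; HS/LO are the AArch64 unsigned >= / < conditions):
 *
 *	bool in_range = (elr >= vector_begin) && (elr < vector_end);
 *
 * If ELR >= \vector_begin, ccmp sets flags for (ELR - \vector_end), so LO
 * means in range; otherwise ccmp forces C (HS) and the final b.lo falls
 * through.
 */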
356
357/*
358 * CHECK_EXCEPTION_STACK
359 *
360 * Verifies that SP1 is within exception stack and continues if it is.
361 * If not, jumps to \invalid_stack_label as we have nothing to fall back on.
362 *
363 * (out) x18: The unauthenticated CPU_EXCEPSTACK_TOP used for the comparison or
364 *            zero if the check could not be performed (such as because the
365 *            thread pointer was invalid).
366 *
367 * Clobbers NZCV.
368 */
369.macro CHECK_EXCEPTION_STACK invalid_stack_label
370	mrs		x18, TPIDR_EL1					// Get thread pointer
371	/*
372	 * The thread pointer might be invalid during early boot.
373	 * Return zero in x18 to indicate that we failed to execute the check.
374	 */
375	cbz		x18, Lskip_stack_check_\@
376	ldr		x18, [x18, ACT_CPUDATAP]
377	cbz		x18, \invalid_stack_label		// If thread context is set, cpu data should be too
378	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
379	cmp		sp, x18
380	b.gt	\invalid_stack_label			// Fail if above exception stack top
381	sub		x18, x18, EXCEPSTACK_SIZE_NUM	// Find bottom of exception stack
382	cmp		sp, x18
383	b.lt	\invalid_stack_label			// Fail if below exception stack bottom
384	add		x18, x18, EXCEPSTACK_SIZE_NUM	// Return stack top in x18
385Lskip_stack_check_\@:
386	/* FALLTHROUGH */
387.endmacro
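/*
 * Note: the x18 value produced here is unauthenticated; under CONFIG_SPTM it
 * is re-checked against the authenticated LOAD_EXCEP_STACK_THREAD value
 * before being trusted (see el1_sp1_synchronous_vector_long below).
 */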
388
389#if __ARM_KERNEL_PROTECT__
390	.section __DATA_CONST,__const
391	.align 3
392	.globl EXT(exc_vectors_table)
393LEXT(exc_vectors_table)
394	/* Table of exception handlers.
395	 * These handlers sometimes contain deadloops.
396	 * It's nice to have symbols for them when debugging. */
397	.quad el1_sp0_synchronous_vector_long
398	.quad el1_sp0_irq_vector_long
399	.quad el1_sp0_fiq_vector_long
400	.quad el1_sp0_serror_vector_long
401	.quad el1_sp1_synchronous_vector_long
402	.quad el1_sp1_irq_vector_long
403	.quad el1_sp1_fiq_vector_long
404	.quad el1_sp1_serror_vector_long
405	.quad el0_synchronous_vector_64_long
406	.quad el0_irq_vector_64_long
407	.quad el0_fiq_vector_64_long
408	.quad el0_serror_vector_64_long
409#endif /* __ARM_KERNEL_PROTECT__ */
410
411	.text
412#if __ARM_KERNEL_PROTECT__
413	/*
414	 * We need this to be on a page boundary so that we can avoid mapping
415	 * other text along with it.  As this must be on a VM page boundary
416	 * (due to how the coredumping code currently works), this will be a
417	 * 16KB page boundary.
418	 */
419	.align 14
420#else
421	.align 12
422#endif /* __ARM_KERNEL_PROTECT__ */
423	.globl EXT(ExceptionVectorsBase)
424LEXT(ExceptionVectorsBase)
425Lel1_sp0_synchronous_vector:
426	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
427
428	.text
429	.align 7
430Lel1_sp0_irq_vector:
431	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
432
433	.text
434	.align 7
435Lel1_sp0_fiq_vector:
436	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
437
438	.text
439	.align 7
440Lel1_sp0_serror_vector:
441	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
442
443	.text
444	.align 7
445Lel1_sp1_synchronous_vector:
446	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
447
448	.text
449	.align 7
450Lel1_sp1_irq_vector:
451	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
452
453	.text
454	.align 7
455Lel1_sp1_fiq_vector:
456	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
457
458	.text
459	.align 7
460Lel1_sp1_serror_vector:
461	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
462
463	.text
464	.align 7
465Lel0_synchronous_vector_64:
466	MAP_KERNEL
467	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
468
469	.text
470	.align 7
471Lel0_irq_vector_64:
472	MAP_KERNEL
473	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
474
475	.text
476	.align 7
477Lel0_fiq_vector_64:
478	MAP_KERNEL
479	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
480
481	.text
482	.align 7
483Lel0_serror_vector_64:
484	MAP_KERNEL
485	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
486
487	/* Fill out the rest of the page */
488	.align 12
489
490/*********************************
491 * END OF EXCEPTION VECTORS PAGE *
492 *********************************/
493
494
495
496.macro EL1_SP0_VECTOR
497	msr		SPSel, #0							// Switch to SP0
498	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
499	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
500	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
501	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
502	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
503	INIT_SAVED_STATE_FLAVORS sp, w0, w1
504	mov		x0, sp								// Copy saved state pointer to x0
505.endmacro
506
507.macro EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
508	// SWITCH_TO_INT_STACK requires a clobberable tmp register, but at this
509	// point in the exception vector we can't spare the extra GPR.  Instead note
510	// that EL1_SP0_VECTOR ends with x0 == sp and use this to unclobber x0.
511	mrs		x1, TPIDR_EL1
512	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
513	mov		x0, sp
514	mov		sp, x1
515.endmacro
516
517el1_sp0_synchronous_vector_long:
518	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
519	mrs		x1, ESR_EL1							// Get the exception syndrome
520	/* If the stack pointer is corrupt, it will manifest either as a data abort
521	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
522	 * these quickly by testing bit 5 of the exception class.
523	 */
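	/*
	 * Worked out: ESR_EC_DABORT_EL1 = 0x25 = 0b100101 and ESR_EC_SP_ALIGN =
	 * 0x26 = 0b100110 both have bit 5 set, so a single tbz on bit
	 * (5 + ESR_EC_SHIFT) cheaply skips CHECK_KERNEL_STACK for exception
	 * classes that cannot indicate a corrupt SP; the macro then
	 * discriminates precisely.
	 */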
524	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
525	CHECK_KERNEL_STACK
526Lkernel_stack_valid:
527	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
528	EL1_SP0_VECTOR
529	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
530	add		x1, x1, EXT(fleh_synchronous)@pageoff
531	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
532	b		fleh_dispatch64
533
534el1_sp0_irq_vector_long:
535	EL1_SP0_VECTOR
536	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
537	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
538	add		x1, x1, EXT(fleh_irq)@pageoff
539	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
540	b		fleh_dispatch64
541
542el1_sp0_fiq_vector_long:
543	// ARM64_TODO write optimized decrementer
544	EL1_SP0_VECTOR
545	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
546	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
547	add		x1, x1, EXT(fleh_fiq)@pageoff
548	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
549	b		fleh_dispatch64
550
551el1_sp0_serror_vector_long:
552	EL1_SP0_VECTOR
553	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
554	add		x1, x1, EXT(fleh_serror)@pageoff
555	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
556	b		fleh_dispatch64
557
558.macro EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
559	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
560	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
561	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
562	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
563	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
564	INIT_SAVED_STATE_FLAVORS sp, w0, w1
565.if \set_x0_to_exception_frame_ptr
566	mov		x0, sp								// Copy saved state pointer to x0
567.endif
568.endmacro
569
570el1_sp1_synchronous_vector_long:
571	/*
572	 * Before making our first (potentially faulting) memory access, check if we
573	 * previously tried and failed to execute this vector. If we did, it's not
574	 * going to work this time either so let's just spin.
575	 */
576#ifdef CONFIG_SPTM
577	/*
578	 * This check is doubly important for devices which support panic lockdown
579	 * as we use this check to ensure that we can take only a bounded number of
580	 * exceptions on SP1 while trying to spill before we give up on spilling and
581	 * lock down anyway.
582	 *
583	 * Note, however, that we only check if we took an exception inside this
584	 * vector. Although an attacker could cause exceptions outside this routine,
585	 * they can only do this a finite number of times before overflowing the
586	 * exception stack (causing CHECK_EXCEPTION_STACK to fail) since we subtract
587	 * from SP inside the checked region and do not reload SP from memory before
588	 * we hit the post-spill lockdown point in fleh_synchronous_sp1.
589	 */
590#endif /* CONFIG_SPTM */
591	CHECK_EXCEPTION_CRITICAL_REGION el1_sp1_synchronous_vector_long, Lel1_sp1_synchronous_vector_long_end, EXT(el1_sp1_synchronous_vector_long_spill_failed)
592	CHECK_EXCEPTION_STACK EXT(el1_sp1_synchronous_vector_long_spill_failed)
593#ifdef KERNEL_INTEGRITY_KTRR
594	b		check_ktrr_sctlr_trap
595Lel1_sp1_synchronous_vector_continue:
596#endif /* KERNEL_INTEGRITY_KTRR */
597#if CONFIG_SPTM
598	/* Don't bother setting up x0 since we need it as a temporary */
599	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=0
600
601	/*
602	 * Did we fail to execute the stack check (x18=0)?
603	 * On devices which support panic lockdown, we cannot allow this check to be
604	 * skipped after early-boot as doing so may allow exception processing to
605	 * be delayed indefinitely.
606	 */
607	adrp	x0, EXT(startup_phase)@page
608	ldr		w0, [x0, EXT(startup_phase)@pageoff]
609	/* Are we in early-boot? */
610	cmp		w0, #-1 // STARTUP_SUB_LOCKDOWN
611	/*
612	 * If we're still in early-boot (LO), set flags for whether we skipped
613	 * the check. If we're after early-boot (HS), pass NE.
614	 */
615	ccmp	x18, xzr, #0b0000 /* !Z/NE */, LO
616	/* Skip authentication if this was an early boot check fail */
617	b.eq	1f
618	/*
619	 * If we're not in early boot but still couldn't execute the stack bounds
620	 * check (x18=0), something is wrong (TPIDR is corrupted?).
621	 * Trigger a lockdown.
622	 */
623	cbz		x18, EXT(el1_sp1_synchronous_vector_long_spill_failed)
624
625	/*
626	 * In CHECK_EXCEPTION_STACK, we didn't have enough registers to perform the
627	 * signature verification on the exception stack top value and instead used
628	 * the unauthenticated value (x18) for the stack pointer bounds check.
629	 *
630	 * Ensure now that we actually performed the check on a legitimate value.
631	 */
632	mrs		x0, TPIDR_EL1
633	LOAD_EXCEP_STACK_THREAD dst=x0, src=x0, tmp=x1
634	cmp		x0, x18
635	/* If we aren't equal, something is very wrong and we should lockdown. */
636	b.ne	EXT(el1_sp1_synchronous_vector_long_spill_failed)
637
6381:
639	mov		x0, sp	/* Set x0 to saved state pointer */
640#else
641	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
642#endif /* CONFIG_SPTM */
643	adrp	x1, fleh_synchronous_sp1@page
644	add		x1, x1, fleh_synchronous_sp1@pageoff
645	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
646	b		fleh_dispatch64
647
648	/*
649	 * Global symbol to make it easy to pick out in backtraces.
650	 * Do not call externally.
651	 */
652	.global EXT(el1_sp1_synchronous_vector_long_spill_failed)
653LEXT(el1_sp1_synchronous_vector_long_spill_failed)
654	TRAP_UNWIND_PROLOGUE
655	TRAP_UNWIND_DIRECTIVES
656	/*
657	 * We couldn't process the exception due to either having an invalid
658	 * exception stack or because we previously tried to process it and failed.
659	 */
660#if CONFIG_SPTM
661	/*
662	 * For SP1 exceptions, we usually delay initiating lockdown until after
663	 * we've spilled in order to not lose register state. Since we have nowhere
664	 * to safely spill, we have no choice but to initiate it now, clobbering
665	 * some of our exception state in the process (RIP).
666	 */
667	BEGIN_PANIC_LOCKDOWN
668#if CONFIG_XNUPOST
669	/* Macro returns x0=1 if it performed a simulated lockdown */
670	cbz		x0, 0f
671	/* This was a test; return to the fault handler so it can fix up the system. */
672	mrs		x0, TPIDR_EL1
673	ldr		x16, [x0, TH_EXPECTED_FAULT_HANDLER]
674#if __has_feature(ptrauth_calls)
675	movk	x17, #TH_EXPECTED_FAULT_HANDLER_DIVERSIFIER
676	autia	x16, x17
677#endif /* ptrauth_calls */
678	msr		ELR_EL1, x16
679	/* Pass a NULL saved state since we didn't actually save anything */
680	mov		x0, #0
681	ERET_NO_STRAIGHT_LINE_SPECULATION
682#endif /* CONFIG_XNUPOST */
683#endif /* CONFIG_SPTM */
6840:
685	wfe
686	b		0b // Spin for watchdog
687	UNWIND_EPILOGUE
688
689#if CONFIG_SPTM
690#if CONFIG_XNUPOST
691	/**
692	 * Test function which raises an exception from a location considered inside
693	 * the vector. Does not return.
694	 */
695	.global EXT(el1_sp1_synchronous_raise_exception_in_vector)
696LEXT(el1_sp1_synchronous_raise_exception_in_vector)
697	ARM64_PROLOG
698	brk		#0
699	/* Unreachable */
700	b		.
701#endif /* CONFIG_XNUPOST */
702#endif /* CONFIG_SPTM */
703Lel1_sp1_synchronous_vector_long_end:
704
705el1_sp1_irq_vector_long:
706	EL1_SP1_VECTOR
707	adrp	x1, fleh_irq_sp1@page
708	add		x1, x1, fleh_irq_sp1@pageoff
709	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
710	b		fleh_dispatch64
711
712el1_sp1_fiq_vector_long:
713	EL1_SP1_VECTOR
714	adrp	x1, fleh_fiq_sp1@page
715	add		x1, x1, fleh_fiq_sp1@pageoff
716	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
717	b		fleh_dispatch64
718
719el1_sp1_serror_vector_long:
720	EL1_SP1_VECTOR
721	adrp	x1, fleh_serror_sp1@page
722	add		x1, x1, fleh_serror_sp1@pageoff
723	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
724	b		fleh_dispatch64
725
726
727.macro EL0_64_VECTOR guest_label
728	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
729#if __ARM_KERNEL_PROTECT__
730	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
731#endif
732	mrs		x0, TPIDR_EL1						// Load the thread register
733	LOAD_USER_PCB	dst=x0, src=x0, tmp=x1		// Load the user context pointer
734	mrs		x1, SP_EL0							// Load the user stack pointer
735	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
736	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
737	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
738	msr		SPSel, #0							// Switch to SP0
739	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
740	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the user PCB
741	mrs		x1, TPIDR_EL1						// Load the thread register
742
743
744
745#if HAS_ARM_FEAT_SME
746	str		x2, [sp, SS64_X2]
747	// current_thread()->machine.umatrix_hdr == NULL: this thread has never
748	// executed smstart, so no SME state to save
749	add		x0, x1, ACT_UMATRIX_HDR
750	ldr		x2, [x0]
751	cbz		x2, 1f
752	AUTDA_DIVERSIFIED x2, address=x0, diversifier=ACT_UMATRIX_HDR_DIVERSIFIER
753
754	mrs		x0, SVCR
755	str		x0, [x2, SME_SVCR]
756	// SVCR.SM == 0: save SVCR only (ZA is handled during context-switch)
757	tbz		x0, #SVCR_SM_SHIFT, 1f
758
759	// SVCR.SM == 1: save SVCR, Z, and P; and exit streaming SVE mode
760	ldrh	w0, [x2, SME_SVL_B]
761	add		x2, x2, SME_Z_P_ZA
762	LOAD_OR_STORE_Z_P_REGISTERS	str, svl_b=x0, ss=x2
763	mrs		x2, FPSR
764	smstop	sm
765	msr		FPSR, x2
7661:
767	ldr		x2, [sp, SS64_X2]
768#endif /* HAS_ARM_FEAT_SME */
769
770	mov		x0, sp								// Copy the user PCB pointer to x0
771												// x1 contains thread register
772.endmacro
773
774.macro EL0_64_VECTOR_SWITCH_TO_INT_STACK
775	// Similarly to EL1_SP0_VECTOR_SWITCH_TO_INT_STACK, we need to take
776	// advantage of EL0_64_VECTOR ending with x0 == sp.  EL0_64_VECTOR also
777	// populates x1 with the thread state, so we can skip reloading it.
778	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
779	mov		x0, sp
780	mov		sp, x1
781.endmacro
782
783.macro EL0_64_VECTOR_SWITCH_TO_KERN_STACK
784	LOAD_KERN_STACK_TOP	dst=x1, src=x1, tmp=x0
785	mov		x0, sp
786	mov		sp, x1
787.endmacro
788
789el0_synchronous_vector_64_long:
790	EL0_64_VECTOR	sync
791	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
792	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
793	add		x1, x1, EXT(fleh_synchronous)@pageoff
794	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
795	b		fleh_dispatch64
796
797el0_irq_vector_64_long:
798	EL0_64_VECTOR	irq
799	EL0_64_VECTOR_SWITCH_TO_INT_STACK
800	adrp	x1, EXT(fleh_irq)@page					// load address for fleh
801	add		x1, x1, EXT(fleh_irq)@pageoff
802	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
803	b		fleh_dispatch64
804
805el0_fiq_vector_64_long:
806	EL0_64_VECTOR	fiq
807	EL0_64_VECTOR_SWITCH_TO_INT_STACK
808	adrp	x1, EXT(fleh_fiq)@page					// load address for fleh
809	add		x1, x1, EXT(fleh_fiq)@pageoff
810	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
811	b		fleh_dispatch64
812
813el0_serror_vector_64_long:
814	EL0_64_VECTOR	serror
815	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
816	adrp	x1, EXT(fleh_serror)@page				// load address for fleh
817	add		x1, x1, EXT(fleh_serror)@pageoff
818	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
819	b		fleh_dispatch64
820
821
822#if defined(KERNEL_INTEGRITY_KTRR)
823	.text
824	.align 2
825check_ktrr_sctlr_trap:
826/* We may abort on an instruction fetch on reset when enabling the MMU by
827 * writing SCTLR_EL1 because the page containing the privileged instruction is
828 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
829 * would otherwise panic unconditionally. Check for the condition and return
830 * safe execution to the caller on behalf of the faulting function.
831 *
832 * Expected register state:
833 *  x22 - Kernel virtual base
834 *  x23 - Kernel physical base
835 */
836	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
837	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
838	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
839	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
840	movz	w1, #0x8600, lsl #16
841	movk	w1, #0x0000
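	/*
	 * Note: 0x86000000 decodes as ESR.EC = 0x21 (instruction abort taken
	 * without a change in Exception level) with ESR.IL = 1 and the
	 * remaining, non-IFSC ISS bits zero.
	 */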
842	cmp		x0, x1
843	mrs		x0, ELR_EL1					// Check for expected abort address
844	adrp	x1, _pinst_set_sctlr_trap_addr@page
845	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
846	sub		x1, x1, x22					// Convert to physical address
847	add		x1, x1, x23
848	ccmp	x0, x1, #0, eq
849	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
850	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
851	b.ne	Lel1_sp1_synchronous_vector_continue
852	msr		ELR_EL1, lr					// Return to caller
853	ERET_NO_STRAIGHT_LINE_SPECULATION
854#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
855
856/* 64-bit first level exception handler dispatcher.
857 * Completes register context saving and branches to FLEH.
858 * Expects:
859 *  {x0, x1, sp} - saved
860 *  x0 - arm_context_t
861 *  x1 - address of FLEH
862 *  x2 - bitfield of type FLEH_DISPATCH64_OPTION_xxx, clobbered
863 *  x3 - unused
864 *  fp - previous stack frame if EL1
865 *  lr - unused
866 *  sp - kernel stack
867 */
868	.text
869	.align 2
870fleh_dispatch64:
871#if HAS_APPLE_PAC
872	pacia	x1, sp
873#endif
874
875	/* Save arm_saved_state64 */
876	SPILL_REGISTERS KERNEL_MODE, options_register=x2
877
878	/* If exception is from userspace, zero unused registers */
879	and		x23, x23, #(PSR64_MODE_EL_MASK)
880	cmp		x23, #(PSR64_MODE_EL0)
881	bne		1f
882
883	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
884
885
8862:
887
888	mov		x2, #0
889	mov		x3, #0
890	mov		x4, #0
891	mov		x5, #0
892	mov		x6, #0
893	mov		x7, #0
894	mov		x8, #0
895	mov		x9, #0
896	mov		x10, #0
897	mov		x11, #0
898	mov		x12, #0
899	mov		x13, #0
900	mov		x14, #0
901	mov		x15, #0
902	mov		x16, #0
903	mov		x17, #0
904	mov		x18, #0
905	mov		x19, #0
906	mov		x20, #0
907	/* x21, x22 are overwritten below, so they need not be zeroed here */
908	mov		x23, #0
909	mov		x24, #0
910	mov		x25, #0
911#if !XNU_MONITOR
912	mov		x26, #0
913#endif
914	mov		x27, #0
915	mov		x28, #0
916	mov		fp, #0
917	mov		lr, #0
9181:
919
920	mov		x21, x0								// Copy arm_context_t pointer to x21
921	mov		x22, x1								// Copy handler routine to x22
922
923#if XNU_MONITOR
924	/* Zero x26 to indicate that this should not return to the PPL. */
925	mov		x26, #0
926#endif
927
928#if PRECISE_USER_KERNEL_TIME
929	cmp		x23, #PSR64_MODE_EL0			// If interrupting the kernel, skip
930	b.gt	1f                                  // precise time update.
931	PUSH_FRAME
932	bl		EXT(recount_leave_user)
933	POP_FRAME_WITHOUT_LR
934	mov		x0, x21								// Reload arm_context_t pointer
9351:
936#endif /* PRECISE_USER_KERNEL_TIME */
937
938	/* Dispatch to FLEH */
939
940#if HAS_APPLE_PAC
941	braa	x22,sp
942#else
943	br		x22
944#endif
945
946
947	.text
948	.align 2
949	.global EXT(fleh_synchronous)
950LEXT(fleh_synchronous)
951TRAP_UNWIND_PROLOGUE
952TRAP_UNWIND_DIRECTIVES
953	ARM64_JUMP_TARGET
954	mrs		x1, ESR_EL1							// Load exception syndrome
955	mrs		x2, FAR_EL1							// Load fault address
956	mrs		lr, ELR_EL1
957	/* NB: lr might not be a valid address (e.g. instruction abort). */
958	PUSH_FRAME
959
960#if CONFIG_SPTM
961	mrs		x25, ELR_EL1
962
963	/*
964	 * Sync exceptions in the kernel are rare, so check that first.
965	 * This check should be trivially predicted NT. We also take
966	 * This check should be trivially predicted NT (not taken). We also take
967	 * frontend redirect.
968	 */
969	mov		x3, #0 // by default, do not signal panic lockdown to sleh
970	mrs		x4, SPSR_EL1
971	tst		x4, #(PSR64_MODE_EL_MASK)
972	b.ne	Lfleh_synchronous_ool_check_exception_el1 /* Run ELn checks if we're EL!=0 (!Z) */
973	/* EL0 -- check if we're blocking sync exceptions due to lockdown */
974	adrp	x4, EXT(sptm_xnu_triggered_panic_ptr)@page
975	ldr		x4, [x4, EXT(sptm_xnu_triggered_panic_ptr)@pageoff]
976	ldrb	w4, [x4]
977	cbnz	w4, Lblocked_user_sync_exception
978
979Lfleh_synchronous_continue:
980	/* We've had our chance to lock down, release PC/FAR */
981	str		x25, [x0, SS64_PC]
982	str		x2,  [x0, SS64_FAR]
983#endif /* CONFIG_SPTM */
984
985	bl		EXT(sleh_synchronous)
986	POP_FRAME_WITHOUT_LR
987
988#if XNU_MONITOR && !CONFIG_SPTM
989	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
990#endif
991
992	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
993	b		exception_return_dispatch
994
995#if CONFIG_SPTM
996Lfleh_synchronous_ool_check_exception_el1:
997	/* Save off arguments needed for sleh_sync as we may clobber */
998	mov		x26, x0
999	mov		x27, x1
1000	mov		x28, x2
1001
1002	/*
1003	 * Evaluate the exception state to determine if we should initiate a
1004	 * lockdown. While this function is implemented in C, it is guaranteed
1005	 * not to use the stack, so it should be immune to spill tampering and
1006	 * other attacks that might cause it to render the wrong ruling.
1007	 */
1008	mov		x0, x1  // ESR
1009	mov		x1, x25 // ELR
1010			        // FAR is already in x2
1011	mrs		x3, SPSR_EL1
1012	bl		EXT(sleh_panic_lockdown_should_initiate_el1_sp0_sync)
1013
1014	/* sleh_synchronous needs the lockdown decision in x3 */
1015	mov		x3, x0
1016	/* Optimistically restore registers on the assumption we won't lockdown */
1017	mov		x0, x26
1018	mov		x1, x27
1019	mov		x2, x28
1020
1021	cbz		x3, Lfleh_synchronous_continue
1022
1023	BEGIN_PANIC_LOCKDOWN
1024	mov		x0, x26
1025	mov		x1, x27
1026	mov		x2, x28
1027	/*
1028	 * A captain goes down with her ship; the system is sunk, but for
1029	 * telemetry's sake try to handle the crash normally.
1030	 */
1031	mov		x3, #1 // signal to sleh that we completed panic lockdown
1032	b		Lfleh_synchronous_continue
1033#endif /* CONFIG_SPTM */
1034UNWIND_EPILOGUE
1035
1036#if CONFIG_SPTM
1037	.text
1038	.align 2
1039	/* Make a global symbol so it's easier to pick out in backtraces */
1040	.global EXT(blocked_user_sync_exception)
1041LEXT(blocked_user_sync_exception)
1042Lblocked_user_sync_exception:
1043	TRAP_UNWIND_PROLOGUE
1044	TRAP_UNWIND_DIRECTIVES
1045	/*
1046	 * User space took a sync exception after panic lockdown had been initiated.
1047	 * The system is going to panic soon, so let's just re-enable interrupts and
1048	 * wait for debugger sync.
1049	 */
1050	msr		DAIFClr, #(DAIFSC_STANDARD_DISABLE)
10510:
1052	wfe
1053	b		0b
1054	UNWIND_EPILOGUE
1055#endif /* CONFIG_SPTM */
1056
1057/* Shared prologue code for fleh_irq and fleh_fiq.
1058 * Does any interrupt bookkeeping we may want to do
1059 * before invoking the handler proper.
1060 * Expects:
1061 *  x0 - arm_context_t
1062 * x23 - CPSR
1063 *  fp - Undefined live value (we may push a frame)
1064 *  lr - Undefined live value (we may push a frame)
1065 *  sp - Interrupt stack for the current CPU
1066 */
1067.macro BEGIN_INTERRUPT_HANDLER
1068	mrs		x22, TPIDR_EL1
1069	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
1070	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence  */
1071	ldr		w1, [x23, CPU_STAT_IRQ]
1072	add		w1, w1, #1							// Increment count
1073	str		w1, [x23, CPU_STAT_IRQ]				// Update IRQ count
1074	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
1075	add		w1, w1, #1					// Increment count
1076	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
1077	/* Increment preempt count */
1078	ldr		w1, [x22, ACT_PREEMPT_CNT]
1079	add		w1, w1, #1
1080	str		w1, [x22, ACT_PREEMPT_CNT]
1081	/* Store context in int state */
1082	str		x0, [x23, CPU_INT_STATE] 			// Saved context in cpu_int_state
1083.endmacro
1084
1085/* Shared epilogue code for fleh_irq and fleh_fiq.
1086 * Cleans up after the prologue, and may do a bit more
1087 * bookkeeping (kdebug related).
1088 * Expects:
1089 * x22 - Live TPIDR_EL1 value (thread address)
1090 * x23 - Address of the current CPU data structure
1091 * w24 - 0 if kdebug is disabled, nonzero otherwise
1092 *  fp - Undefined live value (we may push a frame)
1093 *  lr - Undefined live value (we may push a frame)
1094 *  sp - Interrupt stack for the current CPU
1095 */
1096.macro END_INTERRUPT_HANDLER
1097	/* Clear int context */
1098	str		xzr, [x23, CPU_INT_STATE]
1099	/* Decrement preempt count */
1100	ldr		w0, [x22, ACT_PREEMPT_CNT]
1101	cbnz	w0, 1f								// Detect underflow
1102	b		preempt_underflow
11031:
1104	sub		w0, w0, #1
1105	str		w0, [x22, ACT_PREEMPT_CNT]
1106	/* Switch back to kernel stack */
1107	LOAD_KERN_STACK_TOP	dst=x0, src=x22, tmp=x28
1108	mov		sp, x0
1109	/* Generate a CPU-local event to terminate a post-IRQ WFE */
1110	sevl
1111.endmacro
1112
1113	.text
1114	.align 2
1115	.global EXT(fleh_irq)
1116LEXT(fleh_irq)
1117TRAP_UNWIND_PROLOGUE
1118TRAP_UNWIND_DIRECTIVES
1119	ARM64_JUMP_TARGET
1120	BEGIN_INTERRUPT_HANDLER
1121	PUSH_FRAME
1122	bl		EXT(sleh_irq)
1123	POP_FRAME_WITHOUT_LR
1124	END_INTERRUPT_HANDLER
1125
1126#if XNU_MONITOR && !CONFIG_SPTM
1127	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1128#endif
1129
1130	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
1131	b		exception_return_dispatch
1132UNWIND_EPILOGUE
1133
1134	.text
1135	.align 2
1136	.global EXT(fleh_fiq_generic)
1137LEXT(fleh_fiq_generic)
1138	/*
1139	 * This function is a placeholder which should never be invoked.
1140	 * We omit the landingpad here since there is no sensible choice.
1141	 */
1142	PANIC_UNIMPLEMENTED
1143
1144	.text
1145	.align 2
1146	.global EXT(fleh_fiq)
1147LEXT(fleh_fiq)
1148TRAP_UNWIND_PROLOGUE
1149TRAP_UNWIND_DIRECTIVES
1150	ARM64_JUMP_TARGET
1151	BEGIN_INTERRUPT_HANDLER
1152	PUSH_FRAME
1153	bl		EXT(sleh_fiq)
1154	POP_FRAME_WITHOUT_LR
1155	END_INTERRUPT_HANDLER
1156
1157#if XNU_MONITOR && !CONFIG_SPTM
1158	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1159#endif
1160
1161	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
1162	b		exception_return_dispatch
1163UNWIND_EPILOGUE
1164
1165	.text
1166	.align 2
1167	.global EXT(fleh_serror)
1168LEXT(fleh_serror)
1169TRAP_UNWIND_PROLOGUE
1170TRAP_UNWIND_DIRECTIVES
1171	ARM64_JUMP_TARGET
1172	mrs		x1, ESR_EL1							// Load exception syndrome
1173	mrs		x2, FAR_EL1							// Load fault address
1174
1175	PUSH_FRAME
1176	bl		EXT(sleh_serror)
1177	POP_FRAME_WITHOUT_LR
1178
1179#if XNU_MONITOR && !CONFIG_SPTM
1180	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1181#endif
1182
1183	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
1184	b		exception_return_dispatch
1185UNWIND_EPILOGUE
1186
1187/*
1188 * Register state was already saved by CHECK_KERNEL_STACK before we get here.
1189 */
1190	.text
1191	.align 2
1192fleh_invalid_stack:
1193	TRAP_UNWIND_PROLOGUE
1194	TRAP_UNWIND_DIRECTIVES
1195	ARM64_JUMP_TARGET
1196#if CONFIG_SPTM
1197	/*
1198	 * Taking a data abort with an invalid kernel stack pointer is unrecoverable.
1199	 * Initiate lockdown.
1200	 */
1201
1202	/* Save off temporaries (including exception SPRs) as SPTM can clobber */
1203	mov		x25, x0
1204	mrs		x26, ELR_EL1
1205	mrs		x27, ESR_EL1
1206	mrs		x28, FAR_EL1
1207	BEGIN_PANIC_LOCKDOWN
1208	mov		x0, x25
1209	mov		x1, x27
1210	mov		x2, x28
1211	/* We deferred storing PC/FAR until after lockdown, so do that now */
1212	str		x26, [x0, SS64_PC]
1213	str		x28, [x0, SS64_FAR]
1214#else
1215	mrs		x1, ESR_EL1							// Load exception syndrome
1216	mrs		x2, FAR_EL1							// Load fault address
1217#endif /* CONFIG_SPTM */
1218	PUSH_FRAME
1219	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
1220	b 		.
1221	UNWIND_EPILOGUE
1222
1223	.text
1224	.align 2
1225fleh_synchronous_sp1:
1226	TRAP_UNWIND_PROLOGUE
1227	TRAP_UNWIND_DIRECTIVES
1228	ARM64_JUMP_TARGET
1229#if CONFIG_SPTM
1230	/*
1231	 * Without debugger intervention, all exceptions on SP1 (including debug
1232	 * trap instructions) are intended to be fatal. In order to not break
1233	 * self-hosted kernel debug, do not trigger lockdown for debug traps
1234	 * (unknown instructions/uncategorized exceptions). On release kernels, we
1235	 * don't support self-hosted kernel debug, so we unconditionally lock down.
1236	 */
1237#if (DEVELOPMENT || DEBUG)
1238	tst		w1, #(ESR_EC_MASK)
1239	b.eq	Lfleh_synchronous_sp1_skip_panic_lockdown // ESR_EC_UNCATEGORIZED is 0, so skip lockdown if Z
1240#endif /* DEVELOPMENT || DEBUG */
1241	/* Save off temporaries (including exception SPRs) as SPTM can clobber */
1242	mov		x25, x0
1243	mrs		x26, ELR_EL1
1244	mrs		x27, ESR_EL1
1245	mrs		x28, FAR_EL1
1246	BEGIN_PANIC_LOCKDOWN
1247	mov		x0, x25
1248	mov		x1, x27
1249	mov		x2, x28
1250	/* We deferred storing PC/FAR until after lockdown, so do that now */
1251	str		x26, [x0, SS64_PC]
1252	str		x28, [x0, SS64_FAR]
1253Lfleh_synchronous_sp1_skip_panic_lockdown:
1254#else
1255	mrs		x1, ESR_EL1
1256	mrs		x2, FAR_EL1
1257#endif /* CONFIG_SPTM */
1258	/*
1259	 * If we got here before we have a kernel thread or kernel stack (e.g.
1260	 * still on init_thread) and we try to panic(), we'll end up in an infinite
1261	 * nested-exception loop, so just stop here instead to preserve the call stack.
1262	 */
1263	mrs		x9, TPIDR_EL1
1264	cbz		x9, 0f
1265	ldr		x9, [x9, TH_KSTACKPTR]
1266	cbz		x9, 0f
1267	PUSH_FRAME
1268	bl		EXT(sleh_synchronous_sp1)
1269	b 		.
12700:
1271	PUSH_FRAME
1272	bl		EXT(el1_sp1_synchronous_vector_long_invalid_kstack)
1273	b 		.
1274	UNWIND_EPILOGUE
1275
1276LEXT(el1_sp1_synchronous_vector_long_invalid_kstack)
12770:
1278	wfe
1279	b		0b // Spin for watchdog
1280
1281	.text
1282	.align 2
1283fleh_irq_sp1:
1284	ARM64_JUMP_TARGET
1285	mov		x1, x0
1286	adr		x0, Lsp1_irq_str
1287	b		EXT(panic_with_thread_kernel_state)
1288Lsp1_irq_str:
1289	.asciz "IRQ exception taken while SP1 selected"
1290
1291	.text
1292	.align 2
1293fleh_fiq_sp1:
1294	ARM64_JUMP_TARGET
1295	mov		x1, x0
1296	adr		x0, Lsp1_fiq_str
1297	b		EXT(panic_with_thread_kernel_state)
1298Lsp1_fiq_str:
1299	.asciz "FIQ exception taken while SP1 selected"
1300
1301	.text
1302	.align 2
1303fleh_serror_sp1:
1304	ARM64_JUMP_TARGET
1305	mov		x1, x0
1306	adr		x0, Lsp1_serror_str
1307	b		EXT(panic_with_thread_kernel_state)
1308Lsp1_serror_str:
1309	.asciz "Asynchronous exception taken while SP1 selected"
1310
1311	.text
1312	.align 2
1313exception_return_dispatch:
1314	ldr		w0, [x21, SS64_CPSR]
1315	tst		w0, PSR64_MODE_EL_MASK
1316	b.ne		EXT(return_to_kernel) // return to kernel if M[3:2] > 0
1317	b		return_to_user
1318
1319#if CONFIG_SPTM
1320/**
1321 * XNU returns to this symbol whenever handling an interrupt that occurred
1322 * during SPTM, TXM or SK runtime. This code determines which domain the
1323 * XNU thread was executing in when the interrupt occurred and tells SPTM
1324 * which domain to resume.
1325 */
1326	.text
1327	.align 2
1328	.global EXT(xnu_return_to_gl2)
1329LEXT(xnu_return_to_gl2)
1330	/**
1331	 * If thread->txm_thread_stack is set, we need to tell SPTM dispatch to
1332	 * resume the TXM thread in x0.
1333	 */
1334	mrs		x8, TPIDR_EL1
1335	ldr		x8, [x8, TH_TXM_THREAD_STACK]
1336	cbz		x8, 1f
1337	mov		x0, x8
1338	b		EXT(txm_resume)
1339	/* Unreachable */
1340	b .
1341
1342#if CONFIG_EXCLAVES
1343	/**
1344	 * If thread->th_exclaves_intstate flag TH_EXCLAVES_EXECUTION is set
1345	 * we need to tell SPTM dispatch to resume the SK thread.
1346	 */
13471:
1348	mrs		x8, TPIDR_EL1
1349	ldr		x9, [x8, TH_EXCLAVES_INTSTATE]
1350	and		x9, x9, TH_EXCLAVES_EXECUTION
1351	cbz		x9, 1f
1352	b		EXT(sk_resume)
1353	/* Unreachable */
1354	b .
1355#endif /* CONFIG_EXCLAVES */
1356
1357	/**
1358	 * If neither the above checks succeeded, this must be a thread
1359	 * that was interrupted while running in SPTM. Tell SPTM to resume
1360	 * the interrupted SPTM call.
1361	 */
13621:
1363	b		EXT(sptm_resume_from_exception)
1364	/* Unreachable */
1365	b .
1366#endif /* CONFIG_SPTM */
1367
1368	.text
1369	.align 2
1370	.global EXT(return_to_kernel)
1371LEXT(return_to_kernel)
1372	UNWIND_PROLOGUE
1373	RETURN_TO_KERNEL_UNWIND
1374	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
1375	mrs		x3, TPIDR_EL1                           // Load thread pointer
1376	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
1377	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
1378	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
1379	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
1380	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
1381	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
1382	b.eq	exception_return_unint_tpidr_x3
1383	mov		sp, x21                                 // Switch to thread stack for preemption
1384	PUSH_FRAME
1385	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
1386	POP_FRAME_WITHOUT_LR
1387	b		exception_return
1388	UNWIND_EPILOGUE
1389
1390	.text
1391	.globl EXT(thread_bootstrap_return)
1392LEXT(thread_bootstrap_return)
1393	ARM64_PROLOG
1394#if CONFIG_DTRACE
1395	bl		EXT(dtrace_thread_bootstrap)
1396#endif
1397#if KASAN_TBI
1398	PUSH_FRAME
1399	bl		EXT(__asan_handle_no_return)
1400	POP_FRAME_WITHOUT_LR
1401#endif /* KASAN_TBI */
1402	b		EXT(arm64_thread_exception_return)
1403
1404	.text
1405	.globl EXT(arm64_thread_exception_return)
1406LEXT(arm64_thread_exception_return)
1407	ARM64_PROLOG
1408	mrs		x0, TPIDR_EL1
1409	LOAD_USER_PCB	dst=x21, src=x0, tmp=x28
1410	mov		x28, xzr
1411
1412	//
1413	// Fall through to return_to_user from arm64_thread_exception_return.
1414	// Note that if we move return_to_user or insert a new routine
1415	// below arm64_thread_exception_return, the latter will need to change.
1416	//
1417	.text
1418/* x21 is always the machine context pointer when we get here
1419 * x28 is a bit indicating whether or not we should check if pc is in pfz */
1420return_to_user:
1421check_user_asts:
1422#if KASAN_TBI
1423	PUSH_FRAME
1424	bl		EXT(__asan_handle_no_return)
1425	POP_FRAME_WITHOUT_LR
1426#endif /* KASAN_TBI */
1427	mrs		x3, TPIDR_EL1					// Load thread pointer
1428
1429	movn		w2, #0
1430	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user
1431
1432#if MACH_ASSERT
1433	ldr		w0, [x3, ACT_PREEMPT_CNT]
1434	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
1435#endif
1436
1437	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1438	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
1439	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
1440	cbz		w0, no_asts							// If no asts, skip ahead
1441
1442	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle asts
1443
1444	/* At this point, we have ASTs and we need to check whether we are running in the
1445	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
1446	 * the PFZ since we don't want to handle getting a signal or getting suspended
1447	 * while holding a spinlock in userspace.
1448	 *
1449	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
1450	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
1451	 * to use it to indicate to userspace to come back to take a delayed
1452	 * preemption, at which point the ASTs will be handled. */
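	/* An illustrative sketch of the handoff (the authoritative user-side
	 * logic lives in commpage_asm.s, not here):
	 *
	 *	// kernel: instead of taking the AST now, flag the PFZ exit path
	 *	saved_state->x15 = 1;
	 *	// user, on leaving the PFZ: if x15 was set, promptly re-enter
	 *	// the kernel so the delayed preemption/ASTs can be handled
	 */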
1453	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
1454	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64
1455
1456	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
1457	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
1458	cbz		x0, restore_and_check_ast			// No, deal with other asts
1459
1460	mov		x0, #1
1461	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
1462	mov		x0, x19								// restore x0 to asts
1463	b		no_asts								// pretend we have no asts
1464
1465restore_and_check_ast:
1466	mov		x0, x19								// restore x0
1467	b	user_take_ast							// Service pending asts
1468no_asts:
1469
1470
1471#if PRECISE_USER_KERNEL_TIME
1472	mov		x19, x3						// Preserve thread pointer across function call
1473	PUSH_FRAME
1474	bl		EXT(recount_enter_user)
1475	POP_FRAME_WITHOUT_LR
1476	mov		x3, x19
1477#endif /* PRECISE_USER_KERNEL_TIME */
1478
1479#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
1480	/* Watchtower
1481	 *
1482	 * Here we attempt to enable NEON access for EL0. If the last entry into the
1483	 * kernel from user-space was due to an IRQ, the monitor will have disabled
1484	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
1485	 * check in with the monitor in order to reenable NEON for EL0 in exchange
1486	 * for routing IRQs through the monitor (2). This way the monitor will
1487	 * always 'own' either IRQs or EL0 NEON.
1488	 *
1489	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
1490	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
1491	 * here.
1492	 *
1493	 * EL0 user ________ IRQ                                            ______
1494	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
1495	 * EL3 monitor           \_/                                \___/
1496	 *
1497	 *                       (1)                                 (2)
1498	 */
1499
1500	mov		x0, #(CPACR_FPEN_ENABLE)
1501	msr		CPACR_EL1, x0
1502#endif
1503
1504	/* Establish this thread's debug state as the live state on the selected CPU. */
1505	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
1506	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
1507	ldr		x0, [x3, ACT_DEBUGDATA]
1508	cmp		x0, x1
1509	beq		L_skip_user_set_debug_state			// Skip if the live CPU debug state already matches the thread's
1510
1511
1512	PUSH_FRAME
1513	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
1514	POP_FRAME_WITHOUT_LR
1515	mrs		x3, TPIDR_EL1						// Reload thread pointer
1516	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
1517L_skip_user_set_debug_state:
1518
1519
1520	ldrsh	x0, [x4, CPU_TPIDR_EL0]
1521	msr		TPIDR_EL0, x0
1522
1523
1524	b		exception_return_unint_tpidr_x3
1525
1526exception_return:
1527	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1528exception_return_unint:
1529	mrs		x3, TPIDR_EL1					// Load thread pointer
1530exception_return_unint_tpidr_x3:
1531	mov		sp, x21						// Reload the pcb pointer
1532
1533#if !__ARM_KERNEL_PROTECT__
1534	/*
1535	 * Restore x18 only if the task has the entitlement that allows its
1536	 * use. Such tasks are very few, and can move to something else
1537	 * once we use x18 for something more global.
1538	 *
1539	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
1540	 * that uses x18 as one of the global use cases (and will reset
1541	 * x18 later down below).
1542	 *
1543	 * It's also unconditionally skipped for translated threads,
1544	 * as those are another use case, one where x18 must be preserved.
1545	 */
1546	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
1547	mov		x18, #0
1548	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers
1549
1550exception_return_unint_tpidr_x3_restore_x18:
1551	ldr		x18, [sp, SS64_X18]
1552
1553#else /* !__ARM_KERNEL_PROTECT__ */
1554	/*
1555	 * If we are going to eret to userspace, we must return through the EL0
1556	 * eret mapping.
1557	 */
1558	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
1559	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1
1560
1561	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
1562	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
1563	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
1564	add		x1, x1, Lexception_return_restore_registers@pageoff
1565	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
1566	sub		x1, x1, x0											// Calculate delta
1567	add		x0, x2, x1											// Convert KVA to EL0 vector address
1568	br		x0
1569
1570Lskip_el0_eret_mapping:
1571#endif /* !__ARM_KERNEL_PROTECT__ */
1572
1573Lexception_return_restore_registers:
1574	mov 	x0, sp								// x0 = &pcb
1575	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1576	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, x25, el0_state_allowed=1
1577
1578	msr		ELR_EL1, x1							// Load the return address into ELR
1579	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
1580
1581/* Restore special register state */
1582	ldr		w3, [sp, NS64_FPSR]
1583	ldr		w4, [sp, NS64_FPCR]
1584
1585	msr		FPSR, x3
1586	mrs		x5, FPCR
1587	CMSR FPCR, x5, x4, 1
15881:
1589	mov		x5, #0
1590
1591#if HAS_ARM_FEAT_SME
1592	and		x2, x2, #(PSR64_MODE_EL_MASK)
1593	cmp		x2, #(PSR64_MODE_EL0)
1594	// SPSR_EL1.M != EL0: no SME state to restore
1595	bne		Lno_sme_saved_state
1596
1597	mrs		x3, TPIDR_EL1
1598	add		x3, x3, ACT_UMATRIX_HDR
1599	ldr		x2, [x3]
1600	cbz		x2, Lno_sme_saved_state
1601	AUTDA_DIVERSIFIED x2, address=x3, diversifier=ACT_UMATRIX_HDR_DIVERSIFIER
1602
1603	ldr		x3, [x2, SME_SVCR]
1604	msr		SVCR, x3
1605	// SVCR.SM == 0: restore SVCR only (ZA is handled during context-switch)
1606	tbz		x3, #SVCR_SM_SHIFT, Lno_sme_saved_state
1607
1608	// SVCR.SM == 1: restore SVCR, Z, and P
1609	ldrh	w3, [x2, SME_SVL_B]
1610	add		x2, x2, SME_Z_P_ZA
1611	LOAD_OR_STORE_Z_P_REGISTERS	ldr, svl_b=x3, ss=x2
1612
1613	// The FPSIMD register file acts like a view into the lower 128 bits of
1614	// Z0-Z31.  While there's no harm reading it out during exception entry,
1615	// writing it back would truncate the Z0-Z31 values we just restored.
1616	b		Lskip_restore_neon_saved_state
1617Lno_sme_saved_state:
1618#endif /* HAS_ARM_FEAT_SME */
1619
1620	/* Restore arm_neon_saved_state64 */
1621	ldp		q0, q1, [x0, NS64_Q0]
1622	ldp		q2, q3, [x0, NS64_Q2]
1623	ldp		q4, q5, [x0, NS64_Q4]
1624	ldp		q6, q7, [x0, NS64_Q6]
1625	ldp		q8, q9, [x0, NS64_Q8]
1626	ldp		q10, q11, [x0, NS64_Q10]
1627	ldp		q12, q13, [x0, NS64_Q12]
1628	ldp		q14, q15, [x0, NS64_Q14]
1629	ldp		q16, q17, [x0, NS64_Q16]
1630	ldp		q18, q19, [x0, NS64_Q18]
1631	ldp		q20, q21, [x0, NS64_Q20]
1632	ldp		q22, q23, [x0, NS64_Q22]
1633	ldp		q24, q25, [x0, NS64_Q24]
1634	ldp		q26, q27, [x0, NS64_Q26]
1635	ldp		q28, q29, [x0, NS64_Q28]
1636	ldp		q30, q31, [x0, NS64_Q30]
1637#if HAS_ARM_FEAT_SME
1638Lskip_restore_neon_saved_state:
1639#endif
1640
1641
1642	// If sync_on_cswitch is set and ERET is not a context synchronization
1643	// event (CSE), issue an ISB now. Unconditionally clear the sync_on_cswitch flag.
1644	mrs		x1, TPIDR_EL1
1645	ldr		x1, [x1, ACT_CPUDATAP]
1646
1647	// Redefined for backporting.
1648#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__
1649	ldrb	w2, [x1, CPU_SYNC_ON_CSWITCH]
1650#if ERET_NEEDS_ISB
1651	// Set the bit, but don't sync; it will be synced shortly after this.
1652	orr		x5, x5, x2, lsl #(BIT_ISB_PENDING)
1653#else
1654	cbz		w2, 1f
1655	// Last chance, sync now.
1656	isb		sy
16571:
1658#endif  /* ERET_NEEDS_ISB */
1659#endif  /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__ */
1660	strb	wzr, [x1, CPU_SYNC_ON_CSWITCH]
1661
1662
1663#if ERET_NEEDS_ISB
1664	// Apply any pending isb from earlier.
1665	tbz		x5, #(BIT_ISB_PENDING), Lskip_eret_isb
1666	isb		sy
1667Lskip_eret_isb:
1668#endif /* ERET_NEEDS_ISB */
1669
1670	/* Restore arm_saved_state64 */
1671
1672	// Skip x0, x1 - we're using them
1673	ldp		x2, x3, [x0, SS64_X2]
1674	ldp		x4, x5, [x0, SS64_X4]
1675	ldp		x6, x7, [x0, SS64_X6]
1676	ldp		x8, x9, [x0, SS64_X8]
1677	ldp		x10, x11, [x0, SS64_X10]
1678	ldp		x12, x13, [x0, SS64_X12]
1679	ldp		x14, x15, [x0, SS64_X14]
1680	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1681	// Skip x18 - already restored or trashed above (or used as scratch below, with __ARM_KERNEL_PROTECT__)
1682	ldr		x19, [x0, SS64_X19]
1683	ldp		x20, x21, [x0, SS64_X20]
1684	ldp		x22, x23, [x0, SS64_X22]
1685	ldp		x24, x25, [x0, SS64_X24]
1686	ldp		x26, x27, [x0, SS64_X26]
1687	ldr		x28, [x0, SS64_X28]
1688	ldr		fp, [x0, SS64_FP]
1689	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1690
1691	// Restore stack pointer and our last two GPRs
1692	ldr		x1, [x0, SS64_SP]
1693	mov		sp, x1
1694
1695#if __ARM_KERNEL_PROTECT__
1696	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1697#endif /* __ARM_KERNEL_PROTECT__ */
1698
1699	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1700
1701#if __ARM_KERNEL_PROTECT__
1702	/* If we are going to eret to userspace, we must unmap the kernel. */
1703	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1704
1705	/* Update TCR to unmap the kernel. */
1706	MOV64		x18, TCR_EL1_USER
1707	msr		TCR_EL1, x18
1708
1709	/*
1710	 * On Apple CPUs, the microarchitecture orders TCR writes relative to TTBR
1711	 * writes, so no barrier is needed here; other implementations require an ISB.
1712	 */
1713#if !defined(APPLE_ARM64_ARCH_FAMILY)
1714	isb		sy
1715#endif
1716
1717	/* Switch to the user ASID (low bit clear) for the task. */
1718	mrs		x18, TTBR0_EL1
1719	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
1720	msr		TTBR0_EL1, x18
1721	mov		x18, #0
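	/* Zero x18 so the TTBR0 value just written doesn't leak to EL0. */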
1722
1723#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING)
1724	isb		sy
1725#endif /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) */
1726
1727Lskip_ttbr1_switch:
1728#endif /* __ARM_KERNEL_PROTECT__ */
1729
1730	ERET_NO_STRAIGHT_LINE_SPECULATION
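	/*
	 * The macro above guards the return against straight-line speculation;
	 * it typically expands to an eret followed by a barrier sequence that
	 * can never execute architecturally.
	 */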
1731
1732user_take_ast:
1733	PUSH_FRAME
1734	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
1735	POP_FRAME_WITHOUT_LR
1736	b		check_user_asts								// Now try again
1737
1738	.text
1739	.align 2
1740preempt_underflow:
1741	mrs		x0, TPIDR_EL1
1742	str		x0, [sp, #-16]!						// We'll print thread pointer
1743	adr		x0, L_underflow_str					// Format string
1744	CALL_EXTERN panic							// Game over
1745
1746L_underflow_str:
1747	.asciz "Preemption count negative on thread %p"
1748.align 2
1749
1750#if MACH_ASSERT
1751	.text
1752	.align 2
1753preempt_count_notzero:
1754	mrs		x0, TPIDR_EL1
1755	str		x0, [sp, #-16]!						// We'll print thread pointer
1756	ldr		w0, [x0, ACT_PREEMPT_CNT]
1757	str		w0, [sp, #8]
1758	adr		x0, L_preempt_count_notzero_str				// Format string
1759	CALL_EXTERN panic							// Game over
1760
1761L_preempt_count_notzero_str:
1762	.asciz "preemption count not 0 on thread %p (%u)"
1763#endif /* MACH_ASSERT */
1764
1765#if __ARM_KERNEL_PROTECT__
1766	/*
1767	 * This symbol denotes the end of the exception vector/eret range; we page
1768	 * align it so that we can avoid mapping other text in the EL0 exception
1769	 * vector mapping.
1770	 */
1771	.text
1772	.align 14
1773	.globl EXT(ExceptionVectorsEnd)
1774LEXT(ExceptionVectorsEnd)
1775#endif /* __ARM_KERNEL_PROTECT__ */
1776
1777#if XNU_MONITOR && !CONFIG_SPTM
1778
1779/*
1780 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1781 * mostly concerned with setting up state for the normal fleh code.
1782 */
1783	.text
1784	.align 2
1785fleh_synchronous_from_ppl:
1786	ARM64_JUMP_TARGET
1787	/* Save x0. */
1788	mov		x15, x0
1789
1790	/* Grab the ESR. */
1791	mrs		x1, ESR_EL1							// Get the exception syndrome
1792
1793	/* If the stack pointer is corrupt, it will manifest either as a data abort
1794	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1795	 * these quickly by testing bit 5 of the exception class.
1796	 */
1797	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
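	/* e.g. EC 0x25 = 0b100101 and EC 0x26 = 0b100110 both have bit 5 set. */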
1798	mrs		x0, SP_EL0							// Get SP_EL0
1799
1800	/* Perform high level checks for stack corruption. */
1801	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
1802	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1803	cmp		x1, x2								// If we have a stack alignment exception
1804	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
1805	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1806	cmp		x1, x2								// If we have a data abort, we need to
1807	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer
1808
1809Ltest_pstack:
1810	/* Bounds check the PPL stack. */
1811	adrp	x10, EXT(pmap_stacks_start)@page
1812	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1813	adrp	x11, EXT(pmap_stacks_end)@page
1814	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1815	cmp		x0, x10
1816	b.lo	Lcorrupt_ppl_stack
1817	cmp		x0, x11
1818	b.hi	Lcorrupt_ppl_stack
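	/*
	 * Equivalent bounds check, in illustrative C:
	 *   if (sp < pmap_stacks_start || sp > pmap_stacks_end)
	 *       goto corrupt_ppl_stack;
	 */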
1819
1820Lvalid_ppl_stack:
1821	/* Restore x0. */
1822	mov		x0, x15
1823
1824	/* Switch back to the kernel stack. */
1825	msr		SPSel, #0
1826	GET_PMAP_CPU_DATA x5, x6, x7
1827	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1828	mov		sp, x6
1829
1830	/* Hand off to the synchronous exception handler. */
1831	b		EXT(fleh_synchronous)
1832
1833Lcorrupt_ppl_stack:
1834	/* Restore x0. */
1835	mov		x0, x15
1836
1837	/* Hand off to the invalid stack handler. */
1838	b		fleh_invalid_stack
1839
1840fleh_fiq_from_ppl:
1841	ARM64_JUMP_TARGET
1842	SWITCH_TO_INT_STACK	tmp=x25
1843	b		EXT(fleh_fiq)
1844
1845fleh_irq_from_ppl:
1846	ARM64_JUMP_TARGET
1847	SWITCH_TO_INT_STACK	tmp=x25
1848	b		EXT(fleh_irq)
1849
1850fleh_serror_from_ppl:
1851	ARM64_JUMP_TARGET
1852	GET_PMAP_CPU_DATA x5, x6, x7
1853	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1854	mov		sp, x6
1855	b		EXT(fleh_serror)
1856
1857
1858
1859
1860	// x15: ppl call number
1861	// w10: ppl_state
1862	// x20: gxf_enter caller's DAIF
1863	.globl EXT(ppl_trampoline_start)
1864LEXT(ppl_trampoline_start)
1865
1866
1867#error "XPRR configuration error"
1868	cmp		x14, x21
1869	b.ne	Lppl_fail_dispatch
1870
1871	/* Verify the request ID. */
1872	cmp		x15, PMAP_COUNT
1873	b.hs	Lppl_fail_dispatch
1874
1875	GET_PMAP_CPU_DATA	x12, x13, x14
1876
1877	/* Mark this CPU as being in the PPL. */
1878	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1879
1880	cmp		w9, #PPL_STATE_KERNEL
1881	b.eq		Lppl_mark_cpu_as_dispatching
1882
1883	/* Check to see if we are trying to trap from within the PPL. */
1884	cmp		w9, #PPL_STATE_DISPATCH
1885	b.eq		Lppl_fail_dispatch_ppl
1886
1887
1888	/* Ensure that we are returning from an exception. */
1889	cmp		w9, #PPL_STATE_EXCEPTION
1890	b.ne		Lppl_fail_dispatch
1891
1892	// w10 (ppl_state) was set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL before
1893	// trampolining back into the PPL.
1894	cmp		w10, #PPL_STATE_EXCEPTION
1895	b.ne		Lppl_fail_dispatch
1896
1897	/* This is an exception return; set the CPU to the dispatching state. */
1898	mov		w9, #PPL_STATE_DISPATCH
1899	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1900
1901	/* Find the save area, and return to the saved PPL context. */
1902	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1903	mov		sp, x0
1904	b		EXT(return_to_ppl)
1905
1906Lppl_mark_cpu_as_dispatching:
1907	cmp		w10, #PPL_STATE_KERNEL
1908	b.ne		Lppl_fail_dispatch
1909
1910	/* Mark the CPU as dispatching. */
1911	mov		w13, #PPL_STATE_DISPATCH
1912	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1913
1914	/* Switch to the regular PPL stack. */
1915	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1916	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1917
1918	// SP0 is thread stack here
1919	mov		x21, sp
1920	// SP0 is now PPL stack
1921	mov		sp, x9
1922
1923	/* Save the old stack pointer off in case we need it. */
1924	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1925
1926	/* Get the handler for the request */
1927	adrp	x9, EXT(ppl_handler_table)@page
1928	add		x9, x9, EXT(ppl_handler_table)@pageoff
1929	add		x9, x9, x15, lsl #3
1930	ldr		x10, [x9]
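	/*
	 * The lookup above is, roughly:
	 *   handler = ppl_handler_table[x15];   // 8-byte entries, hence lsl #3
	 */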
1931
1932	/* Branch to the code that will invoke the PPL request. */
1933	b		EXT(ppl_dispatch)
1934
1935Lppl_fail_dispatch_ppl:
1936	/* Switch back to the kernel stack. */
1937	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1938	mov		sp, x10
1939
1940Lppl_fail_dispatch:
1941	/* Indicate that we failed. */
1942	mov		x15, #PPL_EXIT_BAD_CALL
1943
1944	/* Move the DAIF bits into the expected register. */
1945	mov		x10, x20
1946
1947	/* Return to kernel mode. */
1948	b		ppl_return_to_kernel_mode
1949
1950Lppl_dispatch_exit:
1951
1952	/* Indicate that we are cleanly exiting the PPL. */
1953	mov		x15, #PPL_EXIT_DISPATCH
1954
1955	/* Switch back to the original (kernel thread) stack. */
1956	mov		sp, x21
1957
1958	/* Move the saved DAIF bits. */
1959	mov		x10, x20
1960
1961	/* Clear the in-flight pmap pointer */
1962	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
1963	stlr		xzr, [x13]
1964
1965	/* Clear the old stack pointer. */
1966	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1967
1968	/*
1969	 * Mark the CPU as no longer being in the PPL.  We spin if our state
1970	 * machine is broken.
1971	 */
1972	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1973	cmp		w9, #PPL_STATE_DISPATCH
1974	b.ne		.
1975	mov		w9, #PPL_STATE_KERNEL
1976	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1977
1978	/* Return to the kernel. */
1979	b ppl_return_to_kernel_mode
1980
1981
1982
1983	.text
1984ppl_exit:
1985	ARM64_PROLOG
1986	/*
1987	 * If we are dealing with an exception, hand off to the first level
1988	 * exception handler.
1989	 */
1990	cmp		x15, #PPL_EXIT_EXCEPTION
1991	b.eq	Ljump_to_fleh_handler
1992
1993	/* If this was a panic call from the PPL, reinvoke panic. */
1994	cmp		x15, #PPL_EXIT_PANIC_CALL
1995	b.eq	Ljump_to_panic_trap_to_debugger
1996
1997	/*
1998	 * Stash off the original DAIF in the high bits of the exit code register.
1999	 * We could keep this in a dedicated register, but that would require us to copy it to
2000	 * an additional callee-save register below (e.g. x22), which in turn would require that
2001	 * register to be saved/restored at PPL entry/exit.
2002	 */
2003	add		x15, x15, x10, lsl #32
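	/* i.e., roughly: x15 = (original_daif << 32) | exit_code; unpacked below. */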
2004
2005	/* Load the preemption count. */
2006	mrs		x10, TPIDR_EL1
2007	ldr		w12, [x10, ACT_PREEMPT_CNT]
2008
2009	/* Detect underflow */
2010	cbnz	w12, Lno_preempt_underflow
2011	b		preempt_underflow
2012Lno_preempt_underflow:
2013
2014	/* Lower the preemption count. */
2015	sub		w12, w12, #1
2016
2017#if SCHED_HYGIENE_DEBUG
2018	/* Collect preemption disable measurement if necessary. */
2019
2020	/*
2021	 * Only collect measurement if this reenabled preemption,
2022	 * and SCHED_HYGIENE_MARKER is set.
2023	 */
2024	mov		x20, #SCHED_HYGIENE_MARKER
2025	cmp		w12, w20
2026	b.ne	Lskip_collect_measurement
2027
2028	/* Stash our return value and return reason. */
2029	mov		x20, x0
2030	mov		x21, x15
2031
2032	/* Collect measurement. */
2033	bl		EXT(_collect_preemption_disable_measurement)
2034
2035	/* Restore the return value and the return reason. */
2036	mov		x0, x20
2037	mov		x15, x21
2038	/* ... and w12, which is now 0. */
2039	mov		w12, #0
2040
2041	/* Restore the thread pointer into x10. */
2042	mrs		x10, TPIDR_EL1
2043
2044Lskip_collect_measurement:
2045#endif /* SCHED_HYGIENE_DEBUG */
2046
2047	/* Save the lowered preemption count. */
2048	str		w12, [x10, ACT_PREEMPT_CNT]
2049
2050	/* Skip ASTs if the preemption count is not zero. */
2051	cbnz	x12, Lppl_skip_ast_taken
2052
2053	/*
2054	 * Skip the AST check if interrupts were originally disabled.
2055	 * The original DAIF state prior to PPL entry is stored in the upper
2056	 * 32 bits of x15.
2057	 */
2058	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken
2059
2060	/* If there is no urgent AST, skip the AST. */
2061	ldr		x12, [x10, ACT_CPUDATAP]
2062	ldr		w14, [x12, CPU_PENDING_AST]
2063	tst		w14, AST_URGENT
2064	b.eq	Lppl_skip_ast_taken
2065
2066	/* Stash our return value and return reason. */
2067	mov		x20, x0
2068	mov		x21, x15
2069
2070	/* Handle the AST. */
2071	bl		EXT(ast_taken_kernel)
2072
2073	/* Restore the return value and the return reason. */
2074	mov		x15, x21
2075	mov		x0, x20
2076
2077Lppl_skip_ast_taken:
2078
2079	/* Extract caller DAIF from high-order bits of exit code */
2080	ubfx	x10, x15, #32, #32
2081	bfc		x15, #32, #32
2082	msr		DAIF, x10
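	/*
	 * ubfx/bfc undo the packing performed at entry:
	 *   x10 = x15 >> 32;           // caller's DAIF
	 *   x15 &= 0xffffffff;         // bare exit code
	 */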
2083
2084	/* Pop the stack frame. */
2085	ldp		x29, x30, [sp, #0x10]
2086	ldp		x20, x21, [sp], #0x20
2087
2088	/* Check to see if this was a bad request. */
2089	cmp		x15, #PPL_EXIT_BAD_CALL
2090	b.eq	Lppl_bad_call
2091
2092	/* Return. */
2093	ARM64_STACK_EPILOG
2094
2095	.align 2
2096Ljump_to_fleh_handler:
2097	br	x25
2098
2099	.align 2
2100Ljump_to_panic_trap_to_debugger:
2101	b		EXT(panic_trap_to_debugger)
2102
2103Lppl_bad_call:
2104	/* Panic. */
2105	adrp	x0, Lppl_bad_call_panic_str@page
2106	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2107	b		EXT(panic)
2108
2109	.text
2110	.align 2
2111	.globl EXT(ppl_dispatch)
2112LEXT(ppl_dispatch)
2113	/*
2114	 * Save a couple of important registers (implementation detail; x12 has
2115	 * the PPL per-CPU data address; x13 is not actually interesting).
2116	 */
2117	stp		x12, x13, [sp, #-0x10]!
2118
2119	/*
2120	 * Restore the original AIF state, forcing D set to mask debug exceptions
2121	 * while PPL code runs.
2122	 */
2123	orr		x8, x20, DAIF_DEBUGF
2124	msr		DAIF, x8
2125
2126	/*
2127	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
2128	 * but the exception vectors will deal with this properly.
2129	 */
2130
2131	/* Invoke the PPL method. */
2132#ifdef HAS_APPLE_PAC
2133	blraa		x10, x9
2134#else
2135	blr		x10
2136#endif
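	/*
	 * With pointer auth, blraa authenticates x10 with the A instruction key,
	 * using the table slot address (x9) as the modifier, before branching;
	 * a tampered handler pointer fails authentication instead of being
	 * called.
	 */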
2137
2138	/* Disable DAIF. */
2139	msr		DAIFSet, #(DAIFSC_ALL)
2140
2141	/* Restore those important registers. */
2142	ldp		x12, x13, [sp], #0x10
2143
2144	/* Mark this as a regular return, and hand off to the return path. */
2145	b		Lppl_dispatch_exit
2146
2147	.text
2148	.align 2
2149	.globl EXT(ppl_bootstrap_dispatch)
2150LEXT(ppl_bootstrap_dispatch)
2151	/* Verify the PPL request. */
2152	cmp		x15, PMAP_COUNT
2153	b.hs	Lppl_fail_bootstrap_dispatch
2154
2155	/* Get the requested PPL routine. */
2156	adrp	x9, EXT(ppl_handler_table)@page
2157	add		x9, x9, EXT(ppl_handler_table)@pageoff
2158	add		x9, x9, x15, lsl #3
2159	ldr		x10, [x9]
2160
2161	/* Invoke the requested PPL routine. */
2162#ifdef HAS_APPLE_PAC
2163	blraa		x10, x9
2164#else
2165	blr		x10
2166#endif
2167	LOAD_PMAP_CPU_DATA	x9, x10, x11
2168
2169	/* Clear the in-flight pmap pointer */
2170	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
2171	stlr		xzr, [x9]
2172
2173	/* Stash off the return value */
2174	mov		x20, x0
2175	/* Drop the preemption count */
2176	bl		EXT(_enable_preemption)
2177	mov		x0, x20
2178
2179	/* Pop the stack frame. */
2180	ldp		x29, x30, [sp, #0x10]
2181	ldp		x20, x21, [sp], #0x20
2182#if __has_feature(ptrauth_returns)
2183	retab
2184#else
2185	ret
2186#endif
2187
2188Lppl_fail_bootstrap_dispatch:
2189	/* Pop our stack frame and panic. */
2190	ldp		x29, x30, [sp, #0x10]
2191	ldp		x20, x21, [sp], #0x20
2192#if __has_feature(ptrauth_returns)
2193	autibsp
2194#endif
2195	adrp	x0, Lppl_bad_call_panic_str@page
2196	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2197	b		EXT(panic)
2198
2199	.text
2200	.align 2
2201	.globl EXT(ml_panic_trap_to_debugger)
2202LEXT(ml_panic_trap_to_debugger)
2203	ARM64_PROLOG
2204	mrs		x10, DAIF
2205	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
2206
2207	adrp		x12, EXT(pmap_ppl_locked_down)@page
2208	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
2209	cbz		w12, Lnot_in_ppl_dispatch
2210
2211	LOAD_PMAP_CPU_DATA	x11, x12, x13
2212
2213	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
2214	cmp		w12, #PPL_STATE_DISPATCH
2215	b.ne		Lnot_in_ppl_dispatch
2216
2217	/* Indicate (for the PPL->kernel transition) that we are panicking. */
2218	mov		x15, #PPL_EXIT_PANIC_CALL
2219
2220	/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
2221	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
2222	mov		sp, x12
2223
2224	mrs		x10, DAIF
2225	mov		w13, #PPL_STATE_PANIC
2226	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
2227
2228	/**
2229	 * When we panic in PPL, we might have un-synced PTE updates. Shoot down
2230	 * all the TLB entries.
2231	 *
2232	 * A check must be done here against CurrentEL because the alle1is flavor
2233	 * of tlbi is not available to EL1, but the vmalle1is flavor is. When PPL
2234	 * runs at GL2, we can issue an alle2is and an alle1is tlbi to kill all
2235	 * the TLB entries. When PPL runs at GL1, as a guest or on a pre-H13
2236	 * platform, we issue a vmalle1is tlbi instead.
2237	 *
2238	 * Note that we only do this after passing the `PPL_STATE_DISPATCH` check
2239	 * because if we did this for every panic, including the ones triggered
2240	 * by fabric problems, we may get stuck at the DSB below and trigger an AP
2241	 * watchdog.
2242	 */
2243	mrs		x12, CurrentEL
2244	cmp		x12, PSR64_MODE_EL2
2245	b.ne		Lnot_in_gl2
2246	tlbi		alle2is
2247	tlbi		alle1is
2248	b		Ltlb_invalidate_all_done
2249Lnot_in_gl2:
2250	tlbi		vmalle1is
2251Ltlb_invalidate_all_done:
2252	dsb		ish
2253	isb
2254
2255	/* Now we are ready to exit the PPL. */
2256	b		ppl_return_to_kernel_mode
2257Lnot_in_ppl_dispatch:
2258	msr		DAIF, x10
2259	ret
2260
2261	.data
2262Lppl_bad_call_panic_str:
2263	.asciz "ppl_dispatch: failed due to bad arguments/state"
2264#else /* XNU_MONITOR && !CONFIG_SPTM */
2265	.text
2266	.align 2
2267	.globl EXT(ml_panic_trap_to_debugger)
2268LEXT(ml_panic_trap_to_debugger)
2269	ARM64_PROLOG
2270	ret
2271#endif /* XNU_MONITOR && !CONFIG_SPTM */
2272
2273#if CONFIG_SPTM
2274	.text
2275	.align 2
2276
2277	.globl EXT(_sptm_pre_entry_hook)
2278LEXT(_sptm_pre_entry_hook)
2279	/* Push a frame. */
2280	ARM64_STACK_PROLOG
2281	PUSH_FRAME
2282	stp		x20, x21, [sp, #-0x10]!
2283
2284	/* Save arguments to SPTM function and SPTM function id. */
2285	mov		x20, x16
2286	stp		x0, x1, [sp, #-0x40]!
2287	stp		x2, x3, [sp, #0x10]
2288	stp		x4, x5, [sp, #0x20]
2289	stp		x6, x7, [sp, #0x30]
2290
2291	/* Increase the preemption count. */
2292	mrs		x9, TPIDR_EL1
2293	cbz		x9, Lskip_preemption_check_sptmhook
2294	ldr		w10, [x9, ACT_PREEMPT_CNT]
2295	add		w10, w10, #1
2296	str		w10, [x9, ACT_PREEMPT_CNT]
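	/* Roughly: thread preemption count++; dropped again in the post-exit hook. */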
2297
2298	/* Update SPTM trace state to see if trace entries were generated post-exit */
2299
2300#if SCHED_HYGIENE_DEBUG
2301	/* Prepare preemption disable measurement, if necessary. */
2302
2303	/* Only prepare if we actually disabled preemption. */
2304	cmp		w10, #1
2305	b.ne	Lskip_prepare_measurement_sptmhook
2306
2307	/* Don't prepare if measuring is off completely. */
2308	adrp	x10, _sched_preemption_disable_debug_mode@page
2309	add		x10, x10, _sched_preemption_disable_debug_mode@pageoff
2310	ldr		w10, [x10]
2311	cmp		w10, #0
2312	b.eq	Lskip_prepare_measurement_sptmhook
2313
2314	/* Call prepare function with thread pointer as first arg. */
2315	bl		EXT(_prepare_preemption_disable_measurement)
2316
2317Lskip_prepare_measurement_sptmhook:
2318#endif /* SCHED_HYGIENE_DEBUG */
2319Lskip_preemption_check_sptmhook:
2320	/* assert we're not calling from guarded mode */
2321	mrs		x14, CurrentG
2322	cmp		x14, #0
2323	b.ne	.
2324
2325	/* Restore arguments to SPTM function and SPTM function id. */
2326	ldp		x6, x7, [sp, #0x30]
2327	ldp		x4, x5, [sp, #0x20]
2328	ldp		x2, x3, [sp, #0x10]
2329	ldp		x0, x1, [sp]
2330	add		sp, sp, #0x40
2331	mov		x16, x20
2332
2333	ldp		x20, x21, [sp], #0x10
2334	POP_FRAME
2335	ARM64_STACK_EPILOG EXT(_sptm_pre_entry_hook)
2336
2337	.align 2
2338	.globl EXT(_sptm_post_exit_hook)
2339LEXT(_sptm_post_exit_hook)
2340	ARM64_STACK_PROLOG
2341	PUSH_FRAME
2342	stp		x20, x21, [sp, #-0x10]!
2343
2344	/* Save SPTM return value(s) */
2345	stp		x0, x1, [sp, #-0x40]!
2346	stp		x2, x3, [sp, #0x10]
2347	stp		x4, x5, [sp, #0x20]
2348	stp		x6, x7, [sp, #0x30]
2349
2350
2351	/* Load the preemption count. */
2352	mrs		x0, TPIDR_EL1
2353	cbz		x0, Lsptm_skip_ast_taken_sptmhook
2354	ldr		w12, [x0, ACT_PREEMPT_CNT]
2355
2356	/* Detect underflow */
2357	cbnz	w12, Lno_preempt_underflow_sptmhook
2358	/* No need to clean up the stack, as preempt_underflow calls panic */
2359	b		preempt_underflow
2360Lno_preempt_underflow_sptmhook:
2361
2362	/* Lower the preemption count. */
2363	sub		w12, w12, #1
2364
2365#if SCHED_HYGIENE_DEBUG
2366	/* Collect preemption disable measurement if necessary. */
2367
2368	/*
2369	 * Only collect measurement if this reenabled preemption,
2370	 * and SCHED_HYGIENE_MARKER is set.
2371	 */
2372	mov		x20, #SCHED_HYGIENE_MARKER
2373	cmp		w12, w20
2374	b.ne	Lskip_collect_measurement_sptmhook
2375
2376	/* Collect measurement. */
2377	bl		EXT(_collect_preemption_disable_measurement)
2378
2379	/* Restore w12, which is now 0. */
2380	mov		w12, #0
2381
2382	/* Restore x0 as the thread pointer */
2383	mrs		x0, TPIDR_EL1
2384
2385Lskip_collect_measurement_sptmhook:
2386#endif /* SCHED_HYGIENE_DEBUG */
2387
2388	/* Save the lowered preemption count. */
2389	str		w12, [x0, ACT_PREEMPT_CNT]
2390
2391	/* Skip ASTs if the preemption count is not zero. */
2392	cbnz	w12, Lsptm_skip_ast_taken_sptmhook
2393
2394	/**
2395	 * Skip the AST check if interrupts were originally disabled. The original
2396	 * DAIF value needs to be placed into a callee-saved register so that the
2397	 * value is preserved across the ast_taken_kernel() call.
2398	 */
2399	mrs		x20, DAIF
2400	tbnz	x20, #(DAIF_IRQF_SHIFT), Lsptm_skip_ast_taken_sptmhook
2401
2402	/* If there is no urgent AST, skip the AST. */
2403	ldr		x12, [x0, ACT_CPUDATAP]
2404	ldr		w14, [x12, CPU_PENDING_AST]
2405	tst		w14, AST_URGENT
2406	b.eq	Lsptm_skip_ast_taken_sptmhook
2407
2408	/* Handle the AST. This call requires interrupts to be disabled. */
2409	msr		DAIFSet, #(DAIFSC_ALL)
2410	bl		EXT(ast_taken_kernel)
2411	msr		DAIF, x20
2412
2413Lsptm_skip_ast_taken_sptmhook:
2414
2415	/* Restore SPTM return value(s) */
2416	ldp		x6, x7, [sp, #0x30]
2417	ldp		x4, x5, [sp, #0x20]
2418	ldp		x2, x3, [sp, #0x10]
2419	ldp		x0, x1, [sp]
2420	add		sp, sp, #0x40
2421
2422	/* Return. */
2423	ldp		x20, x21, [sp], #0x10
2424	POP_FRAME
2425	ARM64_STACK_EPILOG EXT(_sptm_post_exit_hook)
2426#endif /* CONFIG_SPTM */
2427
2428#if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
2429/**
2430 * Record debug data for a panic lockdown event
2431 * Clobbers x0, x1, x2
2432 */
2433	.text
2434	.align 2
2435	.global EXT(panic_lockdown_record_debug_data)
2436LEXT(panic_lockdown_record_debug_data)
2437	adrp	x0, EXT(debug_panic_lockdown_initiator_state)@page
2438	add		x0, x0, EXT(debug_panic_lockdown_initiator_state)@pageoff
2439
2440	/*
2441	 * To synchronize accesses to the debug state, we use the initiator PC as a
2442	 * "lock". It starts out at zero and we try to swap in our initiator's PC
2443	 * (which is trivially non-zero) to acquire the debug state and become the
2444	 * initiator of record.
2445	 *
2446	 * Note that other CPUs which are not the initiator of record may still
2447	 * initiate panic lockdown (potentially before the initiator of record does
2448	 * so) and so this debug data should only be used as a hint for the
2449	 * initiating CPU rather than a guarantee of which CPU initiated lockdown
2450	 * first.
2451	 */
2452	mov		x1, #0
2453	add		x2, x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_PC
2454	cas		x1, lr, [x2]
2455	/* If there's a non-zero value there already, we aren't the first. Skip. */
2456	cbnz	x1, Lpanic_lockdown_record_debug_data_done
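	/*
	 * The cas above amounts to (illustrative C):
	 *   old = state->initiator_pc;
	 *   if (old == 0) state->initiator_pc = lr;   // done atomically
	 *   if (old != 0) goto done;    // another CPU already owns the record
	 */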
2457
2458	/*
2459	 * We're the first and have exclusive access to the debug structure!
2460	 * Record all our data.
2461	 */
2462	mov		x1, sp
2463	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_SP]
2464
2465	mrs		x1, TPIDR_EL1
2466	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_TPIDR]
2467
2468	mrs		x1, MPIDR_EL1
2469	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_MPIDR]
2470
2471	mrs		x1, ESR_EL1
2472	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ESR]
2473
2474	mrs		x1, ELR_EL1
2475	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ELR]
2476
2477	mrs		x1, FAR_EL1
2478	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_FAR]
2479
2480	/* Sync and then read the timer */
2481	dsb		sy
2482	isb
2483	mrs		x1, CNTVCT_EL0
2484	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_TIMESTAMP]
2485
2486Lpanic_lockdown_record_debug_data_done:
2487	ret
2488#endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
2489
2490/* ARM64_TODO Is globals_asm.h needed? */
2491//#include	"globals_asm.h"
2492
2493/* vim: set ts=4: */
2494