xref: /xnu-12377.81.4/osfmk/arm64/locore.s (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_machdep.h>
31#include <arm64/machine_routines_asm.h>
32#include <arm64/proc_reg.h>
33#include <pexpert/arm64/board_config.h>
34#include <mach/exception_types.h>
35#include <mach_kdp.h>
36#include <config_dtrace.h>
37#include "assym.s"
38#include <arm64/exception_asm.h>
39#include "dwarf_unwind.h"
40
41#if __ARM_KERNEL_PROTECT__
42#include <arm/pmap.h>
43#endif
44
45// If __ARM_KERNEL_PROTECT__, eret is preceded by an ISB before returning to userspace.
46// Otherwise, use the BIT_ISB_PENDING flag to track whether an ISB must be issued before eret.
47#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__
48#define ERET_NEEDS_ISB 1
49#define BIT_ISB_PENDING 0
50#endif /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__ */
51
52#if XNU_MONITOR && !CONFIG_SPTM
53/*
54 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
55 *
56 * Checks if an exception was taken from the PPL, and if so, trampolines back
57 * into the PPL.
58 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
59 *         exception was taken while in the PPL.
60 */
61.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
62	cmp		x26, xzr
63	b.eq		1f
64
65	/* Return to the PPL. */
66	mov		x15, #0
67	mov		w10, #PPL_STATE_EXCEPTION
68#error "XPRR configuration error"
691:
70.endmacro
71
72
73#endif /* XNU_MONITOR && !CONFIG_SPTM */
74
75#if CONFIG_SPTM
76#include <sptm/sptm_xnu.h>
77#include <sptm/sptm_common.h>
78/*
79 * Panic lockdown is a security enhancement which makes certain types of
80 * exceptions (generally, PAC failures and sync exceptions taken with async
81 * exceptions masked) and panics fatal against attackers with kernel R/W. It
82 * does this through a trapdoor panic bit protected by the SPTM.
83 * When this bit is set, TXM will refuse to authorize new code mappings which,
84 * ideally, renders the system unusable even if the attacker gains control over
85 * XNU. Additionally, when this bit is set XNU will refuse to handle any sync
86 * exceptions originating from user space. This makes implementing further stages
87 * of an exploit challenging as it prevents user space from driving the kernel.
88 */
89
90/*
91 * Inform the SPTM that XNU has panicked (or, rather, must panic). This is provided as a
92 * macro rather than a function since it's just one instruction on release and
93 * it avoids the need to spill a return address unless the macro caller
94 * explicitly needs to preserve LR.
95 *
96 * On CONFIG_XNUPOST, this macro returns 1 in x0 if a simulated lockdown
97 * was performed, 0 otherwise.
98 *
99 * This macro preserves callee saved registers but clobbers all others.
100 */
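/*
 * Typical call shape (illustrative only; it mirrors the use at
 * el1_sp1_synchronous_vector_long_spill_failed later in this file):
 *
 *     BEGIN_PANIC_LOCKDOWN
 * #if CONFIG_XNUPOST
 *     cbz   x0, 0f        // x0 == 1 only for a simulated (test) lockdown
 *     ...                 // hand control back to the test's fault handler
 * 0:
 * #endif
 */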
101.macro BEGIN_PANIC_LOCKDOWN unused
102#if DEVELOPMENT || DEBUG
103	/*
104	 * Forcefully clobber all caller saved GPRs on DEBUG so we don't
105	 * accidentally violate our contract with SPTM.
106	 */
107	mov		x0, #0
108	mov		x1, #0
109	mov		x2, #0
110	mov		x3, #0
111	mov		x4, #0
112	mov		x5, #0
113	mov		x6, #0
114	mov		x7, #0
115	mov		x8, #0
116	mov		x9, #0
117	mov		x10, #0
118	mov		x11, #0
119	mov		x12, #0
120	mov		x13, #0
121	mov		x14, #0
122	mov		x15, #0
123	mov		x16, #0
124	mov		x17, #0
125	mov		x18, #0
126
127	/* Attempt to record the debug trace */
128	bl		EXT(panic_lockdown_record_debug_data)
129
130#endif /* DEVELOPMENT || DEBUG */
131#if CONFIG_XNUPOST
132	mrs		x0, TPIDR_EL1
133	/*
134	 * If hitting this with a null TPIDR, it's likely that this was an unexpected
135	 * exception in early boot rather than an expected one as a part of a test.
136	 * Trigger lockdown.
137	 */
138	cbz		x0, Lbegin_panic_lockdown_real_\@
139	ldr		x1, [x0, TH_EXPECTED_FAULT_HANDLER]
140	/* Is a fault handler installed? */
141	cbz 	x1, Lbegin_panic_lockdown_real_\@
142
143	/* Do the VA bits of ELR match the expected fault PC? */
144	ldr		x1, [x0, TH_EXPECTED_FAULT_PC]
145	mrs		x2, ELR_EL1
146	mov		x3, #((1 << (64 - T1SZ_BOOT - 1)) - 1)
147	and		x4, x1, x3
148	and		x5, x2, x3
149	cmp		x4, x5
150	b.eq	Lbegin_panic_lockdown_simulated_\@
151	/* If we had an expected PC but didn't hit it, fail out */
152	cbnz	x1, Lbegin_panic_lockdown_real_\@
153
154	/* Alternatively, do the FAR VA bits match the expected fault address? */
155	ldr		x1, [x0, TH_EXPECTED_FAULT_ADDR]
156	mrs		x2, FAR_EL1
157	and		x4, x1, x3
158	and		x5, x2, x3
159	cmp		x4, x5
160	b.eq	Lbegin_panic_lockdown_simulated_\@
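	/*
	 * Net effect of the checks above, as an illustrative C sketch (the th_*
	 * names are stand-ins for the fields behind the TH_EXPECTED_FAULT_*
	 * assym offsets; mask keeps only the VA bits compared here):
	 *
	 *   mask = (1ULL << (64 - T1SZ_BOOT - 1)) - 1;
	 *   if (!th || !th->expected_fault_handler)                 goto real_lockdown;
	 *   if ((elr & mask) == (th->expected_fault_pc & mask))     goto simulated;
	 *   if (th->expected_fault_pc != 0)                         goto real_lockdown;
	 *   if ((far & mask) == (th->expected_fault_addr & mask))   goto simulated;
	 *   goto real_lockdown;
	 */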
161
162Lbegin_panic_lockdown_real_\@:
163#endif /* CONFIG_XNUPOST */
164	/*
165	 * The sptm_xnu_panic_begin routine is guaranteed to lead to the panic bit
166	 * being set.
167	 */
168	bl EXT(sptm_xnu_panic_begin)
169#if CONFIG_XNUPOST
170	mov		x0, #0 // not a simulated lockdown
171	b		Lbegin_panic_lockdown_continue_\@
172Lbegin_panic_lockdown_simulated_\@:
173	/*
174	 * We hit lockdown with a matching exception handler installed.
175	 * Since this is an expected test exception, skip setting the panic bit
176	 * (since this will kill the system) and instead set a bit in the test
177	 * handler.
178	 */
179	mov		x0, #1 // this is a simulated lockdown!
180	adrp	x1, EXT(xnu_post_panic_lockdown_did_fire)@page
181	strb	w0, [x1, EXT(xnu_post_panic_lockdown_did_fire)@pageoff]
182	mov		lr, xzr // trash LR to ensure callers don't rely on it
183Lbegin_panic_lockdown_continue_\@:
184#endif /* CONFIG_XNUPOST */
185.endmacro
186#endif /* CONFIG_SPTM */
187
188/*
189 * MAP_KERNEL
190 *
191 * Restores the kernel EL1 mappings, if necessary.
192 *
193 * This may mutate x18.
194 */
195.macro MAP_KERNEL
196#if __ARM_KERNEL_PROTECT__
197	/* Switch to the kernel ASID (low bit set) for the task. */
198	mrs		x18, TTBR0_EL1
199	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
200	msr		TTBR0_EL1, x18
201
202	/*
203	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
204	 * to the TTBRs and writes to the TCR should be ensured by the
205	 * microarchitecture.
206	 */
207#if !defined(APPLE_ARM64_ARCH_FAMILY)
208	isb		sy
209#endif
210
211	/*
212	 * Update the TCR to map the kernel now that we are using the kernel
213	 * ASID.
214	 */
215	MOV64		x18, TCR_EL1_BOOT
216	msr		TCR_EL1, x18
217	isb		sy
218#endif /* __ARM_KERNEL_PROTECT__ */
219.endmacro
220
221/*
222 * BRANCH_TO_KVA_VECTOR
223 *
224 * Branches to the requested long exception vector in the kernelcache.
225 *   arg0 - The label to branch to
226 *   arg1 - The index of the label in exc_vectors_tables
227 *
228 * This may mutate x18.
229 */
230.macro BRANCH_TO_KVA_VECTOR
231#if HAS_MTE
232	/*
233	 * PSTATE.TCO (Tag Check Override) is automatically set by the architecture
234	 * whenever an exception is taken. TCO disables tag checking, so we want to
235	 * restore it ahead of servicing the exception.
236	 */
237	msr		TCO, #0
238#endif /* HAS_MTE */
239
240#if __ARM_KERNEL_PROTECT__
241	/*
242	 * Find the kernelcache table for the exception vectors by accessing
243	 * the per-CPU data.
244	 */
245	mrs		x18, TPIDR_EL1
246	ldr		x18, [x18, ACT_CPUDATAP]
247	ldr		x18, [x18, CPU_EXC_VECTORS]
248
249	/*
250	 * Get the handler for this exception and jump to it.
251	 */
252	ldr		x18, [x18, #($1 << 3)]
253	br		x18
254#else
255	b		$0
256#endif /* __ARM_KERNEL_PROTECT__ */
257.endmacro
258
259/*
260 * CHECK_KERNEL_STACK
261 *
262 * Verifies that the kernel stack is aligned and mapped within an expected
263 * stack address range. Note: happens before saving registers (in case we can't
264 * save to kernel stack).
265 *
266 * Expects:
267 *	{x0, x1} - saved
268 *	x1 - Exception syndrome
269 *	sp - Saved state
270 *
271 * Seems like we need an unused argument to the macro for the \@ syntax to work
272 *
273 */
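/*
 * Illustrative C sketch of the bounds check CHECK_KERNEL_STACK performs below
 * (only data aborts reach the bounds test; an SP-alignment fault is treated
 * as corrupt immediately):
 *
 *   if (sp_el0 > kstack_top - KERNEL_STACK_SIZE && sp_el0 < kstack_top)
 *       valid;                        // on this thread's kernel stack
 *   else if (sp_el0 > istack_top - INTSTACK_SIZE_NUM && sp_el0 < istack_top)
 *       valid;                        // on this CPU's interrupt stack
 *   else
 *       dispatch to fleh_invalid_stack with a minimal exception frame;
 */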
274.macro CHECK_KERNEL_STACK unused
275	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
276	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
277	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
278	cmp		x1, x2								// If we have a stack alignment exception
279	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
280	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
281	cmp		x1, x2								// If we have a data abort, we need to
282	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
283	mrs		x0, SP_EL0					// Get SP_EL0
284	mrs		x1, TPIDR_EL1						// Get thread pointer
285	/*
286	 * Check for either a NULL TPIDR or a NULL kernel stack, both of which
287	 * are expected in early boot, but will cause recursive faults if not
288	 * handled specially.
289	 */
290	cbz		x1, Lcorrupt_stack_\@
291	ldr		x2, [x1, TH_KSTACKPTR]
292	cbz		x2, Lcorrupt_stack_\@
293Ltest_kstack_\@:
294	LOAD_KERN_STACK_TOP	dst=x2, src=x1, tmp=x3	// Get top of kernel stack
295	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
296	cmp		x0, x2								// if (SP_EL0 >= kstack top)
297	b.ge	Ltest_istack_\@						//    jump to istack test
298	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
299	b.gt	Lvalid_stack_\@						//    stack pointer valid
300Ltest_istack_\@:
301	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
302	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
303	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
304	cmp		x0, x2								// if (SP_EL0 >= istack top)
305	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
306	cmp		x0, x3								// if (SP_EL0 > istack bottom)
307	b.gt	Lvalid_stack_\@						//    stack pointer valid
308Lcorrupt_stack_\@:
309	ldp		x2, x3, [sp], #16
310	ldp		x0, x1, [sp], #16
311	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
312	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
313	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
314	mrs		x0, SP_EL0					// Get SP_EL0
315	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
316	INIT_SAVED_STATE_FLAVORS sp, w0, w1
317	mov		x0, sp								// Copy exception frame pointer to x0
318	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
319	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
320	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
321	b		fleh_dispatch64
322Lvalid_stack_\@:
323	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
324.endmacro
325
326/*
327 * CHECK_EXCEPTION_CRITICAL_REGION
328 *
329 * Checks if the exception occurred within range [VECTOR_BEGIN, VECTOR_END).
330 * If so, jumps to \fail_label. Otherwise, continues.
331 * This is useful for avoiding infinite exception loops.
332 *
333 * Clobbers x18, NZCV.
334 */
335.macro CHECK_EXCEPTION_CRITICAL_REGION vector_begin, vector_end, fail_label
336	/*
337	 * We need two registers to do a compare but only have x18 free without
338	 * spilling. We can't safely spill to memory yet, however, because doing so
339	 * may fault. It's evil, but since we're operating on ELR here we can
340	 * temporarily spill into it to get another free register as long as we put
341	 * everything back at the end.
342	 */
343	mrs		x18, ELR_EL1
344	msr		ELR_EL1, x19
345
346	adrp	x19, \vector_begin@PAGE
347	add		x19, x19, \vector_begin@PAGEOFF
348	cmp		x18, x19 /* HS if at or above (suspect), LO if below (safe) */
349	adrp	x19, \vector_end@PAGE
350	add		x19, x19, \vector_end@PAGEOFF
351	/*
352	 * If ELR >= \vector_begin (HS), set flags for ELR - \vector_end. LO here
353	 * indicates we are in range.
354	 * Otherwise, set HS (C)
355	 */
356	ccmp	x18, x19, #0b0010 /* C/HS */, HS
357	/* Unspill x19/fixup ELR */
358	mrs		x19, ELR_EL1
359	msr		ELR_EL1, x18
360	mov		x18, #0
361	/* If we're in the range, fail out */
362	b.lo	\fail_label
363.endmacro
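/*
 * What the flag dance in CHECK_EXCEPTION_CRITICAL_REGION above computes, as
 * an illustrative C sketch:
 *
 *   if (elr >= (uintptr_t)&vector_begin && elr < (uintptr_t)&vector_end)
 *       goto fail_label;    // we faulted while executing the vector itself
 */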
364
365/*
366 * CHECK_EXCEPTION_STACK
367 *
368 * Verifies that SP1 is within exception stack and continues if it is.
369 * If not, jumps to \invalid_stack_label as we have nothing to fall back on.
370 *
371 * (out) x18: The unauthenticated CPU_EXCEPSTACK_TOP used for the comparison or
372 *            zero if the check could not be performed (such as because the
373 *            thread pointer was invalid).
374 *
375 * Clobbers NZCV.
376 */
377.macro CHECK_EXCEPTION_STACK invalid_stack_label
378	mrs		x18, TPIDR_EL1					// Get thread pointer
379	/*
380	 * The thread pointer might be invalid during early boot.
381	 * Return zero in x18 to indicate that we failed to execute the check.
382	 */
383	cbz		x18, Lskip_stack_check_\@
384	ldr		x18, [x18, ACT_CPUDATAP]
385	cbz		x18, \invalid_stack_label		// If thread context is set, cpu data should be too
386	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
387	cmp		sp, x18
388	b.gt	\invalid_stack_label			// Fail if above exception stack top
389	sub		x18, x18, EXCEPSTACK_SIZE_NUM	// Find bottom of exception stack
390	cmp		sp, x18
391	b.lt	\invalid_stack_label			// Fail if below exception stack bottom
392	add		x18, x18, EXCEPSTACK_SIZE_NUM	// Return stack top in x18
393Lskip_stack_check_\@:
394	/* FALLTHROUGH */
395.endmacro
396
397#if __ARM_KERNEL_PROTECT__
398	.section __DATA_CONST,__const
399	.align 3
400	.globl EXT(exc_vectors_table)
401LEXT(exc_vectors_table)
402	/* Table of exception handlers.
403	 * These handlers sometimes contain deadloops.
404	 * It's nice to have symbols for them when debugging. */
405	.quad el1_sp0_synchronous_vector_long
406	.quad el1_sp0_irq_vector_long
407	.quad el1_sp0_fiq_vector_long
408	.quad el1_sp0_serror_vector_long
409	.quad el1_sp1_synchronous_vector_long
410	.quad el1_sp1_irq_vector_long
411	.quad el1_sp1_fiq_vector_long
412	.quad el1_sp1_serror_vector_long
413	.quad el0_synchronous_vector_64_long
414	.quad el0_irq_vector_64_long
415	.quad el0_fiq_vector_64_long
416	.quad el0_serror_vector_64_long
417#endif /* __ARM_KERNEL_PROTECT__ */
418
419	.text
420#if __ARM_KERNEL_PROTECT__
421	/*
422	 * We need this to be on a page boundary so that we may avoid mapping
423	 * other text along with it.  As this must be on the VM page boundary
424	 * (due to how the coredumping code currently works), this will be a
425	 * 16KB page boundary.
426	 */
427	.align 14
428#else
429	.align 12
430#endif /* __ARM_KERNEL_PROTECT__ */
431	.globl EXT(ExceptionVectorsBase)
432LEXT(ExceptionVectorsBase)
433Lel1_sp0_synchronous_vector:
434	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
435
436	.text
437	.align 7
438Lel1_sp0_irq_vector:
439	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
440
441	.text
442	.align 7
443Lel1_sp0_fiq_vector:
444	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
445
446	.text
447	.align 7
448Lel1_sp0_serror_vector:
449	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
450
451	.text
452	.align 7
453Lel1_sp1_synchronous_vector:
454	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
455
456	.text
457	.align 7
458Lel1_sp1_irq_vector:
459	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
460
461	.text
462	.align 7
463Lel1_sp1_fiq_vector:
464	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
465
466	.text
467	.align 7
468Lel1_sp1_serror_vector:
469	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
470
471	.text
472	.align 7
473Lel0_synchronous_vector_64:
474	MAP_KERNEL
475	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
476
477	.text
478	.align 7
479Lel0_irq_vector_64:
480	MAP_KERNEL
481	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
482
483	.text
484	.align 7
485Lel0_fiq_vector_64:
486	MAP_KERNEL
487	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
488
489	.text
490	.align 7
491Lel0_serror_vector_64:
492	MAP_KERNEL
493	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
494
495	/* Fill out the rest of the page */
496	.align 12
497
498/*********************************
499 * END OF EXCEPTION VECTORS PAGE *
500 *********************************/
501
502
503
504.macro EL1_SP0_VECTOR
505	msr		SPSel, #0							// Switch to SP0
506	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
507	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
508	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
509	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
510	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
511	INIT_SAVED_STATE_FLAVORS sp, w0, w1
512	mov		x0, sp								// Copy saved state pointer to x0
513.endmacro
514
515.macro EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
516	// SWITCH_TO_INT_STACK requires a clobberable tmp register, but at this
517	// point in the exception vector we can't spare the extra GPR.  Instead note
518	// that EL1_SP0_VECTOR ends with x0 == sp and use this to unclobber x0.
519	mrs		x1, TPIDR_EL1
520	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
521	mov		x0, sp
522	mov		sp, x1
523.endmacro
524
525el1_sp0_synchronous_vector_long:
526	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
527	mrs		x1, ESR_EL1							// Get the exception syndrome
528	/* If the stack pointer is corrupt, it will manifest either as a data abort
529	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
530	 * these quickly by testing bit 5 of the exception class.
531	 */
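	/* Concretely: ESR_EC_DABORT_EL1 (0x25 = 0b100101) and ESR_EC_SP_ALIGN
	 * (0x26 = 0b100110) both have bit 5 of the exception class set, while
	 * the common cheap classes (e.g. SVC, 0x15) do not, so a single tbz on
	 * bit (5 + ESR_EC_SHIFT) lets most exceptions skip CHECK_KERNEL_STACK;
	 * the macro then performs the exact EC comparisons.
	 */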
532	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
533	CHECK_KERNEL_STACK
534Lkernel_stack_valid:
535	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
536	EL1_SP0_VECTOR
537	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
538	add		x1, x1, EXT(fleh_synchronous)@pageoff
539	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
540	b		fleh_dispatch64
541
542el1_sp0_irq_vector_long:
543	EL1_SP0_VECTOR
544	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
545	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
546	add		x1, x1, EXT(fleh_irq)@pageoff
547	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
548	b		fleh_dispatch64
549
550el1_sp0_fiq_vector_long:
551	// ARM64_TODO write optimized decrementer
552	EL1_SP0_VECTOR
553	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
554	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
555	add		x1, x1, EXT(fleh_fiq)@pageoff
556	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
557	b		fleh_dispatch64
558
559el1_sp0_serror_vector_long:
560	EL1_SP0_VECTOR
561	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
562	add		x1, x1, EXT(fleh_serror)@pageoff
563	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
564	b		fleh_dispatch64
565
566.macro EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
567	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
568	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
569	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
570	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
571	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
572	INIT_SAVED_STATE_FLAVORS sp, w0, w1
573.if \set_x0_to_exception_frame_ptr
574	mov		x0, sp								// Copy saved state pointer to x0
575.endif
576.endmacro
577
578el1_sp1_synchronous_vector_long:
579	/*
580	 * Before making our first (potentially faulting) memory access, check if we
581	 * previously tried and failed to execute this vector. If we did, it's not
582	 * going to work this time either so let's just spin.
583	 */
584#ifdef CONFIG_SPTM
585	/*
586	 * This check is doubly important for devices which support panic lockdown
587	 * as we use this check to ensure that we can take only a bounded number of
588	 * exceptions on SP1 while trying to spill before we give up on spilling and
589	 * lockdown anyways.
590	 *
591	 * Note, however, that we only check if we took an exception inside this
592	 * vector. Although an attacker could cause exceptions outside this routine,
593	 * they can only do this a finite number of times before overflowing the
594	 * exception stack (causing CHECK_EXCEPTION_STACK to fail) since we subtract
595	 * from SP inside the checked region and do not reload SP from memory before
596	 * we hit the post-spill lockdown point in fleh_synchronous_sp1.
597	 */
598#endif /* CONFIG_SPTM */
599	CHECK_EXCEPTION_CRITICAL_REGION el1_sp1_synchronous_vector_long, Lel1_sp1_synchronous_vector_long_end, EXT(el1_sp1_synchronous_vector_long_spill_failed)
600	CHECK_EXCEPTION_STACK EXT(el1_sp1_synchronous_vector_long_spill_failed)
601#ifdef KERNEL_INTEGRITY_KTRR
602	b		check_ktrr_sctlr_trap
603Lel1_sp1_synchronous_vector_continue:
604#endif /* KERNEL_INTEGRITY_KTRR */
605#if CONFIG_SPTM
606	/* Don't bother setting up x0 since we need it as a temporary */
607	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=0
608
609	/*
610	 * Did we fail to execute the stack check (x18=0)?
611	 * On devices which support panic lockdown, we cannot allow this check to be
612	 * skipped after early-boot as doing so may allow exception processing to
613	 * be delayed indefinitely.
614	 */
615	adrp	x0, EXT(startup_phase)@page
616	ldr		w0, [x0, EXT(startup_phase)@pageoff]
617	/* Are we in early-boot? */
618	cmp		w0, #-1 // STARTUP_SUB_LOCKDOWN
619	/*
620	 * If we're still in early-boot (LO), set flags for if we skipped the check
621	 * If we're after early-boot (HS), pass NE
622	 */
623	ccmp	x18, xzr, #0b0000 /* !Z/NE */, LO
624	/* Skip authentication if this was an early boot check fail */
625	b.eq	1f
626	/*
627	 * If we're not in early boot but still couldn't execute the stack bounds
628	 * check (x18=0), something is wrong (TPIDR is corrupted?).
629	 * Trigger a lockdown.
630	 */
631	cbz		x18, EXT(el1_sp1_synchronous_vector_long_spill_failed)
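	/*
	 * Illustrative C sketch of the two checks above (startup_phase and
	 * STARTUP_SUB_LOCKDOWN are the real symbols; excstk_top stands in for
	 * the unauthenticated value returned in x18):
	 *
	 *   if (startup_phase < STARTUP_SUB_LOCKDOWN && excstk_top == 0)
	 *       goto skip_authentication;    // tolerated only during early boot
	 *   if (excstk_top == 0)
	 *       goto spill_failed;           // check never ran post-boot: lock down
	 */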
632
633	/*
634	 * In CHECK_EXCEPTION_STACK, we didn't have enough registers to perform the
635	 * signature verification on the exception stack top value and instead used
636	 * the unauthenticated value (x18) for the stack pointer bounds check.
637	 *
638	 * Ensure that we actually performed the check on a legitimate value now.
639	 */
640	mrs		x0, TPIDR_EL1
641	LOAD_EXCEP_STACK_THREAD dst=x0, src=x0, tmp=x1
642	cmp		x0, x18
643	/* If we aren't equal, something is very wrong and we should lockdown. */
644	b.ne	EXT(el1_sp1_synchronous_vector_long_spill_failed)
645
6461:
647	mov		x0, sp	/* Set x0 to saved state pointer */
648#else
649	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
650#endif /* CONFIG_SPTM */
651	adrp	x1, fleh_synchronous_sp1@page
652	add		x1, x1, fleh_synchronous_sp1@pageoff
653	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
654	b		fleh_dispatch64
655
656	/*
657	 * Global symbol to make it easy to pick out in backtraces.
658	 * Do not call externally.
659	 */
660	.global EXT(el1_sp1_synchronous_vector_long_spill_failed)
661LEXT(el1_sp1_synchronous_vector_long_spill_failed)
662	TRAP_UNWIND_PROLOGUE
663	TRAP_UNWIND_DIRECTIVES
664	/*
665	 * We couldn't process the exception due to either having an invalid
666	 * exception stack or because we previously tried to process it and failed.
667	 */
668#if CONFIG_SPTM
669	/*
670	 * For SP1 exceptions, we usually delay initiating lockdown until after
671	 * we've spilled in order to not lose register state. Since we have nowhere
672	 * to safely spill, we have no choice but to initiate it now, clobbering
673	 * some of our exception state in the process (RIP).
674	 */
675	BEGIN_PANIC_LOCKDOWN
676#if CONFIG_XNUPOST
677	/* Macro returns x0=1 if it performed a simulated lockdown */
678	cbz		x0, 0f
679	/* This was a test; return to fault handler so they can fixup the system. */
680	mrs		x0, TPIDR_EL1
681	ldr		x16, [x0, TH_EXPECTED_FAULT_HANDLER]
682#if __has_feature(ptrauth_calls)
683	movk	x17, #TH_EXPECTED_FAULT_HANDLER_DIVERSIFIER
684	autia	x16, x17
685#endif /* ptrauth_calls */
686	msr		ELR_EL1, x16
687	/* Pass a NULL saved state since we didn't actually save anything */
688	mov		x0, #0
689	ERET_NO_STRAIGHT_LINE_SPECULATION
690#endif /* CONFIG_XNUPOST */
691#endif /* CONFIG_SPTM */
6920:
693	wfe
694	b		0b // Spin for watchdog
695	UNWIND_EPILOGUE
696
697#if CONFIG_SPTM
698#if CONFIG_XNUPOST
699	/**
700	 * Test function which raises an exception from a location considered inside
701	 * the vector. Does not return.
702	 */
703	.global EXT(el1_sp1_synchronous_raise_exception_in_vector)
704LEXT(el1_sp1_synchronous_raise_exception_in_vector)
705	ARM64_PROLOG
706	brk		#0
707	/* Unreachable */
708	b		.
709#endif /* CONFIG_XNUPOST */
710#endif /* CONFIG_SPTM */
711Lel1_sp1_synchronous_vector_long_end:
712
713el1_sp1_irq_vector_long:
714	EL1_SP1_VECTOR
715	adrp	x1, fleh_irq_sp1@page
716	add		x1, x1, fleh_irq_sp1@pageoff
717	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
718	b		fleh_dispatch64
719
720el1_sp1_fiq_vector_long:
721	EL1_SP1_VECTOR
722	adrp	x1, fleh_fiq_sp1@page
723	add		x1, x1, fleh_fiq_sp1@pageoff
724	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
725	b		fleh_dispatch64
726
727el1_sp1_serror_vector_long:
728	EL1_SP1_VECTOR
729	adrp	x1, fleh_serror_sp1@page
730	add		x1, x1, fleh_serror_sp1@pageoff
731	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
732	b		fleh_dispatch64
733
734
735.macro EL0_64_VECTOR guest_label
736	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
737#if __ARM_KERNEL_PROTECT__
738	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
739#endif
740	mrs		x0, TPIDR_EL1						// Load the thread register
741	LOAD_USER_PCB	dst=x0, src=x0, tmp=x1		// Load the user context pointer
742	mrs		x1, SP_EL0							// Load the user stack pointer
743	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
744	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
745	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
746	msr		SPSel, #0							// Switch to SP0
747	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
748	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the user PCB
749	mrs		x1, TPIDR_EL1						// Load the thread register
750
751
752
753#if HAS_ARM_FEAT_SME
754	str		x2, [sp, SS64_X2]
755	// current_thread()->machine.umatrix_hdr == NULL: this thread has never
756	// executed a matrix instruction, so no matrix state to save
757	add		x0, x1, ACT_UMATRIX_HDR
758	ldr		x2, [x0]
759	cbz		x2, 1f
760	AUTDA_DIVERSIFIED x2, address=x0, diversifier=ACT_UMATRIX_HDR_DIVERSIFIER
761
762
763	adrp	x0, EXT(sme_version)@page
764	add		x0, x0, EXT(sme_version)@pageoff
765	ldr		w0, [x0]
766	cbz		w0, 1f
767
768	adrp	x0, EXT(sme_state_hdr)@page
769	add		x0, x0, EXT(sme_state_hdr)@pageoff
770	ldr		x0, [x0]
771	str		x0, [x2]
772	rdsvl	x0, #1
773	strh	w0, [x2, SME_SVL_B]
774	mrs		x0, SVCR
775	str		x0, [x2, SME_SVCR]
776	// SVCR.SM == 0: save SVCR only (ZA is handled during context-switch)
777	tbz		x0, #SVCR_SM_SHIFT, 1f
778
779	// SVCR.SM == 1: save SVCR, Z, and P; and exit streaming SVE mode
780	ldrh	w0, [x2, SME_SVL_B]
781	add		x2, x2, SME_Z_P_ZA
782	LOAD_OR_STORE_Z_P_REGISTERS	str, svl_b=x0, ss=x2
783	mrs		x2, FPSR
784	smstop	sm
785	msr		FPSR, x2
7861:
787	ldr		x2, [sp, SS64_X2]
788#endif /* HAS_ARM_FEAT_SME */
789
790	mov		x0, sp								// Copy the user PCB pointer to x0
791												// x1 contains thread register
792.endmacro
793
794.macro EL0_64_VECTOR_SWITCH_TO_INT_STACK
795	// Similarly to EL1_SP0_VECTOR_SWITCH_TO_INT_STACK, we need to take
796	// advantage of EL0_64_VECTOR ending with x0 == sp.  EL0_64_VECTOR also
797	// populates x1 with the thread state, so we can skip reloading it.
798	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
799	mov		x0, sp
800	mov		sp, x1
801.endmacro
802
803.macro EL0_64_VECTOR_SWITCH_TO_KERN_STACK
804	LOAD_KERN_STACK_TOP	dst=x1, src=x1, tmp=x0
805	mov		x0, sp
806	mov		sp, x1
807.endmacro
808
809el0_synchronous_vector_64_long:
810	EL0_64_VECTOR	sync
811	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
812	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
813	add		x1, x1, EXT(fleh_synchronous)@pageoff
814	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
815	b		fleh_dispatch64
816
817el0_irq_vector_64_long:
818	EL0_64_VECTOR	irq
819	EL0_64_VECTOR_SWITCH_TO_INT_STACK
820	adrp	x1, EXT(fleh_irq)@page					// load address for fleh
821	add		x1, x1, EXT(fleh_irq)@pageoff
822	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
823	b		fleh_dispatch64
824
825el0_fiq_vector_64_long:
826	EL0_64_VECTOR	fiq
827	EL0_64_VECTOR_SWITCH_TO_INT_STACK
828	adrp	x1, EXT(fleh_fiq)@page					// load address for fleh
829	add		x1, x1, EXT(fleh_fiq)@pageoff
830	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
831	b		fleh_dispatch64
832
833el0_serror_vector_64_long:
834	EL0_64_VECTOR	serror
835	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
836	adrp	x1, EXT(fleh_serror)@page				// load address for fleh
837	add		x1, x1, EXT(fleh_serror)@pageoff
838	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
839	b		fleh_dispatch64
840
841
842#if defined(KERNEL_INTEGRITY_KTRR)
843	.text
844	.align 2
845check_ktrr_sctlr_trap:
846/* We may abort on an instruction fetch on reset when enabling the MMU by
847 * writing SCTLR_EL1 because the page containing the privileged instruction is
848 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
849 * would otherwise panic unconditionally. Check for the condition and return
850 * safe execution to the caller on behalf of the faulting function.
851 *
852 * Expected register state:
853 *  x22 - Kernel virtual base
854 *  x23 - Kernel physical base
855 */
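/*
 * The magic constant built below, 0x86000000, decodes (per the standard
 * ESR_EL1 layout) as EC = 0x21 (instruction abort taken without a change in
 * exception level) with IL = 1; masking off the low six ISS.IFSC bits first
 * means any fault status code is accepted.
 */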
856	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
857	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
858	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
859	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
860	movz	w1, #0x8600, lsl #16
861	movk	w1, #0x0000
862	cmp		x0, x1
863	mrs		x0, ELR_EL1					// Check for expected abort address
864	adrp	x1, _pinst_set_sctlr_trap_addr@page
865	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
866	sub		x1, x1, x22					// Convert to physical address
867	add		x1, x1, x23
868	ccmp	x0, x1, #0, eq
869	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
870	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
871	b.ne	Lel1_sp1_synchronous_vector_continue
872	msr		ELR_EL1, lr					// Return to caller
873	ERET_NO_STRAIGHT_LINE_SPECULATION
874#endif /* defined(KERNEL_INTEGRITY_KTRR) */
875
876/* 64-bit first level exception handler dispatcher.
877 * Completes register context saving and branches to FLEH.
878 * Expects:
879 *  {x0, x1, sp} - saved
880 *  x0 - arm_context_t
881 *  x1 - address of FLEH
882 *  x2 - bitfield of type FLEH_DISPATCH64_OPTION_xxx, clobbered
883 *  x3 - unused
884 *  fp - previous stack frame if EL1
885 *  lr - unused
886 *  sp - kernel stack
887 */
888	.text
889	.align 2
890fleh_dispatch64:
891#if HAS_APPLE_PAC
892	pacia	x1, sp
893#endif
894
895	/* Save arm_saved_state64 */
896	SPILL_REGISTERS KERNEL_MODE, options_register=x2
897
898	/* If exception is from userspace, zero unused registers */
899	and		x23, x23, #(PSR64_MODE_EL_MASK)
900	cmp		x23, #(PSR64_MODE_EL0)
901	bne		1f
902
903	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
904
905
9062:
907#if HAS_MTE
908	/*
909	 * Exception came from userspace, so there's some gatekeeping that we
910	 * have to do for MTE. First of all, we need to generate a new seed value
911	 * for RGSR_EL1.SEED. The Hidra pseudo-random generator for tags is weak, which
912	 * may allow an attacker observing a short sequence of tags to predict the
913	 * rest of it. We therefore restore it as often as possible at context
914	 * switch, reseeding it with another source of randomness.
915	 */
916#if NEEDS_MTE_IRG_RESEED
917	PACGA_IRG_RESEED x2, x3, x4
918#endif
919
920	/*
921	 * IRG default exclude mask. GCR_EL1 allows certain tags to be excluded from
922	 * the selection options for any IRG execution. We want to exclude
923	 * 0 in userspace and F in the kernel as they match the canonical tags.
924	 * GCR_EL1 modifications require a CSE, but we do not want to impact
925	 * speculative performance wins here, so we simply set the register
926	 * and let the operation go. IRG calls in the kernel force the F tag
927	 * out with GMI, so this is mostly to recapture the ability to
928	 * use tag 0. In the vast majority of cases, this path will commit
929	 * before we get to an IRG.
930	 */
931	eor		x4, x4, x4
932	mov		x4, #(GCR_EL1_RRND_ASM)
933	orr		x4, x4, #(GCR_EL1_EXCLUDE_TAGS_KERNEL)
934	msr		GCR_EL1, x4
935#endif /* HAS_MTE */
936
937	mov		x2, #0
938	mov		x3, #0
939	mov		x4, #0
940	mov		x5, #0
941	mov		x6, #0
942	mov		x7, #0
943	mov		x8, #0
944	mov		x9, #0
945	mov		x10, #0
946	mov		x11, #0
947	mov		x12, #0
948	mov		x13, #0
949	mov		x14, #0
950	mov		x15, #0
951	mov		x16, #0
952	mov		x17, #0
953	mov		x18, #0
954	mov		x19, #0
955	mov		x20, #0
956	/* x21, x22 cleared in common case below */
957	mov		x23, #0
958	mov		x24, #0
959	mov		x25, #0
960#if !XNU_MONITOR
961	mov		x26, #0
962#endif
963	mov		x27, #0
964	mov		x28, #0
965	mov		fp, #0
966	mov		lr, #0
9671:
968
969	mov		x21, x0								// Copy arm_context_t pointer to x21
970	mov		x22, x1								// Copy handler routine to x22
971
972#if XNU_MONITOR
973	/* Zero x26 to indicate that this should not return to the PPL. */
974	mov		x26, #0
975#endif
976
977#if PRECISE_USER_KERNEL_TIME
978	cmp		x23, #PSR64_MODE_EL0			// If we interrupted the kernel, skip
979	b.gt	1f                                  // precise time update.
980	PUSH_FRAME
981	bl		EXT(recount_leave_user)
982	POP_FRAME_WITHOUT_LR
983	mov		x0, x21								// Reload arm_context_t pointer
9841:
985#endif /* PRECISE_USER_KERNEL_TIME */
986
987	/* Dispatch to FLEH */
988
989#if HAS_APPLE_PAC
990	braa	x22,sp
991#else
992	br		x22
993#endif
994
995
996	.text
997	.align 2
998	.global EXT(fleh_synchronous)
999LEXT(fleh_synchronous)
1000TRAP_UNWIND_PROLOGUE
1001TRAP_UNWIND_DIRECTIVES
1002	ARM64_JUMP_TARGET
1003	mrs		x1, ESR_EL1							// Load exception syndrome
1004	mrs		x2, FAR_EL1							// Load fault address
1005	mrs		lr, ELR_EL1
1006	/* NB: lr might not be a valid address (e.g. instruction abort). */
1007	PUSH_FRAME
1008
1009#if CONFIG_SPTM
1010	mrs		x25, ELR_EL1
1011
1012	/*
1013	 * Sync exceptions in the kernel are rare, so check that first.
1014	 * This check should be trivially predicted NT. We also take
1015	 * the check out of line so, on the hot path, we don't add a
1016	 * frontend redirect.
1017	 */
1018	mov		x3, #0 // by default, do not signal panic lockdown to sleh
1019	mrs		x4, SPSR_EL1
1020	tst		x4, #(PSR64_MODE_EL_MASK)
1021	b.ne	Lfleh_synchronous_ool_check_exception_el1 /* Run ELn checks if we're EL!=0 (!Z) */
1022	/* EL0 -- check if we're blocking sync exceptions due to lockdown */
1023	adrp	x4, EXT(sptm_xnu_triggered_panic_ptr)@page
1024	ldr		x4, [x4, EXT(sptm_xnu_triggered_panic_ptr)@pageoff]
1025	ldrb	w4, [x4]
1026	cbnz	w4, Lblocked_user_sync_exception
1027
1028Lfleh_synchronous_continue:
1029	/* We've had our chance to lockdown, release PC/FAR */
1030	str		x25, [x0, SS64_PC]
1031	str		x2,  [x0, SS64_FAR]
1032#endif /* CONFIG_SPTM */
1033
1034	bl		EXT(sleh_synchronous)
1035	POP_FRAME_WITHOUT_LR
1036
1037#if XNU_MONITOR && !CONFIG_SPTM
1038	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1039#endif
1040
1041	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
1042	b		exception_return_dispatch
1043
1044#if CONFIG_SPTM
1045Lfleh_synchronous_ool_check_exception_el1:
1046	/* Save off arguments needed for sleh_sync as we may clobber */
1047	mov		x26, x0
1048	mov		x27, x1
1049	mov		x28, x2
1050
1051	/*
1052	 * Evaluate the exception state to determine if we should initiate a
1053	 * lockdown. While this function is implemented in C, since it is guaranteed
1054	 * to not use the stack it should be immune from spill tampering and other
1055	 * attacks which may cause it to render the wrong ruling.
1056	 */
1057	mov		x0, x1  // ESR
1058	mov		x1, x25 // ELR
1059			        // FAR is already in x2
1060	mrs		x3, SPSR_EL1
1061	bl		EXT(sleh_panic_lockdown_should_initiate_el1_sp0_sync)
1062
1063	/* sleh_synchronous needs the lockdown decision in x3 */
1064	mov		x3, x0
1065	/* Optimistically restore registers on the assumption we won't lockdown */
1066	mov		x0, x26
1067	mov		x1, x27
1068	mov		x2, x28
1069
1070	cbz		x3, Lfleh_synchronous_continue
1071
1072	BEGIN_PANIC_LOCKDOWN
1073	mov		x0, x26
1074	mov		x1, x27
1075	mov		x2, x28
1076	/*
1077	 * A captain goes down with her ship; the system is sunk, but for
1078	 * telemetry's sake try to handle the crash normally.
1079	 */
1080	mov		x3, #1 // signal to sleh that we completed panic lockdown
1081	b		Lfleh_synchronous_continue
1082#endif /* CONFIG_SPTM */
1083UNWIND_EPILOGUE
1084
1085#if CONFIG_SPTM
1086	.text
1087	.align 2
1088	/* Make a global symbol so it's easier to pick out in backtraces */
1089	.global EXT(blocked_user_sync_exception)
1090LEXT(blocked_user_sync_exception)
1091Lblocked_user_sync_exception:
1092	TRAP_UNWIND_PROLOGUE
1093	TRAP_UNWIND_DIRECTIVES
1094	/*
1095	 * User space took a sync exception after panic lockdown had been initiated.
1096	 * The system is going to panic soon, so let's just re-enable interrupts and
1097	 * wait for debugger sync.
1098	 */
1099	msr		DAIFClr, #(DAIFSC_STANDARD_DISABLE)
11000:
1101	wfe
1102	b		0b
1103	UNWIND_EPILOGUE
1104#endif /* CONFIG_SPTM */
1105
1106/* Shared prologue code for fleh_irq and fleh_fiq.
1107 * Does any interrupt bookkeeping we may want to do
1108 * before invoking the handler proper.
1109 * Expects:
1110 *  x0 - arm_context_t
1111 * x23 - CPSR
1112 *  fp - Undefined live value (we may push a frame)
1113 *  lr - Undefined live value (we may push a frame)
1114 *  sp - Interrupt stack for the current CPU
1115 */
1116.macro BEGIN_INTERRUPT_HANDLER
1117	mrs		x22, TPIDR_EL1
1118	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
1119	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence  */
1120	ldr		w1, [x23, CPU_STAT_IRQ]
1121	add		w1, w1, #1							// Increment count
1122	str		w1, [x23, CPU_STAT_IRQ]				// Update  IRQ count
1123	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
1124	add		w1, w1, #1					// Increment count
1125	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
1126	/* Increment preempt count */
1127	ldr		w1, [x22, ACT_PREEMPT_CNT]
1128	add		w1, w1, #1
1129	str		w1, [x22, ACT_PREEMPT_CNT]
1130	/* Store context in int state */
1131	str		x0, [x23, CPU_INT_STATE] 			// Saved context in cpu_int_state
1132.endmacro
1133
1134/* Shared epilogue code for fleh_irq and fleh_fiq.
1135 * Cleans up after the prologue, and may do a bit more
1136 * bookkeeping (kdebug related).
1137 * Expects:
1138 * x22 - Live TPIDR_EL1 value (thread address)
1139 * x23 - Address of the current CPU data structure
1140 * w24 - 0 if kdebug is disabled, nonzero otherwise
1141 *  fp - Undefined live value (we may push a frame)
1142 *  lr - Undefined live value (we may push a frame)
1143 *  sp - Interrupt stack for the current CPU
1144 */
1145.macro END_INTERRUPT_HANDLER
1146	/* Clear int context */
1147	str		xzr, [x23, CPU_INT_STATE]
1148	/* Decrement preempt count */
1149	ldr		w0, [x22, ACT_PREEMPT_CNT]
1150	cbnz	w0, 1f								// Detect underflow
1151	b		preempt_underflow
11521:
1153	sub		w0, w0, #1
1154	str		w0, [x22, ACT_PREEMPT_CNT]
1155	/* Switch back to kernel stack */
1156	LOAD_KERN_STACK_TOP	dst=x0, src=x22, tmp=x28
1157	mov		sp, x0
1158	/* Generate a CPU-local event to terminate a post-IRQ WFE */
1159	sevl
1160.endmacro
1161
1162	.text
1163	.align 2
1164	.global EXT(fleh_irq)
1165LEXT(fleh_irq)
1166TRAP_UNWIND_PROLOGUE
1167TRAP_UNWIND_DIRECTIVES
1168	ARM64_JUMP_TARGET
1169	BEGIN_INTERRUPT_HANDLER
1170	PUSH_FRAME
1171	bl		EXT(sleh_irq)
1172	POP_FRAME_WITHOUT_LR
1173	END_INTERRUPT_HANDLER
1174
1175#if XNU_MONITOR && !CONFIG_SPTM
1176	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1177#endif
1178
1179	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
1180	b		exception_return_dispatch
1181UNWIND_EPILOGUE
1182
1183	.text
1184	.align 2
1185	.global EXT(fleh_fiq_generic)
1186LEXT(fleh_fiq_generic)
1187	/*
1188	 * This function is a placeholder which should never be invoked.
1189	 * We omit the landingpad here since there is no sensible choice.
1190	 */
1191	PANIC_UNIMPLEMENTED
1192
1193	.text
1194	.align 2
1195	.global EXT(fleh_fiq)
1196LEXT(fleh_fiq)
1197TRAP_UNWIND_PROLOGUE
1198TRAP_UNWIND_DIRECTIVES
1199	ARM64_JUMP_TARGET
1200	BEGIN_INTERRUPT_HANDLER
1201	PUSH_FRAME
1202	bl		EXT(sleh_fiq)
1203	POP_FRAME_WITHOUT_LR
1204	END_INTERRUPT_HANDLER
1205
1206#if XNU_MONITOR && !CONFIG_SPTM
1207	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1208#endif
1209
1210	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
1211	b		exception_return_dispatch
1212UNWIND_EPILOGUE
1213
1214	.text
1215	.align 2
1216	.global EXT(fleh_serror)
1217LEXT(fleh_serror)
1218TRAP_UNWIND_PROLOGUE
1219TRAP_UNWIND_DIRECTIVES
1220	ARM64_JUMP_TARGET
1221	mrs		x1, ESR_EL1							// Load exception syndrome
1222	mrs		x2, FAR_EL1							// Load fault address
1223
1224	PUSH_FRAME
1225	bl		EXT(sleh_serror)
1226	POP_FRAME_WITHOUT_LR
1227
1228#if XNU_MONITOR && !CONFIG_SPTM
1229	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1230#endif
1231
1232	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
1233	b		exception_return_dispatch
1234UNWIND_EPILOGUE
1235
1236/*
1237 * Register state saved before we get here.
1238 */
1239	.text
1240	.align 2
1241fleh_invalid_stack:
1242	TRAP_UNWIND_PROLOGUE
1243	TRAP_UNWIND_DIRECTIVES
1244	ARM64_JUMP_TARGET
1245#if CONFIG_SPTM
1246	/*
1247	 * Taking a data abort with an invalid kernel stack pointer is unrecoverable.
1248	 * Initiate lockdown.
1249	 */
1250
1251	/* Save off temporaries (including exception SPRs) as SPTM can clobber */
1252	mov		x25, x0
1253	mrs		x26, ELR_EL1
1254	mrs		x27, ESR_EL1
1255	mrs		x28, FAR_EL1
1256	BEGIN_PANIC_LOCKDOWN
1257	mov		x0, x25
1258	mov		x1, x27
1259	mov		x2, x28
1260	/* We deferred storing PC/FAR until after lockdown, so do that now */
1261	str		x26, [x0, SS64_PC]
1262	str		x28, [x0, SS64_FAR]
1263#else
1264	mrs		x1, ESR_EL1							// Load exception syndrome
1265	mrs		x2, FAR_EL1							// Load fault address
1266#endif /* CONFIG_SPTM */
1267	PUSH_FRAME
1268	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
1269	b 		.
1270	UNWIND_EPILOGUE
1271
1272	.text
1273	.align 2
1274fleh_synchronous_sp1:
1275	TRAP_UNWIND_PROLOGUE
1276	TRAP_UNWIND_DIRECTIVES
1277	ARM64_JUMP_TARGET
1278#if CONFIG_SPTM
1279	/*
1280	 * Without debugger intervention, all exceptions on SP1 (including debug
1281	 * trap instructions) are intended to be fatal. In order to not break
1282	 * self-hosted kernel debug, do not trigger lockdown for debug traps
1283	 * (unknown instructions/uncategorized exceptions). On release kernels, we
1284	 * don't support self-hosted kernel debug so unconditionally lockdown.
1285	 */
1286#if (DEVELOPMENT || DEBUG)
1287	tst		w1, #(ESR_EC_MASK)
1288	b.eq	Lfleh_synchronous_sp1_skip_panic_lockdown // ESR_EC_UNCATEGORIZED is 0, so skip lockdown if Z
1289#endif /* DEVELOPMENT || DEBUG */
1290	/* Save off temporaries (including exception SPRs) as SPTM can clobber */
1291	mov		x25, x0
1292	mrs		x26, ELR_EL1
1293	mrs		x27, ESR_EL1
1294	mrs		x28, FAR_EL1
1295	BEGIN_PANIC_LOCKDOWN
1296	mov		x0, x25
1297	mov		x1, x27
1298	mov		x2, x28
1299	/* We deferred storing PC/FAR until after lockdown, so do that now */
1300	str		x26, [x0, SS64_PC]
1301	str		x28, [x0, SS64_FAR]
1302Lfleh_synchronous_sp1_skip_panic_lockdown:
1303#else
1304	mrs		x1, ESR_EL1
1305	mrs		x2, FAR_EL1
1306#endif /* CONFIG_SPTM */
1307	/*
1308	 * If we got here before we have a kernel thread or kernel stack (e.g.
1309	 * still on init_thread) and we try to panic(), we'll end up in an infinite
1310	 * nested exception, so just stop here instead to preserve the call stack.
1311	 */
1312	mrs		x9, TPIDR_EL1
1313	cbz		x9, 0f
1314	ldr		x9, [x9, TH_KSTACKPTR]
1315	cbz		x9, 0f
1316	PUSH_FRAME
1317	bl		EXT(sleh_synchronous_sp1)
1318	b 		.
13190:
1320	PUSH_FRAME
1321	bl		EXT(el1_sp1_synchronous_vector_long_invalid_kstack)
1322	b 		.
1323	UNWIND_EPILOGUE
1324
1325LEXT(el1_sp1_synchronous_vector_long_invalid_kstack)
13260:
1327	wfe
1328	b		0b // Spin for watchdog
1329
1330	.text
1331	.align 2
1332fleh_irq_sp1:
1333	ARM64_JUMP_TARGET
1334	mov		x1, x0
1335	adr		x0, Lsp1_irq_str
1336	b		EXT(panic_with_thread_kernel_state)
1337Lsp1_irq_str:
1338	.asciz "IRQ exception taken while SP1 selected"
1339
1340	.text
1341	.align 2
1342fleh_fiq_sp1:
1343	ARM64_JUMP_TARGET
1344	mov		x1, x0
1345	adr		x0, Lsp1_fiq_str
1346	b		EXT(panic_with_thread_kernel_state)
1347Lsp1_fiq_str:
1348	.asciz "FIQ exception taken while SP1 selected"
1349
1350	.text
1351	.align 2
1352fleh_serror_sp1:
1353	ARM64_JUMP_TARGET
1354	mov		x1, x0
1355	adr		x0, Lsp1_serror_str
1356	b		EXT(panic_with_thread_kernel_state)
1357Lsp1_serror_str:
1358	.asciz "Asynchronous exception taken while SP1 selected"
1359
1360	.text
1361	.align 2
1362exception_return_dispatch:
1363	ldr		w0, [x21, SS64_CPSR]
1364	tst		w0, PSR64_MODE_EL_MASK
1365	b.ne		EXT(return_to_kernel) // return to kernel if M[3:2] > 0
1366	b		return_to_user
1367
1368#if CONFIG_SPTM
1369/**
1370 * XNU returns to this symbol whenever handling an interrupt that occurred
1371 * during SPTM, TXM or SK runtime. This code determines which domain the
1372 * XNU thread was executing in when the interrupt occurred and tells SPTM
1373 * which domain to resume.
1374 */
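/*
 * Resume-target selection, as an illustrative C sketch of the assembly below
 * (field names follow the TH_* assym offsets; argument passing is elided):
 *
 *   if (thread->txm_thread_stack != NULL)
 *       txm_resume(thread->txm_thread_stack);
 *   else if (thread->th_exclaves_intstate & TH_EXCLAVES_EXECUTION)  // CONFIG_EXCLAVES only
 *       sk_resume();
 *   else
 *       sptm_resume_from_exception();
 */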
1375	.text
1376	.align 2
1377	.global EXT(xnu_return_to_gl2)
1378LEXT(xnu_return_to_gl2)
1379	/**
1380	 * If thread->txm_thread_stack is set, we need to tell SPTM dispatch to
1381	 * resume the TXM thread in x0.
1382	 */
1383	mrs		x8, TPIDR_EL1
1384	ldr		x8, [x8, TH_TXM_THREAD_STACK]
1385	cbz		x8, 1f
1386	mov		x0, x8
1387	b		EXT(txm_resume)
1388	/* Unreachable */
1389	b .
1390
1391#if CONFIG_EXCLAVES
1392	/**
1393	 * If thread->th_exclaves_intstate flag TH_EXCLAVES_EXECUTION is set
1394	 * we need to tell SPTM dispatch to resume the SK thread.
1395	 */
13961:
1397	mrs		x8, TPIDR_EL1
1398	ldr		x9, [x8, TH_EXCLAVES_INTSTATE]
1399	and		x9, x9, TH_EXCLAVES_EXECUTION
1400	cbz		x9, 1f
1401	b		EXT(sk_resume)
1402	/* Unreachable */
1403	b .
1404#endif /* CONFIG_EXCLAVES */
1405
1406	/**
1407	 * If neither the above checks succeeded, this must be a thread
1408	 * that was interrupted while running in SPTM. Tell SPTM to resume
1409	 * the interrupted SPTM call.
1410	 */
14111:
1412	b		EXT(sptm_resume_from_exception)
1413	/* Unreachable */
1414	b .
1415#endif /* CONFIG_SPTM */
1416
1417	.text
1418	.align 2
1419	.global EXT(return_to_kernel)
1420LEXT(return_to_kernel)
1421	UNWIND_PROLOGUE
1422	RETURN_TO_KERNEL_UNWIND
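	/*
	 * Illustrative C sketch of the AST gate below (preempt_cnt and
	 * pending_ast are shorthand for the fields behind ACT_PREEMPT_CNT and
	 * CPU_PENDING_AST):
	 *
	 *   if (saved_cpsr & IRQ_masked)        goto exception_return;
	 *   disable_all_exceptions();
	 *   if (thread->preempt_cnt != 0)       goto exception_return;
	 *   if (cpu->pending_ast & AST_URGENT) {
	 *       switch to the thread stack (sp = x21);
	 *       ast_taken_kernel();
	 *   }
	 *   goto exception_return;
	 */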
1423	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
1424	mrs		x3, TPIDR_EL1                           // Load thread pointer
1425	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
1426	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
1427	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
1428	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
1429	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
1430	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
1431	b.eq	exception_return_unint_tpidr_x3
1432	mov		sp, x21                                 // Switch to thread stack for preemption
1433	PUSH_FRAME
1434	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
1435	POP_FRAME_WITHOUT_LR
1436	b		exception_return
1437	UNWIND_EPILOGUE
1438
1439	.text
1440	.globl EXT(thread_bootstrap_return)
1441LEXT(thread_bootstrap_return)
1442	ARM64_PROLOG
1443#if CONFIG_DTRACE
1444	bl		EXT(dtrace_thread_bootstrap)
1445#endif
1446#if KASAN_TBI
1447	PUSH_FRAME
1448	bl		EXT(__asan_handle_no_return)
1449	POP_FRAME_WITHOUT_LR
1450#endif /* KASAN_TBI */
1451	b		EXT(arm64_thread_exception_return)
1452
1453	.text
1454	.globl EXT(arm64_thread_exception_return)
1455LEXT(arm64_thread_exception_return)
1456	ARM64_PROLOG
1457	mrs		x0, TPIDR_EL1
1458	LOAD_USER_PCB	dst=x21, src=x0, tmp=x28
1459	mov		x28, xzr
1460
1461	//
1462	// Fall Through to return_to_user from arm64_thread_exception_return.
1463	// Note that if we move return_to_user or insert a new routine
1464	// below arm64_thread_exception_return, the latter will need to change.
1465	//
1466	.text
1467/* x21 is always the machine context pointer when we get here
1468 * x28 is a bit indicating whether or not we should check if pc is in pfz */
1469return_to_user:
1470check_user_asts:
1471#if KASAN_TBI
1472	PUSH_FRAME
1473	bl		EXT(__asan_handle_no_return)
1474	POP_FRAME_WITHOUT_LR
1475#endif /* KASAN_TBI */
1476	mrs		x3, TPIDR_EL1					// Load thread pointer
1477
1478	movn		w2, #0
1479	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user
1480
1481#if MACH_ASSERT
1482	ldr		w0, [x3, ACT_PREEMPT_CNT]
1483	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
1484#endif
1485
1486	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1487	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
1488	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
1489	cbz		w0, no_asts							// If no asts, skip ahead
1490
1491	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle asts
1492
1493	/* At this point, we have ASTs and we need to check whether we are running in the
1494	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
1495	 * the PFZ since we don't want to handle getting a signal or getting suspended
1496	 * while holding a spinlock in userspace.
1497	 *
1498	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
1499	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
1500	 * to use it to indicate to userspace to come back to take a delayed
1501	 * preemption, at which point the ASTs will be handled. */
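	/*
	 * Illustrative C sketch of the branch below (commpage_is_in_pfz64 is the
	 * real helper; the rest is shorthand for this assembly):
	 *
	 *   if (commpage_is_in_pfz64(state->pc)) {
	 *       state->x[15] = 1;     // ask userspace to come back for the delayed preemption
	 *       goto no_asts;         // pretend there were no ASTs
	 *   } else {
	 *       goto user_take_ast;   // service the pending ASTs now
	 *   }
	 */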
1502	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
1503	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64
1504
1505	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
1506	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
1507	cbz		x0, restore_and_check_ast			// No, deal with other asts
1508
1509	mov		x0, #1
1510	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
1511	mov		x0, x19								// restore x0 to asts
1512	b		no_asts								// pretend we have no asts
1513
1514restore_and_check_ast:
1515	mov		x0, x19								// restore x0
1516	b	user_take_ast							// Service pending asts
1517no_asts:
1518
1519
1520#if PRECISE_USER_KERNEL_TIME
1521	mov		x19, x3						// Preserve thread pointer across function call
1522	PUSH_FRAME
1523	bl		EXT(recount_enter_user)
1524	POP_FRAME_WITHOUT_LR
1525	mov		x3, x19
1526#endif /* PRECISE_USER_KERNEL_TIME */
1527
1528#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
1529	/* Watchtower
1530	 *
1531	 * Here we attempt to enable NEON access for EL0. If the last entry into the
1532	 * kernel from user-space was due to an IRQ, the monitor will have disabled
1533	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
1534	 * check in with the monitor in order to reenable NEON for EL0 in exchange
1535	 * for routing IRQs through the monitor (2). This way the monitor will
1536	 * always 'own' either IRQs or EL0 NEON.
1537	 *
1538	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
1539	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
1540	 * here.
1541	 *
1542	 * EL0 user ________ IRQ                                            ______
1543	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
1544	 * EL3 monitor           \_/                                \___/
1545	 *
1546	 *                       (1)                                 (2)
1547	 */
1548
1549	mov		x0, #(CPACR_FPEN_ENABLE)
1550	msr		CPACR_EL1, x0
1551#endif
1552
1553	/* Establish this thread's debug state as the live state on the selected CPU. */
1554	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
1555	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
1556	ldr		x0, [x3, ACT_DEBUGDATA]
1557	cmp		x0, x1
1558	beq		L_skip_user_set_debug_state			// Skip if the live CPU debug state already matches this thread's; otherwise apply the thread's state
1559
1560
1561	PUSH_FRAME
1562	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
1563	POP_FRAME_WITHOUT_LR
1564	mrs		x3, TPIDR_EL1						// Reload thread pointer
1565	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
1566L_skip_user_set_debug_state:
1567
1568
1569	ldrsh	x0, [x4, CPU_TPIDR_EL0]
1570	msr		TPIDR_EL0, x0
1571
1572
1573	b		exception_return_unint_tpidr_x3
1574
1575exception_return:
1576	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1577exception_return_unint:
1578	mrs		x3, TPIDR_EL1					// Load thread pointer
1579exception_return_unint_tpidr_x3:
1580	mov		sp, x21						// Reload the pcb pointer
1581
1582#if !__ARM_KERNEL_PROTECT__
1583	/*
1584	 * Restore x18 only if the task has the entitlement that allows
1585	 * usage. Those are very few, and can move to something else
1586	 * once we use x18 for something more global.
1587	 *
1588	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
1589	 * that uses x18 as one of the global use cases (and will reset
1590	 * x18 later down below).
1591	 *
1592	 * It's also unconditionally skipped for translated threads,
1593	 * as those are another use case, one where x18 must be preserved.
1594	 */
1595	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
1596	mov		x18, #0
1597	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers
1598
1599exception_return_unint_tpidr_x3_restore_x18:
1600	ldr		x18, [sp, SS64_X18]
1601
1602#else /* !__ARM_KERNEL_PROTECT__ */
1603	/*
1604	 * If we are going to eret to userspace, we must return through the EL0
1605	 * eret mapping.
1606	 */
1607	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
1608	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1
1609
1610	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
1611	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
1612	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
1613	add		x1, x1, Lexception_return_restore_registers@pageoff
1614	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
1615	sub		x1, x1, x0											// Calculate delta
1616	add		x0, x2, x1											// Convert KVA to EL0 vector address
1617	br		x0
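	/*
	 * i.e. (illustrative): branch_target = ARM_KERNEL_PROTECT_EXCEPTION_START
	 *                      + (Lexception_return_restore_registers
	 *                         - ExceptionVectorsBase);
	 */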
1618
1619Lskip_el0_eret_mapping:
1620#endif /* !__ARM_KERNEL_PROTECT__ */
1621
1622Lexception_return_restore_registers:
1623	mov 	x0, sp								// x0 = &pcb
1624	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1625	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, x25, el0_state_allowed=1
1626
1627	msr		ELR_EL1, x1							// Load the return address into ELR
1628	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
1629
1630/* Restore special register state */
1631	ldr		w3, [sp, NS64_FPSR]
1632	ldr		w4, [sp, NS64_FPCR]
1633
1634	msr		FPSR, x3
1635	mrs		x5, FPCR
1636	CMSR FPCR, x5, x4, 1
16371:
1638	mov		x5, #0
1639
1640#if HAS_ARM_FEAT_SME
1641	and		x2, x2, #(PSR64_MODE_EL_MASK)
1642	cmp		x2, #(PSR64_MODE_EL0)
1643	// SPSR_EL1.M != EL0: no SME state to restore
1644	bne		Lno_sme_saved_state
1645
1646	mrs		x3, TPIDR_EL1
1647	add		x3, x3, ACT_UMATRIX_HDR
1648	ldr		x2, [x3]
1649	cbz		x2, Lno_sme_saved_state
1650	AUTDA_DIVERSIFIED x2, address=x3, diversifier=ACT_UMATRIX_HDR_DIVERSIFIER
1651
1652	ldr		x3, [x2, SME_SVCR]
1653	msr		SVCR, x3
1654	// SVCR.SM == 0: restore SVCR only (ZA is handled during context-switch)
1655	tbz		x3, #SVCR_SM_SHIFT, Lno_sme_saved_state
1656
1657	// SVCR.SM == 1: restore SVCR, Z, and P
1658	ldrh	w3, [x2, SME_SVL_B]
1659	add		x2, x2, SME_Z_P_ZA
1660	LOAD_OR_STORE_Z_P_REGISTERS	ldr, svl_b=x3, ss=x2
1661
1662	// The FPSIMD register file acts like a view into the lower 128 bits of
1663	// Z0-Z31.  While there's no harm reading it out during exception entry,
1664	// writing it back would truncate the Z0-Z31 values we just restored.
1665	b		Lskip_restore_neon_saved_state
1666Lno_sme_saved_state:
1667#endif /* HAS_ARM_FEAT_SME */
1668
1669	/* Restore arm_neon_saved_state64 */
1670	ldp		q0, q1, [x0, NS64_Q0]
1671	ldp		q2, q3, [x0, NS64_Q2]
1672	ldp		q4, q5, [x0, NS64_Q4]
1673	ldp		q6, q7, [x0, NS64_Q6]
1674	ldp		q8, q9, [x0, NS64_Q8]
1675	ldp		q10, q11, [x0, NS64_Q10]
1676	ldp		q12, q13, [x0, NS64_Q12]
1677	ldp		q14, q15, [x0, NS64_Q14]
1678	ldp		q16, q17, [x0, NS64_Q16]
1679	ldp		q18, q19, [x0, NS64_Q18]
1680	ldp		q20, q21, [x0, NS64_Q20]
1681	ldp		q22, q23, [x0, NS64_Q22]
1682	ldp		q24, q25, [x0, NS64_Q24]
1683	ldp		q26, q27, [x0, NS64_Q26]
1684	ldp		q28, q29, [x0, NS64_Q28]
1685	ldp		q30, q31, [x0, NS64_Q30]
1686#if HAS_ARM_FEAT_SME
1687Lskip_restore_neon_saved_state:
1688#endif
1689
1690
1691#if HAS_MTE
1692#if NEEDS_MTE_IRG_RESEED
1693	PACGA_IRG_RESEED x2, x3, x4
1694#endif
1695	// Switch GCR_EL1 Exclude Tag Mask to the userland one.
1696	mrs             x2, SPSR_EL1
1697	and             x2, x2, #(PSR64_MODE_EL_MASK)
1698	cmp             x2, #(PSR64_MODE_EL0)
1699	bne             Lno_gcr_mask_el0_reset
1700	eor		x2, x2, x2
1701	mov		x2, #(GCR_EL1_RRND_ASM)
1702	orr		x2, x2, #(GCR_EL1_EXCLUDE_TAGS_USER)
1703	msr		GCR_EL1, x2
1704#if ERET_NEEDS_ISB
1705	orr		x5, x5, #(1 << BIT_ISB_PENDING)
1706#endif /* ERET_NEEDS_ISB */
1707Lno_gcr_mask_el0_reset:
1708#endif /* HAS_MTE */
1709	// If sync_on_cswitch is set and ERET is not a context synchronization event (CSE),
1710	// issue an ISB now. Unconditionally clear the sync_on_cswitch flag.
1711	mrs		x1, TPIDR_EL1
1712	ldr		x1, [x1, ACT_CPUDATAP]
1713
1714	// Redefined for backporting.
1715#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__
1716	ldrb	w2, [x1, CPU_SYNC_ON_CSWITCH]
1717#if ERET_NEEDS_ISB
1718	// Set the bit, but don't sync; it will be synced shortly after this.
1719	orr		x5, x5, x2, lsl #(BIT_ISB_PENDING)
1720#else
1721	cbz		w2, 1f
1722	// Last chance, sync now.
1723	isb		sy
17241:
1725#endif  /* ERET_NEEDS_ISB */
1726#endif  /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) && !__ARM_KERNEL_PROTECT__ */
1727	strb	wzr, [x1, CPU_SYNC_ON_CSWITCH]
1728
1729
1730#if ERET_NEEDS_ISB
1731	// Apply any pending isb from earlier.
1732	tbz		x5, #(BIT_ISB_PENDING), Lskip_eret_isb
1733	isb		sy
1734Lskip_eret_isb:
1735#endif /* ERET_NEEDS_ISB */
1736
1737	/* Restore arm_saved_state64 */
1738
1739	// Skip x0, x1 - we're using them
1740	ldp		x2, x3, [x0, SS64_X2]
1741	ldp		x4, x5, [x0, SS64_X4]
1742	ldp		x6, x7, [x0, SS64_X6]
1743	ldp		x8, x9, [x0, SS64_X8]
1744	ldp		x10, x11, [x0, SS64_X10]
1745	ldp		x12, x13, [x0, SS64_X12]
1746	ldp		x14, x15, [x0, SS64_X14]
1747	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1748	// Skip x18 - already restored or zeroed above (handled below when __ARM_KERNEL_PROTECT__ is set)
1749	ldr		x19, [x0, SS64_X19]
1750	ldp		x20, x21, [x0, SS64_X20]
1751	ldp		x22, x23, [x0, SS64_X22]
1752	ldp		x24, x25, [x0, SS64_X24]
1753	ldp		x26, x27, [x0, SS64_X26]
1754	ldr		x28, [x0, SS64_X28]
1755	ldr		fp, [x0, SS64_FP]
1756	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1757
1758	// Restore stack pointer and our last two GPRs
1759	ldr		x1, [x0, SS64_SP]
1760	mov		sp, x1
1761
1762#if __ARM_KERNEL_PROTECT__
1763	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1764#endif /* __ARM_KERNEL_PROTECT__ */
1765
1766	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1767
1768#if __ARM_KERNEL_PROTECT__
1769	/* If we are going to eret to userspace, we must unmap the kernel. */
1770	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1771
1772	/* Update TCR to unmap the kernel. */
1773	MOV64		x18, TCR_EL1_USER
1774	msr		TCR_EL1, x18
1775
1776	/*
1777	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1778	 * each other due to the microarchitecture.
1779	 */
1780#if !defined(APPLE_ARM64_ARCH_FAMILY)
1781	isb		sy
1782#endif
1783
1784	/* Switch to the user ASID (low bit clear) for the task. */
1785	mrs		x18, TTBR0_EL1
1786	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
1787	msr		TTBR0_EL1, x18
1788	mov		x18, #0
1789
1790#if defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING)
1791	isb		sy
1792#endif /* defined(ERET_IS_NOT_CONTEXT_SYNCHRONIZING) */
1793
1794Lskip_ttbr1_switch:
1795#endif /* __ARM_KERNEL_PROTECT__ */
1796
1797	ERET_NO_STRAIGHT_LINE_SPECULATION
1798
1799user_take_ast:
1800	PUSH_FRAME
1801	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
1802	POP_FRAME_WITHOUT_LR
1803	b		check_user_asts								// Now try again
1804
1805	.text
1806	.align 2
1807preempt_underflow:
1808	mrs		x0, TPIDR_EL1
1809	str		x0, [sp, #-16]!						// We'll print thread pointer
1810	adr		x0, L_underflow_str					// Format string
1811	CALL_EXTERN panic							// Game over
1812
1813L_underflow_str:
1814	.asciz "Preemption count negative on thread %p"
1815.align 2
1816
1817#if MACH_ASSERT
1818	.text
1819	.align 2
1820preempt_count_notzero:
1821	mrs		x0, TPIDR_EL1
1822	str		x0, [sp, #-16]!						// We'll print thread pointer
1823	ldr		w0, [x0, ACT_PREEMPT_CNT]
1824	str		w0, [sp, #8]
1825	adr		x0, L_preempt_count_notzero_str				// Format string
1826	CALL_EXTERN panic							// Game over
1827
1828L_preempt_count_notzero_str:
1829	.asciz "preemption count not 0 on thread %p (%u)"
1830#endif /* MACH_ASSERT */
1831
1832#if __ARM_KERNEL_PROTECT__
1833	/*
1834	 * This symbol denotes the end of the exception vector/eret range; we page
1835	 * align it so that we can avoid mapping other text in the EL0 exception
1836	 * vector mapping.
1837	 */
1838	.text
1839	.align 14
1840	.globl EXT(ExceptionVectorsEnd)
1841LEXT(ExceptionVectorsEnd)
1842#endif /* __ARM_KERNEL_PROTECT__ */
1843
1844#if XNU_MONITOR && !CONFIG_SPTM
1845
1846/*
1847 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1848 * mostly concerned with setting up state for the normal fleh code.
1849 */
1850	.text
1851	.align 2
1852fleh_synchronous_from_ppl:
1853	ARM64_JUMP_TARGET
1854	/* Save x0. */
1855	mov		x15, x0
1856
1857	/* Grab the ESR. */
1858	mrs		x1, ESR_EL1							// Get the exception syndrome
1859
1860	/* If the stack pointer is corrupt, it will manifest either as a data abort
1861	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1862	 * these quickly by testing bit 5 of the exception class.
1863	 */
1864	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1865	mrs		x0, SP_EL0							// Get SP_EL0
1866
1867	/* Perform high level checks for stack corruption. */
1868	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
1869	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1870	cmp		x1, x2								// If we have a stack alignment exception
1871	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
1872	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1873	cmp		x1, x2								// If we have a data abort, we need to
1874	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer
1875
1876Ltest_pstack:
1877	/* Bounds check the PPL stack. */
1878	adrp	x10, EXT(pmap_stacks_start)@page
1879	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1880	adrp	x11, EXT(pmap_stacks_end)@page
1881	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1882	cmp		x0, x10
1883	b.lo	Lcorrupt_ppl_stack
1884	cmp		x0, x11
1885	b.hi	Lcorrupt_ppl_stack
1886
1887Lvalid_ppl_stack:
1888	/* Restore x0. */
1889	mov		x0, x15
1890
1891	/* Switch back to the kernel stack. */
1892	msr		SPSel, #0
1893	GET_PMAP_CPU_DATA x5, x6, x7
1894	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1895	mov		sp, x6
1896
1897	/* Hand off to the synchronous exception handler. */
1898	b		EXT(fleh_synchronous)
1899
1900Lcorrupt_ppl_stack:
1901	/* Restore x0. */
1902	mov		x0, x15
1903
1904	/* Hand off to the invalid stack handler. */
1905	b		fleh_invalid_stack
1906
1907fleh_fiq_from_ppl:
1908	ARM64_JUMP_TARGET
1909	SWITCH_TO_INT_STACK	tmp=x25
1910	b		EXT(fleh_fiq)
1911
1912fleh_irq_from_ppl:
1913	ARM64_JUMP_TARGET
1914	SWITCH_TO_INT_STACK	tmp=x25
1915	b		EXT(fleh_irq)
1916
1917fleh_serror_from_ppl:
1918	ARM64_JUMP_TARGET
1919	GET_PMAP_CPU_DATA x5, x6, x7
1920	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1921	mov		sp, x6
1922	b		EXT(fleh_serror)
1923
1924
1925
1926
1927	// x15: ppl call number
1928	// w10: ppl_state
1929	// x20: gxf_enter caller's DAIF
1930	.globl EXT(ppl_trampoline_start)
1931LEXT(ppl_trampoline_start)
1932
1933
1934#error "XPRR configuration error"
1935	cmp		x14, x21
1936	b.ne	Lppl_fail_dispatch
1937
1938	/* Verify the request ID. */
1939	cmp		x15, PMAP_COUNT
1940	b.hs	Lppl_fail_dispatch
1941
1942	GET_PMAP_CPU_DATA	x12, x13, x14
1943
1944	/* Mark this CPU as being in the PPL. */
1945	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1946
1947	cmp		w9, #PPL_STATE_KERNEL
1948	b.eq		Lppl_mark_cpu_as_dispatching
1949
1950	/* Check to see if we are trying to trap from within the PPL. */
1951	cmp		w9, #PPL_STATE_DISPATCH
1952	b.eq		Lppl_fail_dispatch_ppl
1953
1954
1955	/* Ensure that we are returning from an exception. */
1956	cmp		w9, #PPL_STATE_EXCEPTION
1957	b.ne		Lppl_fail_dispatch
1958
1959	// w10 carries the caller's ppl_state; for the exception-return path it is
1960	// set to PPL_STATE_EXCEPTION by CHECK_EXCEPTION_RETURN_DISPATCH_PPL.
1961	cmp		w10, #PPL_STATE_EXCEPTION
1962	b.ne		Lppl_fail_dispatch
1963
1964	/* This is an exception return; set the CPU to the dispatching state. */
1965	mov		w9, #PPL_STATE_DISPATCH
1966	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1967
1968	/* Find the save area, and return to the saved PPL context. */
1969	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1970	mov		sp, x0
1971	b		EXT(return_to_ppl)
1972
1973Lppl_mark_cpu_as_dispatching:
1974	cmp		w10, #PPL_STATE_KERNEL
1975	b.ne		Lppl_fail_dispatch
1976
1977	/* Mark the CPU as dispatching. */
1978	mov		w13, #PPL_STATE_DISPATCH
1979	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1980
1981	/* Switch to the regular PPL stack. */
1982	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1983	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1984
1985	// SP0 is thread stack here
1986	mov		x21, sp
1987	// SP0 is now PPL stack
1988	mov		sp, x9
1989
1990	/* Save the old stack pointer off in case we need it. */
1991	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1992
1993	/* Get the handler for the request */
1994	adrp	x9, EXT(ppl_handler_table)@page
1995	add		x9, x9, EXT(ppl_handler_table)@pageoff
1996	add		x9, x9, x15, lsl #3
1997	ldr		x10, [x9]
1998
1999	/* Branch to the code that will invoke the PPL request. */
2000	b		EXT(ppl_dispatch)
2001
2002Lppl_fail_dispatch_ppl:
2003	/* Switch back to the kernel stack. */
2004	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
2005	mov		sp, x10
2006
2007Lppl_fail_dispatch:
2008	/* Indicate that we failed. */
2009	mov		x15, #PPL_EXIT_BAD_CALL
2010
2011	/* Move the DAIF bits into the expected register. */
2012	mov		x10, x20
2013
2014	/* Return to kernel mode. */
2015	b		ppl_return_to_kernel_mode
2016
2017Lppl_dispatch_exit:
2018
2019	/* Indicate that we are cleanly exiting the PPL. */
2020	mov		x15, #PPL_EXIT_DISPATCH
2021
2022	/* Switch back to the original (kernel thread) stack. */
2023	mov		sp, x21
2024
2025	/* Move the saved DAIF bits. */
2026	mov		x10, x20
2027
2028	/* Clear the in-flight pmap pointer */
2029	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
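	// stlr has release semantics, so earlier PPL stores are observable before the clear.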
2030	stlr		xzr, [x13]
2031
2032	/* Clear the old stack pointer. */
2033	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
2034
2035	/*
2036	 * Mark the CPU as no longer being in the PPL.  We spin if our state
2037	 * machine is broken.
2038	 */
2039	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
2040	cmp		w9, #PPL_STATE_DISPATCH
2041	b.ne		.
2042	mov		w9, #PPL_STATE_KERNEL
2043	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
2044
2045	/* Return to the kernel. */
2046	b ppl_return_to_kernel_mode
2047
2048
2049
2050	.text
2051ppl_exit:
2052	ARM64_PROLOG
2053	/*
2054	 * If we are dealing with an exception, hand off to the first level
2055	 * exception handler.
2056	 */
2057	cmp		x15, #PPL_EXIT_EXCEPTION
2058	b.eq	Ljump_to_fleh_handler
2059
2060	/* If this was a panic call from the PPL, reinvoke panic. */
2061	cmp		x15, #PPL_EXIT_PANIC_CALL
2062	b.eq	Ljump_to_panic_trap_to_debugger
2063
2064	/*
2065	 * Stash off the original DAIF in the high bits of the exit code register.
2066	 * We could keep this in a dedicated register, but that would require us to copy it to
2067	 * an additional callee-save register below (e.g. x22), which in turn would require that
2068	 * register to be saved/restored at PPL entry/exit.
2069	 */
2070	add		x15, x15, x10, lsl #32
2071
2072	/* Load the preemption count. */
2073	mrs		x10, TPIDR_EL1
2074	ldr		w12, [x10, ACT_PREEMPT_CNT]
2075
2076	/* Detect underflow */
2077	cbnz	w12, Lno_preempt_underflow
2078	b		preempt_underflow
2079Lno_preempt_underflow:
2080
2081	/* Lower the preemption count. */
2082	sub		w12, w12, #1
2083
2084#if SCHED_HYGIENE_DEBUG
2085	/* Collect preemption disable measurement if necessary. */
2086
2087	/*
2088	 * Only collect measurement if this reenabled preemption,
2089	 * and SCHED_HYGIENE_MARKER is set.
2090	 */
2091	mov		x20, #SCHED_HYGIENE_MARKER
2092	cmp		w12, w20
2093	b.ne	Lskip_collect_measurement
2094
2095	/* Stash our return value and return reason. */
2096	mov		x20, x0
2097	mov		x21, x15
2098
2099	/* Collect measurement. */
2100	bl		EXT(_collect_preemption_disable_measurement)
2101
2102	/* Restore the return value and the return reason. */
2103	mov		x0, x20
2104	mov		x15, x21
2105	/* ... and w12, which is now 0. */
2106	mov		w12, #0
2107
2108	/* Restore the thread pointer into x10. */
2109	mrs		x10, TPIDR_EL1
2110
2111Lskip_collect_measurement:
2112#endif /* SCHED_HYGIENE_DEBUG */
2113
2114	/* Save the lowered preemption count. */
2115	str		w12, [x10, ACT_PREEMPT_CNT]
2116
2117	/* Skip ASTs if the preemption count is not zero. */
2118	cbnz	x12, Lppl_skip_ast_taken
2119
2120	/*
2121	 * Skip the AST check if interrupts were originally disabled.
2122	 * The original DAIF state prior to PPL entry is stored in the upper
2123	 * 32 bits of x15.
2124	 */
2125	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken
2126
2127	/* If there is no urgent AST, skip the AST. */
2128	ldr		x12, [x10, ACT_CPUDATAP]
2129	ldr		w14, [x12, CPU_PENDING_AST]
2130	tst		w14, AST_URGENT
2131	b.eq	Lppl_skip_ast_taken
2132
2133	/* Stash our return value and return reason. */
2134	mov		x20, x0
2135	mov		x21, x15
2136
2137	/* Handle the AST. */
2138	bl		EXT(ast_taken_kernel)
2139
2140	/* Restore the return value and the return reason. */
2141	mov		x15, x21
2142	mov		x0, x20
2143
2144Lppl_skip_ast_taken:
2145
2146	/* Extract caller DAIF from high-order bits of exit code */
2147	ubfx	x10, x15, #32, #32
2148	bfc		x15, #32, #32
2149	msr		DAIF, x10
2150
2151	/* Pop the stack frame. */
2152	ldp		x29, x30, [sp, #0x10]
2153	ldp		x20, x21, [sp], #0x20
2154
2155	/* Check to see if this was a bad request. */
2156	cmp		x15, #PPL_EXIT_BAD_CALL
2157	b.eq	Lppl_bad_call
2158
2159	/* Return. */
2160	ARM64_STACK_EPILOG
2161
2162	.align 2
2163Ljump_to_fleh_handler:
2164	br	x25
2165
2166	.align 2
2167Ljump_to_panic_trap_to_debugger:
2168	b		EXT(panic_trap_to_debugger)
2169
2170Lppl_bad_call:
2171	/* Panic. */
2172	adrp	x0, Lppl_bad_call_panic_str@page
2173	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2174	b		EXT(panic)
2175
2176	.text
2177	.align 2
2178	.globl EXT(ppl_dispatch)
2179LEXT(ppl_dispatch)
2180	/*
2181	 * Save a couple of important registers (implementation detail; x12 has
2182	 * the PPL per-CPU data address; x13 is not actually interesting).
2183	 */
2184	stp		x12, x13, [sp, #-0x10]!
2185
2186	/*
2187	 * Restore the original AIF state, forcing D set to mask debug exceptions
2188	 * while PPL code runs.
2189	 */
2190	orr		x8, x20, DAIF_DEBUGF
2191	msr		DAIF, x8
2192
2193	/*
2194	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
2195	 * but the exception vectors will deal with this properly.
2196	 */
2197
2198	/* Invoke the PPL method. */
2199#ifdef HAS_APPLE_PAC
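	// blraa authenticates the handler pointer (x10) with the IA key, using the
	// handler-table slot address (x9) as the modifier, before branching.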
2200	blraa		x10, x9
2201#else
2202	blr		x10
2203#endif
2204
2205	/* Mask all exceptions (set all DAIF bits). */
2206	msr		DAIFSet, #(DAIFSC_ALL)
2207
2208	/* Restore those important registers. */
2209	ldp		x12, x13, [sp], #0x10
2210
2211	/* Mark this as a regular return, and hand off to the return path. */
2212	b		Lppl_dispatch_exit
2213
2214	.text
2215	.align 2
2216	.globl EXT(ppl_bootstrap_dispatch)
2217LEXT(ppl_bootstrap_dispatch)
2218	/* Verify the PPL request. */
2219	cmp		x15, PMAP_COUNT
2220	b.hs	Lppl_fail_bootstrap_dispatch
2221
2222	/* Get the requested PPL routine. */
2223	adrp	x9, EXT(ppl_handler_table)@page
2224	add		x9, x9, EXT(ppl_handler_table)@pageoff
2225	add		x9, x9, x15, lsl #3
2226	ldr		x10, [x9]
2227
2228	/* Invoke the requested PPL routine. */
2229#ifdef HAS_APPLE_PAC
2230	blraa		x10, x9
2231#else
2232	blr		x10
2233#endif
2234	LOAD_PMAP_CPU_DATA	x9, x10, x11
2235
2236	/* Clear the in-flight pmap pointer */
2237	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
2238	stlr		xzr, [x9]
2239
2240	/* Stash off the return value */
2241	mov		x20, x0
2242	/* Drop the preemption count */
2243	bl		EXT(_enable_preemption)
2244	mov		x0, x20
2245
2246	/* Pop the stack frame. */
2247	ldp		x29, x30, [sp, #0x10]
2248	ldp		x20, x21, [sp], #0x20
2249#if __has_feature(ptrauth_returns)
2250	retab
2251#else
2252	ret
2253#endif
2254
2255Lppl_fail_bootstrap_dispatch:
2256	/* Pop our stack frame and panic. */
2257	ldp		x29, x30, [sp, #0x10]
2258	ldp		x20, x21, [sp], #0x20
2259#if __has_feature(ptrauth_returns)
2260	autibsp
2261#endif
2262	adrp	x0, Lppl_bad_call_panic_str@page
2263	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2264	b		EXT(panic)
2265
2266	.text
2267	.align 2
2268	.globl EXT(ml_panic_trap_to_debugger)
2269LEXT(ml_panic_trap_to_debugger)
2270	ARM64_PROLOG
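	// Save the caller's DAIF state in x10 and mask interrupts while inspecting the
	// PPL state; x10 is restored at Lnot_in_ppl_dispatch if we are not in dispatch.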
2271	mrs		x10, DAIF
2272	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
2273
2274	adrp		x12, EXT(pmap_ppl_locked_down)@page
2275	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
2276	cbz		w12, Lnot_in_ppl_dispatch
2277
2278	LOAD_PMAP_CPU_DATA	x11, x12, x13
2279
2280	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
2281	cmp		w12, #PPL_STATE_DISPATCH
2282	b.ne		Lnot_in_ppl_dispatch
2283
2284	/* Indicate (for the PPL->kernel transition) that we are panicking. */
2285	mov		x15, #PPL_EXIT_PANIC_CALL
2286
2287	/* Restore the old stack pointer, as we can't push onto the PPL stack after we exit the PPL. */
2288	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
2289	mov		sp, x12
2290
2291	mrs		x10, DAIF
2292	mov		w13, #PPL_STATE_PANIC
2293	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
2294
2295	/**
2296	 * When we panic in PPL, we might have un-synced PTE updates. Shoot down
2297	 * all the TLB entries.
2298	 *
2299	 * A check must be done here against CurrentEL because the alle1is flavor
2300	 * of tlbi is not available to EL1, but the vmalle1is flavor is. When PPL
2301	 * runs at GL2, we can issue an alle2is and an alle1is tlbi to kill all
2302	 * the TLB entries. When PPL runs at GL1, as a guest or on a pre-H13
2303	 * platform, we issue a vmalle1is tlbi instead.
2304	 *
2305	 * Note that we only do this after passing the `PPL_STATE_DISPATCH` check
2306	 * because if we did this for every panic, including the ones triggered
2307	 * by fabric problems, we may be stuck at the DSB below and trigger an AP
2308	 * watchdog.
2309	 */
2310	mrs		x12, CurrentEL
2311	cmp		x12, PSR64_MODE_EL2
2312	bne		Lnot_in_gl2
2313	tlbi		alle2is
2314	tlbi		alle1is
2315	b		Ltlb_invalidate_all_done
2316Lnot_in_gl2:
2317	tlbi		vmalle1is
2318Ltlb_invalidate_all_done:
2319	dsb		ish
2320	isb
2321
2322	/* Now we are ready to exit the PPL. */
2323	b		ppl_return_to_kernel_mode
2324Lnot_in_ppl_dispatch:
2325	msr		DAIF, x10
2326	ret
2327
2328	.data
2329Lppl_bad_call_panic_str:
2330	.asciz "ppl_dispatch: failed due to bad arguments/state"
2331#else /* XNU_MONITOR && !CONFIG_SPTM */
2332	.text
2333	.align 2
2334	.globl EXT(ml_panic_trap_to_debugger)
2335LEXT(ml_panic_trap_to_debugger)
2336	ARM64_PROLOG
2337	ret
2338#endif /* XNU_MONITOR && !CONFIG_SPTM */
2339
2340#if CONFIG_SPTM
2341	.text
2342	.align 2
2343
2344	.globl EXT(_sptm_pre_entry_hook)
2345LEXT(_sptm_pre_entry_hook)
2346	/* Push a frame. */
2347	ARM64_STACK_PROLOG
2348	PUSH_FRAME
2349	stp		x20, x21, [sp, #-0x10]!
2350
2351	/* Save arguments to SPTM function and SPTM function id. */
2352	mov		x20, x16
2353	stp		x0, x1, [sp, #-0x40]!
2354	stp		x2, x3, [sp, #0x10]
2355	stp		x4, x5, [sp, #0x20]
2356	stp		x6, x7, [sp, #0x30]
2357
2358	/* Increase the preemption count. */
2359	mrs		x9, TPIDR_EL1
2360	cbz		x9, Lskip_preemption_check_sptmhook
2361	ldr		w10, [x9, ACT_PREEMPT_CNT]
2362	add		w10, w10, #1
2363	str		w10, [x9, ACT_PREEMPT_CNT]
2364
2365	/* Update SPTM trace state to see if trace entries were generated post-exit */
2366
2367#if SCHED_HYGIENE_DEBUG
2368	/* Prepare preemption disable measurement, if necessary. */
2369
2370	/* Only prepare if we actually disabled preemption. */
2371	cmp		w10, #1
2372	b.ne	Lskip_prepare_measurement_sptmhook
2373
2374	/* Don't prepare if measuring is off completely. */
2375	adrp	x10, _sched_preemption_disable_debug_mode@page
2376	add		x10, x10, _sched_preemption_disable_debug_mode@pageoff
2377	ldr		w10, [x10]
2378	cmp		w10, #0
2379	b.eq	Lskip_prepare_measurement_sptmhook
2380
2381	/* Call prepare function with thread pointer as first arg. */
2382	bl		EXT(_prepare_preemption_disable_measurement)
2383
2384Lskip_prepare_measurement_sptmhook:
2385#endif /* SCHED_HYGIENE_DEBUG */
2386Lskip_preemption_check_sptmhook:
2387	/* assert we're not calling from guarded mode */
2388	mrs		x14, CurrentG
2389	cmp		x14, #0
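	// Hang here (branch-to-self) if CurrentG indicates we are in guarded mode.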
2390	b.ne	.
2391
2392	/* Restore arguments to SPTM function and SPTM function id. */
2393	ldp		x6, x7, [sp, #0x30]
2394	ldp		x4, x5, [sp, #0x20]
2395	ldp		x2, x3, [sp, #0x10]
2396	ldp		x0, x1, [sp]
2397	add		sp, sp, #0x40
2398	mov		x16, x20
2399
2400	ldp		x20, x21, [sp], #0x10
2401	POP_FRAME
2402	ARM64_STACK_EPILOG EXT(_sptm_pre_entry_hook)
2403
2404	.align 2
2405	.globl EXT(_sptm_post_exit_hook)
2406LEXT(_sptm_post_exit_hook)
2407	ARM64_STACK_PROLOG
2408	PUSH_FRAME
2409	stp		x20, x21, [sp, #-0x10]!
2410
2411	/* Save SPTM return value(s) */
2412	stp		x0, x1, [sp, #-0x40]!
2413	stp		x2, x3, [sp, #0x10]
2414	stp		x4, x5, [sp, #0x20]
2415	stp		x6, x7, [sp, #0x30]
2416
2417
2418	/* Load the preemption count. */
2419	mrs		x0, TPIDR_EL1
2420	cbz		x0, Lsptm_skip_ast_taken_sptmhook
2421	ldr		w12, [x0, ACT_PREEMPT_CNT]
2422
2423	/* Detect underflow */
2424	cbnz	w12, Lno_preempt_underflow_sptmhook
2425	/* No need to clean up the stack, as preempt_underflow calls panic */
2426	b		preempt_underflow
2427Lno_preempt_underflow_sptmhook:
2428
2429	/* Lower the preemption count. */
2430	sub		w12, w12, #1
2431
2432#if SCHED_HYGIENE_DEBUG
2433	/* Collect preemption disable measurement if necessary. */
2434
2435	/*
2436	 * Only collect measurement if this reenabled preemption,
2437	 * and SCHED_HYGIENE_MARKER is set.
2438	 */
2439	mov		x20, #SCHED_HYGIENE_MARKER
2440	cmp		w12, w20
2441	b.ne	Lskip_collect_measurement_sptmhook
2442
2443	/* Collect measurement. */
2444	bl		EXT(_collect_preemption_disable_measurement)
2445
2446	/* Restore w12, which is now 0. */
2447	mov		w12, #0
2448
2449	/* Restore x0 as the thread pointer */
2450	mrs		x0, TPIDR_EL1
2451
2452Lskip_collect_measurement_sptmhook:
2453#endif /* SCHED_HYGIENE_DEBUG */
2454
2455	/* Save the lowered preemption count. */
2456	str		w12, [x0, ACT_PREEMPT_CNT]
2457
2458	/* Skip ASTs if the preemption count is not zero. */
2459	cbnz	w12, Lsptm_skip_ast_taken_sptmhook
2460
2461	/**
2462	 * Skip the AST check if interrupts were originally disabled. The original
2463	 * DAIF value needs to be placed into a callee-saved register so that the
2464	 * value is preserved across the ast_taken_kernel() call.
2465	 */
2466	mrs		x20, DAIF
2467	tbnz	x20, #(DAIF_IRQF_SHIFT), Lsptm_skip_ast_taken_sptmhook
2468
2469	/* If there is no urgent AST, skip the AST. */
2470	ldr		x12, [x0, ACT_CPUDATAP]
2471	ldr		x14, [x12, CPU_PENDING_AST]
2472	tst		x14, AST_URGENT
2473	b.eq	Lsptm_skip_ast_taken_sptmhook
2474
2475	/* Handle the AST. This call requires interrupts to be disabled. */
2476	msr		DAIFSet, #(DAIFSC_ALL)
2477	bl		EXT(ast_taken_kernel)
2478	msr		DAIF, x20
2479
2480Lsptm_skip_ast_taken_sptmhook:
2481
2482	/* Restore SPTM return value(s) */
2483	ldp		x6, x7, [sp, #0x30]
2484	ldp		x4, x5, [sp, #0x20]
2485	ldp		x2, x3, [sp, #0x10]
2486	ldp		x0, x1, [sp]
2487	add		sp, sp, #0x40
2488
2489	/* Return. */
2490	ldp		x20, x21, [sp], 0x10
2491	POP_FRAME
2492	ARM64_STACK_EPILOG EXT(_sptm_post_exit_hook)
2493#endif /* CONFIG_SPTM */
2494
2495#if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
2496/**
2497 * Record debug data for a panic lockdown event
2498 * Clobbers x0, x1, x2
2499 */
2500	.text
2501	.align 2
2502	.global EXT(panic_lockdown_record_debug_data)
2503LEXT(panic_lockdown_record_debug_data)
2504	adrp	x0, EXT(debug_panic_lockdown_initiator_state)@page
2505	add		x0, x0, EXT(debug_panic_lockdown_initiator_state)@pageoff
2506
2507	/*
2508	 * To synchronize accesses to the debug state, we use the initiator PC as a
2509	 * "lock". It starts out at zero and we try to swap in our initiator's PC
2510	 * (which is trivially non-zero) to acquire the debug state and become the
2511	 * initiator of record.
2512	 *
2513	 * Note that other CPUs which are not the initiator of record may still
2514	 * initiate panic lockdown (potentially before the initiator of record does
2515	 * so) and so this debug data should only be used as a hint for the
2516	 * initiating CPU rather than a guarantee of which CPU initiated lockdown
2517	 * first.
2518	 */
2519	mov		x1, #0
2520	add		x2, x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_PC
2521	cas		x1, lr, [x2]
2522	/* If there's a non-zero value there already, we aren't the first. Skip. */
2523	cbnz	x1, Lpanic_lockdown_record_debug_data_done
2524
2525	/*
2526	 * We're the first and have exclusive access to the debug structure!
2527	 * Record all our data.
2528	 */
2529	mov		x1, sp
2530	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_SP]
2531
2532	mrs		x1, TPIDR_EL1
2533	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_TPIDR]
2534
2535	mrs		x1, MPIDR_EL1
2536	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_MPIDR]
2537
2538	mrs		x1, ESR_EL1
2539	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ESR]
2540
2541	mrs		x1, ELR_EL1
2542	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ELR]
2543
2544	mrs		x1, FAR_EL1
2545	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_FAR]
2546
2547	/* Sync and then read the timer */
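	// The DSB completes prior memory accesses and the ISB keeps the CNTVCT_EL0
	// read from being reordered ahead of them, giving an accurate timestamp.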
2548	dsb		sy
2549	isb
2550	mrs		x1, CNTVCT_EL0
2551	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_TIMESTAMP]
2552
2553Lpanic_lockdown_record_debug_data_done:
2554	ret
2555#endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
2556
2557/* ARM64_TODO Is globals_asm.h needed? */
2558//#include	"globals_asm.h"
2559
2560/* vim: set ts=4: */
2561