xref: /xnu-11215.81.4/osfmk/arm64/locore.s (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
#include "dwarf_unwind.h"

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif

#if XNU_MONITOR && !CONFIG_SPTM
/*
 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
 *
 * Checks if an exception was taken from the PPL, and if so, trampolines back
 * into the PPL.
 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
 *         exception was taken while in the PPL.
 */
.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp		x26, xzr
	b.eq		1f

	/* Return to the PPL. */
	mov		x15, #0
	mov		w10, #PPL_STATE_EXCEPTION
#error "XPRR configuration error"
1:
.endmacro


#endif /* XNU_MONITOR && !CONFIG_SPTM */

#if CONFIG_SPTM
#include <sptm/sptm_xnu.h>
#include <sptm/sptm_common.h>
/*
 * Panic lockdown is a security enhancement which makes certain types of
 * exceptions (generally, PAC failures and sync exceptions taken with async
 * exceptions masked) and panics fatal against attackers with kernel R/W. It
 * does this through a trapdoor panic bit protected by the SPTM.
 * When this bit is set, TXM will refuse to authorize new code mappings, which,
 * ideally, renders the system unusable even if the attacker gains control over
 * XNU. Additionally, when this bit is set XNU will refuse to handle any sync
 * exceptions originating from user space. This makes implementing further stages
 * of an exploit challenging, as it prevents user space from driving the kernel.
 */
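/*
 * Illustrative sketch only (not the literal implementation): the EL0 gate in
 * fleh_synchronous below amounts to
 *
 *     if (*sptm_xnu_triggered_panic_ptr != 0)
 *         spin_forever();    // refuse to service the user sync exception
 *
 * implemented as a byte load through sptm_xnu_triggered_panic_ptr, a pointer
 * to the SPTM-protected panic bit, followed by a branch to
 * Lblocked_user_sync_exception.
 */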

/*
 * Inform the SPTM that XNU has panicked (or, rather, must panic). This is
 * provided as a macro rather than a function since it's just one instruction
 * on release and it avoids the need to spill a return address unless the
 * macro caller explicitly needs to preserve LR.
 *
 * On CONFIG_XNUPOST, this macro returns 1 in x0 if a simulated lockdown was
 * performed, 0 otherwise.
 *
 * This macro preserves callee-saved registers but clobbers all others.
 */
.macro BEGIN_PANIC_LOCKDOWN unused
#if DEVELOPMENT || DEBUG
	/*
	 * Forcefully clobber all caller-saved GPRs on DEVELOPMENT/DEBUG so we
	 * don't accidentally violate our contract with SPTM.
	 */
	mov		x0, #0
	mov		x1, #0
	mov		x2, #0
	mov		x3, #0
	mov		x4, #0
	mov		x5, #0
	mov		x6, #0
	mov		x7, #0
	mov		x8, #0
	mov		x9, #0
	mov		x10, #0
	mov		x11, #0
	mov		x12, #0
	mov		x13, #0
	mov		x14, #0
	mov		x15, #0
	mov		x16, #0
	mov		x17, #0
	mov		x18, #0

	/* Attempt to record the debug trace */
	bl		EXT(panic_lockdown_record_debug_data)

#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_XNUPOST
	mrs		x0, TPIDR_EL1
	/*
	 * If we hit this with a null TPIDR, it's likely that this was an unexpected
	 * exception in early boot rather than an expected one as part of a test.
	 * Trigger lockdown.
	 */
	cbz		x0, Lbegin_panic_lockdown_real_\@
	ldr		x1, [x0, TH_EXPECTED_FAULT_HANDLER]
	/* Is a fault handler installed? */
	cbz		x1, Lbegin_panic_lockdown_real_\@

	/* Do the VA bits of ELR match the expected fault PC? */
	ldr		x1, [x0, TH_EXPECTED_FAULT_PC]
	mrs		x2, ELR_EL1
	mov		x3, #((1 << (64 - T1SZ_BOOT - 1)) - 1)
	and		x4, x1, x3
	and		x5, x2, x3
	cmp		x4, x5
	b.eq	Lbegin_panic_lockdown_simulated_\@
	/* If we had an expected PC but didn't hit it, fail out */
	cbnz	x1, Lbegin_panic_lockdown_real_\@

	/* Alternatively, do the FAR VA bits match the expected fault address? */
	ldr		x1, [x0, TH_EXPECTED_FAULT_ADDR]
	mrs		x2, FAR_EL1
	and		x4, x1, x3
	and		x5, x2, x3
	cmp		x4, x5
	b.eq	Lbegin_panic_lockdown_simulated_\@

Lbegin_panic_lockdown_real_\@:
#endif /* CONFIG_XNUPOST */
	/*
	 * The sptm_xnu_panic_begin routine is guaranteed to lead to the panic bit
	 * being set.
	 */
	bl		EXT(sptm_xnu_panic_begin)
#if CONFIG_XNUPOST
	mov		x0, #0 // not a simulated lockdown
	b		Lbegin_panic_lockdown_continue_\@
Lbegin_panic_lockdown_simulated_\@:
	/*
	 * We hit lockdown with a matching exception handler installed.
	 * Since this is an expected test exception, skip setting the panic bit
	 * (since this would kill the system) and instead set a bit in the test
	 * handler.
	 */
	mov		x0, #1 // this is a simulated lockdown!
	adrp	x1, EXT(xnu_post_panic_lockdown_did_fire)@page
	strb	w0, [x1, EXT(xnu_post_panic_lockdown_did_fire)@pageoff]
	mov		lr, xzr // trash LR to ensure callers don't rely on it
Lbegin_panic_lockdown_continue_\@:
#endif /* CONFIG_XNUPOST */
.endmacro
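/*
 * Typical call pattern (see Lfleh_synchronous_panic_lockdown below): since the
 * macro clobbers all caller-saved registers, live values are parked in
 * callee-saved registers around it, e.g.
 *
 *     mov		x26, x0			// preserve sleh arguments
 *     BEGIN_PANIC_LOCKDOWN
 *     mov		x0, x26			// restore them afterwards
 */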
#endif /* CONFIG_SPTM */

/*
 * MAP_KERNEL
 *
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
.macro MAP_KERNEL
#if __ARM_KERNEL_PROTECT__
	/* Switch to the kernel ASID (low bit set) for the task. */
	mrs		x18, TTBR0_EL1
	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr		TTBR0_EL1, x18

	/*
	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
	 * to the TTBRs and writes to the TCR should be ensured by the
	 * microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
	isb		sy
#endif

	/*
	 * Update the TCR to map the kernel now that we are using the kernel
	 * ASID.
	 */
	MOV64		x18, TCR_EL1_BOOT
	msr		TCR_EL1, x18
	isb		sy
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro
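/*
 * Note (assumed pmap convention, per the "low bit set" comment above): each
 * task's user ASID is expected to be paired with a kernel ASID that differs
 * only in the low bit, so ORing that bit into TTBR0_EL1 switches to the
 * kernel-inclusive address space without a full translation table swap.
 */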

/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR
#if __ARM_KERNEL_PROTECT__
	/*
	 * Find the kernelcache table for the exception vectors by accessing
	 * the per-CPU data.
	 */
	mrs		x18, TPIDR_EL1
	ldr		x18, [x18, ACT_CPUDATAP]
	ldr		x18, [x18, CPU_EXC_VECTORS]

	/*
	 * Get the handler for this exception and jump to it.
	 */
	ldr		x18, [x18, #($1 << 3)]
	br		x18
#else
	b		$0
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro
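/*
 * The `$1 << 3` above converts a vector index into a byte offset:
 * exc_vectors_table is an array of .quad (8-byte) entries, so entry N lives
 * at offset N * 8.
 */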

/*
 * CHECK_KERNEL_STACK
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: this runs before registers are saved (in case
 * we can't save to the kernel stack).
 *
 * Expects:
 *	{x0, x1} - saved
 *	x1 - Exception syndrome
 *	sp - Saved state
 *
 * Seems like we need an unused argument to the macro for the \@ syntax to
 * work.
 */
.macro CHECK_KERNEL_STACK unused
	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a stack alignment exception
	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a data abort, we need to
	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
	mrs		x0, SP_EL0					// Get SP_EL0
	mrs		x1, TPIDR_EL1						// Get thread pointer
Ltest_kstack_\@:
	LOAD_KERN_STACK_TOP	dst=x2, src=x1, tmp=x3	// Get top of kernel stack
	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
	cmp		x0, x2								// if (SP_EL0 >= kstack top)
	b.ge	Ltest_istack_\@						//    jump to istack test
	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
	b.gt	Lvalid_stack_\@						//    stack pointer valid
Ltest_istack_\@:
	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
	cmp		x0, x2								// if (SP_EL0 >= istack top)
	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
	cmp		x0, x3								// if (SP_EL0 > istack bottom)
	b.gt	Lvalid_stack_\@						//    stack pointer valid
Lcorrupt_stack_\@:
	ldp		x2, x3, [sp], #16
	ldp		x0, x1, [sp], #16
	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
	mrs		x0, SP_EL0					// Get SP_EL0
	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov		x0, sp								// Copy exception frame pointer to x0
	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
	b		fleh_dispatch64
Lvalid_stack_\@:
	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
.endmacro
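/*
 * In C-like terms, the check above accepts SP_EL0 iff
 *
 *     (kstack_bottom < SP_EL0 && SP_EL0 < kstack_top) ||
 *     (istack_bottom < SP_EL0 && SP_EL0 < istack_top)
 *
 * (signed compares, both bounds exclusive); anything else builds a fatal
 * exception frame and dispatches to fleh_invalid_stack.
 */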

/*
 * CHECK_EXCEPTION_CRITICAL_REGION
 *
 * Checks if the exception occurred within the range [\vector_begin, \vector_end).
 * If so, jumps to \fail_label. Otherwise, continues.
 * This is useful for avoiding infinite exception loops.
 *
 * Clobbers x18, NZCV.
 */
.macro CHECK_EXCEPTION_CRITICAL_REGION vector_begin, vector_end, fail_label
	/*
	 * We need two registers to do a compare but only have x18 free without
	 * spilling. We can't safely spill to memory yet, however, because doing so
	 * may fault. It's evil, but since we're operating on ELR here we can
	 * temporarily spill into it to get another free register, as long as we put
	 * everything back at the end.
	 */
	mrs		x18, ELR_EL1
	msr		ELR_EL1, x19

	adrp	x19, \vector_begin@PAGE
	add		x19, x19, \vector_begin@PAGEOFF
	cmp		x18, x19 /* HS if at or above (suspect), LO if below (safe) */
	adrp	x19, \vector_end@PAGE
	add		x19, x19, \vector_end@PAGEOFF
	/*
	 * If ELR >= \vector_begin (HS), set flags for ELR - \vector_end. LO here
	 * indicates we are in range.
	 * Otherwise, set HS (C).
	 */
	ccmp	x18, x19, #0b0010 /* C/HS */, HS
	/* Unspill x19/fix up ELR */
	mrs		x19, ELR_EL1
	msr		ELR_EL1, x18
	mov		x18, #0
	/* If we're in the range, fail out */
	b.lo	\fail_label
.endmacro
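/*
 * Net effect of the cmp/ccmp pair above, expressed as C:
 *
 *     in_range = (ELR_EL1 >= vector_begin) && (ELR_EL1 < vector_end);
 *
 * The ccmp only performs its compare when the first cmp left HS; otherwise it
 * forces the flags to HS, so the final b.lo is taken exactly when ELR falls
 * inside [\vector_begin, \vector_end).
 */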

/*
 * CHECK_EXCEPTION_STACK
 *
 * Verifies that SP1 is within the exception stack and continues if it is.
 * If not, jumps to \invalid_stack_label, as we have nothing to fall back on.
 *
 * (out) x18: The unauthenticated CPU_EXCEPSTACK_TOP used for the comparison, or
 *            zero if the check could not be performed (such as because the
 *            thread pointer was invalid).
 *
 * Clobbers NZCV.
 */
.macro CHECK_EXCEPTION_STACK invalid_stack_label
	mrs		x18, TPIDR_EL1					// Get thread pointer
	/*
	 * The thread pointer might be invalid during early boot.
	 * Return zero in x18 to indicate that we failed to execute the check.
	 */
	cbz		x18, Lskip_stack_check_\@
	ldr		x18, [x18, ACT_CPUDATAP]
	cbz		x18, \invalid_stack_label		// If thread context is set, cpu data should be too
	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
	cmp		sp, x18
	b.gt	\invalid_stack_label			// Fail if above exception stack top
	sub		x18, x18, EXCEPSTACK_SIZE_NUM	// Find bottom of exception stack
	cmp		sp, x18
	b.lt	\invalid_stack_label			// Fail if below exception stack bottom
	add		x18, x18, EXCEPSTACK_SIZE_NUM	// Return stack top in x18
Lskip_stack_check_\@:
	/* FALLTHROUGH */
.endmacro
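/*
 * Equivalent logic, with top = cpu_data->cpu_excepstack_top:
 *
 *     if (sp > top || sp < top - EXCEPSTACK_SIZE_NUM)
 *         goto invalid_stack_label;
 *
 * The (unauthenticated) top is left in x18 for callers that want to re-verify
 * it, or zero if TPIDR_EL1 was not yet set up.
 */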

#if __ARM_KERNEL_PROTECT__
	.section __DATA_CONST,__const
	.align 3
	.globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
	/*
	 * Table of exception handlers.
	 * These handlers sometimes contain deadloops.
	 * It's nice to have symbols for them when debugging.
	 */
	.quad el1_sp0_synchronous_vector_long
	.quad el1_sp0_irq_vector_long
	.quad el1_sp0_fiq_vector_long
	.quad el1_sp0_serror_vector_long
	.quad el1_sp1_synchronous_vector_long
	.quad el1_sp1_irq_vector_long
	.quad el1_sp1_fiq_vector_long
	.quad el1_sp1_serror_vector_long
	.quad el0_synchronous_vector_64_long
	.quad el0_irq_vector_64_long
	.quad el0_fiq_vector_64_long
	.quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */

	.text
#if __ARM_KERNEL_PROTECT__
	/*
	 * We need this to be on a page boundary so that we can avoid mapping
	 * other text along with it.  As this must be on the VM page boundary
	 * (due to how the coredumping code currently works), this will be a
	 * 16KB page boundary.
	 */
	.align 14
#else
	.align 12
#endif /* __ARM_KERNEL_PROTECT__ */
	.globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

	.text
	.align 7
Lel1_sp0_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

	.text
	.align 7
Lel1_sp0_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

	.text
	.align 7
Lel1_sp0_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

	.text
	.align 7
Lel1_sp1_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

	.text
	.align 7
Lel1_sp1_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

	.text
	.align 7
Lel1_sp1_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

	.text
	.align 7
Lel1_sp1_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

	.text
	.align 7
Lel0_synchronous_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

	.text
	.align 7
Lel0_irq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

	.text
	.align 7
Lel0_fiq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

	.text
	.align 7
Lel0_serror_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

	/* Fill out the rest of the page */
	.align 12

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/



.macro EL1_SP0_VECTOR
	msr		SPSel, #0							// Switch to SP0
	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov		x0, sp								// Copy saved state pointer to x0
.endmacro
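/*
 * Note that EL1_SP0_VECTOR fills in only x0-x3, SP, and the saved-state
 * flavor; fleh_dispatch64 (via SPILL_REGISTERS) completes the rest of the
 * arm_saved_state64 frame before any handler runs.
 */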

.macro EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
	// SWITCH_TO_INT_STACK requires a clobberable tmp register, but at this
	// point in the exception vector we can't spare the extra GPR.  Instead note
	// that EL1_SP0_VECTOR ends with x0 == sp and use this to unclobber x0.
	mrs		x1, TPIDR_EL1
	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
	mov		x0, sp
	mov		sp, x1
.endmacro

el1_sp0_synchronous_vector_long:
	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
	mrs		x1, ESR_EL1							// Get the exception syndrome
	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * for these quickly by testing bit 5 of the exception class; the precise
	 * comparison is done in CHECK_KERNEL_STACK.
	 */
	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
	CHECK_KERNEL_STACK
Lkernel_stack_valid:
	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
	add		x1, x1, EXT(fleh_synchronous)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
	b		fleh_dispatch64

el1_sp0_irq_vector_long:
	EL1_SP0_VECTOR
	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
	add		x1, x1, EXT(fleh_irq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el1_sp0_fiq_vector_long:
	// ARM64_TODO write optimized decrementer
	EL1_SP0_VECTOR
	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
	add		x1, x1, EXT(fleh_fiq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el1_sp0_serror_vector_long:
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_serror)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

.macro EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to exception frame
	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
.if \set_x0_to_exception_frame_ptr
	mov		x0, sp								// Copy saved state pointer to x0
.endif
.endmacro

el1_sp1_synchronous_vector_long:
	/*
	 * Before making our first (potentially faulting) memory access, check if we
	 * previously tried and failed to execute this vector. If we did, it's not
	 * going to work this time either, so let's just spin.
	 */
#if CONFIG_SPTM
	/*
	 * This check is doubly important for devices which support panic lockdown,
	 * as we use it to ensure that we can take only a bounded number of
	 * exceptions on SP1 while trying to spill before we give up on spilling and
	 * lock down anyway.
	 *
	 * Note, however, that we only check if we took an exception inside this
	 * vector. Although an attacker could cause exceptions outside this routine,
	 * they can only do this a finite number of times before overflowing the
	 * exception stack (causing CHECK_EXCEPTION_STACK to fail) since we subtract
	 * from SP inside the checked region and do not reload SP from memory before
	 * we hit the post-spill lockdown point in fleh_synchronous_sp1.
	 */
#endif /* CONFIG_SPTM */
	CHECK_EXCEPTION_CRITICAL_REGION el1_sp1_synchronous_vector_long, Lel1_sp1_synchronous_vector_long_end, EXT(el1_sp1_synchronous_vector_long_spill_failed)
	CHECK_EXCEPTION_STACK EXT(el1_sp1_synchronous_vector_long_spill_failed)
#ifdef KERNEL_INTEGRITY_KTRR
	b		check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif /* KERNEL_INTEGRITY_KTRR */
#if CONFIG_SPTM
	/* Don't bother setting up x0 since we need it as a temporary */
	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=0

	/*
	 * Did we fail to execute the stack check (x18 == 0)?
	 * On devices which support panic lockdown, we cannot allow this check to be
	 * skipped after early boot, as doing so may allow exception processing to
	 * be delayed indefinitely.
	 */
	adrp	x0, EXT(startup_phase)@page
	ldr		w0, [x0, EXT(startup_phase)@pageoff]
	/* Are we in early boot? */
	cmp		w0, #-1 // STARTUP_SUB_LOCKDOWN
	/*
	 * If we're still in early boot (LO), set flags for whether we skipped the
	 * check. If we're after early boot (HS), pass NE.
	 */
	ccmp	x18, xzr, #0b0000 /* !Z/NE */, LO
	/* Skip authentication if this was an early-boot check failure */
	b.eq	1f
	/*
	 * If we're not in early boot but still couldn't execute the stack bounds
	 * check (x18 == 0), something is wrong (TPIDR is corrupted?).
	 * Trigger a lockdown.
	 */
	cbz		x18, EXT(el1_sp1_synchronous_vector_long_spill_failed)

	/*
	 * In CHECK_EXCEPTION_STACK, we didn't have enough registers to perform the
	 * signature verification on the exception stack top value and instead used
	 * the unauthenticated value (x18) for the stack pointer bounds check.
	 *
	 * Ensure that we actually performed the check on a legitimate value now.
	 */
	mrs		x0, TPIDR_EL1
	LOAD_EXCEP_STACK_THREAD dst=x0, src=x0, tmp=x1
	cmp		x0, x18
	/* If they aren't equal, something is very wrong and we should lock down. */
	b.ne	EXT(el1_sp1_synchronous_vector_long_spill_failed)

1:
	mov		x0, sp	/* Set x0 to saved state pointer */
#else
	EL1_SP1_VECTOR set_x0_to_exception_frame_ptr=1
#endif /* CONFIG_SPTM */
	adrp	x1, fleh_synchronous_sp1@page
	add		x1, x1, fleh_synchronous_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_SYNC_EXCEPTION)
	b		fleh_dispatch64

	/*
	 * Global symbol to make it easy to pick out in backtraces.
	 * Do not call externally.
	 */
	.global EXT(el1_sp1_synchronous_vector_long_spill_failed)
LEXT(el1_sp1_synchronous_vector_long_spill_failed)
	TRAP_UNWIND_PROLOGUE
	TRAP_UNWIND_DIRECTIVES
	/*
	 * We couldn't process the exception, either because we have an invalid
	 * exception stack or because we previously tried to process it and failed.
	 */
#if CONFIG_SPTM
	/*
	 * For SP1 exceptions, we usually delay initiating lockdown until after
	 * we've spilled in order to not lose register state. Since we have nowhere
	 * to safely spill, we have no choice but to initiate it now, clobbering
	 * some of our exception state in the process (RIP).
	 */
	BEGIN_PANIC_LOCKDOWN
#if CONFIG_XNUPOST
	/* Macro returns x0=1 if it performed a simulated lockdown */
	cbz		x0, 0f
	/* This was a test; return to the fault handler so it can fix up the system. */
	mrs		x0, TPIDR_EL1
	ldr		x16, [x0, TH_EXPECTED_FAULT_HANDLER]
#if __has_feature(ptrauth_calls)
	movk	x17, #TH_EXPECTED_FAULT_HANDLER_DIVERSIFIER
	autia	x16, x17
#endif /* ptrauth_calls */
	msr		ELR_EL1, x16
	/* Pass a NULL saved state since we didn't actually save anything */
	mov		x0, #0
	ERET_NO_STRAIGHT_LINE_SPECULATION
#endif /* CONFIG_XNUPOST */
#endif /* CONFIG_SPTM */
0:
	wfe
	b		0b // Spin for watchdog
	UNWIND_EPILOGUE

#if CONFIG_SPTM
#if CONFIG_XNUPOST
	/**
	 * Test function which raises an exception from a location considered inside
	 * the vector. Does not return.
	 */
	.global EXT(el1_sp1_synchronous_raise_exception_in_vector)
LEXT(el1_sp1_synchronous_raise_exception_in_vector)
	ARM64_PROLOG
	brk		#0
	/* Unreachable */
	b		.
#endif /* CONFIG_XNUPOST */
#endif /* CONFIG_SPTM */
Lel1_sp1_synchronous_vector_long_end:

el1_sp1_irq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_irq_sp1@page
	add		x1, x1, fleh_irq_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
	b		fleh_dispatch64

el1_sp1_fiq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_fiq_sp1@page
	add		x1, x1, fleh_fiq_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
	b		fleh_dispatch64

el1_sp1_serror_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_serror_sp1@page
	add		x1, x1, fleh_serror_sp1@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_FATAL_EXCEPTION)
	b		fleh_dispatch64


.macro EL0_64_VECTOR guest_label
	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
#endif
	mrs		x0, TPIDR_EL1						// Load the thread register
	LOAD_USER_PCB	dst=x0, src=x0, tmp=x1		// Load the user context pointer
	mrs		x1, SP_EL0							// Load the user stack pointer
	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
	msr		SPSel, #0							// Switch to SP0
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the user PCB
	mrs		x1, TPIDR_EL1						// Load the thread register


#if HAS_ARM_FEAT_SME
	str		x2, [sp, SS64_X2]
	// current_thread()->machine.umatrix_hdr == NULL: this thread has never
	// executed smstart, so there is no SME state to save
	ldr		x2, [x1, ACT_UMATRIX_HDR]
	cbz		x2, 1f

	mrs		x0, SVCR
	str		x0, [x2, SME_SVCR]
	// SVCR.SM == 0: save SVCR only (ZA is handled during context-switch)
	tbz		x0, #SVCR_SM_SHIFT, 1f

	// SVCR.SM == 1: save SVCR, Z, and P; and exit streaming SVE mode
	ldrh	w0, [x2, SME_SVL_B]
	add		x2, x2, SME_Z_P_ZA
	LOAD_OR_STORE_Z_P_REGISTERS	str, svl_b=x0, ss=x2
	mrs		x2, FPSR
	smstop	sm
	msr		FPSR, x2
1:
	ldr		x2, [sp, SS64_X2]
#endif /* HAS_ARM_FEAT_SME */

	mov		x0, sp								// Copy the user PCB pointer to x0
												// x1 contains the thread register
.endmacro
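/*
 * Note the SP_EL0 shuffle above: with only two scratch registers available,
 * the user PCB pointer is parked in SP_EL0 (after the real user SP has been
 * stashed into the PCB), so that `msr SPSel, #0` leaves sp pointing directly
 * at the PCB with x0/x1 free to be saved into it.
 */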

.macro EL0_64_VECTOR_SWITCH_TO_INT_STACK
	// Similarly to EL1_SP0_VECTOR_SWITCH_TO_INT_STACK, we need to take
	// advantage of EL0_64_VECTOR ending with x0 == sp.  EL0_64_VECTOR also
	// populates x1 with the thread pointer, so we can skip reloading it.
	LOAD_INT_STACK_THREAD	dst=x1, src=x1, tmp=x0
	mov		x0, sp
	mov		sp, x1
.endmacro

.macro EL0_64_VECTOR_SWITCH_TO_KERN_STACK
	LOAD_KERN_STACK_TOP	dst=x1, src=x1, tmp=x0
	mov		x0, sp
	mov		sp, x1
.endmacro

el0_synchronous_vector_64_long:
	EL0_64_VECTOR	sync
	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
	add		x1, x1, EXT(fleh_synchronous)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_SYNC_EXCEPTION)
	b		fleh_dispatch64

el0_irq_vector_64_long:
	EL0_64_VECTOR	irq
	EL0_64_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
	add		x1, x1, EXT(fleh_irq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el0_fiq_vector_64_long:
	EL0_64_VECTOR	fiq
	EL0_64_VECTOR_SWITCH_TO_INT_STACK
	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
	add		x1, x1, EXT(fleh_fiq)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64

el0_serror_vector_64_long:
	EL0_64_VECTOR	serror
	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_serror)@pageoff
	mov		x2, #(FLEH_DISPATCH64_OPTION_NONE)
	b		fleh_dispatch64


#if defined(KERNEL_INTEGRITY_KTRR)
	.text
	.align 2
check_ktrr_sctlr_trap:
/* We may abort on an instruction fetch on reset when enabling the MMU by
 * writing SCTLR_EL1 because the page containing the privileged instruction is
 * not executable at EL1 (due to KTRR). The abort happens only on SP1, which
 * would otherwise panic unconditionally. Check for the condition and return
 * safe execution to the caller on behalf of the faulting function.
 *
 * Expected register state:
 *  x22 - Kernel virtual base
 *  x23 - Kernel physical base
 */
	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
	movz	w1, #0x8600, lsl #16
	movk	w1, #0x0000
	cmp		x0, x1
	mrs		x0, ELR_EL1					// Check for expected abort address
	adrp	x1, _pinst_set_sctlr_trap_addr@page
	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
	sub		x1, x1, x22					// Convert to physical address
	add		x1, x1, x23
	ccmp	x0, x1, #0, eq
	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
	b.ne	Lel1_sp1_synchronous_vector_continue
	msr		ELR_EL1, lr					// Return to caller
	ERET_NO_STRAIGHT_LINE_SPECULATION
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 *  {x0, x1, sp} - saved
 *  x0 - arm_context_t
 *  x1 - address of FLEH
 *  x2 - bitfield of type FLEH_DISPATCH64_OPTION_xxx, clobbered
 *  x3 - unused
 *  fp - previous stack frame if EL1
 *  lr - unused
 *  sp - kernel stack
 */
	.text
	.align 2
fleh_dispatch64:
#if HAS_APPLE_PAC
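	/*
	 * Sign the FLEH pointer using SP as the diversifier; the matching
	 * `braa x22, sp` at the dispatch site below authenticates it, so a
	 * corrupted handler address faults instead of redirecting control.
	 */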
	pacia	x1, sp
#endif

	/* Save arm_saved_state64 */
	SPILL_REGISTERS KERNEL_MODE, options_register=x2

	/* If the exception is from userspace, zero unused registers */
	and		x23, x23, #(PSR64_MODE_EL_MASK)
	cmp		x23, #(PSR64_MODE_EL0)
	bne		1f

	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS


2:

	mov		x2, #0
	mov		x3, #0
	mov		x4, #0
	mov		x5, #0
	mov		x6, #0
	mov		x7, #0
	mov		x8, #0
	mov		x9, #0
	mov		x10, #0
	mov		x11, #0
	mov		x12, #0
	mov		x13, #0
	mov		x14, #0
	mov		x15, #0
	mov		x16, #0
	mov		x17, #0
	mov		x18, #0
	mov		x19, #0
	mov		x20, #0
	/* x21, x22 cleared in common case below */
	mov		x23, #0
	mov		x24, #0
	mov		x25, #0
#if !XNU_MONITOR
	mov		x26, #0
#endif
	mov		x27, #0
	mov		x28, #0
	mov		fp, #0
	mov		lr, #0
1:

	mov		x21, x0								// Copy arm_context_t pointer to x21
	mov		x22, x1								// Copy handler routine to x22

#if XNU_MONITOR
	/* Zero x26 to indicate that this should not return to the PPL. */
	mov		x26, #0
#endif

#if PRECISE_USER_KERNEL_TIME
	cmp		x23, #PSR64_MODE_EL0			// If interrupting the kernel, skip
	b.gt	1f                                  // precise time update.
	PUSH_FRAME
	bl		EXT(recount_leave_user)
	POP_FRAME_WITHOUT_LR
	mov		x0, x21								// Reload arm_context_t pointer
1:
#endif /* PRECISE_USER_KERNEL_TIME */

	/* Dispatch to FLEH */

#if HAS_APPLE_PAC
	braa	x22, sp
#else
	br		x22
#endif


	.text
	.align 2
	.global EXT(fleh_synchronous)
LEXT(fleh_synchronous)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address

	/* At this point, the LR contains the value of ELR_EL1. In the case of an
	 * instruction prefetch abort, this will be the faulting pc, which we know
	 * to be invalid. This will prevent us from backtracing through the
	 * exception if we put it in our stack frame, so we load the LR from the
	 * exception saved state instead.
	 */
	and		w6, w1, #(ESR_EC_MASK)
	lsr		w6, w6, #(ESR_EC_SHIFT)
	mov		w4, #(ESR_EC_IABORT_EL1)
	cmp		w6, w4
	b.eq	Lfleh_sync_load_lr
Lvalid_link_register:

#if CONFIG_SPTM
	mrs		x25, ELR_EL1

	/*
	 * Sync exceptions in the kernel are rare, so check that first.
	 * This check should be trivially predicted not-taken. We also take
	 * the check out of line so, on the hot path, we don't add a
	 * frontend redirect.
	 */
	mov		x3, #0 // by default, do not signal panic lockdown to sleh
	mrs		x4, SPSR_EL1
	tst		x4, #(PSR64_MODE_EL_MASK)
	b.ne	Lfleh_synchronous_ool_check_exception_el1 /* Run ELn checks if we're EL!=0 (!Z) */
	/* EL0 -- check if we're blocking sync exceptions due to lockdown */
	adrp	x4, EXT(sptm_xnu_triggered_panic_ptr)@page
	ldr		x4, [x4, EXT(sptm_xnu_triggered_panic_ptr)@pageoff]
	ldrb	w4, [x4]
	cbnz	w4, Lblocked_user_sync_exception

Lfleh_synchronous_continue:
	/* We've had our chance to lock down; release PC/FAR */
	str		x25, [x0, SS64_PC]
	str		x2,  [x0, SS64_FAR]
#endif /* CONFIG_SPTM */

	PUSH_FRAME
	bl		EXT(sleh_synchronous)
	POP_FRAME_WITHOUT_LR

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
	b		exception_return_dispatch

Lfleh_sync_load_lr:
	ldr		lr, [x0, SS64_LR]
	b		Lvalid_link_register

#if CONFIG_SPTM
Lfleh_synchronous_ool_check_exception_el1:
	/*
	 * We're in the kernel!
	 *
	 * An SP0 sync exception is forced to be fatal if:
	 * (AND
	 * 	(SPSR.M[3:2] > 0) // Originates from kernel
	 * 	(OR
	 * 		(ESR.EC == ESR_EC_PAC_FAIL)    // FPAC failure
	 * 		(AND  // PAC BRK instruction (compiler generated traps)
	 * 			ESR.EC == ESR_EC_BRK_AARCH64
	 * 			ESR.ISS is in PTRAUTH_TRAPS
	 * 		)
	 *		(AND // Potential dPAC failure (poisoned VA)
	 *			(ESR.EC == ESR_EC_DABORT_EL1)
	 *			(XPACD(FAR) != FAR)
	 *			(NAND // outside copyio region
	 *				ELR >= copyio_fault_region_begin
	 *				ELR < copyio_fault_region_end
	 * 			)
	 *		)
	 *		(ESR.EC == ESR_EC_IABORT_EL1)  // Potential iPAC failure (poisoned PC)
	 * 		(AND
	 * 			(AND SPSR.A SPSR.I SPSR.F) // Async exceptions were masked
	 * 			(ESR.EC != ESR_EC_UNCATEGORIZED) // Not an undefined instruction (GDBTRAP for stackshots, etc.)
	 *			(NAND // brks other than PAC traps are permitted for non-fatal telemetry
	 *				(ESR.EC == ESR_EC_BRK_AARCH64)
	 *				(ESR.ISS is in PTRAUTH_TRAPS)
	 *			)
	 *			(CONFIG_BTI_TELEMETRY && ESR.EC != ESR_EC_BTI_FAIL) // Do not make BTI telemetry exceptions fatal
	 * 			(startup_phase < STARTUP_SUB_LOCKDOWN) // Not in early-boot
	 * 			(OR !CONFIG_XNUPOST (saved_expected_fault_handler == NULL)) // Not an expected, test exception
	 *			(NAND // copyio data aborts are permitted while exceptions are masked
	 *				ESR.EC == ESR_EC_DABORT_EL1
	 *				ELR >= copyio_fault_region_begin
	 *				ELR < copyio_fault_region_end
	 *			)
	 * 		)
	 * 	)
	 * )
	 */

	/*
	 * Pre-compute some sub-expressions which will be used later.
	 */
	mrs		x10, ELR_EL1
	adrp	x11, EXT(copyio_fault_region_begin)@page
	add		x11, x11, EXT(copyio_fault_region_begin)@pageoff
	adrp	x12, EXT(copyio_fault_region_end)@page
	add		x12, x12, EXT(copyio_fault_region_end)@pageoff

	/* in-copyio-region sub-expression */
	/* Are we after the start of the copyio region? */
	cmp		x10, x11
	/*
	 * If after the start (HS), test the upper bound.
	 * Otherwise (LO), fail forward (HS).
	 */
	ccmp	x10, x12, #0b0010 /* C/HS */, HS
	/*
	 * Save the "in-copyio-region" flag for later reuse:
	 * x10=1 if ELR was in the copyio region, 0 otherwise.
	 */
	cset	x10, LO

#if __has_feature(ptrauth_calls)
	/* is-dPAC-poisoned-DABORT sub-expression */
	mrs		x5, FAR_EL1
	/*
	 * Is XPACD(FAR) == FAR?
	 * XPAC converts an arbitrary pointer-like value into the canonical form
	 * that would be produced if the pointer were to successfully pass AUTx.
	 * If the pointer is canonical, this has no effect.
	 * If the pointer is non-canonical (such as due to PAC poisoning), the value
	 * will not match FAR.
	 */
	mov		x11, x5
	xpacd	x11
	/*
	 * If we're outside the copyio region (HS), set flags for whether FAR is
	 * clean (EQ) or has PAC poisoning (NE).
	 * Otherwise (LO), set EQ.
	 */
	ccmp	x5, x11, #0b0100 /* Z/EQ */, HS
	/*
	 * If we were poisoned (NE), was this a data abort?
	 * Otherwise (EQ), pass NE.
	 */
	mov		w5, #(ESR_EC_DABORT_EL1)
	ccmp	w6, w5, #0b0000 /* !Z/NE */, NE
	/* x11=1 when we had a DABORT with a poisoned VA outside the copyio region */
	cset	x11, EQ
#endif /* ptrauth_calls */

	/*
	 * Now let's check the rare but fast conditions that apply only to kernel
	 * sync exceptions.
	 */

	/*
	 * if ((ESR.EC == ESR_EC_BRK_AARCH64 && IS_PTRAUTH(ESR.ISS)) ||
	 * 		ESR.EC == ESR_EC_PAC_FAIL ||
	 *		ESR.EC == ESR_EC_IABORT_EL1 ||
	 *		poisoned_dabort)
	 * 	goto Lfleh_synchronous_panic_lockdown
	 */
	cmp		w6, #(ESR_EC_BRK_AARCH64) // eq if this is a BRK instruction
	/*
	 * Is this a PAC breakpoint? ESR.ISS in [0xC470, 0xC473], which is true when
	 * {ESR.ISS[24:2], 2'b00} == 0xC470
	 */
	mov		w5, #(0xC470)
	and		w7, w1, #0xfffc
	/*
	 * If we're not BRK, NE.
	 * If we're BRK, set flags for ISS == PAC breakpoint.
	 */
	ccmp	w7, w5, #0, EQ

	/*
	 * If we aren't a PAC BRK (NE), set flags for ESR.EC == PAC_FAIL.
	 * If we are a PAC BRK (EQ), pass EQ through.
	 */
	ccmp	w6, #(ESR_EC_PAC_FAIL), #0b0100 /* Z */, NE

	/*
	 * If !(PAC BRK || EC == PAC_FAIL) (NE), set flags for ESR.EC == IABORT.
	 * If (PAC BRK || EC == PAC_FAIL) (EQ), pass lockdown request (EQ).
	 */
	mov		w5, #(ESR_EC_IABORT_EL1)
	ccmp	w6, w5, #0b0100 /* Z/EQ */, NE

#if __has_feature(ptrauth_calls)
	/*
	 * If !(PAC BRK || EC == PAC_FAIL || EC == IABORT) (NE), set flags for
	 * whether this was a poisoned DABORT (previously computed in x11).
	 * If (PAC BRK || EC == PAC_FAIL || EC == IABORT) (EQ), pass lockdown
	 * request (EQ).
	 */
	ccmp	x11, #1, #0b0100 /* Z/EQ */, NE
#endif /* ptrauth_calls */

	b.eq	Lfleh_synchronous_panic_lockdown

	/*
	 * Most kernel exceptions won't be taken with exceptions masked, but if they
	 * are they'll be stackshot traps or telemetry breakpoints. Check these
	 * first since they're cheap.
	 *
	 * if (!((PSTATE & DAIF_STANDARD_DISABLE) == DAIF_STANDARD_DISABLE
	 * 		&& ESR.EC != ESR_EC_UNCATEGORIZED
	 * 		&& ESR.EC != ESR_EC_BRK_AARCH64))
	 * 		goto Lfleh_synchronous_continue
	 */
	mov		w5, #(DAIF_STANDARD_DISABLE)
	bics	wzr, w5, w4 // (DAIF_STANDARD_DISABLE & (~PSTATE)). If !Z/NE, AIF wasn't (fully) masked.
	/*
	 * If AIF was masked (EQ), test EC =? fasttrap.
	 * If AIF wasn't masked (NE), pass lockdown skip (EQ).
	 */
	ccmp	w6, #(ESR_EC_UNCATEGORIZED), #0b0100 /* Z/EQ */, EQ
	/*
	 * If AIF was masked AND EC != fasttrap (NE), test EC =? BRK.
	 * If AIF wasn't masked OR EC == fasttrap (EQ), pass lockdown skip (EQ).
	 */
	mov		w5, #(ESR_EC_BRK_AARCH64)
	ccmp	w6, w5, #0b0100 /* Z/EQ */, NE
	/*
	 * AIF was masked AND EC != fasttrap AND EC != BRK (NE).
	 * AIF wasn't masked OR EC == fasttrap OR EC == BRK (EQ) -> skip lockdown!
	 */
	b.eq	Lfleh_synchronous_continue

	/*
	 * Non-PAC/BRK/fasttrap exception taken with exceptions disabled.
	 * We're going down unless the system IS:
	 * 1) in early boot, OR
	 * 2) handling an expected XNUPOST exception (handled in the lockdown
	 *    macro), OR
	 * 3) taking a copyio data abort
	 */
	adrp	x7, EXT(startup_phase)@page
	add		x7, x7, EXT(startup_phase)@pageoff
	ldr		w7, [x7]
	cmp		w7, #-1 // STARTUP_SUB_LOCKDOWN
	b.lo	Lfleh_synchronous_continue

	/* Was this a copyio data abort taken while exceptions were masked? */
	cmp		w6, #ESR_EC_DABORT_EL1
	/* x10=1 when ELR was in copyio range */
	ccmp	x10, #1, #0b0000 /* !Z/NE */, EQ
	b.eq	Lfleh_synchronous_continue

#if BTI_ENFORCED && CONFIG_BTI_TELEMETRY
	/* BTI telemetry exceptions are recoverable only in telemetry mode */
	cmp		w6, #ESR_EC_BTI_FAIL
	b.eq	Lfleh_synchronous_continue
#endif /* BTI_ENFORCED && CONFIG_BTI_TELEMETRY */

	/* FALLTHROUGH */
Lfleh_synchronous_panic_lockdown:
	/* Save off arguments for sleh as SPTM may clobber them */
	mov		x26, x0
	mov		x27, x1
	mov		x28, x2
	BEGIN_PANIC_LOCKDOWN
	mov		x0, x26
	mov		x1, x27
	mov		x2, x28
	/*
	 * A captain goes down with her ship: the system is sunk, but for
	 * telemetry's sake try to handle the crash normally.
	 */
	mov		x3, #1 // signal to sleh that we completed panic lockdown
	b		Lfleh_synchronous_continue
#endif /* CONFIG_SPTM */
UNWIND_EPILOGUE

#if CONFIG_SPTM
	.text
	.align 2
	/* Make a global symbol so it's easier to pick out in backtraces */
	.global EXT(blocked_user_sync_exception)
LEXT(blocked_user_sync_exception)
Lblocked_user_sync_exception:
	TRAP_UNWIND_PROLOGUE
	TRAP_UNWIND_DIRECTIVES
	/*
	 * User space took a sync exception after panic lockdown had been initiated.
	 * The system is going to panic soon, so let's just re-enable FIQs and wait
	 * for debugger sync.
	 */
	msr		DAIFClr, #DAIFSC_FIQF
0:
	wfe
	b		0b
	UNWIND_EPILOGUE
#endif /* CONFIG_SPTM */

/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *  x0 - arm_context_t
 * x23 - CPSR
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
	mrs		x22, TPIDR_EL1
	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence */
	ldr		w1, [x23, CPU_STAT_IRQ]
	add		w1, w1, #1							// Increment count
	str		w1, [x23, CPU_STAT_IRQ]				// Update IRQ count
	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
	add		w1, w1, #1					// Increment count
	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
	/* Increment preempt count */
	ldr		w1, [x22, ACT_PREEMPT_CNT]
	add		w1, w1, #1
	str		w1, [x22, ACT_PREEMPT_CNT]
	/* Store context in int state */
	str		x0, [x23, CPU_INT_STATE] 			// Save context in cpu_int_state
.endmacro

/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 * x22 - Live TPIDR_EL1 value (thread address)
 * x23 - Address of the current CPU data structure
 * w24 - 0 if kdebug is disabled, nonzero otherwise
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
	/* Clear int context */
	str		xzr, [x23, CPU_INT_STATE]
	/* Decrement preempt count */
	ldr		w0, [x22, ACT_PREEMPT_CNT]
	cbnz	w0, 1f								// Detect underflow
	b		preempt_underflow
1:
	sub		w0, w0, #1
	str		w0, [x22, ACT_PREEMPT_CNT]
	/* Switch back to kernel stack */
	LOAD_KERN_STACK_TOP	dst=x0, src=x22, tmp=x28
	mov		sp, x0
	/* Generate a CPU-local event to terminate a post-IRQ WFE */
	sevl
.endmacro
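/*
 * The trailing `sevl` above sets the CPU-local event register, so the next
 * wfe executed on this CPU after interrupt return completes immediately
 * instead of stalling; per the comment in BEGIN_INTERRUPT_HANDLER, this
 * supports the WFE idle sequence, which is also why CPU_STAT_IRQ must stay
 * accurate.
 */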

	.text
	.align 2
	.global EXT(fleh_irq)
LEXT(fleh_irq)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl		EXT(sleh_irq)
	POP_FRAME_WITHOUT_LR
	END_INTERRUPT_HANDLER

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
	b		exception_return_dispatch
UNWIND_EPILOGUE

	.text
	.align 2
	.global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)
	/*
	 * This function is a placeholder which should never be invoked.
	 * We omit the landing pad here since there is no sensible choice.
	 */
	PANIC_UNIMPLEMENTED

	.text
	.align 2
	.global EXT(fleh_fiq)
LEXT(fleh_fiq)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl		EXT(sleh_fiq)
	POP_FRAME_WITHOUT_LR
	END_INTERRUPT_HANDLER

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
	b		exception_return_dispatch
UNWIND_EPILOGUE

	.text
	.align 2
	.global EXT(fleh_serror)
LEXT(fleh_serror)
TRAP_UNWIND_PROLOGUE
TRAP_UNWIND_DIRECTIVES
	ARM64_JUMP_TARGET
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address

	PUSH_FRAME
	bl		EXT(sleh_serror)
	POP_FRAME_WITHOUT_LR

#if XNU_MONITOR && !CONFIG_SPTM
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
	b		exception_return_dispatch
UNWIND_EPILOGUE

/*
 * Register state saved before we get here.
 */
	.text
	.align 2
fleh_invalid_stack:
	ARM64_JUMP_TARGET
#if CONFIG_SPTM
	/*
	 * Taking a data abort with an invalid kernel stack pointer is unrecoverable.
	 * Initiate lockdown.
	 */

	/* Save off temporaries (including exception SPRs) as SPTM can clobber them */
	mov		x25, x0
	mrs		x26, ELR_EL1
	mrs		x27, ESR_EL1
	mrs		x28, FAR_EL1
	BEGIN_PANIC_LOCKDOWN
	mov		x0, x25
	mov		x1, x27
	mov		x2, x28
	/* We deferred storing PC/FAR until after lockdown, so do that now */
	str		x26, [x0, SS64_PC]
	str		x28, [x0, SS64_FAR]
#else
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address
#endif /* CONFIG_SPTM */
	PUSH_FRAME
	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
	b		.

	.text
	.align 2
fleh_synchronous_sp1:
	ARM64_JUMP_TARGET
#if CONFIG_SPTM
	/*
	 * Without debugger intervention, all exceptions on SP1 (including debug
	 * trap instructions) are intended to be fatal. In order to not break
	 * self-hosted kernel debug, do not trigger lockdown for debug traps
	 * (unknown instructions/uncategorized exceptions). On release kernels, we
	 * don't support self-hosted kernel debug, so unconditionally lock down.
	 */
#if (DEVELOPMENT || DEBUG)
	tst		w1, #(ESR_EC_MASK)
	b.eq	Lfleh_synchronous_sp1_skip_panic_lockdown // ESR_EC_UNCATEGORIZED is 0, so skip lockdown if Z
#endif /* DEVELOPMENT || DEBUG */
	/* Save off temporaries (including exception SPRs) as SPTM can clobber them */
	mov		x25, x0
	mrs		x26, ELR_EL1
	mrs		x27, ESR_EL1
	mrs		x28, FAR_EL1
	BEGIN_PANIC_LOCKDOWN
	mov		x0, x25
	mov		x1, x27
	mov		x2, x28
	/* We deferred storing PC/FAR until after lockdown, so do that now */
	str		x26, [x0, SS64_PC]
	str		x28, [x0, SS64_FAR]
Lfleh_synchronous_sp1_skip_panic_lockdown:
#else
	mrs		x1, ESR_EL1
	mrs		x2, FAR_EL1
#endif /* CONFIG_SPTM */

	PUSH_FRAME
	bl		EXT(sleh_synchronous_sp1)
	b		.

	.text
	.align 2
fleh_irq_sp1:
	ARM64_JUMP_TARGET
	mov		x1, x0
	adr		x0, Lsp1_irq_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
	.asciz "IRQ exception taken while SP1 selected"

	.text
	.align 2
fleh_fiq_sp1:
	ARM64_JUMP_TARGET
	mov		x1, x0
	adr		x0, Lsp1_fiq_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
	.asciz "FIQ exception taken while SP1 selected"

	.text
	.align 2
fleh_serror_sp1:
	ARM64_JUMP_TARGET
	mov		x1, x0
	adr		x0, Lsp1_serror_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
	.asciz "Asynchronous exception taken while SP1 selected"

	.text
	.align 2
exception_return_dispatch:
	ldr		w0, [x21, SS64_CPSR]
	tst		w0, PSR64_MODE_EL_MASK
	b.ne	EXT(return_to_kernel) // return to kernel if M[3:2] > 0
	b		return_to_user

#if CONFIG_SPTM
/**
 * XNU returns to this symbol whenever handling an interrupt that occurred
 * during SPTM, TXM or SK runtime. This code determines which domain the
 * XNU thread was executing in when the interrupt occurred and tells SPTM
 * which domain to resume.
 */
	.text
	.align 2
	.global EXT(xnu_return_to_gl2)
LEXT(xnu_return_to_gl2)
	/**
	 * If thread->txm_thread_stack is set, we need to tell SPTM dispatch to
	 * resume the TXM thread in x0.
	 */
	mrs		x8, TPIDR_EL1
	ldr		x8, [x8, TH_TXM_THREAD_STACK]
	cbz		x8, 1f
	mov		x0, x8
	b		EXT(txm_resume)
	/* Unreachable */
	b		.

#if CONFIG_EXCLAVES
	/**
	 * If thread->th_exclaves_intstate flag TH_EXCLAVES_EXECUTION is set,
	 * we need to tell SPTM dispatch to resume the SK thread.
	 */
1:
	mrs		x8, TPIDR_EL1
	ldr		x9, [x8, TH_EXCLAVES_INTSTATE]
	and		x9, x9, TH_EXCLAVES_EXECUTION
	cbz		x9, 1f
	b		EXT(sk_resume)
	/* Unreachable */
	b		.
#endif /* CONFIG_EXCLAVES */

	/**
	 * If neither of the above checks succeeded, this must be a thread
	 * that was interrupted while running in SPTM. Tell SPTM to resume
	 * the interrupted SPTM call.
	 */
1:
	b		EXT(sptm_resume_from_exception)
	/* Unreachable */
	b		.
#endif /* CONFIG_SPTM */

	.text
	.align 2
	.global EXT(return_to_kernel)
LEXT(return_to_kernel)
	UNWIND_PROLOGUE
	RETURN_TO_KERNEL_UNWIND
	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
	mrs		x3, TPIDR_EL1                           // Load thread pointer
	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
	b.eq	exception_return_unint_tpidr_x3
	mov		sp, x21                                 // Switch to thread stack for preemption
	PUSH_FRAME
	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
	POP_FRAME_WITHOUT_LR
	b		exception_return
	UNWIND_EPILOGUE

	.text
	.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
	ARM64_PROLOG
#if CONFIG_DTRACE
	bl		EXT(dtrace_thread_bootstrap)
#endif
#if KASAN_TBI
	PUSH_FRAME
	bl		EXT(__asan_handle_no_return)
	POP_FRAME_WITHOUT_LR
#endif /* KASAN_TBI */
	b		EXT(arm64_thread_exception_return)

	.text
	.globl EXT(arm64_thread_exception_return)
LEXT(arm64_thread_exception_return)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1
	LOAD_USER_PCB	dst=x21, src=x0, tmp=x28
	mov		x28, xzr

	//
	// Fall through to return_to_user from arm64_thread_exception_return.
	// Note that if we move return_to_user or insert a new routine
	// below arm64_thread_exception_return, the latter will need to change.
	//
	.text
/* x21 is always the machine context pointer when we get here;
 * x28 is a bit indicating whether or not we should check if the pc is in the PFZ */
return_to_user:
check_user_asts:
#if KASAN_TBI
	PUSH_FRAME
	bl		EXT(__asan_handle_no_return)
	POP_FRAME_WITHOUT_LR
#endif /* KASAN_TBI */
	mrs		x3, TPIDR_EL1					// Load thread pointer

	movn		w2, #0
	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user

#if MACH_ASSERT
	ldr		w0, [x3, ACT_PREEMPT_CNT]
	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
#endif

	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
	cbz		w0, no_asts							// If no ASTs, skip ahead

	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle ASTs

	/* At this point, we have ASTs and we need to check whether we are running in the
	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
	 * the PFZ since we don't want to handle getting a signal or getting suspended
	 * while holding a spinlock in userspace.
	 *
	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
	 * to use it to indicate to userspace to come back to take a delayed
	 * preemption, at which point the ASTs will be handled. */
	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64

	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
	cbz		x0, restore_and_check_ast			// No, deal with other ASTs

	mov		x0, #1
	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
	mov		x0, x19								// Restore x0 to the pending ASTs
	b		no_asts								// Pretend we have no ASTs

restore_and_check_ast:
	mov		x0, x19								// Restore x0
	b		user_take_ast						// Service pending ASTs
no_asts:


#if PRECISE_USER_KERNEL_TIME
	mov		x19, x3						// Preserve thread pointer across function call
	PUSH_FRAME
	bl		EXT(recount_enter_user)
	POP_FRAME_WITHOUT_LR
	mov		x3, x19
#endif /* PRECISE_USER_KERNEL_TIME */

#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
	/* Watchtower
	 *
	 * Here we attempt to enable NEON access for EL0. If the last entry into the
	 * kernel from user-space was due to an IRQ, the monitor will have disabled
	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
	 * check in with the monitor in order to reenable NEON for EL0 in exchange
	 * for routing IRQs through the monitor (2). This way the monitor will
	 * always 'own' either IRQs or EL0 NEON.
	 *
	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
	 * here.
	 *
	 * EL0 user ________ IRQ                                            ______
	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
	 * EL3 monitor           \_/                                \___/
	 *
	 *                       (1)                                 (2)
	 */

	mov		x0, #(CPACR_FPEN_ENABLE)
	msr		CPACR_EL1, x0
#endif

	/* Establish this thread's debug state as the live state on the selected CPU. */
	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		x1, [x4, CPU_USER_DEBUG]			// Get debug context
	ldr		x0, [x3, ACT_DEBUGDATA]
	cmp		x0, x1
	beq		L_skip_user_set_debug_state			// If the debug states already match, skip applying the thread state


	PUSH_FRAME
	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
	POP_FRAME_WITHOUT_LR
	mrs		x3, TPIDR_EL1						// Reload thread pointer
	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
L_skip_user_set_debug_state:


	ldrsh	x0, [x4, CPU_TPIDR_EL0]
	msr		TPIDR_EL0, x0


	b		exception_return_unint_tpidr_x3
1675
1676exception_return:
1677	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1678exception_return_unint:
1679	mrs		x3, TPIDR_EL1					// Load thread pointer
1680exception_return_unint_tpidr_x3:
1681	mov		sp, x21						// Reload the pcb pointer
1682
1683#if !__ARM_KERNEL_PROTECT__
1684	/*
1685	 * Restore x18 only if the task holds the entitlement that allows
1686	 * its use. Such tasks are very few, and they can move to something
1687	 * else once we use x18 for something more global.
1688	 *
1689	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
1690	 * that configuration uses x18 for one of the global use cases (and
1691	 * resets x18 further down below).
1692	 *
1693	 * The restore is also unconditionally skipped for translated threads,
1694	 * another use case in which x18 must be preserved.
1695	 */
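	/*
	 * In effect (an illustrative C sketch, not kernel code; machine_flags
	 * stands for the word loaded from TH_ARM_MACHINE_FLAGS below):
	 *
	 *	x18 = (machine_flags & (1U << ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT))
	 *	    ? saved_state->x18 : 0;
	 */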
1696	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
1697	mov		x18, #0
1698	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers
1699
1700exception_return_unint_tpidr_x3_restore_x18:
1701	ldr		x18, [sp, SS64_X18]
1702
1703#else /* !__ARM_KERNEL_PROTECT__ */
1704	/*
1705	 * If we are going to eret to userspace, we must return through the EL0
1706	 * eret mapping.
1707	 */
1708	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
1709	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1
1710
1711	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
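	/*
	 * The address math below, sketched in C (illustrative only):
	 *
	 *	el0_target = ARM_KERNEL_PROTECT_EXCEPTION_START
	 *	           + (&Lexception_return_restore_registers - &ExceptionVectorsBase);
	 *
	 * i.e. rebase the PC-relative target onto the fixed EL0 alias of the
	 * exception vector region.
	 */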
1712	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
1713	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
1714	add		x1, x1, Lexception_return_restore_registers@pageoff
1715	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
1716	sub		x1, x1, x0											// Calculate delta
1717	add		x0, x2, x1											// Convert KVA to EL0 vector address
1718	br		x0
1719
1720Lskip_el0_eret_mapping:
1721#endif /* !__ARM_KERNEL_PROTECT__ */
1722
1723Lexception_return_restore_registers:
1724	mov 	x0, sp								// x0 = &pcb
1725	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1726	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, x25, el0_state_allowed=1
1727
1728	msr		ELR_EL1, x1							// Load the return address into ELR
1729	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
1730
1731/* Restore special register state */
1732	ldr		w3, [sp, NS64_FPSR]
1733	ldr		w4, [sp, NS64_FPCR]
1734
1735	msr		FPSR, x3
1736	mrs		x5, FPCR
1737	CMSR FPCR, x5, x4, 1
17381:
1739
1740
1741#if HAS_ARM_FEAT_SME
1742	mrs		x2, SPSR_EL1
1743	and		x2, x2, #(PSR64_MODE_EL_MASK)
1744	cmp		x2, #(PSR64_MODE_EL0)
1745	// SPSR_EL1.M != EL0: no SME state to restore
1746	bne		Lno_sme_saved_state
1747
1748	mrs		x3, TPIDR_EL1
1749	ldr		x2, [x3, ACT_UMATRIX_HDR]
1750	cbz		x2, Lno_sme_saved_state
1751
1752	ldr		x3, [x2, SME_SVCR]
1753	msr		SVCR, x3
1754	// SVCR.SM == 0: restore SVCR only (ZA is handled during context-switch)
1755	tbz		x3, #SVCR_SM_SHIFT, Lno_sme_saved_state
1756
1757	// SVCR.SM == 1: restore SVCR, Z, and P
1758	ldrh	w3, [x2, SME_SVL_B]
1759	add		x2, x2, SME_Z_P_ZA
1760	LOAD_OR_STORE_Z_P_REGISTERS	ldr, svl_b=x3, ss=x2
1761
1762	// The FPSIMD register file acts like a view into the lower 128 bits of
1763	// Z0-Z31.  While there's no harm reading it out during exception entry,
1764	// writing it back would truncate the Z0-Z31 values we just restored.
1765	b		Lskip_restore_neon_saved_state
1766Lno_sme_saved_state:
1767#endif /* HAS_ARM_FEAT_SME */
1768
1769	/* Restore arm_neon_saved_state64 */
1770	ldp		q0, q1, [x0, NS64_Q0]
1771	ldp		q2, q3, [x0, NS64_Q2]
1772	ldp		q4, q5, [x0, NS64_Q4]
1773	ldp		q6, q7, [x0, NS64_Q6]
1774	ldp		q8, q9, [x0, NS64_Q8]
1775	ldp		q10, q11, [x0, NS64_Q10]
1776	ldp		q12, q13, [x0, NS64_Q12]
1777	ldp		q14, q15, [x0, NS64_Q14]
1778	ldp		q16, q17, [x0, NS64_Q16]
1779	ldp		q18, q19, [x0, NS64_Q18]
1780	ldp		q20, q21, [x0, NS64_Q20]
1781	ldp		q22, q23, [x0, NS64_Q22]
1782	ldp		q24, q25, [x0, NS64_Q24]
1783	ldp		q26, q27, [x0, NS64_Q26]
1784	ldp		q28, q29, [x0, NS64_Q28]
1785	ldp		q30, q31, [x0, NS64_Q30]
1786#if HAS_ARM_FEAT_SME
1787Lskip_restore_neon_saved_state:
1788#endif
1789
1790	/* Restore arm_saved_state64 */
1791
1792	// Skip x0, x1 - we're using them
1793	ldp		x2, x3, [x0, SS64_X2]
1794	ldp		x4, x5, [x0, SS64_X4]
1795	ldp		x6, x7, [x0, SS64_X6]
1796	ldp		x8, x9, [x0, SS64_X8]
1797	ldp		x10, x11, [x0, SS64_X10]
1798	ldp		x12, x13, [x0, SS64_X12]
1799	ldp		x14, x15, [x0, SS64_X14]
1800	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1801	// Skip x18 - already restored or trashed above (below with __ARM_KERNEL_PROTECT__)
1802	ldr		x19, [x0, SS64_X19]
1803	ldp		x20, x21, [x0, SS64_X20]
1804	ldp		x22, x23, [x0, SS64_X22]
1805	ldp		x24, x25, [x0, SS64_X24]
1806	ldp		x26, x27, [x0, SS64_X26]
1807	ldr		x28, [x0, SS64_X28]
1808	ldr		fp, [x0, SS64_FP]
1809	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1810
1811	// Restore stack pointer and our last two GPRs
1812	ldr		x1, [x0, SS64_SP]
1813	mov		sp, x1
1814
1815#if __ARM_KERNEL_PROTECT__
1816	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1817#endif /* __ARM_KERNEL_PROTECT__ */
1818
1819	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1820
1821#if __ARM_KERNEL_PROTECT__
1822	/* If we are going to eret to userspace, we must unmap the kernel. */
1823	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1824
1825	/* Update TCR to unmap the kernel. */
1826	MOV64		x18, TCR_EL1_USER
1827	msr		TCR_EL1, x18
1828
1829	/*
1830	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1831	 * each other due to the microarchitecture.
1832	 */
1833#if !defined(APPLE_ARM64_ARCH_FAMILY)
1834	isb		sy
1835#endif
1836
1837	/* Switch to the user ASID (low bit clear) for the task. */
1838	mrs		x18, TTBR0_EL1
1839	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
1840	msr		TTBR0_EL1, x18
1841	mov		x18, #0
1842
1843	/* We don't need an ISB here, as the eret is synchronizing. */
1844Lskip_ttbr1_switch:
1845#endif /* __ARM_KERNEL_PROTECT__ */
1846
1847	ERET_NO_STRAIGHT_LINE_SPECULATION
1848
1849user_take_ast:
1850	PUSH_FRAME
1851	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
1852	POP_FRAME_WITHOUT_LR
1853	b		check_user_asts								// Now try again
1854
1855	.text
1856	.align 2
1857preempt_underflow:
1858	mrs		x0, TPIDR_EL1
1859	str		x0, [sp, #-16]!						// We'll print thread pointer
1860	adr		x0, L_underflow_str					// Format string
1861	CALL_EXTERN panic							// Game over
1862
1863L_underflow_str:
1864	.asciz "Preemption count negative on thread %p"
1865.align 2
1866
1867#if MACH_ASSERT
1868	.text
1869	.align 2
1870preempt_count_notzero:
1871	mrs		x0, TPIDR_EL1
1872	str		x0, [sp, #-16]!						// We'll print thread pointer
1873	ldr		w0, [x0, ACT_PREEMPT_CNT]
1874	str		w0, [sp, #8]
1875	adr		x0, L_preempt_count_notzero_str				// Format string
1876	CALL_EXTERN panic							// Game over
1877
1878L_preempt_count_notzero_str:
1879	.asciz "preemption count not 0 on thread %p (%u)"
1880#endif /* MACH_ASSERT */
1881
1882#if __ARM_KERNEL_PROTECT__
1883	/*
1884	 * This symbol denotes the end of the exception vector/eret range; we page
1885	 * align it so that we can avoid mapping other text in the EL0 exception
1886	 * vector mapping.
1887	 */
1888	.text
1889	.align 14
1890	.globl EXT(ExceptionVectorsEnd)
1891LEXT(ExceptionVectorsEnd)
1892#endif /* __ARM_KERNEL_PROTECT__ */
1893
1894#if XNU_MONITOR && !CONFIG_SPTM
1895
1896/*
1897 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1898 * mostly concerned with setting up state for the normal fleh code.
1899 */
1900	.text
1901	.align 2
1902fleh_synchronous_from_ppl:
1903	ARM64_JUMP_TARGET
1904	/* Save x0. */
1905	mov		x15, x0
1906
1907	/* Grab the ESR. */
1908	mrs		x1, ESR_EL1							// Get the exception syndrome
1909
1910	/* If the stack pointer is corrupt, it will manifest either as a data abort
1911	 * (exception class 0x25) or an SP alignment fault (exception class 0x26).
1912	 * We can screen for both quickly by testing bit 5 of the exception class.
1913	 */
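	/*
	 * Illustrative C for the fast filter below (not kernel code):
	 *
	 *	// ESR_EC_DABORT_EL1 (0x25) and ESR_EC_SP_ALIGN (0x26) both have
	 *	// bit 5 of the EC set; if that bit is clear, neither class
	 *	// applies and the stack can be treated as valid right away.
	 *	if (!(esr & (1ULL << (ESR_EC_SHIFT + 5))))
	 *		goto valid_ppl_stack;
	 */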
1914	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1915	mrs		x0, SP_EL0							// Get SP_EL0
1916
1917	/* Perform high-level checks for stack corruption. */
1918	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
1919	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1920	cmp		x1, x2								// If we have a stack alignment exception
1921	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
1922	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1923	cmp		x1, x2								// If we have a data abort, we need to
1924	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer
1925
1926Ltest_pstack:
1927	/* Bounds check the PPL stack. */
1928	adrp	x10, EXT(pmap_stacks_start)@page
1929	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1930	adrp	x11, EXT(pmap_stacks_end)@page
1931	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1932	cmp		x0, x10
1933	b.lo	Lcorrupt_ppl_stack
1934	cmp		x0, x11
1935	b.hi	Lcorrupt_ppl_stack
1936
1937Lvalid_ppl_stack:
1938	/* Restore x0. */
1939	mov		x0, x15
1940
1941	/* Switch back to the kernel stack. */
1942	msr		SPSel, #0
1943	GET_PMAP_CPU_DATA x5, x6, x7
1944	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1945	mov		sp, x6
1946
1947	/* Hand off to the synch handler. */
1948	b		EXT(fleh_synchronous)
1949
1950Lcorrupt_ppl_stack:
1951	/* Restore x0. */
1952	mov		x0, x15
1953
1954	/* Hand off to the invalid stack handler. */
1955	b		fleh_invalid_stack
1956
1957fleh_fiq_from_ppl:
1958	ARM64_JUMP_TARGET
1959	SWITCH_TO_INT_STACK	tmp=x25
1960	b		EXT(fleh_fiq)
1961
1962fleh_irq_from_ppl:
1963	ARM64_JUMP_TARGET
1964	SWITCH_TO_INT_STACK	tmp=x25
1965	b		EXT(fleh_irq)
1966
1967fleh_serror_from_ppl:
1968	ARM64_JUMP_TARGET
1969	GET_PMAP_CPU_DATA x5, x6, x7
1970	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1971	mov		sp, x6
1972	b		EXT(fleh_serror)
1973
1974
1975
1976
1977	// x15: ppl call number
1978	// w10: ppl_state
1979	// x20: gxf_enter caller's DAIF
1980	.globl EXT(ppl_trampoline_start)
1981LEXT(ppl_trampoline_start)
1982
1983
1984#error "XPRR configuration error"
1985	cmp		x14, x21
1986	b.ne	Lppl_fail_dispatch
1987
1988	/* Verify the request ID. */
1989	cmp		x15, PMAP_COUNT
1990	b.hs	Lppl_fail_dispatch
1991
1992	GET_PMAP_CPU_DATA	x12, x13, x14
1993
1994	/* Mark this CPU as being in the PPL. */
1995	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1996
1997	cmp		w9, #PPL_STATE_KERNEL
1998	b.eq		Lppl_mark_cpu_as_dispatching
1999
2000	/* Check to see if we are trying to trap from within the PPL. */
2001	cmp		w9, #PPL_STATE_DISPATCH
2002	b.eq		Lppl_fail_dispatch_ppl
2003
2004
2005	/* Ensure that we are returning from an exception. */
2006	cmp		w9, #PPL_STATE_EXCEPTION
2007	b.ne		Lppl_fail_dispatch
2008
2009	// w10 was set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL on the way in;
2010	// it must indicate that we are returning from a PPL exception.
2011	cmp		w10, #PPL_STATE_EXCEPTION
2012	b.ne		Lppl_fail_dispatch
2013
2014	/* This is an exception return; set the CPU to the dispatching state. */
2015	mov		w9, #PPL_STATE_DISPATCH
2016	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
2017
2018	/* Find the save area, and return to the saved PPL context. */
2019	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
2020	mov		sp, x0
2021	b		EXT(return_to_ppl)
2022
2023Lppl_mark_cpu_as_dispatching:
2024	cmp		w10, #PPL_STATE_KERNEL
2025	b.ne		Lppl_fail_dispatch
2026
2027	/* Mark the CPU as dispatching. */
2028	mov		w13, #PPL_STATE_DISPATCH
2029	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
2030
2031	/* Switch to the regular PPL stack. */
2032	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
2033	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
2034
2035	// SP0 is thread stack here
2036	mov		x21, sp
2037	// SP0 is now PPL stack
2038	mov		sp, x9
2039
2040	/* Save the old stack pointer off in case we need it. */
2041	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
2042
2043	/* Get the handler for the request */
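	/* In effect (illustrative C): handler = ppl_handler_table[x15];
	 * the lsl #3 below scales the request ID by the 8-byte entry size. */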
2044	adrp	x9, EXT(ppl_handler_table)@page
2045	add		x9, x9, EXT(ppl_handler_table)@pageoff
2046	add		x9, x9, x15, lsl #3
2047	ldr		x10, [x9]
2048
2049	/* Branch to the code that will invoke the PPL request. */
2050	b		EXT(ppl_dispatch)
2051
2052Lppl_fail_dispatch_ppl:
2053	/* Switch back to the kernel stack. */
2054	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
2055	mov		sp, x10
2056
2057Lppl_fail_dispatch:
2058	/* Indicate that we failed. */
2059	mov		x15, #PPL_EXIT_BAD_CALL
2060
2061	/* Move the DAIF bits into the expected register. */
2062	mov		x10, x20
2063
2064	/* Return to kernel mode. */
2065	b		ppl_return_to_kernel_mode
2066
2067Lppl_dispatch_exit:
2068
2069	/* Indicate that we are cleanly exiting the PPL. */
2070	mov		x15, #PPL_EXIT_DISPATCH
2071
2072	/* Switch back to the original (kernel thread) stack. */
2073	mov		sp, x21
2074
2075	/* Move the saved DAIF bits. */
2076	mov		x10, x20
2077
2078	/* Clear the in-flight pmap pointer */
2079	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
2080	stlr		xzr, [x13]
2081
2082	/* Clear the old stack pointer. */
2083	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
2084
2085	/*
2086	 * Mark the CPU as no longer being in the PPL.  We spin if our state
2087	 * machine is broken.
2088	 */
2089	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
2090	cmp		w9, #PPL_STATE_DISPATCH
2091	b.ne		.
2092	mov		w9, #PPL_STATE_KERNEL
2093	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
2094
2095	/* Return to the kernel. */
2096	b ppl_return_to_kernel_mode
2097
2098
2099
2100	.text
2101ppl_exit:
2102	ARM64_PROLOG
2103	/*
2104	 * If we are dealing with an exception, hand off to the first level
2105	 * exception handler.
2106	 */
2107	cmp		x15, #PPL_EXIT_EXCEPTION
2108	b.eq	Ljump_to_fleh_handler
2109
2110	/* If this was a panic call from the PPL, reinvoke panic. */
2111	cmp		x15, #PPL_EXIT_PANIC_CALL
2112	b.eq	Ljump_to_panic_trap_to_debugger
2113
2114	/*
2115	 * Stash off the original DAIF in the high bits of the exit code register.
2116	 * We could keep this in a dedicated register, but that would require us to copy it to
2117	 * an additional callee-save register below (e.g. x22), which in turn would require that
2118	 * register to be saved/restored at PPL entry/exit.
2119	 */
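	/*
	 * Illustratively, in C (not kernel code):
	 *
	 *	x15 = exit_code | (original_daif << 32);	// packed here
	 *	...
	 *	original_daif = x15 >> 32;			// unpacked by the
	 *	exit_code     = x15 & 0xFFFFFFFFull;		// ubfx/bfc pair on exit
	 */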
2120	add		x15, x15, x10, lsl #32
2121
2122	/* Load the preemption count. */
2123	mrs		x10, TPIDR_EL1
2124	ldr		w12, [x10, ACT_PREEMPT_CNT]
2125
2126	/* Detect underflow */
2127	cbnz	w12, Lno_preempt_underflow
2128	b		preempt_underflow
2129Lno_preempt_underflow:
2130
2131	/* Lower the preemption count. */
2132	sub		w12, w12, #1
2133
2134#if SCHED_HYGIENE_DEBUG
2135	/* Collect preemption disable measurement if necessary. */
2136
2137	/*
2138	 * Only collect a measurement if this reenabled preemption
2139	 * and SCHED_HYGIENE_MARKER is set.
2140	 */
2141	mov		x20, #SCHED_HYGIENE_MARKER
2142	cmp		w12, w20
2143	b.ne	Lskip_collect_measurement
2144
2145	/* Stash our return value and return reason. */
2146	mov		x20, x0
2147	mov		x21, x15
2148
2149	/* Collect measurement. */
2150	bl		EXT(_collect_preemption_disable_measurement)
2151
2152	/* Restore the return value and the return reason. */
2153	mov		x0, x20
2154	mov		x15, x21
2155	/* ... and w12, which is now 0. */
2156	mov		w12, #0
2157
2158	/* Restore the thread pointer into x10. */
2159	mrs		x10, TPIDR_EL1
2160
2161Lskip_collect_measurement:
2162#endif /* SCHED_HYGIENE_DEBUG */
2163
2164	/* Save the lowered preemption count. */
2165	str		w12, [x10, ACT_PREEMPT_CNT]
2166
2167	/* Skip ASTs if the preemption count is not zero. */
2168	cbnz	x12, Lppl_skip_ast_taken
2169
2170	/*
2171	 * Skip the AST check if interrupts were originally disabled.
2172	 * The original DAIF state prior to PPL entry is stored in the upper
2173	 * 32 bits of x15.
2174	 */
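	/*
	 * The exit-time AST policy around this point, as a C sketch
	 * (illustrative only; irqs_were_enabled() stands for the DAIF.I
	 * bit test performed by the tbnz below):
	 *
	 *	if (preempt_count == 0 &&
	 *	    irqs_were_enabled(original_daif) &&
	 *	    (cpu_data->pending_ast & AST_URGENT)) {
	 *		ast_taken_kernel();
	 *	}
	 */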
2175	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken
2176
2177	/* If there is no urgent AST, skip AST handling. */
2178	ldr		x12, [x10, ACT_CPUDATAP]
2179	ldr		w14, [x12, CPU_PENDING_AST]
2180	tst		w14, AST_URGENT
2181	b.eq	Lppl_skip_ast_taken
2182
2183	/* Stash our return value and return reason. */
2184	mov		x20, x0
2185	mov		x21, x15
2186
2187	/* Handle the AST. */
2188	bl		EXT(ast_taken_kernel)
2189
2190	/* Restore the return value and the return reason. */
2191	mov		x15, x21
2192	mov		x0, x20
2193
2194Lppl_skip_ast_taken:
2195
2196	/* Extract caller DAIF from high-order bits of exit code */
2197	ubfx	x10, x15, #32, #32
2198	bfc		x15, #32, #32
2199	msr		DAIF, x10
2200
2201	/* Pop the stack frame. */
2202	ldp		x29, x30, [sp, #0x10]
2203	ldp		x20, x21, [sp], #0x20
2204
2205	/* Check to see if this was a bad request. */
2206	cmp		x15, #PPL_EXIT_BAD_CALL
2207	b.eq	Lppl_bad_call
2208
2209	/* Return. */
2210	ARM64_STACK_EPILOG
2211
2212	.align 2
2213Ljump_to_fleh_handler:
2214	br	x25
2215
2216	.align 2
2217Ljump_to_panic_trap_to_debugger:
2218	b		EXT(panic_trap_to_debugger)
2219
2220Lppl_bad_call:
2221	/* Panic. */
2222	adrp	x0, Lppl_bad_call_panic_str@page
2223	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2224	b		EXT(panic)
2225
2226	.text
2227	.align 2
2228	.globl EXT(ppl_dispatch)
2229LEXT(ppl_dispatch)
2230	/*
2231	 * Save a couple of important registers (implementation detail; x12 has
2232	 * the PPL per-CPU data address; x13 is not actually interesting).
2233	 */
2234	stp		x12, x13, [sp, #-0x10]!
2235
2236	/*
2237	 * Restore the original AIF state, forcing D set so that debug
2238	 * exceptions remain masked while PPL code runs.
2239	 */
2240	orr		x8, x20, DAIF_DEBUGF
2241	msr		DAIF, x8
2242
2243	/*
2244	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
2245	 * but the exception vectors will deal with this properly.
2246	 */
2247
2248	/* Invoke the PPL method. */
2249#ifdef HAS_APPLE_PAC
2250	blraa		x10, x9
2251#else
2252	blr		x10
2253#endif
2254
2255	/* Mask all exceptions (DAIFSet). */
2256	msr		DAIFSet, #(DAIFSC_ALL)
2257
2258	/* Restore those important registers. */
2259	ldp		x12, x13, [sp], #0x10
2260
2261	/* Mark this as a regular return, and hand off to the return path. */
2262	b		Lppl_dispatch_exit
2263
2264	.text
2265	.align 2
2266	.globl EXT(ppl_bootstrap_dispatch)
2267LEXT(ppl_bootstrap_dispatch)
2268	/* Verify the PPL request. */
2269	cmp		x15, PMAP_COUNT
2270	b.hs	Lppl_fail_bootstrap_dispatch
2271
2272	/* Get the requested PPL routine. */
2273	adrp	x9, EXT(ppl_handler_table)@page
2274	add		x9, x9, EXT(ppl_handler_table)@pageoff
2275	add		x9, x9, x15, lsl #3
2276	ldr		x10, [x9]
2277
2278	/* Invoke the requested PPL routine. */
2279#ifdef HAS_APPLE_PAC
2280	blraa		x10, x9
2281#else
2282	blr		x10
2283#endif
2284	LOAD_PMAP_CPU_DATA	x9, x10, x11
2285
2286	/* Clear the in-flight pmap pointer */
2287	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
2288	stlr		xzr, [x9]
2289
2290	/* Stash off the return value */
2291	mov		x20, x0
2292	/* Drop the preemption count */
2293	bl		EXT(_enable_preemption)
2294	mov		x0, x20
2295
2296	/* Pop the stack frame. */
2297	ldp		x29, x30, [sp, #0x10]
2298	ldp		x20, x21, [sp], #0x20
2299#if __has_feature(ptrauth_returns)
2300	retab
2301#else
2302	ret
2303#endif
2304
2305Lppl_fail_bootstrap_dispatch:
2306	/* Pop our stack frame and panic. */
2307	ldp		x29, x30, [sp, #0x10]
2308	ldp		x20, x21, [sp], #0x20
2309#if __has_feature(ptrauth_returns)
2310	autibsp
2311#endif
2312	adrp	x0, Lppl_bad_call_panic_str@page
2313	add		x0, x0, Lppl_bad_call_panic_str@pageoff
2314	b		EXT(panic)
2315
2316	.text
2317	.align 2
2318	.globl EXT(ml_panic_trap_to_debugger)
2319LEXT(ml_panic_trap_to_debugger)
2320	ARM64_PROLOG
2321	mrs		x10, DAIF
2322	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
2323
2324	adrp		x12, EXT(pmap_ppl_locked_down)@page
2325	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
2326	cbz		w12, Lnot_in_ppl_dispatch
2327
2328	LOAD_PMAP_CPU_DATA	x11, x12, x13
2329
2330	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
2331	cmp		w12, #PPL_STATE_DISPATCH
2332	b.ne		Lnot_in_ppl_dispatch
2333
2334	/* Indicate (for the PPL->kernel transition) that we are panicking. */
2335	mov		x15, #PPL_EXIT_PANIC_CALL
2336
2337	/* Restore the old stack pointer, as we can't push onto the PPL stack after we exit the PPL. */
2338	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
2339	mov		sp, x12
2340
2341	mrs		x10, DAIF
2342	mov		w13, #PPL_STATE_PANIC
2343	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
2344
2345	/**
2346	 * When we panic in PPL, we might have un-synced PTE updates. Shoot down
2347	 * all the TLB entries.
2348	 *
2349	 * A check must be done here against CurrentEL because the alle1is flavor
2350	 * of tlbi is not available to EL1, but the vmalle1is flavor is. When PPL
2351	 * runs at GL2, we can issue an alle2is and an alle1is tlbi to kill all
2352	 * the TLB entries. When PPL runs at GL1, as a guest or on an pre-H13
2353	 * platform, we issue a vmalle1is tlbi instead.
2354	 *
2355	 * Note that we only do this after passing the `PPL_STATE_DISPATCH` check
2356	 * because if we did this for every panic, including the ones triggered
2357	 * by fabric problems we may be stuck at the DSB below and trigger an AP
2358	 * watchdog.
2359	 */
2360	mrs		x12, CurrentEL
2361	cmp		x12, PSR64_MODE_EL2
2362	bne		Lnot_in_gl2
2363	tlbi		alle2is
2364	tlbi		alle1is
2365	b		Ltlb_invalidate_all_done
2366Lnot_in_gl2:
2367	tlbi		vmalle1is
2368Ltlb_invalidate_all_done:
2369	dsb		ish
2370	isb
2371
2372	/* Now we are ready to exit the PPL. */
2373	b		ppl_return_to_kernel_mode
2374Lnot_in_ppl_dispatch:
2375	msr		DAIF, x10
2376	ret
2377
2378	.data
2379Lppl_bad_call_panic_str:
2380	.asciz "ppl_dispatch: failed due to bad arguments/state"
2381#else /* XNU_MONITOR && !CONFIG_SPTM */
2382	.text
2383	.align 2
2384	.globl EXT(ml_panic_trap_to_debugger)
2385LEXT(ml_panic_trap_to_debugger)
2386	ARM64_PROLOG
2387	ret
2388#endif /* XNU_MONITOR && !CONFIG_SPTM */
2389
2390#if CONFIG_SPTM
2391	.text
2392	.align 2
2393
2394	.globl EXT(_sptm_pre_entry_hook)
2395LEXT(_sptm_pre_entry_hook)
2396	/* Push a frame. */
2397	ARM64_STACK_PROLOG
2398	PUSH_FRAME
2399	stp		x20, x21, [sp, #-0x10]!
2400
2401	/* Increase the preemption count. */
2402	mrs		x9, TPIDR_EL1
2403	cbz		x9, Lskip_preemption_check_sptmhook
2404	ldr		w10, [x9, ACT_PREEMPT_CNT]
2405	add		w10, w10, #1
2406	str		w10, [x9, ACT_PREEMPT_CNT]
2407
2408#if SCHED_HYGIENE_DEBUG
2409	/* Prepare preemption disable measurement, if necessary. */
2410
2411	/* Only prepare if we actually disabled preemption. */
2412	cmp		w10, #1
2413	b.ne	Lskip_prepare_measurement_sptmhook
2414
2415	/* Don't prepare if measuring is off completely. */
2416	adrp	x10, _sched_preemption_disable_debug_mode@page
2417	add		x10, x10, _sched_preemption_disable_debug_mode@pageoff
2418	ldr		w10, [x10]
2419	cmp		w10, #0
2420	b.eq	Lskip_prepare_measurement_sptmhook
2421
2422	/* Save arguments to SPTM function and SPTM function id. */
2423	mov		x20, x16
2424	stp		x0, x1, [sp, #-0x40]!
2425	stp		x2, x3, [sp, #0x10]
2426	stp		x4, x5, [sp, #0x20]
2427	stp		x6, x7, [sp, #0x30]
2428
2429	/* Call prepare function with thread pointer as first arg. */
2430	bl		EXT(_prepare_preemption_disable_measurement)
2431
2432	/* Restore arguments to SPTM function and SPTM function id. */
2433	ldp		x6, x7, [sp, #0x30]
2434	ldp		x4, x5, [sp, #0x20]
2435	ldp		x2, x3, [sp, #0x10]
2436	ldp		x0, x1, [sp]
2437	add		sp, sp, #0x40
2438	mov		x16, x20
2439
2440Lskip_prepare_measurement_sptmhook:
2441#endif /* SCHED_HYGIENE_DEBUG */
2442Lskip_preemption_check_sptmhook:
2443	/* Assert that we're not calling from guarded mode. */
2444	mrs		x14, CurrentG
2445	cmp		x14, #0
2446	b.ne	.
2447
2448	ldp		x20, x21, [sp], #0x10
2449	POP_FRAME
2450	ARM64_STACK_EPILOG
2451
2452	.align 2
2453	.globl EXT(_sptm_post_exit_hook)
2454LEXT(_sptm_post_exit_hook)
2455	ARM64_STACK_PROLOG
2456	PUSH_FRAME
2457	stp		x20, x21, [sp, #-0x10]!
2458
2459	/* Save SPTM return value(s) */
2460	stp		x0, x1, [sp, #-0x40]!
2461	stp		x2, x3, [sp, #0x10]
2462	stp		x4, x5, [sp, #0x20]
2463	stp		x6, x7, [sp, #0x30]
2464
2465
2466	/* Load the preemption count. */
2467	mrs		x0, TPIDR_EL1
2468	cbz		x0, Lsptm_skip_ast_taken_sptmhook
2469	ldr		w12, [x0, ACT_PREEMPT_CNT]
2470
2471	/* Detect underflow */
2472	cbnz	w12, Lno_preempt_underflow_sptmhook
2473	/* No need to clean up the stack, as preempt_underflow calls panic */
2474	b		preempt_underflow
2475Lno_preempt_underflow_sptmhook:
2476
2477	/* Lower the preemption count. */
2478	sub		w12, w12, #1
2479
2480#if SCHED_HYGIENE_DEBUG
2481	/* Collect preemption disable measurement if necessary. */
2482
2483	/*
2484	 * Only collect a measurement if this reenabled preemption
2485	 * and SCHED_HYGIENE_MARKER is set.
2486	 */
2487	mov		x20, #SCHED_HYGIENE_MARKER
2488	cmp		w12, w20
2489	b.ne	Lskip_collect_measurement_sptmhook
2490
2491	/* Collect measurement. */
2492	bl		EXT(_collect_preemption_disable_measurement)
2493
2494	/* Restore w12, which is now 0. */
2495	mov		w12, #0
2496
2497	/* Restore x0 as the thread pointer */
2498	mrs		x0, TPIDR_EL1
2499
2500Lskip_collect_measurement_sptmhook:
2501#endif /* SCHED_HYGIENE_DEBUG */
2502
2503	/* Save the lowered preemption count. */
2504	str		w12, [x0, ACT_PREEMPT_CNT]
2505
2506	/* Skip ASTs if the preemption count is not zero. */
2507	cbnz	w12, Lsptm_skip_ast_taken_sptmhook
2508
2509	/**
2510	 * Skip the AST check if interrupts were originally disabled. The original
2511	 * DAIF value needs to be placed into a callee-saved register so that the
2512	 * value is preserved across the ast_taken_kernel() call.
2513	 */
2514	mrs		x20, DAIF
2515	tbnz	x20, #(DAIF_IRQF_SHIFT), Lsptm_skip_ast_taken_sptmhook
2516
2517	/* If there is no urgent AST, skip AST handling. */
2518	ldr		x12, [x0, ACT_CPUDATAP]
2519	ldr		x14, [x12, CPU_PENDING_AST]
2520	tst		x14, AST_URGENT
2521	b.eq	Lsptm_skip_ast_taken_sptmhook
2522
2523	/* Handle the AST. This call requires interrupts to be disabled. */
2524	msr		DAIFSet, #(DAIFSC_ALL)
2525	bl		EXT(ast_taken_kernel)
2526	msr		DAIF, x20
2527
2528Lsptm_skip_ast_taken_sptmhook:
2529
2530	/* Restore SPTM return value(s) */
2531	ldp		x6, x7, [sp, #0x30]
2532	ldp		x4, x5, [sp, #0x20]
2533	ldp		x2, x3, [sp, #0x10]
2534	ldp		x0, x1, [sp]
2535	add		sp, sp, #0x40
2536
2537	/* Return. */
2538	ldp		x20, x21, [sp], 0x10
2539	POP_FRAME
2540	ARM64_STACK_EPILOG
2541#endif /* CONFIG_SPTM */
2542
2543#if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
2544/**
2545 * Record debug data for a panic lockdown event
2546 * Clobbers x0, x1, x2
2547 */
2548	.text
2549	.align 2
2550	.global EXT(panic_lockdown_record_debug_data)
2551LEXT(panic_lockdown_record_debug_data)
2552	adrp	x0, EXT(debug_panic_lockdown_initiator_state)@page
2553	add		x0, x0, EXT(debug_panic_lockdown_initiator_state)@pageoff
2554
2555	/*
2556	 * To synchronize accesses to the debug state, we use the initiator PC as a
2557	 * "lock". It starts out at zero and we try to swap in our initiator's PC
2558	 * (which is trivially non-zero) to acquire the debug state and become the
2559	 * initiator of record.
2560	 *
2561	 * Note that other CPUs which are not the initiator of record may still
2562	 * initiate panic lockdown (potentially before the initiator of record does
2563	 * so) and so this debug data should only be used as a hint for the
2564	 * initiating CPU rather than a guarantee of which CPU initiated lockdown
2565	 * first.
2566	 */
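	/*
	 * A minimal C11 sketch of the acquisition (illustrative only; the code
	 * below uses a raw cas instruction against the assym offset instead):
	 *
	 *	uintptr_t expected = 0;
	 *	if (atomic_compare_exchange_strong(&state->initiator_pc,
	 *	                                   &expected, caller_lr)) {
	 *		// We won the race: record SP, TPIDR_EL1, MPIDR_EL1,
	 *		// ESR_EL1, ELR_EL1, FAR_EL1, and a timestamp.
	 *	}
	 */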
2567	mov		x1, #0
2568	add		x2, x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_PC
2569	cas		x1, lr, [x2]
2570	/* If there's a non-zero value there already, we aren't the first. Skip. */
2571	cbnz	x1, Lpanic_lockdown_record_debug_data_done
2572
2573	/*
2574	 * We're the first and have exclusive access to the debug structure!
2575	 * Record all our data.
2576	 */
2577	mov		x1, sp
2578	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_SP]
2579
2580	mrs		x1, TPIDR_EL1
2581	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_TPIDR]
2582
2583	mrs		x1, MPIDR_EL1
2584	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_INITIATOR_MPIDR]
2585
2586	mrs		x1, ESR_EL1
2587	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ESR]
2588
2589	mrs		x1, ELR_EL1
2590	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_ELR]
2591
2592	mrs		x1, FAR_EL1
2593	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_FAR]
2594
2595	/* Sync and then read the timer */
2596	dsb		sy
2597	isb
2598	mrs		x1, CNTVCT_EL0
2599	str		x1, [x0, #PANIC_LOCKDOWN_INITIATOR_STATE_TIMESTAMP]
2600
2601Lpanic_lockdown_record_debug_data_done:
2602	ret
2603#endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
2604
2605/* ARM64_TODO Is globals_asm.h needed? */
2606//#include	"globals_asm.h"
2607
2608/* vim: set ts=4: */
2609