xref: /xnu-8792.41.9/osfmk/arm64/locore.s (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_machdep.h>
31#include <arm64/machine_routines_asm.h>
32#include <arm64/proc_reg.h>
33#include <pexpert/arm64/board_config.h>
34#include <mach/exception_types.h>
35#include <mach_kdp.h>
36#include <config_dtrace.h>
37#include "assym.s"
38#include <arm64/exception_asm.h>
39#include "dwarf_unwind.h"
40
41#if __ARM_KERNEL_PROTECT__
42#include <arm/pmap.h>
43#endif
44
45#if XNU_MONITOR
46/*
47 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
48 *
49 * Checks if an exception was taken from the PPL, and if so, trampolines back
50 * into the PPL.
51 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
52 *         exception was taken while in the PPL.
53 */
54.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
55	cmp		x26, xzr
56	b.eq		1f
57
58	/* Return to the PPL. */
59	mov		x15, #0
60	mov		w10, #PPL_STATE_EXCEPTION
61#error "XPRR configuration error"
621:
63.endmacro
64
65
66#endif /* XNU_MONITOR */
67
68/*
69 * MAP_KERNEL
70 *
71 * Restores the kernel EL1 mappings, if necessary.
72 *
73 * This may mutate x18.
74 */
75.macro MAP_KERNEL
76#if __ARM_KERNEL_PROTECT__
77	/* Switch to the kernel ASID (low bit set) for the task. */
78	mrs		x18, TTBR0_EL1
79	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
80	msr		TTBR0_EL1, x18
81
82	/*
83	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
84	 * to the TTBRs and writes to the TCR should be ensured by the
85	 * microarchitecture.
86	 */
87#if !defined(APPLE_ARM64_ARCH_FAMILY)
88	isb		sy
89#endif
90
91	/*
92	 * Update the TCR to map the kernel now that we are using the kernel
93	 * ASID.
94	 */
95	MOV64		x18, TCR_EL1_BOOT
96	msr		TCR_EL1, x18
97	isb		sy
98#endif /* __ARM_KERNEL_PROTECT__ */
99.endmacro
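/*
 * For reference, the inverse of MAP_KERNEL lives in the exception-return path
 * below (see Lskip_ttbr1_switch); roughly:
 *
 *	MOV64	x18, TCR_EL1_USER			// shrink the EL1 mapping back down
 *	msr	TCR_EL1, x18
 *	mrs	x18, TTBR0_EL1
 *	bic	x18, x18, #(1 << TTBR_ASID_SHIFT)	// user ASID has the low bit clear
 *	msr	TTBR0_EL1, x18
 */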
100
101/*
102 * BRANCH_TO_KVA_VECTOR
103 *
104 * Branches to the requested long exception vector in the kernelcache.
105 *   arg0 - The label to branch to
106 *   arg1 - The index of the label in exc_vectors_tables
107 *
108 * This may mutate x18.
109 */
110.macro BRANCH_TO_KVA_VECTOR
111#if __ARM_KERNEL_PROTECT__
112	/*
113	 * Find the kernelcache table for the exception vectors by accessing
114	 * the per-CPU data.
115	 */
116	mrs		x18, TPIDR_EL1
117	ldr		x18, [x18, ACT_CPUDATAP]
118	ldr		x18, [x18, CPU_EXC_VECTORS]
119
120	/*
121	 * Get the handler for this exception and jump to it.
122	 */
123	ldr		x18, [x18, #($1 << 3)]
124	br		x18
125#else
126	b		$0
127#endif /* __ARM_KERNEL_PROTECT__ */
128.endmacro
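/*
 * For illustration: an invocation such as
 *
 *	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
 *
 * expands to a plain "b el1_sp0_irq_vector_long" when __ARM_KERNEL_PROTECT__
 * is not set; with it set, it instead loads entry 1 (byte offset 1 << 3) from
 * the per-CPU copy of exc_vectors_table and branches through x18.
 */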
129
130/*
131 * CHECK_KERNEL_STACK
132 *
133 * Verifies that the kernel stack is aligned and mapped within an expected
134 * stack address range. Note: happens before saving registers (in case we can't
135 * save to kernel stack).
136 *
137 * Expects:
138 *	{x0, x1} - saved
139 *	x1 - Exception syndrome
140 *	sp - Saved state
141 *
142 * Seems like we need an unused argument to the macro for the \@ syntax to work
143 *
144 */
145.macro CHECK_KERNEL_STACK unused
146	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
147	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
148	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
149	cmp		x1, x2								// If we have a stack alignment exception
150	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
151	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
152	cmp		x1, x2								// If we have a data abort, we need to
153	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
154	mrs		x0, SP_EL0					// Get SP_EL0
155	mrs		x1, TPIDR_EL1						// Get thread pointer
156Ltest_kstack_\@:
157	ldr		x2, [x1, TH_KSTACKPTR]				// Get top of kernel stack
158	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
159	cmp		x0, x2								// if (SP_EL0 >= kstack top)
160	b.ge	Ltest_istack_\@						//    jump to istack test
161	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
162	b.gt	Lvalid_stack_\@						//    stack pointer valid
163Ltest_istack_\@:
164	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
165	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
166	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
167	cmp		x0, x2								// if (SP_EL0 >= istack top)
168	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
169	cmp		x0, x3								// if (SP_EL0 > istack bottom)
170	b.gt	Lvalid_stack_\@						//    stack pointer valid
171Lcorrupt_stack_\@:
172	ldp		x2, x3, [sp], #16
173	ldp		x0, x1, [sp], #16
174	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
175	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
176	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
177	mrs		x0, SP_EL0					// Get SP_EL0
178	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
179	INIT_SAVED_STATE_FLAVORS sp, w0, w1
180	mov		x0, sp								// Copy exception frame pointer to x0
181	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
182	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
183	b		fleh_dispatch64
184Lvalid_stack_\@:
185	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
186.endmacro
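/*
 * The bounds check above, restated as C-like pseudocode (a sketch only; the
 * names mirror the assembly offsets/constants rather than any real kernel API):
 *
 *	kstack_top = thread->TH_KSTACKPTR;
 *	istack_top = thread->cpu_data->CPU_INTSTACK_TOP;
 *	valid = (SP_EL0 < kstack_top && SP_EL0 > kstack_top - KERNEL_STACK_SIZE) ||
 *	        (SP_EL0 < istack_top && SP_EL0 > istack_top - INTSTACK_SIZE_NUM);
 *
 * If neither range matches, the macro builds an exception frame by hand and
 * dispatches to fleh_invalid_stack rather than trusting sp.
 */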
187
188
189#if __ARM_KERNEL_PROTECT__
190	.section __DATA_CONST,__const
191	.align 3
192	.globl EXT(exc_vectors_table)
193LEXT(exc_vectors_table)
194	/* Table of exception handlers.
195	 * These handlers sometimes contain deadloops.
196	 * It's nice to have symbols for them when debugging. */
197	.quad el1_sp0_synchronous_vector_long
198	.quad el1_sp0_irq_vector_long
199	.quad el1_sp0_fiq_vector_long
200	.quad el1_sp0_serror_vector_long
201	.quad el1_sp1_synchronous_vector_long
202	.quad el1_sp1_irq_vector_long
203	.quad el1_sp1_fiq_vector_long
204	.quad el1_sp1_serror_vector_long
205	.quad el0_synchronous_vector_64_long
206	.quad el0_irq_vector_64_long
207	.quad el0_fiq_vector_64_long
208	.quad el0_serror_vector_64_long
209#endif /* __ARM_KERNEL_PROTECT__ */
210
211	.text
212#if __ARM_KERNEL_PROTECT__
213	/*
214	 * We need this to be on a page boundary so that we can avoid mapping
215	 * other text along with it.  As this must be on the VM page boundary
216	 * (due to how the coredumping code currently works), this will be a
217	 * 16KB page boundary.
218	 */
219	.align 14
220#else
221	.align 12
222#endif /* __ARM_KERNEL_PROTECT__ */
223	.globl EXT(ExceptionVectorsBase)
224LEXT(ExceptionVectorsBase)
225Lel1_sp0_synchronous_vector:
226	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
227
228	.text
229	.align 7
230Lel1_sp0_irq_vector:
231	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
232
233	.text
234	.align 7
235Lel1_sp0_fiq_vector:
236	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
237
238	.text
239	.align 7
240Lel1_sp0_serror_vector:
241	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
242
243	.text
244	.align 7
245Lel1_sp1_synchronous_vector:
246	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
247
248	.text
249	.align 7
250Lel1_sp1_irq_vector:
251	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
252
253	.text
254	.align 7
255Lel1_sp1_fiq_vector:
256	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
257
258	.text
259	.align 7
260Lel1_sp1_serror_vector:
261	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
262
263	.text
264	.align 7
265Lel0_synchronous_vector_64:
266	MAP_KERNEL
267	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
268
269	.text
270	.align 7
271Lel0_irq_vector_64:
272	MAP_KERNEL
273	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
274
275	.text
276	.align 7
277Lel0_fiq_vector_64:
278	MAP_KERNEL
279	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
280
281	.text
282	.align 7
283Lel0_serror_vector_64:
284	MAP_KERNEL
285	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
286
287	/* Fill out the rest of the page */
288	.align 12
289
290/*********************************
291 * END OF EXCEPTION VECTORS PAGE *
292 *********************************/
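/*
 * Layout note: the ".align 7" between entries keeps each vector at the 0x80
 * spacing the architecture requires for VBAR_EL1, so relative to
 * ExceptionVectorsBase the entries above sit at:
 *
 *	0x000-0x180	current EL with SP0	(sync / IRQ / FIQ / SError)
 *	0x200-0x380	current EL with SPx	(sync / IRQ / FIQ / SError)
 *	0x400-0x580	lower EL, AArch64	(sync / IRQ / FIQ / SError)
 *	0x600-0x780	lower EL, AArch32	(unused here; covered by the page fill)
 */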
293
294
295
296.macro EL1_SP0_VECTOR
297	msr		SPSel, #0							// Switch to SP0
298	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
299	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
300	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
301	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
302	INIT_SAVED_STATE_FLAVORS sp, w0, w1
303	mov		x0, sp								// Copy saved state pointer to x0
304.endmacro
305
306el1_sp0_synchronous_vector_long:
307	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
308	mrs		x1, ESR_EL1							// Get the exception syndrome
309	/* If the stack pointer is corrupt, it will manifest either as a data abort
310	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
311	 * these quickly by testing bit 5 of the exception class.
312	 */
313	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
314	CHECK_KERNEL_STACK
315Lkernel_stack_valid:
316	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
317	EL1_SP0_VECTOR
318	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
319	add		x1, x1, EXT(fleh_synchronous)@pageoff
320	b		fleh_dispatch64
321
322el1_sp0_irq_vector_long:
323	EL1_SP0_VECTOR
324	SWITCH_TO_INT_STACK
325	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
326	add		x1, x1, EXT(fleh_irq)@pageoff
327	b		fleh_dispatch64
328
329el1_sp0_fiq_vector_long:
330	// ARM64_TODO write optimized decrementer
331	EL1_SP0_VECTOR
332	SWITCH_TO_INT_STACK
333	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
334	add		x1, x1, EXT(fleh_fiq)@pageoff
335	b		fleh_dispatch64
336
337el1_sp0_serror_vector_long:
338	EL1_SP0_VECTOR
339	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
340	add		x1, x1, EXT(fleh_serror)@pageoff
341	b		fleh_dispatch64
342
343.macro EL1_SP1_VECTOR
344	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
345	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
346	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
347	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
348	INIT_SAVED_STATE_FLAVORS sp, w0, w1
349	mov		x0, sp								// Copy saved state pointer to x0
350.endmacro
351
352el1_sp1_synchronous_vector_long:
353	b		check_exception_stack
354Lel1_sp1_synchronous_valid_stack:
355#if defined(KERNEL_INTEGRITY_KTRR)
356	b		check_ktrr_sctlr_trap
357Lel1_sp1_synchronous_vector_continue:
358#endif
359	EL1_SP1_VECTOR
360	adrp	x1, fleh_synchronous_sp1@page
361	add		x1, x1, fleh_synchronous_sp1@pageoff
362	b		fleh_dispatch64
363
364el1_sp1_irq_vector_long:
365	EL1_SP1_VECTOR
366	adrp	x1, fleh_irq_sp1@page
367	add		x1, x1, fleh_irq_sp1@pageoff
368	b		fleh_dispatch64
369
370el1_sp1_fiq_vector_long:
371	EL1_SP1_VECTOR
372	adrp	x1, fleh_fiq_sp1@page
373	add		x1, x1, fleh_fiq_sp1@pageoff
374	b		fleh_dispatch64
375
376el1_sp1_serror_vector_long:
377	EL1_SP1_VECTOR
378	adrp	x1, fleh_serror_sp1@page
379	add		x1, x1, fleh_serror_sp1@pageoff
380	b		fleh_dispatch64
381
382
383.macro EL0_64_VECTOR
384	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
385#if __ARM_KERNEL_PROTECT__
386	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
387#endif
388	mrs		x0, TPIDR_EL1						// Load the thread register
389	mrs		x1, SP_EL0							// Load the user stack pointer
390	add		x0, x0, ACT_CONTEXT					// Calculate where we store the user context pointer
391	ldr		x0, [x0]						// Load the user context pointer
392	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
393	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
394	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
395	msr		SPSel, #0							// Switch to SP0
396	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
397	mrs		x1, TPIDR_EL1						// Load the thread register
398
399
400	mov		x0, sp								// Copy the user PCB pointer to x0
401												// x1 contains thread register
402.endmacro
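/*
 * Net effect of EL0_64_VECTOR, in brief: SP_EL0 temporarily carries the
 * thread's user PCB pointer across the SPSel switch, so that once we are on
 * SP0 the saved-state area is addressable as sp. On exit, x0 = sp = user PCB,
 * x1 = TPIDR_EL1 (the thread), and the user's original stack pointer has
 * already been stashed in the PCB at SS64_SP.
 */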
403
404
405el0_synchronous_vector_64_long:
406	EL0_64_VECTOR	sync
407	SWITCH_TO_KERN_STACK
408	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
409	add		x1, x1, EXT(fleh_synchronous)@pageoff
410	b		fleh_dispatch64
411
412el0_irq_vector_64_long:
413	EL0_64_VECTOR	irq
414	SWITCH_TO_INT_STACK
415	adrp	x1, EXT(fleh_irq)@page					// load address for fleh
416	add		x1, x1, EXT(fleh_irq)@pageoff
417	b		fleh_dispatch64
418
419el0_fiq_vector_64_long:
420	EL0_64_VECTOR	fiq
421	SWITCH_TO_INT_STACK
422	adrp	x1, EXT(fleh_fiq)@page					// load address for fleh
423	add		x1, x1, EXT(fleh_fiq)@pageoff
424	b		fleh_dispatch64
425
426el0_serror_vector_64_long:
427	EL0_64_VECTOR	serror
428	SWITCH_TO_KERN_STACK
429	adrp	x1, EXT(fleh_serror)@page				// load address for fleh
430	add		x1, x1, EXT(fleh_serror)@pageoff
431	b		fleh_dispatch64
432
433
434/*
435 * check_exception_stack
436 *
437 * Verifies that the stack pointer at SP1 is within the exception stack.
438 * If it is not, simply hang, as we have no more stack to fall back on.
439 */
440
441	.text
442	.align 2
443check_exception_stack:
444	mrs		x18, TPIDR_EL1					// Get thread pointer
445	cbz		x18, Lvalid_exception_stack			// Thread context may not be set early in boot
446	ldr		x18, [x18, ACT_CPUDATAP]
447	cbz		x18, .						// Hang if cpu data is unset; it should be whenever the thread context is
448	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
449	cmp		sp, x18
450	b.gt		.						// Hang if above exception stack top
451	sub		x18, x18, EXCEPSTACK_SIZE_NUM			// Find bottom of exception stack
452	cmp		sp, x18
453	b.lt		.						// Hang if below exception stack bottom
454Lvalid_exception_stack:
455	mov		x18, #0
456	b		Lel1_sp1_synchronous_valid_stack
457
458
459#if defined(KERNEL_INTEGRITY_KTRR)
460	.text
461	.align 2
462check_ktrr_sctlr_trap:
463/* We may abort on an instruction fetch on reset when enabling the MMU by
464 * writing SCTLR_EL1 because the page containing the privileged instruction is
465 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
466 * would otherwise panic unconditionally. Check for the condition and return
467 * safe execution to the caller on behalf of the faulting function.
468 *
469 * Expected register state:
470 *  x22 - Kernel virtual base
471 *  x23 - Kernel physical base
472 */
473	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
474	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
475	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
476	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
477	movz	w1, #0x8600, lsl #16
478	movk	w1, #0x0000
479	cmp		x0, x1
480	mrs		x0, ELR_EL1					// Check for expected abort address
481	adrp	x1, _pinst_set_sctlr_trap_addr@page
482	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
483	sub		x1, x1, x22					// Convert to physical address
484	add		x1, x1, x23
485	ccmp	x0, x1, #0, eq
486	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
487	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
488	b.ne	Lel1_sp1_synchronous_vector_continue
489	msr		ELR_EL1, lr					// Return to caller
490	ERET_CONTEXT_SYNCHRONIZING
491#endif /* defined(KERNEL_INTEGRITY_KTRR) */
492
493/* 64-bit first level exception handler dispatcher.
494 * Completes register context saving and branches to FLEH.
495 * Expects:
496 *  {x0, x1, sp} - saved
497 *  x0 - arm_context_t
498 *  x1 - address of FLEH
499 *  fp - previous stack frame if EL1
500 *  lr - unused
501 *  sp - kernel stack
502 */
503	.text
504	.align 2
505fleh_dispatch64:
506	/* Save arm_saved_state64 */
507	SPILL_REGISTERS KERNEL_MODE
508
509	/* If exception is from userspace, zero unused registers */
510	and		x23, x23, #(PSR64_MODE_EL_MASK)
511	cmp		x23, #(PSR64_MODE_EL0)
512	bne		1f
513
514	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
5152:
516	mov		x2, #0
517	mov		x3, #0
518	mov		x4, #0
519	mov		x5, #0
520	mov		x6, #0
521	mov		x7, #0
522	mov		x8, #0
523	mov		x9, #0
524	mov		x10, #0
525	mov		x11, #0
526	mov		x12, #0
527	mov		x13, #0
528	mov		x14, #0
529	mov		x15, #0
530	mov		x16, #0
531	mov		x17, #0
532	mov		x18, #0
533	mov		x19, #0
534	mov		x20, #0
535	/* x21, x22 cleared in common case below */
536	mov		x23, #0
537	mov		x24, #0
538	mov		x25, #0
539#if !XNU_MONITOR
540	mov		x26, #0
541#endif
542	mov		x27, #0
543	mov		x28, #0
544	mov		fp, #0
545	mov		lr, #0
5461:
547
548	mov		x21, x0								// Copy arm_context_t pointer to x21
549	mov		x22, x1								// Copy handler routine to x22
550#if HAS_APPLE_PAC
551	pacia	x22, sp
552#endif
553
554#if XNU_MONITOR
555	/* Zero x26 to indicate that this should not return to the PPL. */
556	mov		x26, #0
557#endif
558
559#if PRECISE_USER_KERNEL_TIME
560	tst		x23, PSR64_MODE_EL_MASK				// If any EL MODE bits are set, we're coming from
561	b.ne	1f									// kernel mode, so skip precise time update
562	PUSH_FRAME
563	bl		EXT(recount_leave_user)
564	POP_FRAME
565	mov		x0, x21								// Reload arm_context_t pointer
5661:
567#endif /* PRECISE_USER_KERNEL_TIME */
568
569	/* Dispatch to FLEH */
570
571#if HAS_APPLE_PAC
572	braa	x22,sp
573#else
574	br		x22
575#endif
576
577
578	.text
579	.align 2
580	.global EXT(fleh_synchronous)
581LEXT(fleh_synchronous)
582
583UNWIND_PROLOGUE
584UNWIND_DIRECTIVES
585
586	mrs		x1, ESR_EL1							// Load exception syndrome
587	mrs		x2, FAR_EL1							// Load fault address
588
589	/* At this point, the LR contains the value of ELR_EL1. In the case of an
590	 * instruction prefetch abort, this will be the faulting pc, which we know
591	 * to be invalid. This will prevent us from backtracing through the
592	 * exception if we put it in our stack frame, so we load the LR from the
593	 * exception saved state instead.
594	 */
595	and		w3, w1, #(ESR_EC_MASK)
596	lsr		w3, w3, #(ESR_EC_SHIFT)
597	mov		w4, #(ESR_EC_IABORT_EL1)
598	cmp		w3, w4
599	b.eq	Lfleh_sync_load_lr
600Lvalid_link_register:
601
602	PUSH_FRAME
603	bl		EXT(sleh_synchronous)
604	POP_FRAME
605
606#if XNU_MONITOR
607	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
608#endif
609
610	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
611	b		exception_return_dispatch
612
613Lfleh_sync_load_lr:
614	ldr		lr, [x0, SS64_LR]
615	b Lvalid_link_register
616UNWIND_EPILOGUE
617
618/* Shared prologue code for fleh_irq and fleh_fiq.
619 * Does any interrupt bookkeeping we may want to do
620 * before invoking the handler proper.
621 * Expects:
622 *  x0 - arm_context_t
623 * x23 - CPSR
624 *  fp - Undefined live value (we may push a frame)
625 *  lr - Undefined live value (we may push a frame)
626 *  sp - Interrupt stack for the current CPU
627 */
628.macro BEGIN_INTERRUPT_HANDLER
629	mrs		x22, TPIDR_EL1
630	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
631	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence  */
632	ldr		w1, [x23, CPU_STAT_IRQ]
633	add		w1, w1, #1							// Increment count
634	str		w1, [x23, CPU_STAT_IRQ]				// Update  IRQ count
635	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
636	add		w1, w1, #1					// Increment count
637	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
638	/* Increment preempt count */
639	ldr		w1, [x22, ACT_PREEMPT_CNT]
640	add		w1, w1, #1
641	str		w1, [x22, ACT_PREEMPT_CNT]
642	/* Store context in int state */
643	str		x0, [x23, CPU_INT_STATE] 			// Saved context in cpu_int_state
644.endmacro
645
646/* Shared epilogue code for fleh_irq and fleh_fiq.
647 * Cleans up after the prologue, and may do a bit more
648 * bookkeeping (kdebug related).
649 * Expects:
650 * x22 - Live TPIDR_EL1 value (thread address)
651 * x23 - Address of the current CPU data structure
652 * w24 - 0 if kdebug is disabled, nonzero otherwise
653 *  fp - Undefined live value (we may push a frame)
654 *  lr - Undefined live value (we may push a frame)
655 *  sp - Interrupt stack for the current CPU
656 */
657.macro END_INTERRUPT_HANDLER
658	/* Clear int context */
659	str		xzr, [x23, CPU_INT_STATE]
660	/* Decrement preempt count */
661	ldr		w0, [x22, ACT_PREEMPT_CNT]
662	cbnz	w0, 1f								// Detect underflow
663	b		preempt_underflow
6641:
665	sub		w0, w0, #1
666	str		w0, [x22, ACT_PREEMPT_CNT]
667	/* Switch back to kernel stack */
668	ldr		x0, [x22, TH_KSTACKPTR]
669	mov		sp, x0
670	/* Generate a CPU-local event to terminate a post-IRQ WFE */
671	sevl
672.endmacro
673
674	.text
675	.align 2
676	.global EXT(fleh_irq)
677LEXT(fleh_irq)
678UNWIND_PROLOGUE
679UNWIND_DIRECTIVES
680	BEGIN_INTERRUPT_HANDLER
681	PUSH_FRAME
682	bl		EXT(sleh_irq)
683	POP_FRAME
684	END_INTERRUPT_HANDLER
685
686#if XNU_MONITOR
687	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
688#endif
689
690	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
691	b		exception_return_dispatch
692UNWIND_EPILOGUE
693
694	.text
695	.align 2
696	.global EXT(fleh_fiq_generic)
697LEXT(fleh_fiq_generic)
698	PANIC_UNIMPLEMENTED
699
700	.text
701	.align 2
702	.global EXT(fleh_fiq)
703LEXT(fleh_fiq)
704UNWIND_PROLOGUE
705UNWIND_DIRECTIVES
706	BEGIN_INTERRUPT_HANDLER
707	PUSH_FRAME
708	bl		EXT(sleh_fiq)
709	POP_FRAME
710	END_INTERRUPT_HANDLER
711
712#if XNU_MONITOR
713	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
714#endif
715
716	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
717	b		exception_return_dispatch
718UNWIND_EPILOGUE
719
720	.text
721	.align 2
722	.global EXT(fleh_serror)
723LEXT(fleh_serror)
724UNWIND_PROLOGUE
725UNWIND_DIRECTIVES
726	mrs		x1, ESR_EL1							// Load exception syndrome
727	mrs		x2, FAR_EL1							// Load fault address
728
729	PUSH_FRAME
730	bl		EXT(sleh_serror)
731	POP_FRAME
732
733#if XNU_MONITOR
734	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
735#endif
736
737	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
738	b		exception_return_dispatch
739UNWIND_EPILOGUE
740
741/*
742 * Register state saved before we get here.
743 */
744	.text
745	.align 2
746fleh_invalid_stack:
747	mrs		x1, ESR_EL1							// Load exception syndrome
748	str		x1, [x0, SS64_ESR]
749	mrs		x2, FAR_EL1							// Load fault address
750	str		x2, [x0, SS64_FAR]
751	PUSH_FRAME
752	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
753	b 		.
754
755	.text
756	.align 2
757fleh_synchronous_sp1:
758	mrs		x1, ESR_EL1							// Load exception syndrome
759	str		x1, [x0, SS64_ESR]
760	mrs		x2, FAR_EL1							// Load fault address
761	str		x2, [x0, SS64_FAR]
762	PUSH_FRAME
763	bl		EXT(sleh_synchronous_sp1)
764	b 		.
765
766	.text
767	.align 2
768fleh_irq_sp1:
769	mov		x1, x0
770	adr		x0, Lsp1_irq_str
771	b		EXT(panic_with_thread_kernel_state)
772Lsp1_irq_str:
773	.asciz "IRQ exception taken while SP1 selected"
774
775	.text
776	.align 2
777fleh_fiq_sp1:
778	mov		x1, x0
779	adr		x0, Lsp1_fiq_str
780	b		EXT(panic_with_thread_kernel_state)
781Lsp1_fiq_str:
782	.asciz "FIQ exception taken while SP1 selected"
783
784	.text
785	.align 2
786fleh_serror_sp1:
787	mov		x1, x0
788	adr		x0, Lsp1_serror_str
789	b		EXT(panic_with_thread_kernel_state)
790Lsp1_serror_str:
791	.asciz "Asynchronous exception taken while SP1 selected"
792
793	.text
794	.align 2
795exception_return_dispatch:
796	ldr		w0, [x21, SS64_CPSR]
797	tst		w0, PSR64_MODE_EL_MASK
798	b.ne		EXT(return_to_kernel) // return to kernel if M[3:2] > 0
799	b		return_to_user
800
801	.text
802	.align 2
803	.global EXT(return_to_kernel)
804LEXT(return_to_kernel)
805	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
806	mrs		x3, TPIDR_EL1                           // Load thread pointer
807	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
808	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
809	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
810	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
811	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
812	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
813	b.eq	exception_return_unint_tpidr_x3
814	mov		sp, x21                                 // Switch to thread stack for preemption
815	PUSH_FRAME
816	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
817	POP_FRAME
818	b		exception_return
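	/*
	 * The sequence above, as rough pseudocode (illustrative only):
	 *
	 *	if (saved CPSR has IRQs masked)        goto exception_return;
	 *	disable interrupts;
	 *	if (thread->preempt_count != 0)        goto exception_return_unint_tpidr_x3;
	 *	if (!(cpu->pending_ast & AST_URGENT))  goto exception_return_unint_tpidr_x3;
	 *	sp = x21;                              // borrow the thread stack
	 *	ast_taken_kernel();
	 *	goto exception_return;
	 */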
819
820	.text
821	.globl EXT(thread_bootstrap_return)
822LEXT(thread_bootstrap_return)
823#if CONFIG_DTRACE
824	bl		EXT(dtrace_thread_bootstrap)
825#endif
826#if KASAN && CONFIG_KERNEL_TBI
827	PUSH_FRAME
828	bl		EXT(__asan_handle_no_return)
829	POP_FRAME
830#endif
831	b		EXT(arm64_thread_exception_return)
832
833	.text
834	.globl EXT(arm64_thread_exception_return)
835LEXT(arm64_thread_exception_return)
836	mrs		x0, TPIDR_EL1
837	add		x21, x0, ACT_CONTEXT
838	ldr		x21, [x21]
839	mov		x28, xzr
840
841	//
842	// Fall Through to return_to_user from arm64_thread_exception_return.
843	// Note that if we move return_to_user or insert a new routine
844	// below arm64_thread_exception_return, the latter will need to change.
845	//
846	.text
847/* x21 is always the machine context pointer when we get here
848 * x28 is a bit indicating whether or not we should check if pc is in pfz */
849return_to_user:
850check_user_asts:
851#if KASAN && CONFIG_KERNEL_TBI
852	PUSH_FRAME
853	bl		EXT(__asan_handle_no_return)
854	POP_FRAME
855#endif
856	mrs		x3, TPIDR_EL1					// Load thread pointer
857
858	movn		w2, #0
859	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user
860
861#if MACH_ASSERT
862	ldr		w0, [x3, ACT_PREEMPT_CNT]
863	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
864#endif
865
866	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
867	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
868	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
869	cbz		w0, no_asts							// If no asts, skip ahead
870
871	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle asts
872
873	/* At this point, we have ASTs and we need to check whether we are running in the
874	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
875	 * the PFZ since we don't want to handle getting a signal or getting suspended
876	 * while holding a spinlock in userspace.
877	 *
878	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
879	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
880	 * to use it to indicate to userspace to come back to take a delayed
881	 * preemption, at which point the ASTs will be handled. */
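	/* In outline (illustrative only):
	 *
	 *	if (have_asts && check_pfz && commpage_is_in_pfz64(saved pc)) {
	 *		saved_state->x15 = 1;	// userspace takes a delayed preemption
	 *		// then proceed as if there were no ASTs
	 *	} else if (have_asts) {
	 *		ast_taken_user();	// may not return through this path
	 *	}
	 */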
882	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
883	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64
884
885	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
886	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
887	cbz		x0, restore_and_check_ast			// No, deal with other asts
888
889	mov		x0, #1
890	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
891	mov		x0, x19								// restore x0 to asts
892	b		no_asts								// pretend we have no asts
893
894restore_and_check_ast:
895	mov		x0, x19								// restore x0
896	b	user_take_ast							// Service pending asts
897no_asts:
898
899
900#if PRECISE_USER_KERNEL_TIME
901	mov		x19, x3						// Preserve thread pointer across function call
902	PUSH_FRAME
903	bl		EXT(recount_enter_user)
904	POP_FRAME
905	mov		x3, x19
906#endif /* PRECISE_USER_KERNEL_TIME */
907
908#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
909	/* Watchtower
910	 *
911	 * Here we attempt to enable NEON access for EL0. If the last entry into the
912	 * kernel from user-space was due to an IRQ, the monitor will have disabled
913	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
914	 * check in with the monitor in order to reenable NEON for EL0 in exchange
915	 * for routing IRQs through the monitor (2). This way the monitor will
916	 * always 'own' either IRQs or EL0 NEON.
917	 *
918	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
919	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
920	 * here.
921	 *
922	 * EL0 user ________ IRQ                                            ______
923	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
924	 * EL3 monitor           \_/                                \___/
925	 *
926	 *                       (1)                                 (2)
927	 */
928
929	mov		x0, #(CPACR_FPEN_ENABLE)
930	msr		CPACR_EL1, x0
931#endif
932
933	/* Establish this thread's debug state as the live state on the selected CPU. */
934	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
935	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
936	ldr		x0, [x3, ACT_DEBUGDATA]
937	cmp		x0, x1
938	beq		L_skip_user_set_debug_state			// Skip if the live CPU debug state already matches this thread's; otherwise apply the thread state
939
940
941	PUSH_FRAME
942	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
943	POP_FRAME
944	mrs		x3, TPIDR_EL1						// Reload thread pointer
945	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
946L_skip_user_set_debug_state:
947	ldrsh	x0, [x4, CPU_NUMBER_GS]
948	msr		TPIDR_EL0, x0
949
950
951	b		exception_return_unint_tpidr_x3
952
953exception_return:
954	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
955exception_return_unint:
956	mrs		x3, TPIDR_EL1					// Load thread pointer
957exception_return_unint_tpidr_x3:
958	mov		sp, x21						// Reload the pcb pointer
959
960#if !__ARM_KERNEL_PROTECT__
961	/*
962	 * Restore x18 only if the task has the entitlement that allows
963	 * its use. Such tasks are very few, and can move to something else
964	 * once we use x18 for something more global.
965	 *
966	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
967	 * that uses x18 as one of the global use cases (and will reset
968	 * x18 later down below).
969	 *
970	 * It's also unconditionally skipped for translated threads,
971	 * as those are another use case, one where x18 must be preserved.
972	 */
973	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
974	mov		x18, #0
975	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers
976
977exception_return_unint_tpidr_x3_restore_x18:
978	ldr		x18, [sp, SS64_X18]
979
980#else /* !__ARM_KERNEL_PROTECT__ */
981	/*
982	 * If we are going to eret to userspace, we must return through the EL0
983	 * eret mapping.
984	 */
985	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
986	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1
987
988	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
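	/* The computation below is, in effect (a restatement, not new logic):
	 *
	 *	target = ARM_KERNEL_PROTECT_EXCEPTION_START
	 *	         + (Lexception_return_restore_registers - ExceptionVectorsBase)
	 *
	 * i.e. the same code, reached through the fixed EL0 alias of the
	 * exception-vector mapping rather than through its kernel VA.
	 */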
989	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
990	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
991	add		x1, x1, Lexception_return_restore_registers@pageoff
992	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
993	sub		x1, x1, x0											// Calculate delta
994	add		x0, x2, x1											// Convert KVA to EL0 vector address
995	br		x0
996
997Lskip_el0_eret_mapping:
998#endif /* !__ARM_KERNEL_PROTECT__ */
999
1000Lexception_return_restore_registers:
1001	mov 	x0, sp								// x0 = &pcb
1002	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1003	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, el0_state_allowed=1
1004
1005/* Restore special register state */
1006	ldr		w3, [sp, NS64_FPSR]
1007	ldr		w4, [sp, NS64_FPCR]
1008
1009	msr		ELR_EL1, x1							// Load the return address into ELR
1010	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
1011	msr		FPSR, x3
1012	mrs		x5, FPCR
1013	CMSR FPCR, x5, x4, 1
10141:
1015
1016
1017	/* Restore arm_neon_saved_state64 */
1018	ldp		q0, q1, [x0, NS64_Q0]
1019	ldp		q2, q3, [x0, NS64_Q2]
1020	ldp		q4, q5, [x0, NS64_Q4]
1021	ldp		q6, q7, [x0, NS64_Q6]
1022	ldp		q8, q9, [x0, NS64_Q8]
1023	ldp		q10, q11, [x0, NS64_Q10]
1024	ldp		q12, q13, [x0, NS64_Q12]
1025	ldp		q14, q15, [x0, NS64_Q14]
1026	ldp		q16, q17, [x0, NS64_Q16]
1027	ldp		q18, q19, [x0, NS64_Q18]
1028	ldp		q20, q21, [x0, NS64_Q20]
1029	ldp		q22, q23, [x0, NS64_Q22]
1030	ldp		q24, q25, [x0, NS64_Q24]
1031	ldp		q26, q27, [x0, NS64_Q26]
1032	ldp		q28, q29, [x0, NS64_Q28]
1033	ldp		q30, q31, [x0, NS64_Q30]
1034
1035	/* Restore arm_saved_state64 */
1036
1037	// Skip x0, x1 - we're using them
1038	ldp		x2, x3, [x0, SS64_X2]
1039	ldp		x4, x5, [x0, SS64_X4]
1040	ldp		x6, x7, [x0, SS64_X6]
1041	ldp		x8, x9, [x0, SS64_X8]
1042	ldp		x10, x11, [x0, SS64_X10]
1043	ldp		x12, x13, [x0, SS64_X12]
1044	ldp		x14, x15, [x0, SS64_X14]
1045	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1046	// Skip x18 - already restored or trashed above (below with __ARM_KERNEL_PROTECT__)
1047	ldr		x19, [x0, SS64_X19]
1048	ldp		x20, x21, [x0, SS64_X20]
1049	ldp		x22, x23, [x0, SS64_X22]
1050	ldp		x24, x25, [x0, SS64_X24]
1051	ldp		x26, x27, [x0, SS64_X26]
1052	ldr		x28, [x0, SS64_X28]
1053	ldr		fp, [x0, SS64_FP]
1054	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1055
1056	// Restore stack pointer and our last two GPRs
1057	ldr		x1, [x0, SS64_SP]
1058	mov		sp, x1
1059
1060#if __ARM_KERNEL_PROTECT__
1061	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1062#endif /* __ARM_KERNEL_PROTECT__ */
1063
1064	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1065
1066#if __ARM_KERNEL_PROTECT__
1067	/* If we are going to eret to userspace, we must unmap the kernel. */
1068	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1069
1070	/* Update TCR to unmap the kernel. */
1071	MOV64		x18, TCR_EL1_USER
1072	msr		TCR_EL1, x18
1073
1074	/*
1075	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1076	 * each other due to the microarchitecture.
1077	 */
1078#if !defined(APPLE_ARM64_ARCH_FAMILY)
1079	isb		sy
1080#endif
1081
1082	/* Switch to the user ASID (low bit clear) for the task. */
1083	mrs		x18, TTBR0_EL1
1084	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
1085	msr		TTBR0_EL1, x18
1086	mov		x18, #0
1087
1088	/* We don't need an ISB here, as the eret is synchronizing. */
1089Lskip_ttbr1_switch:
1090#endif /* __ARM_KERNEL_PROTECT__ */
1091
1092	ERET_CONTEXT_SYNCHRONIZING
1093
1094user_take_ast:
1095	PUSH_FRAME
1096	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
1097	POP_FRAME
1098	b		check_user_asts								// Now try again
1099
1100	.text
1101	.align 2
1102preempt_underflow:
1103	mrs		x0, TPIDR_EL1
1104	str		x0, [sp, #-16]!						// We'll print thread pointer
1105	adr		x0, L_underflow_str					// Format string
1106	CALL_EXTERN panic							// Game over
1107
1108L_underflow_str:
1109	.asciz "Preemption count negative on thread %p"
1110.align 2
1111
1112#if MACH_ASSERT
1113	.text
1114	.align 2
1115preempt_count_notzero:
1116	mrs		x0, TPIDR_EL1
1117	str		x0, [sp, #-16]!						// We'll print thread pointer
1118	ldr		w0, [x0, ACT_PREEMPT_CNT]
1119	str		w0, [sp, #8]
1120	adr		x0, L_preempt_count_notzero_str				// Format string
1121	CALL_EXTERN panic							// Game over
1122
1123L_preempt_count_notzero_str:
1124	.asciz "preemption count not 0 on thread %p (%u)"
1125#endif /* MACH_ASSERT */
1126
1127#if __ARM_KERNEL_PROTECT__
1128	/*
1129	 * This symbol denotes the end of the exception vector/eret range; we page
1130	 * align it so that we can avoid mapping other text in the EL0 exception
1131	 * vector mapping.
1132	 */
1133	.text
1134	.align 14
1135	.globl EXT(ExceptionVectorsEnd)
1136LEXT(ExceptionVectorsEnd)
1137#endif /* __ARM_KERNEL_PROTECT__ */
1138
1139#if XNU_MONITOR
1140
1141/*
1142 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1143 * mostly concerned with setting up state for the normal fleh code.
1144 */
1145	.text
1146	.align 2
1147fleh_synchronous_from_ppl:
1148	/* Save x0. */
1149	mov		x15, x0
1150
1151	/* Grab the ESR. */
1152	mrs		x1, ESR_EL1							// Get the exception syndrome
1153
1154	/* If the stack pointer is corrupt, it will manifest either as a data abort
1155	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1156	 * these quickly by testing bit 5 of the exception class.
1157	 */
1158	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1159	mrs		x0, SP_EL0							// Get SP_EL0
1160
1161	/* Perform high level checks for stack corruption. */
1162	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
1163	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1164	cmp		x1, x2								// If we have a stack alignment exception
1165	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
1166	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1167	cmp		x1, x2								// If we have a data abort, we need to
1168	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer
1169
1170Ltest_pstack:
1171	/* Bounds check the PPL stack. */
1172	adrp	x10, EXT(pmap_stacks_start)@page
1173	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1174	adrp	x11, EXT(pmap_stacks_end)@page
1175	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1176	cmp		x0, x10
1177	b.lo	Lcorrupt_ppl_stack
1178	cmp		x0, x11
1179	b.hi	Lcorrupt_ppl_stack
1180
1181Lvalid_ppl_stack:
1182	/* Restore x0. */
1183	mov		x0, x15
1184
1185	/* Switch back to the kernel stack. */
1186	msr		SPSel, #0
1187	GET_PMAP_CPU_DATA x5, x6, x7
1188	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1189	mov		sp, x6
1190
1191	/* Hand off to the synch handler. */
1192	b		EXT(fleh_synchronous)
1193
1194Lcorrupt_ppl_stack:
1195	/* Restore x0. */
1196	mov		x0, x15
1197
1198	/* Hand off to the invalid stack handler. */
1199	b		fleh_invalid_stack
1200
1201fleh_fiq_from_ppl:
1202	SWITCH_TO_INT_STACK
1203	b		EXT(fleh_fiq)
1204
1205fleh_irq_from_ppl:
1206	SWITCH_TO_INT_STACK
1207	b		EXT(fleh_irq)
1208
1209fleh_serror_from_ppl:
1210	GET_PMAP_CPU_DATA x5, x6, x7
1211	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1212	mov		sp, x6
1213	b		EXT(fleh_serror)
1214
1215
1216
1217
1218	// x15: ppl call number
1219	// w10: ppl_state
1220	// x20: gxf_enter caller's DAIF
1221	.globl EXT(ppl_trampoline_start)
1222LEXT(ppl_trampoline_start)
1223
1224
1225#error "XPRR configuration error"
1226	cmp		x14, x21
1227	b.ne	Lppl_fail_dispatch
1228
1229	/* Verify the request ID. */
1230	cmp		x15, PMAP_COUNT
1231	b.hs	Lppl_fail_dispatch
1232
1233	GET_PMAP_CPU_DATA	x12, x13, x14
1234
1235	/* Mark this CPU as being in the PPL. */
1236	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1237
1238	cmp		w9, #PPL_STATE_KERNEL
1239	b.eq		Lppl_mark_cpu_as_dispatching
1240
1241	/* Check to see if we are trying to trap from within the PPL. */
1242	cmp		w9, #PPL_STATE_DISPATCH
1243	b.eq		Lppl_fail_dispatch_ppl
1244
1245
1246	/* Ensure that we are returning from an exception. */
1247	cmp		w9, #PPL_STATE_EXCEPTION
1248	b.ne		Lppl_fail_dispatch
1249
1250	// w10 (ppl_state) was set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1251	// before trampolining back into the PPL.
1252	cmp		w10, #PPL_STATE_EXCEPTION
1253	b.ne		Lppl_fail_dispatch
1254
1255	/* This is an exception return; set the CPU to the dispatching state. */
1256	mov		w9, #PPL_STATE_DISPATCH
1257	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1258
1259	/* Find the save area, and return to the saved PPL context. */
1260	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1261	mov		sp, x0
1262	b		EXT(return_to_ppl)
1263
1264Lppl_mark_cpu_as_dispatching:
1265	cmp		w10, #PPL_STATE_KERNEL
1266	b.ne		Lppl_fail_dispatch
1267
1268	/* Mark the CPU as dispatching. */
1269	mov		w13, #PPL_STATE_DISPATCH
1270	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1271
1272	/* Switch to the regular PPL stack. */
1273	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1274	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1275
1276	// SP0 is thread stack here
1277	mov		x21, sp
1278	// SP0 is now PPL stack
1279	mov		sp, x9
1280
1281	/* Save the old stack pointer off in case we need it. */
1282	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1283
1284	/* Get the handler for the request */
1285	adrp	x9, EXT(ppl_handler_table)@page
1286	add		x9, x9, EXT(ppl_handler_table)@pageoff
1287	add		x9, x9, x15, lsl #3
1288	ldr		x10, [x9]
1289
1290	/* Branch to the code that will invoke the PPL request. */
1291	b		EXT(ppl_dispatch)
1292
1293Lppl_fail_dispatch_ppl:
1294	/* Switch back to the kernel stack. */
1295	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1296	mov		sp, x10
1297
1298Lppl_fail_dispatch:
1299	/* Indicate that we failed. */
1300	mov		x15, #PPL_EXIT_BAD_CALL
1301
1302	/* Move the DAIF bits into the expected register. */
1303	mov		x10, x20
1304
1305	/* Return to kernel mode. */
1306	b		ppl_return_to_kernel_mode
1307
1308Lppl_dispatch_exit:
1309
1310	/* Indicate that we are cleanly exiting the PPL. */
1311	mov		x15, #PPL_EXIT_DISPATCH
1312
1313	/* Switch back to the original (kernel thread) stack. */
1314	mov		sp, x21
1315
1316	/* Move the saved DAIF bits. */
1317	mov		x10, x20
1318
1319	/* Clear the in-flight pmap pointer */
1320	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
1321	stlr		xzr, [x13]
1322
1323	/* Clear the old stack pointer. */
1324	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1325
1326	/*
1327	 * Mark the CPU as no longer being in the PPL.  We spin if our state
1328	 * machine is broken.
1329	 */
1330	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1331	cmp		w9, #PPL_STATE_DISPATCH
1332	b.ne		.
1333	mov		w9, #PPL_STATE_KERNEL
1334	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1335
1336	/* Return to the kernel. */
1337	b ppl_return_to_kernel_mode
1338
1339
1340
1341	.text
1342ppl_exit:
1343	/*
1344	 * If we are dealing with an exception, hand off to the first level
1345	 * exception handler.
1346	 */
1347	cmp		x15, #PPL_EXIT_EXCEPTION
1348	b.eq	Ljump_to_fleh_handler
1349
1350	/* If this was a panic call from the PPL, reinvoke panic. */
1351	cmp		x15, #PPL_EXIT_PANIC_CALL
1352	b.eq	Ljump_to_panic_trap_to_debugger
1353
1354	/*
1355	 * Stash off the original DAIF in the high bits of the exit code register.
1356	 * We could keep this in a dedicated register, but that would require us to copy it to
1357	 * an additional callee-save register below (e.g. x22), which in turn would require that
1358	 * register to be saved/restored at PPL entry/exit.
1359	 */
1360	add		x15, x15, x10, lsl #32
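	/* x15 now carries { DAIF, exit code } packed as (for reference):
	 *
	 *	x15[63:32] = original DAIF	(extracted again with ubfx at Lppl_skip_ast_taken)
	 *	x15[31: 0] = PPL exit code	(recovered there with bfc)
	 */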
1361
1362	/* Load the preemption count. */
1363	mrs		x10, TPIDR_EL1
1364	ldr		w12, [x10, ACT_PREEMPT_CNT]
1365
1366	/* Detect underflow */
1367	cbnz	w12, Lno_preempt_underflow
1368	b		preempt_underflow
1369Lno_preempt_underflow:
1370
1371	/* Lower the preemption count. */
1372	sub		w12, w12, #1
1373
1374#if SCHED_HYGIENE_DEBUG
1375	/* Collect preemption disable measurement if necessary. */
1376
1377	/* Only collect measurement if this reenabled preemption. */
1378	cmp		w12, #0
1379	b.ne	Lskip_collect_measurement
1380
1381	/* Only collect measurement if a start time was set. */
1382	ldr		x14, [x10, ACT_PREEMPT_MT]
1383	cmp		x14, #0
1384	b.eq	Lskip_collect_measurement
1385
1386	/* Stash our return value and return reason. */
1387	mov		x20, x0
1388	mov		x21, x15
1389
1390	/* Collect measurement. */
1391	mov		x0, x10
1392	bl		EXT(_collect_preemption_disable_measurement)
1393
1394	/* Restore the return value and the return reason. */
1395	mov		x0, x20
1396	mov		x15, x21
1397	/* ... and w12, which was 0. */
1398	mov		w12, #0
1399
1400	/* Restore the thread pointer into x10. */
1401	mrs		x10, TPIDR_EL1
1402
1403Lskip_collect_measurement:
1404#endif /* SCHED_HYGIENE_DEBUG */
1405
1406	/* Save the lowered preemption count. */
1407	str		w12, [x10, ACT_PREEMPT_CNT]
1408
1409	/* Skip ASTs if the preemption count is not zero. */
1410	cbnz	x12, Lppl_skip_ast_taken
1411
1412	/*
1413	 * Skip the AST check if interrupts were originally disabled.
1414	 * The original DAIF state prior to PPL entry is stored in the upper
1415	 * 32 bits of x15.
1416	 */
1417	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken
1418	/* If there is no urgent AST, skip handling it. */
1419	/* IF there is no urgent AST, skip the AST. */
1420	ldr		x12, [x10, ACT_CPUDATAP]
1421	ldr		w14, [x12, CPU_PENDING_AST]
1422	tst		w14, AST_URGENT
1423	b.eq	Lppl_skip_ast_taken
1424
1425	/* Stash our return value and return reason. */
1426	mov		x20, x0
1427	mov		x21, x15
1428
1429	/* Handle the AST. */
1430	bl		EXT(ast_taken_kernel)
1431
1432	/* Restore the return value and the return reason. */
1433	mov		x15, x21
1434	mov		x0, x20
1435
1436Lppl_skip_ast_taken:
1437
1438	/* Extract caller DAIF from high-order bits of exit code */
1439	ubfx	x10, x15, #32, #32
1440	bfc		x15, #32, #32
1441	msr		DAIF, x10
1442
1443	/* Pop the stack frame. */
1444	ldp		x29, x30, [sp, #0x10]
1445	ldp		x20, x21, [sp], #0x20
1446
1447	/* Check to see if this was a bad request. */
1448	cmp		x15, #PPL_EXIT_BAD_CALL
1449	b.eq	Lppl_bad_call
1450
1451	/* Return. */
1452	ARM64_STACK_EPILOG
1453
1454	.align 2
1455Ljump_to_fleh_handler:
1456	br	x25
1457
1458	.align 2
1459Ljump_to_panic_trap_to_debugger:
1460	b		EXT(panic_trap_to_debugger)
1461
1462Lppl_bad_call:
1463	/* Panic. */
1464	adrp	x0, Lppl_bad_call_panic_str@page
1465	add		x0, x0, Lppl_bad_call_panic_str@pageoff
1466	b		EXT(panic)
1467
1468	.text
1469	.align 2
1470	.globl EXT(ppl_dispatch)
1471LEXT(ppl_dispatch)
1472	/*
1473	 * Save a couple of important registers (implementation detail; x12 has
1474	 * the PPL per-CPU data address; x13 is not actually interesting).
1475	 */
1476	stp		x12, x13, [sp, #-0x10]!
1477
1478	/* Restore the original AIF state. */
1479	msr		DAIF, x20
1480
1481	/*
1482	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
1483	 * but the exception vectors will deal with this properly.
1484	 */
1485
1486	/* Invoke the PPL method. */
1487#ifdef HAS_APPLE_PAC
1488	blraa		x10, x9
1489#else
1490	blr		x10
1491#endif
1492
1493	/* Disable AIF. */
1494	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
1495
1496	/* Restore those important registers. */
1497	ldp		x12, x13, [sp], #0x10
1498
1499	/* Mark this as a regular return, and hand off to the return path. */
1500	b		Lppl_dispatch_exit
1501
1502	.text
1503	.align 2
1504	.globl EXT(ppl_bootstrap_dispatch)
1505LEXT(ppl_bootstrap_dispatch)
1506	/* Verify the PPL request. */
1507	cmp		x15, PMAP_COUNT
1508	b.hs	Lppl_fail_bootstrap_dispatch
1509
1510	/* Get the requested PPL routine. */
1511	adrp	x9, EXT(ppl_handler_table)@page
1512	add		x9, x9, EXT(ppl_handler_table)@pageoff
1513	add		x9, x9, x15, lsl #3
1514	ldr		x10, [x9]
1515
1516	/* Invoke the requested PPL routine. */
1517#ifdef HAS_APPLE_PAC
1518	blraa		x10, x9
1519#else
1520	blr		x10
1521#endif
1522	LOAD_PMAP_CPU_DATA	x9, x10, x11
1523
1524	/* Clear the in-flight pmap pointer */
1525	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
1526	stlr		xzr, [x9]
1527
1528	/* Stash off the return value */
1529	mov		x20, x0
1530	/* Drop the preemption count */
1531	bl		EXT(_enable_preemption)
1532	mov		x0, x20
1533
1534	/* Pop the stack frame. */
1535	ldp		x29, x30, [sp, #0x10]
1536	ldp		x20, x21, [sp], #0x20
1537#if __has_feature(ptrauth_returns)
1538	retab
1539#else
1540	ret
1541#endif
1542
1543Lppl_fail_bootstrap_dispatch:
1544	/* Pop our stack frame and panic. */
1545	ldp		x29, x30, [sp, #0x10]
1546	ldp		x20, x21, [sp], #0x20
1547#if __has_feature(ptrauth_returns)
1548	autibsp
1549#endif
1550	adrp	x0, Lppl_bad_call_panic_str@page
1551	add		x0, x0, Lppl_bad_call_panic_str@pageoff
1552	b		EXT(panic)
1553
1554	.text
1555	.align 2
1556	.globl EXT(ml_panic_trap_to_debugger)
1557LEXT(ml_panic_trap_to_debugger)
1558	mrs		x10, DAIF
1559	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
1560
1561	adrp		x12, EXT(pmap_ppl_locked_down)@page
1562	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
1563	cbz		w12, Lnot_in_ppl_dispatch
1564
1565	LOAD_PMAP_CPU_DATA	x11, x12, x13
1566
1567	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
1568	cmp		w12, #PPL_STATE_DISPATCH
1569	b.ne		Lnot_in_ppl_dispatch
1570
1571	/* Indicate (for the PPL->kernel transition) that we are panicking. */
1572	mov		x15, #PPL_EXIT_PANIC_CALL
1573
1574	/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
1575	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
1576	mov		sp, x12
1577
1578	mrs		x10, DAIF
1579	mov		w13, #PPL_STATE_PANIC
1580	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
1581
1582	/* Now we are ready to exit the PPL. */
1583	b		ppl_return_to_kernel_mode
1584Lnot_in_ppl_dispatch:
1585	msr		DAIF, x10
1586	ret
1587
1588	.data
1589Lppl_bad_call_panic_str:
1590	.asciz "ppl_dispatch: failed due to bad arguments/state"
1591#else /* XNU_MONITOR */
1592	.text
1593	.align 2
1594	.globl EXT(ml_panic_trap_to_debugger)
1595LEXT(ml_panic_trap_to_debugger)
1596	ret
1597#endif /* XNU_MONITOR */
1598
1599/* ARM64_TODO Is globals_asm.h needed? */
1600//#include	"globals_asm.h"
1601
1602/* vim: set ts=4: */
1603