1/*
2 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_machdep.h>
31#include <arm64/machine_routines_asm.h>
32#include <arm64/proc_reg.h>
33#include <pexpert/arm64/board_config.h>
34#include <mach/exception_types.h>
35#include <mach_kdp.h>
36#include <config_dtrace.h>
37#include "assym.s"
38#include <arm64/exception_asm.h>
39#include "dwarf_unwind.h"
40
41#if __ARM_KERNEL_PROTECT__
42#include <arm/pmap.h>
43#endif
44
45#if XNU_MONITOR
46/*
47 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
48 *
49 * Checks if an exception was taken from the PPL, and if so, trampolines back
50 * into the PPL.
51 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
52 *         exception was taken while in the PPL.
53 */
54.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
55	cmp		x26, xzr
56	b.eq		1f
57
58	/* Return to the PPL. */
59	mov		x15, #0
60	mov		w10, #PPL_STATE_EXCEPTION
61#error "XPRR configuration error"
621:
63.endmacro
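/*
 * Illustrative outline of the macro above: when x26 is nonzero the exception
 * was taken while in the PPL, so x15 is cleared, w10 is set to
 * PPL_STATE_EXCEPTION, and control would return to the PPL through a
 * platform-specific trampoline that is not present in this configuration
 * (hence the #error placeholder).  When x26 is zero, execution simply falls
 * through and the exception returns to the kernel as usual.
 */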
64
65
66#endif /* XNU_MONITOR */
67
68
69/*
70 * MAP_KERNEL
71 *
72 * Restores the kernel EL1 mappings, if necessary.
73 *
74 * This may mutate x18.
75 */
76.macro MAP_KERNEL
77#if __ARM_KERNEL_PROTECT__
78	/* Switch to the kernel ASID (low bit set) for the task. */
79	mrs		x18, TTBR0_EL1
80	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
81	msr		TTBR0_EL1, x18
82
83	/*
84	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
85	 * to the TTBRs and writes to the TCR should be ensured by the
86	 * microarchitecture.
87	 */
88#if !defined(APPLE_ARM64_ARCH_FAMILY)
89	isb		sy
90#endif
91
92	/*
93	 * Update the TCR to map the kernel now that we are using the kernel
94	 * ASID.
95	 */
96	MOV64		x18, TCR_EL1_BOOT
97	msr		TCR_EL1, x18
98	isb		sy
99#endif /* __ARM_KERNEL_PROTECT__ */
100.endmacro
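/*
 * A minimal sketch of MAP_KERNEL in C-like pseudocode (illustrative only,
 * not part of the build):
 *
 *	TTBR0_EL1 |= (1ULL << TTBR_ASID_SHIFT);	// switch to the kernel ASID
 *	TCR_EL1 = TCR_EL1_BOOT;			// TCR value that maps the kernel
 *	isb();					// synchronize before relying on the mappings
 *
 * The extra isb() between the TTBR and TCR writes is only required on
 * non-Apple cores, as noted in the macro body.
 */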
101
102/*
103 * BRANCH_TO_KVA_VECTOR
104 *
105 * Branches to the requested long exception vector in the kernelcache.
106 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
108 *
109 * This may mutate x18.
110 */
111.macro BRANCH_TO_KVA_VECTOR
112#if __ARM_KERNEL_PROTECT__
113	/*
114	 * Find the kernelcache table for the exception vectors by accessing
115	 * the per-CPU data.
116	 */
117	mrs		x18, TPIDR_EL1
118	ldr		x18, [x18, ACT_CPUDATAP]
119	ldr		x18, [x18, CPU_EXC_VECTORS]
120
121	/*
122	 * Get the handler for this exception and jump to it.
123	 */
124	ldr		x18, [x18, #($1 << 3)]
125	br		x18
126#else
127	b		$0
128#endif /* __ARM_KERNEL_PROTECT__ */
129.endmacro
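/*
 * With __ARM_KERNEL_PROTECT__, the sequence above amounts to (illustrative
 * pseudocode; field names follow the assym.s offsets used here):
 *
 *	handler = current_thread()->cpu_data->cpu_exc_vectors[arg1];
 *	goto *handler;		// long vector in the kernelcache
 *
 * Without __ARM_KERNEL_PROTECT__, the long vector is reached with a plain
 * direct branch instead.
 */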
130
131/*
132 * CHECK_KERNEL_STACK
133 *
134 * Verifies that the kernel stack is aligned and mapped within an expected
135 * stack address range. Note: happens before saving registers (in case we can't
136 * save to kernel stack).
137 *
138 * Expects:
139 *	{x0, x1} - saved
140 *	x1 - Exception syndrome
141 *	sp - Saved state
142 *
143 * Seems like we need an unused argument to the macro for the \@ syntax to work
144 *
145 */
146.macro CHECK_KERNEL_STACK unused
147	stp		x2, x3, [sp, #-16]!				// Save {x2-x3}
148	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
149	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
150	cmp		x1, x2								// If we have a stack alignment exception
151	b.eq	Lcorrupt_stack_\@					// ...the stack is definitely corrupted
152	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
153	cmp		x1, x2								// If we have a data abort, we need to
154	b.ne	Lvalid_stack_\@						// ...validate the stack pointer
155	mrs		x0, SP_EL0					// Get SP_EL0
156	mrs		x1, TPIDR_EL1						// Get thread pointer
157Ltest_kstack_\@:
158	LOAD_KERN_STACK_TOP	dst=x2, src=x1, tmp=x3	// Get top of kernel stack
159	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
160	cmp		x0, x2								// if (SP_EL0 >= kstack top)
161	b.ge	Ltest_istack_\@						//    jump to istack test
162	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
163	b.gt	Lvalid_stack_\@						//    stack pointer valid
164Ltest_istack_\@:
165	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
166	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
167	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
168	cmp		x0, x2								// if (SP_EL0 >= istack top)
169	b.ge	Lcorrupt_stack_\@					//    corrupt stack pointer
170	cmp		x0, x3								// if (SP_EL0 > istack bottom)
171	b.gt	Lvalid_stack_\@						//    stack pointer valid
172Lcorrupt_stack_\@:
173	ldp		x2, x3, [sp], #16
174	ldp		x0, x1, [sp], #16
175	sub		sp, sp, ARM_CONTEXT_SIZE			// Allocate exception frame
176	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the exception frame
177	stp		x2, x3, [sp, SS64_X2]				// Save x2, x3 to the exception frame
178	mrs		x0, SP_EL0					// Get SP_EL0
179	str		x0, [sp, SS64_SP]				// Save sp to the exception frame
180	INIT_SAVED_STATE_FLAVORS sp, w0, w1
181	mov		x0, sp								// Copy exception frame pointer to x0
182	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
183	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
184	b		fleh_dispatch64_noreturn
185Lvalid_stack_\@:
186	ldp		x2, x3, [sp], #16			// Restore {x2-x3}
187.endmacro
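/*
 * The validation above is, in effect (illustrative pseudocode):
 *
 *	if (EC == ESR_EC_SP_ALIGN)
 *		goto corrupt;		// misaligned SP is always fatal here
 *	if (EC != ESR_EC_DABORT_EL1)
 *		goto valid;		// only data aborts need SP validation
 *	if (SP_EL0 is within the thread's kernel stack ||
 *	    SP_EL0 is within this CPU's interrupt stack)
 *		goto valid;
 *	corrupt:
 *		build an exception frame by hand and dispatch to
 *		fleh_invalid_stack, which does not return;
 *	valid:
 *		restore {x2, x3} and continue.
 */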
188
189
190#if __ARM_KERNEL_PROTECT__
191	.section __DATA_CONST,__const
192	.align 3
193	.globl EXT(exc_vectors_table)
194LEXT(exc_vectors_table)
	/* Table of exception handlers.
	 * These handlers sometimes contain deadloops.
	 * It's nice to have symbols for them when debugging. */
198	.quad el1_sp0_synchronous_vector_long
199	.quad el1_sp0_irq_vector_long
200	.quad el1_sp0_fiq_vector_long
201	.quad el1_sp0_serror_vector_long
202	.quad el1_sp1_synchronous_vector_long
203	.quad el1_sp1_irq_vector_long
204	.quad el1_sp1_fiq_vector_long
205	.quad el1_sp1_serror_vector_long
206	.quad el0_synchronous_vector_64_long
207	.quad el0_irq_vector_64_long
208	.quad el0_fiq_vector_64_long
209	.quad el0_serror_vector_64_long
210#endif /* __ARM_KERNEL_PROTECT__ */
211
212	.text
213#if __ARM_KERNEL_PROTECT__
	/*
	 * We need this to be on a page boundary so that we may avoid mapping
	 * other text along with it.  As this must be on the VM page boundary
	 * (due to how the coredumping code currently works), this will be a
	 * 16KB page boundary.
	 */
220	.align 14
221#else
222	.align 12
223#endif /* __ARM_KERNEL_PROTECT__ */
224	.globl EXT(ExceptionVectorsBase)
225LEXT(ExceptionVectorsBase)
226Lel1_sp0_synchronous_vector:
227	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0
228
229	.text
230	.align 7
231Lel1_sp0_irq_vector:
232	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1
233
234	.text
235	.align 7
236Lel1_sp0_fiq_vector:
237	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2
238
239	.text
240	.align 7
241Lel1_sp0_serror_vector:
242	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3
243
244	.text
245	.align 7
246Lel1_sp1_synchronous_vector:
247	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4
248
249	.text
250	.align 7
251Lel1_sp1_irq_vector:
252	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5
253
254	.text
255	.align 7
256Lel1_sp1_fiq_vector:
257	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6
258
259	.text
260	.align 7
261Lel1_sp1_serror_vector:
262	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7
263
264	.text
265	.align 7
266Lel0_synchronous_vector_64:
267	MAP_KERNEL
268	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8
269
270	.text
271	.align 7
272Lel0_irq_vector_64:
273	MAP_KERNEL
274	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9
275
276	.text
277	.align 7
278Lel0_fiq_vector_64:
279	MAP_KERNEL
280	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10
281
282	.text
283	.align 7
284Lel0_serror_vector_64:
285	MAP_KERNEL
286	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11
287
288	/* Fill out the rest of the page */
289	.align 12
290
291/*********************************
292 * END OF EXCEPTION VECTORS PAGE *
293 *********************************/
294
295
296
297.macro EL1_SP0_VECTOR
298	msr		SPSel, #0							// Switch to SP0
299	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
300	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
301	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
302	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
303	INIT_SAVED_STATE_FLAVORS sp, w0, w1
304	mov		x0, sp								// Copy saved state pointer to x0
305.endmacro
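/*
 * EL1_SP0_VECTOR, in outline (illustrative): switch to SP0, carve an
 * arm_context_t out of the current kernel stack, record {x0, x1} and the
 * pre-exception SP in it, initialize the saved-state flavor/count fields,
 * and leave x0 pointing at the new frame for the dispatch code.
 */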
306
307.macro EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
308	// SWITCH_TO_INT_STACK requires a clobberable tmp register, but at this
309	// point in the exception vector we can't spare the extra GPR.  Instead note
310	// that EL1_SP0_VECTOR ends with x0 == sp and use this to unclobber x0.
311	mrs		x1, TPIDR_EL1
312	LOAD_INT_STACK	dst=x1, src=x1, tmp=x0
313	mov		x0, sp
314	mov		sp, x1
315.endmacro
316
317el1_sp0_synchronous_vector_long:
318	stp		x0, x1, [sp, #-16]!				// Save x0 and x1 to the exception stack
319	mrs		x1, ESR_EL1							// Get the exception syndrome
320	/* If the stack pointer is corrupt, it will manifest either as a data abort
321	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
322	 * these quickly by testing bit 5 of the exception class.
323	 */
324	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
325	CHECK_KERNEL_STACK
326Lkernel_stack_valid:
327	ldp		x0, x1, [sp], #16				// Restore x0 and x1 from the exception stack
328	EL1_SP0_VECTOR
329	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
330	add		x1, x1, EXT(fleh_synchronous)@pageoff
331	b		fleh_dispatch64
332
333el1_sp0_irq_vector_long:
334	EL1_SP0_VECTOR
335	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
336	adrp	x1, EXT(fleh_irq)@page					// Load address for fleh
337	add		x1, x1, EXT(fleh_irq)@pageoff
338	b		fleh_dispatch64
339
340el1_sp0_fiq_vector_long:
341	// ARM64_TODO write optimized decrementer
342	EL1_SP0_VECTOR
343	EL1_SP0_VECTOR_SWITCH_TO_INT_STACK
344	adrp	x1, EXT(fleh_fiq)@page					// Load address for fleh
345	add		x1, x1, EXT(fleh_fiq)@pageoff
346	b		fleh_dispatch64
347
348el1_sp0_serror_vector_long:
349	EL1_SP0_VECTOR
350	adrp	x1, EXT(fleh_serror)@page				// Load address for fleh
351	add		x1, x1, EXT(fleh_serror)@pageoff
352	b		fleh_dispatch64
353
354.macro EL1_SP1_VECTOR
355	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
356	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
357	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
358	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
359	INIT_SAVED_STATE_FLAVORS sp, w0, w1
360	mov		x0, sp								// Copy saved state pointer to x0
361.endmacro
362
363el1_sp1_synchronous_vector_long:
364	b		check_exception_stack
365Lel1_sp1_synchronous_valid_stack:
366#if defined(KERNEL_INTEGRITY_KTRR)
367	b		check_ktrr_sctlr_trap
368Lel1_sp1_synchronous_vector_continue:
369#endif
370	EL1_SP1_VECTOR
371	adrp	x1, fleh_synchronous_sp1@page
372	add		x1, x1, fleh_synchronous_sp1@pageoff
373	b		fleh_dispatch64_noreturn
374
375el1_sp1_irq_vector_long:
376	EL1_SP1_VECTOR
377	adrp	x1, fleh_irq_sp1@page
378	add		x1, x1, fleh_irq_sp1@pageoff
379	b		fleh_dispatch64_noreturn
380
381el1_sp1_fiq_vector_long:
382	EL1_SP1_VECTOR
383	adrp	x1, fleh_fiq_sp1@page
384	add		x1, x1, fleh_fiq_sp1@pageoff
385	b		fleh_dispatch64_noreturn
386
387el1_sp1_serror_vector_long:
388	EL1_SP1_VECTOR
389	adrp	x1, fleh_serror_sp1@page
390	add		x1, x1, fleh_serror_sp1@pageoff
391	b		fleh_dispatch64_noreturn
392
393
394
395.macro EL0_64_VECTOR guest_label
396	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
397#if __ARM_KERNEL_PROTECT__
398	mov		x18, #0 						// Zero x18 to avoid leaking data to user SS
399#endif
400	mrs		x0, TPIDR_EL1						// Load the thread register
401	LOAD_USER_PCB	dst=x0, src=x0, tmp=x1		// Load the user context pointer
402	mrs		x1, SP_EL0							// Load the user stack pointer
403	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
404	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
405	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
406	msr		SPSel, #0							// Switch to SP0
407	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
408	mrs		x1, TPIDR_EL1						// Load the thread register
409
410
411
412	mov		x0, sp								// Copy the user PCB pointer to x0
413												// x1 contains thread register
414.endmacro
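/*
 * A rough sketch of EL0_64_VECTOR (illustrative pseudocode; "pcb" is the
 * user saved-state area found via LOAD_USER_PCB):
 *
 *	pcb->ss_64.sp = SP_EL0;		// user stack pointer
 *	pcb->ss_64.x[0] = x0;
 *	pcb->ss_64.x[1] = x1;
 *	sp = pcb;			// SP0 now points at the user PCB
 *
 * On exit x0 == sp == pcb and x1 holds TPIDR_EL1, which the *_SWITCH_TO_*
 * macros below rely on.
 */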
415
416.macro EL0_64_VECTOR_SWITCH_TO_INT_STACK
417	// Similarly to EL1_SP0_VECTOR_SWITCH_TO_INT_STACK, we need to take
418	// advantage of EL0_64_VECTOR ending with x0 == sp.  EL0_64_VECTOR also
419	// populates x1 with the thread state, so we can skip reloading it.
420	LOAD_INT_STACK	dst=x1, src=x1, tmp=x0
421	mov		x0, sp
422	mov		sp, x1
423.endmacro
424
425.macro EL0_64_VECTOR_SWITCH_TO_KERN_STACK
426	LOAD_KERN_STACK_TOP	dst=x1, src=x1, tmp=x0
427	mov		x0, sp
428	mov		sp, x1
429.endmacro
430
431el0_synchronous_vector_64_long:
432	EL0_64_VECTOR	sync
433	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
434	adrp	x1, EXT(fleh_synchronous)@page			// Load address for fleh
435	add		x1, x1, EXT(fleh_synchronous)@pageoff
436	b		fleh_dispatch64
437
438el0_irq_vector_64_long:
439	EL0_64_VECTOR	irq
440	EL0_64_VECTOR_SWITCH_TO_INT_STACK
441	adrp	x1, EXT(fleh_irq)@page					// load address for fleh
442	add		x1, x1, EXT(fleh_irq)@pageoff
443	b		fleh_dispatch64
444
445el0_fiq_vector_64_long:
446	EL0_64_VECTOR	fiq
447	EL0_64_VECTOR_SWITCH_TO_INT_STACK
448	adrp	x1, EXT(fleh_fiq)@page					// load address for fleh
449	add		x1, x1, EXT(fleh_fiq)@pageoff
450	b		fleh_dispatch64
451
452el0_serror_vector_64_long:
453	EL0_64_VECTOR	serror
454	EL0_64_VECTOR_SWITCH_TO_KERN_STACK
455	adrp	x1, EXT(fleh_serror)@page				// load address for fleh
456	add		x1, x1, EXT(fleh_serror)@pageoff
457	b		fleh_dispatch64
458
459
/*
 * check_exception_stack
 *
 * Verifies that the stack pointer at SP1 is within the exception stack.
 * If not, we simply hang, as there is no more stack to fall back on.
 */
466
467	.text
468	.align 2
469check_exception_stack:
470	mrs		x18, TPIDR_EL1					// Get thread pointer
471	cbz		x18, Lvalid_exception_stack			// Thread context may not be set early in boot
472	ldr		x18, [x18, ACT_CPUDATAP]
473	cbz		x18, Lcheck_exception_stack_fail	// If thread context is set, cpu data should be too
474	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
475	cmp		sp, x18
476	b.gt	Lcheck_exception_stack_fail	// Hang if above exception stack top
477	sub		x18, x18, EXCEPSTACK_SIZE_NUM			// Find bottom of exception stack
478	cmp		sp, x18
479	b.lt	Lcheck_exception_stack_fail	// Hang if below exception stack bottom
480Lvalid_exception_stack:
481	mov		x18, #0
482	b		Lel1_sp1_synchronous_valid_stack
483
484Lcheck_exception_stack_fail:
4851:
486	wfi
487	b		1b		// Spin for debugger/watchdog
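/*
 * In effect (illustrative): early in boot, before a thread pointer exists,
 * the stack is assumed valid.  Otherwise the CPU data pointer must be set
 * and SP must satisfy
 *	excepstack_top - EXCEPSTACK_SIZE_NUM <= SP <= excepstack_top
 * or we spin in WFI, since there is no usable stack left to panic on.
 */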
488
489#if defined(KERNEL_INTEGRITY_KTRR)
490	.text
491	.align 2
492check_ktrr_sctlr_trap:
493/* We may abort on an instruction fetch on reset when enabling the MMU by
494 * writing SCTLR_EL1 because the page containing the privileged instruction is
495 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
496 * would otherwise panic unconditionally. Check for the condition and return
497 * safe execution to the caller on behalf of the faulting function.
498 *
499 * Expected register state:
500 *  x22 - Kernel virtual base
501 *  x23 - Kernel physical base
502 */
503	sub		sp, sp, ARM_CONTEXT_SIZE	// Make some space on the stack
504	stp		x0, x1, [sp, SS64_X0]		// Stash x0, x1
505	mrs		x0, ESR_EL1					// Check ESR for instr. fetch abort
506	and		x0, x0, #0xffffffffffffffc0	// Mask off ESR.ISS.IFSC
507	movz	w1, #0x8600, lsl #16
508	movk	w1, #0x0000
509	cmp		x0, x1
510	mrs		x0, ELR_EL1					// Check for expected abort address
511	adrp	x1, _pinst_set_sctlr_trap_addr@page
512	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
513	sub		x1, x1, x22					// Convert to physical address
514	add		x1, x1, x23
515	ccmp	x0, x1, #0, eq
516	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
517	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
518	b.ne	Lel1_sp1_synchronous_vector_continue
519	msr		ELR_EL1, lr					// Return to caller
520	ERET_CONTEXT_SYNCHRONIZING
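/*
 * In effect (illustrative): if ESR_EL1 reports an EL1 instruction fetch
 * abort and ELR_EL1 matches the physical alias of _pinst_set_sctlr_trap_addr,
 * this is the expected KTRR trap, so we return to the caller via lr instead
 * of panicking; any other SP1 synchronous exception continues to the normal
 * (fatal) handler.
 */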
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
522
523/* 64-bit first level exception handler dispatcher.
524 * Completes register context saving and branches to a non-returning FLEH.
525 * FLEH can inspect the spilled thread state, but it contains an invalid
526 * thread signature.
527 *
528 * Expects:
529 *  {x0, x1, sp} - saved
530 *  x0 - arm_context_t
531 *  x1 - address of FLEH
532 *  fp - previous stack frame if EL1
533 *  lr - unused
534 *  sp - kernel stack
535 */
536	.text
537	.align 2
538fleh_dispatch64_noreturn:
539#if HAS_APPLE_PAC
540	pacia	x1, sp
541	/* Save arm_saved_state64 with invalid signature */
542	SPILL_REGISTERS KERNEL_MODE, POISON_THREAD_SIGNATURE
543	b	fleh_dispatch64_common
544#else
545	// Fall through to fleh_dispatch64
546#endif
547
548/* 64-bit first level exception handler dispatcher.
549 * Completes register context saving and branches to FLEH.
550 * Expects:
551 *  {x0, x1, sp} - saved
552 *  x0 - arm_context_t
553 *  x1 - address of FLEH
554 *  fp - previous stack frame if EL1
555 *  lr - unused
556 *  sp - kernel stack
557 */
558	.text
559	.align 2
560fleh_dispatch64:
561#if HAS_APPLE_PAC
562	pacia	x1, sp
563#endif
564
565	/* Save arm_saved_state64 */
566	SPILL_REGISTERS KERNEL_MODE
567
568fleh_dispatch64_common:
569	/* If exception is from userspace, zero unused registers */
570	and		x23, x23, #(PSR64_MODE_EL_MASK)
571	cmp		x23, #(PSR64_MODE_EL0)
572	bne		1f
573
574	SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
5752:
576	mov		x2, #0
577	mov		x3, #0
578	mov		x4, #0
579	mov		x5, #0
580	mov		x6, #0
581	mov		x7, #0
582	mov		x8, #0
583	mov		x9, #0
584	mov		x10, #0
585	mov		x11, #0
586	mov		x12, #0
587	mov		x13, #0
588	mov		x14, #0
589	mov		x15, #0
590	mov		x16, #0
591	mov		x17, #0
592	mov		x18, #0
593	mov		x19, #0
594	mov		x20, #0
595	/* x21, x22 cleared in common case below */
596	mov		x23, #0
597	mov		x24, #0
598	mov		x25, #0
599#if !XNU_MONITOR
600	mov		x26, #0
601#endif
602	mov		x27, #0
603	mov		x28, #0
604	mov		fp, #0
605	mov		lr, #0
6061:
607
608	mov		x21, x0								// Copy arm_context_t pointer to x21
609	mov		x22, x1								// Copy handler routine to x22
610
611#if XNU_MONITOR
612	/* Zero x26 to indicate that this should not return to the PPL. */
613	mov		x26, #0
614#endif
615
616#if PRECISE_USER_KERNEL_TIME
617	tst		x23, PSR64_MODE_EL_MASK				// If any EL MODE bits are set, we're coming from
618	b.ne	1f									// kernel mode, so skip precise time update
619	PUSH_FRAME
620	bl		EXT(recount_leave_user)
621	POP_FRAME
622	mov		x0, x21								// Reload arm_context_t pointer
6231:
624#endif /* PRECISE_USER_KERNEL_TIME */
625
626	/* Dispatch to FLEH */
627
628#if HAS_APPLE_PAC
629	braa	x22,sp
630#else
631	br		x22
632#endif
633
634
635	.text
636	.align 2
637	.global EXT(fleh_synchronous)
638LEXT(fleh_synchronous)
639
640UNWIND_PROLOGUE
641UNWIND_DIRECTIVES
642
643	mrs		x1, ESR_EL1							// Load exception syndrome
644	mrs		x2, FAR_EL1							// Load fault address
645
646	/* At this point, the LR contains the value of ELR_EL1. In the case of an
647	 * instruction prefetch abort, this will be the faulting pc, which we know
648	 * to be invalid. This will prevent us from backtracing through the
649	 * exception if we put it in our stack frame, so we load the LR from the
650	 * exception saved state instead.
651	 */
652	and		w6, w1, #(ESR_EC_MASK)
653	lsr		w6, w6, #(ESR_EC_SHIFT)
654	mov		w4, #(ESR_EC_IABORT_EL1)
655	cmp		w6, w4
656	b.eq	Lfleh_sync_load_lr
657Lvalid_link_register:
658
659
660	PUSH_FRAME
661	bl		EXT(sleh_synchronous)
662	POP_FRAME
663
664#if XNU_MONITOR
665	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
666#endif
667
668	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
669	b		exception_return_dispatch
670
671Lfleh_sync_load_lr:
672	ldr		lr, [x0, SS64_LR]
673	b Lvalid_link_register
674
675UNWIND_EPILOGUE
676
677
/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *  x0 - arm_context_t
 * x23 - CPSR
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
688.macro BEGIN_INTERRUPT_HANDLER
689	mrs		x22, TPIDR_EL1
690	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
691	/* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence  */
692	ldr		w1, [x23, CPU_STAT_IRQ]
693	add		w1, w1, #1							// Increment count
694	str		w1, [x23, CPU_STAT_IRQ]				// Update  IRQ count
695	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
696	add		w1, w1, #1					// Increment count
697	str		w1, [x23, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
698	/* Increment preempt count */
699	ldr		w1, [x22, ACT_PREEMPT_CNT]
700	add		w1, w1, #1
701	str		w1, [x22, ACT_PREEMPT_CNT]
702	/* Store context in int state */
703	str		x0, [x23, CPU_INT_STATE] 			// Saved context in cpu_int_state
704.endmacro
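/*
 * Roughly (illustrative pseudocode; field names approximate the assym.s
 * offsets used above):
 *
 *	cpu = current_thread()->cpu_data;
 *	cpu->cpu_stat_irq++;		// total IRQ count
 *	cpu->cpu_stat_irq_wake++;	// post-wake IRQ count
 *	current_thread()->preempt_count++;
 *	cpu->cpu_int_state = saved_context;
 */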
705
/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 * x22 - Live TPIDR_EL1 value (thread address)
 * x23 - Address of the current CPU data structure
 * w24 - 0 if kdebug is disabled, nonzero otherwise
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
717.macro END_INTERRUPT_HANDLER
718	/* Clear int context */
719	str		xzr, [x23, CPU_INT_STATE]
720	/* Decrement preempt count */
721	ldr		w0, [x22, ACT_PREEMPT_CNT]
722	cbnz	w0, 1f								// Detect underflow
723	b		preempt_underflow
7241:
725	sub		w0, w0, #1
726	str		w0, [x22, ACT_PREEMPT_CNT]
727	/* Switch back to kernel stack */
728	LOAD_KERN_STACK_TOP	dst=x0, src=x22, tmp=x28
729	mov		sp, x0
730	/* Generate a CPU-local event to terminate a post-IRQ WFE */
731	sevl
732.endmacro
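/*
 * Roughly the inverse of BEGIN_INTERRUPT_HANDLER (illustrative): clear
 * cpu_int_state, drop the preemption count (panicking on underflow), move SP
 * from the interrupt stack back to the thread's kernel stack, and issue sevl
 * so that a WFE-based idle sequence interrupted by this IRQ wakes promptly.
 */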
733
734	.text
735	.align 2
736	.global EXT(fleh_irq)
737LEXT(fleh_irq)
738UNWIND_PROLOGUE
739UNWIND_DIRECTIVES
740	BEGIN_INTERRUPT_HANDLER
741	PUSH_FRAME
742	bl		EXT(sleh_irq)
743	POP_FRAME
744	END_INTERRUPT_HANDLER
745
746#if XNU_MONITOR
747	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
748#endif
749
750	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
751	b		exception_return_dispatch
752UNWIND_EPILOGUE
753
754	.text
755	.align 2
756	.global EXT(fleh_fiq_generic)
757LEXT(fleh_fiq_generic)
758	PANIC_UNIMPLEMENTED
759
760	.text
761	.align 2
762	.global EXT(fleh_fiq)
763LEXT(fleh_fiq)
764UNWIND_PROLOGUE
765UNWIND_DIRECTIVES
766	BEGIN_INTERRUPT_HANDLER
767	PUSH_FRAME
768	bl		EXT(sleh_fiq)
769	POP_FRAME
770	END_INTERRUPT_HANDLER
771
772#if XNU_MONITOR
773	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
774#endif
775
776	mov		x28, #1			// Set a bit to check PFZ if there are ASTs
777	b		exception_return_dispatch
778UNWIND_EPILOGUE
779
780	.text
781	.align 2
782	.global EXT(fleh_serror)
783LEXT(fleh_serror)
784UNWIND_PROLOGUE
785UNWIND_DIRECTIVES
786	mrs		x1, ESR_EL1							// Load exception syndrome
787	mrs		x2, FAR_EL1							// Load fault address
788
789	PUSH_FRAME
790	bl		EXT(sleh_serror)
791	POP_FRAME
792
793#if XNU_MONITOR
794	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
795#endif
796
	mov		x28, xzr		// Don't need to check PFZ if there are ASTs
798	b		exception_return_dispatch
799UNWIND_EPILOGUE
800
801/*
802 * Register state saved before we get here.
803 */
804	.text
805	.align 2
806fleh_invalid_stack:
807	mrs		x1, ESR_EL1							// Load exception syndrome
808	str		x1, [x0, SS64_ESR]
809	mrs		x2, FAR_EL1							// Load fault address
810	str		x2, [x0, SS64_FAR]
811	PUSH_FRAME
812	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
813	b 		.
814
815	.text
816	.align 2
817fleh_synchronous_sp1:
818	mrs		x1, ESR_EL1							// Load exception syndrome
819	str		x1, [x0, SS64_ESR]
820	mrs		x2, FAR_EL1							// Load fault address
821	str		x2, [x0, SS64_FAR]
822
823
824	PUSH_FRAME
825	bl		EXT(sleh_synchronous_sp1)
826	b 		.
827
828	.text
829	.align 2
830fleh_irq_sp1:
831	mov		x1, x0
832	adr		x0, Lsp1_irq_str
833	b		EXT(panic_with_thread_kernel_state)
834Lsp1_irq_str:
835	.asciz "IRQ exception taken while SP1 selected"
836
837	.text
838	.align 2
839fleh_fiq_sp1:
840	mov		x1, x0
841	adr		x0, Lsp1_fiq_str
842	b		EXT(panic_with_thread_kernel_state)
843Lsp1_fiq_str:
844	.asciz "FIQ exception taken while SP1 selected"
845
846	.text
847	.align 2
848fleh_serror_sp1:
849	mov		x1, x0
850	adr		x0, Lsp1_serror_str
851	b		EXT(panic_with_thread_kernel_state)
852Lsp1_serror_str:
853	.asciz "Asynchronous exception taken while SP1 selected"
854
855	.text
856	.align 2
857exception_return_dispatch:
858	ldr		w0, [x21, SS64_CPSR]
859	tst		w0, PSR64_MODE_EL_MASK
860	b.ne		EXT(return_to_kernel) // return to kernel if M[3:2] > 0
861	b		return_to_user
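/*
 * i.e. (illustrative):
 *
 *	if (saved_cpsr & PSR64_MODE_EL_MASK)	// exception came from EL1
 *		return_to_kernel();
 *	else					// exception came from EL0
 *		return_to_user();
 */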
862
863
864	.text
865	.align 2
866	.global EXT(return_to_kernel)
867LEXT(return_to_kernel)
868	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
869	mrs		x3, TPIDR_EL1                           // Load thread pointer
870	ldr		w1, [x3, ACT_PREEMPT_CNT]               // Load preemption count
871	msr		DAIFSet, #DAIFSC_ALL                    // Disable exceptions
872	cbnz	x1, exception_return_unint_tpidr_x3     // If preemption disabled, skip AST check
873	ldr		x1, [x3, ACT_CPUDATAP]                  // Get current CPU data pointer
874	ldr		w2, [x1, CPU_PENDING_AST]               // Get ASTs
875	tst		w2, AST_URGENT                          // If no urgent ASTs, skip ast_taken
876	b.eq	exception_return_unint_tpidr_x3
877	mov		sp, x21                                 // Switch to thread stack for preemption
878	PUSH_FRAME
879	bl		EXT(ast_taken_kernel)                   // Handle AST_URGENT
880	POP_FRAME
881	b		exception_return
882
883	.text
884	.globl EXT(thread_bootstrap_return)
885LEXT(thread_bootstrap_return)
886#if CONFIG_DTRACE
887	bl		EXT(dtrace_thread_bootstrap)
888#endif
889#if KASAN_TBI
890	PUSH_FRAME
891	bl		EXT(__asan_handle_no_return)
892	POP_FRAME
893#endif /* KASAN_TBI */
894	b		EXT(arm64_thread_exception_return)
895
896	.text
897	.globl EXT(arm64_thread_exception_return)
898LEXT(arm64_thread_exception_return)
899	mrs		x0, TPIDR_EL1
900	LOAD_USER_PCB	dst=x21, src=x0, tmp=x28
901	mov		x28, xzr
902
903	//
904	// Fall Through to return_to_user from arm64_thread_exception_return.
905	// Note that if we move return_to_user or insert a new routine
906	// below arm64_thread_exception_return, the latter will need to change.
907	//
908	.text
909/* x21 is always the machine context pointer when we get here
910 * x28 is a bit indicating whether or not we should check if pc is in pfz */
911return_to_user:
912check_user_asts:
913#if KASAN_TBI
914	PUSH_FRAME
915	bl		EXT(__asan_handle_no_return)
916	POP_FRAME
917#endif /* KASAN_TBI */
918	mrs		x3, TPIDR_EL1					// Load thread pointer
919
920	movn		w2, #0
921	str		w2, [x3, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user
922
923#if MACH_ASSERT
924	ldr		w0, [x3, ACT_PREEMPT_CNT]
925	cbnz		w0, preempt_count_notzero			// Detect unbalanced enable/disable preemption
926#endif
927
928	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
929	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
930	ldr		w0, [x4, CPU_PENDING_AST]			// Get ASTs
931	cbz		w0, no_asts							// If no asts, skip ahead
932
933	cbz		x28, user_take_ast					// If we don't need to check PFZ, just handle asts
934
935	/* At this point, we have ASTs and we need to check whether we are running in the
936	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
937	 * the PFZ since we don't want to handle getting a signal or getting suspended
938	 * while holding a spinlock in userspace.
939	 *
940	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
941	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
942	 * to use it to indicate to userspace to come back to take a delayed
943	 * preemption, at which point the ASTs will be handled. */
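	/*
	 * Illustrative pseudocode for the check below:
	 *
	 *	if (commpage_is_in_pfz64(saved_state->pc)) {
	 *		saved_state->x15 = 1;	// ask userspace to take a delayed preemption
	 *		goto no_asts;		// defer the ASTs for now
	 *	} else {
	 *		goto user_take_ast;	// handle the ASTs immediately
	 *	}
	 */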
944	mov		x28, xzr							// Clear the "check PFZ" bit so that we don't do this again
945	mov		x19, x0								// Save x0 since it will be clobbered by commpage_is_in_pfz64
946
947	ldr		x0, [x21, SS64_PC]					// Load pc from machine state
948	bl		EXT(commpage_is_in_pfz64)			// pc in pfz?
949	cbz		x0, restore_and_check_ast			// No, deal with other asts
950
951	mov		x0, #1
952	str		x0, [x21, SS64_X15]					// Mark x15 for userspace to take delayed preemption
953	mov		x0, x19								// restore x0 to asts
954	b		no_asts								// pretend we have no asts
955
956restore_and_check_ast:
957	mov		x0, x19								// restore x0
958	b	user_take_ast							// Service pending asts
959no_asts:
960
961
962#if PRECISE_USER_KERNEL_TIME
963	mov		x19, x3						// Preserve thread pointer across function call
964	PUSH_FRAME
965	bl		EXT(recount_enter_user)
966	POP_FRAME
967	mov		x3, x19
968#endif /* PRECISE_USER_KERNEL_TIME */
969
970#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
971	/* Watchtower
972	 *
973	 * Here we attempt to enable NEON access for EL0. If the last entry into the
974	 * kernel from user-space was due to an IRQ, the monitor will have disabled
975	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
976	 * check in with the monitor in order to reenable NEON for EL0 in exchange
977	 * for routing IRQs through the monitor (2). This way the monitor will
978	 * always 'own' either IRQs or EL0 NEON.
979	 *
980	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
981	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
982	 * here.
983	 *
984	 * EL0 user ________ IRQ                                            ______
985	 * EL1 xnu              \   ______________________ CPACR_EL1     __/
986	 * EL3 monitor           \_/                                \___/
987	 *
988	 *                       (1)                                 (2)
989	 */
990
991	mov		x0, #(CPACR_FPEN_ENABLE)
992	msr		CPACR_EL1, x0
993#endif
994
995	/* Establish this thread's debug state as the live state on the selected CPU. */
996	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
997	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
998	ldr		x0, [x3, ACT_DEBUGDATA]
999	cmp		x0, x1
	beq		L_skip_user_set_debug_state			// Skip if the live CPU debug state already matches the thread debug state
1001
1002
1003	PUSH_FRAME
1004	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
1005	POP_FRAME
1006	mrs		x3, TPIDR_EL1						// Reload thread pointer
1007	ldr		x4, [x3, ACT_CPUDATAP]				// Reload CPU data pointer
1008L_skip_user_set_debug_state:
1009	ldrsh	x0, [x4, CPU_TPIDR_EL0]
1010	msr		TPIDR_EL0, x0
1011
1012
1013	b		exception_return_unint_tpidr_x3
1014
1015exception_return:
1016	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
1017exception_return_unint:
1018	mrs		x3, TPIDR_EL1					// Load thread pointer
1019exception_return_unint_tpidr_x3:
1020	mov		sp, x21						// Reload the pcb pointer
1021
1022#if !__ARM_KERNEL_PROTECT__
1023	/*
	 * Restore x18 only if the task has the entitlement that allows its
	 * use. Such tasks are very few, and they can move to something else
	 * once we use x18 for something more global.
1027	 *
1028	 * This is not done here on devices with __ARM_KERNEL_PROTECT__, as
1029	 * that uses x18 as one of the global use cases (and will reset
1030	 * x18 later down below).
1031	 *
1032	 * It's also unconditionally skipped for translated threads,
1033	 * as those are another use case, one where x18 must be preserved.
1034	 */
1035	ldr		w0, [x3, TH_ARM_MACHINE_FLAGS]
1036	mov		x18, #0
1037	tbz		w0, ARM_MACHINE_THREAD_PRESERVE_X18_SHIFT, Lexception_return_restore_registers
1038
1039exception_return_unint_tpidr_x3_restore_x18:
1040	ldr		x18, [sp, SS64_X18]
1041
1042#else /* !__ARM_KERNEL_PROTECT__ */
1043	/*
1044	 * If we are going to eret to userspace, we must return through the EL0
1045	 * eret mapping.
1046	 */
1047	ldr		w1, [sp, SS64_CPSR]									// Load CPSR
1048	tbnz		w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1
1049
1050	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
1051	adrp		x0, EXT(ExceptionVectorsBase)@page				// Load vector base
1052	adrp		x1, Lexception_return_restore_registers@page	// Load target PC
1053	add		x1, x1, Lexception_return_restore_registers@pageoff
1054	MOV64		x2, ARM_KERNEL_PROTECT_EXCEPTION_START			// Load EL0 vector address
1055	sub		x1, x1, x0											// Calculate delta
1056	add		x0, x2, x1											// Convert KVA to EL0 vector address
1057	br		x0
1058
1059Lskip_el0_eret_mapping:
1060#endif /* !__ARM_KERNEL_PROTECT__ */
1061
1062Lexception_return_restore_registers:
1063	mov 	x0, sp								// x0 = &pcb
1064	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
1065	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, x25, el0_state_allowed=1
1066
1067	msr		ELR_EL1, x1							// Load the return address into ELR
1068	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
1069
1070/* Restore special register state */
1071	ldr		w3, [sp, NS64_FPSR]
1072	ldr		w4, [sp, NS64_FPCR]
1073
1074	msr		FPSR, x3
1075	mrs		x5, FPCR
1076	CMSR FPCR, x5, x4, 1
10771:
1078
1079
1080
1081	/* Restore arm_neon_saved_state64 */
1082	ldp		q0, q1, [x0, NS64_Q0]
1083	ldp		q2, q3, [x0, NS64_Q2]
1084	ldp		q4, q5, [x0, NS64_Q4]
1085	ldp		q6, q7, [x0, NS64_Q6]
1086	ldp		q8, q9, [x0, NS64_Q8]
1087	ldp		q10, q11, [x0, NS64_Q10]
1088	ldp		q12, q13, [x0, NS64_Q12]
1089	ldp		q14, q15, [x0, NS64_Q14]
1090	ldp		q16, q17, [x0, NS64_Q16]
1091	ldp		q18, q19, [x0, NS64_Q18]
1092	ldp		q20, q21, [x0, NS64_Q20]
1093	ldp		q22, q23, [x0, NS64_Q22]
1094	ldp		q24, q25, [x0, NS64_Q24]
1095	ldp		q26, q27, [x0, NS64_Q26]
1096	ldp		q28, q29, [x0, NS64_Q28]
1097	ldp		q30, q31, [x0, NS64_Q30]
1098
1099	/* Restore arm_saved_state64 */
1100
1101	// Skip x0, x1 - we're using them
1102	ldp		x2, x3, [x0, SS64_X2]
1103	ldp		x4, x5, [x0, SS64_X4]
1104	ldp		x6, x7, [x0, SS64_X6]
1105	ldp		x8, x9, [x0, SS64_X8]
1106	ldp		x10, x11, [x0, SS64_X10]
1107	ldp		x12, x13, [x0, SS64_X12]
1108	ldp		x14, x15, [x0, SS64_X14]
1109	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1110	// Skip x18 - already restored or trashed above (below with __ARM_KERNEL_PROTECT__)
1111	ldr		x19, [x0, SS64_X19]
1112	ldp		x20, x21, [x0, SS64_X20]
1113	ldp		x22, x23, [x0, SS64_X22]
1114	ldp		x24, x25, [x0, SS64_X24]
1115	ldp		x26, x27, [x0, SS64_X26]
1116	ldr		x28, [x0, SS64_X28]
1117	ldr		fp, [x0, SS64_FP]
1118	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
1119
1120	// Restore stack pointer and our last two GPRs
1121	ldr		x1, [x0, SS64_SP]
1122	mov		sp, x1
1123
1124#if __ARM_KERNEL_PROTECT__
1125	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
1126#endif /* __ARM_KERNEL_PROTECT__ */
1127
1128	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs
1129
1130#if __ARM_KERNEL_PROTECT__
1131	/* If we are going to eret to userspace, we must unmap the kernel. */
1132	tbnz		w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch
1133
1134	/* Update TCR to unmap the kernel. */
1135	MOV64		x18, TCR_EL1_USER
1136	msr		TCR_EL1, x18
1137
1138	/*
1139	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
1140	 * each other due to the microarchitecture.
1141	 */
1142#if !defined(APPLE_ARM64_ARCH_FAMILY)
1143	isb		sy
1144#endif
1145
1146	/* Switch to the user ASID (low bit clear) for the task. */
1147	mrs		x18, TTBR0_EL1
1148	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
1149	msr		TTBR0_EL1, x18
1150	mov		x18, #0
1151
1152	/* We don't need an ISB here, as the eret is synchronizing. */
1153Lskip_ttbr1_switch:
1154#endif /* __ARM_KERNEL_PROTECT__ */
1155
1156	ERET_CONTEXT_SYNCHRONIZING
1157
1158user_take_ast:
1159	PUSH_FRAME
1160	bl		EXT(ast_taken_user)							// Handle all ASTs, may return via continuation
1161	POP_FRAME
1162	b		check_user_asts								// Now try again
1163
1164	.text
1165	.align 2
1166preempt_underflow:
1167	mrs		x0, TPIDR_EL1
1168	str		x0, [sp, #-16]!						// We'll print thread pointer
1169	adr		x0, L_underflow_str					// Format string
1170	CALL_EXTERN panic							// Game over
1171
1172L_underflow_str:
1173	.asciz "Preemption count negative on thread %p"
1174.align 2
1175
1176#if MACH_ASSERT
1177	.text
1178	.align 2
1179preempt_count_notzero:
1180	mrs		x0, TPIDR_EL1
1181	str		x0, [sp, #-16]!						// We'll print thread pointer
1182	ldr		w0, [x0, ACT_PREEMPT_CNT]
1183	str		w0, [sp, #8]
1184	adr		x0, L_preempt_count_notzero_str				// Format string
1185	CALL_EXTERN panic							// Game over
1186
1187L_preempt_count_notzero_str:
1188	.asciz "preemption count not 0 on thread %p (%u)"
1189#endif /* MACH_ASSERT */
1190
1191#if __ARM_KERNEL_PROTECT__
1192	/*
1193	 * This symbol denotes the end of the exception vector/eret range; we page
1194	 * align it so that we can avoid mapping other text in the EL0 exception
1195	 * vector mapping.
1196	 */
1197	.text
1198	.align 14
1199	.globl EXT(ExceptionVectorsEnd)
1200LEXT(ExceptionVectorsEnd)
1201#endif /* __ARM_KERNEL_PROTECT__ */
1202
1203#if XNU_MONITOR
1204
1205/*
1206 * Functions to preflight the fleh handlers when the PPL has taken an exception;
1207 * mostly concerned with setting up state for the normal fleh code.
1208 */
1209	.text
1210	.align 2
1211fleh_synchronous_from_ppl:
1212	/* Save x0. */
1213	mov		x15, x0
1214
1215	/* Grab the ESR. */
1216	mrs		x1, ESR_EL1							// Get the exception syndrome
1217
1218	/* If the stack pointer is corrupt, it will manifest either as a data abort
1219	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
1220	 * these quickly by testing bit 5 of the exception class.
1221	 */
1222	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
1223	mrs		x0, SP_EL0							// Get SP_EL0
1224
1225	/* Perform high level checks for stack corruption. */
1226	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
1227	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
1228	cmp		x1, x2								// If we have a stack alignment exception
1229	b.eq	Lcorrupt_ppl_stack						// ...the stack is definitely corrupted
1230	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
1231	cmp		x1, x2								// If we have a data abort, we need to
1232	b.ne	Lvalid_ppl_stack						// ...validate the stack pointer
1233
1234Ltest_pstack:
1235	/* Bounds check the PPL stack. */
1236	adrp	x10, EXT(pmap_stacks_start)@page
1237	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
1238	adrp	x11, EXT(pmap_stacks_end)@page
1239	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
1240	cmp		x0, x10
1241	b.lo	Lcorrupt_ppl_stack
1242	cmp		x0, x11
1243	b.hi	Lcorrupt_ppl_stack
1244
1245Lvalid_ppl_stack:
1246	/* Restore x0. */
1247	mov		x0, x15
1248
1249	/* Switch back to the kernel stack. */
1250	msr		SPSel, #0
1251	GET_PMAP_CPU_DATA x5, x6, x7
1252	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1253	mov		sp, x6
1254
1255	/* Hand off to the synch handler. */
1256	b		EXT(fleh_synchronous)
1257
1258Lcorrupt_ppl_stack:
1259	/* Restore x0. */
1260	mov		x0, x15
1261
1262	/* Hand off to the invalid stack handler. */
1263	b		fleh_invalid_stack
1264
1265fleh_fiq_from_ppl:
1266	SWITCH_TO_INT_STACK	tmp=x25
1267	b		EXT(fleh_fiq)
1268
1269fleh_irq_from_ppl:
1270	SWITCH_TO_INT_STACK	tmp=x25
1271	b		EXT(fleh_irq)
1272
1273fleh_serror_from_ppl:
1274	GET_PMAP_CPU_DATA x5, x6, x7
1275	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
1276	mov		sp, x6
1277	b		EXT(fleh_serror)
1278
1279
1280
1281
1282	// x15: ppl call number
1283	// w10: ppl_state
1284	// x20: gxf_enter caller's DAIF
1285	.globl EXT(ppl_trampoline_start)
1286LEXT(ppl_trampoline_start)
1287
1288
1289#error "XPRR configuration error"
1290	cmp		x14, x21
1291	b.ne	Lppl_fail_dispatch
1292
1293	/* Verify the request ID. */
1294	cmp		x15, PMAP_COUNT
1295	b.hs	Lppl_fail_dispatch
1296
1297	GET_PMAP_CPU_DATA	x12, x13, x14
1298
1299	/* Mark this CPU as being in the PPL. */
1300	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1301
1302	cmp		w9, #PPL_STATE_KERNEL
1303	b.eq		Lppl_mark_cpu_as_dispatching
1304
1305	/* Check to see if we are trying to trap from within the PPL. */
1306	cmp		w9, #PPL_STATE_DISPATCH
1307	b.eq		Lppl_fail_dispatch_ppl
1308
1309
1310	/* Ensure that we are returning from an exception. */
1311	cmp		w9, #PPL_STATE_EXCEPTION
1312	b.ne		Lppl_fail_dispatch
1313
	// w10 (ppl_state) is set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL
1316	cmp		w10, #PPL_STATE_EXCEPTION
1317	b.ne		Lppl_fail_dispatch
1318
1319	/* This is an exception return; set the CPU to the dispatching state. */
1320	mov		w9, #PPL_STATE_DISPATCH
1321	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1322
1323	/* Find the save area, and return to the saved PPL context. */
1324	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
1325	mov		sp, x0
1326	b		EXT(return_to_ppl)
1327
1328Lppl_mark_cpu_as_dispatching:
1329	cmp		w10, #PPL_STATE_KERNEL
1330	b.ne		Lppl_fail_dispatch
1331
1332	/* Mark the CPU as dispatching. */
1333	mov		w13, #PPL_STATE_DISPATCH
1334	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
1335
1336	/* Switch to the regular PPL stack. */
1337	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
1338	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
1339
1340	// SP0 is thread stack here
1341	mov		x21, sp
1342	// SP0 is now PPL stack
1343	mov		sp, x9
1344
1345	/* Save the old stack pointer off in case we need it. */
1346	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1347
1348	/* Get the handler for the request */
1349	adrp	x9, EXT(ppl_handler_table)@page
1350	add		x9, x9, EXT(ppl_handler_table)@pageoff
1351	add		x9, x9, x15, lsl #3
1352	ldr		x10, [x9]
1353
1354	/* Branch to the code that will invoke the PPL request. */
1355	b		EXT(ppl_dispatch)
1356
1357Lppl_fail_dispatch_ppl:
1358	/* Switch back to the kernel stack. */
1359	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1360	mov		sp, x10
1361
1362Lppl_fail_dispatch:
1363	/* Indicate that we failed. */
1364	mov		x15, #PPL_EXIT_BAD_CALL
1365
1366	/* Move the DAIF bits into the expected register. */
1367	mov		x10, x20
1368
1369	/* Return to kernel mode. */
1370	b		ppl_return_to_kernel_mode
1371
1372Lppl_dispatch_exit:
1373
1374	/* Indicate that we are cleanly exiting the PPL. */
1375	mov		x15, #PPL_EXIT_DISPATCH
1376
1377	/* Switch back to the original (kernel thread) stack. */
1378	mov		sp, x21
1379
1380	/* Move the saved DAIF bits. */
1381	mov		x10, x20
1382
1383	/* Clear the in-flight pmap pointer */
1384	add		x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
1385	stlr		xzr, [x13]
1386
1387	/* Clear the old stack pointer. */
1388	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
1389
1390	/*
1391	 * Mark the CPU as no longer being in the PPL.  We spin if our state
1392	 * machine is broken.
1393	 */
1394	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1395	cmp		w9, #PPL_STATE_DISPATCH
1396	b.ne		.
1397	mov		w9, #PPL_STATE_KERNEL
1398	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
1399
1400	/* Return to the kernel. */
1401	b ppl_return_to_kernel_mode
1402
1403
1404
1405	.text
1406ppl_exit:
1407	/*
1408	 * If we are dealing with an exception, hand off to the first level
1409	 * exception handler.
1410	 */
1411	cmp		x15, #PPL_EXIT_EXCEPTION
1412	b.eq	Ljump_to_fleh_handler
1413
1414	/* If this was a panic call from the PPL, reinvoke panic. */
1415	cmp		x15, #PPL_EXIT_PANIC_CALL
1416	b.eq	Ljump_to_panic_trap_to_debugger
1417
1418	/*
1419	 * Stash off the original DAIF in the high bits of the exit code register.
1420	 * We could keep this in a dedicated register, but that would require us to copy it to
1421	 * an additional callee-save register below (e.g. x22), which in turn would require that
1422	 * register to be saved/restored at PPL entry/exit.
1423	 */
1424	add		x15, x15, x10, lsl #32
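	/* i.e. x15 = (original_DAIF << 32) | exit_reason; the DAIF half is
	 * unpacked again with ubfx/bfc below, just before DAIF is restored. */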
1425
1426	/* Load the preemption count. */
1427	mrs		x10, TPIDR_EL1
1428	ldr		w12, [x10, ACT_PREEMPT_CNT]
1429
1430	/* Detect underflow */
1431	cbnz	w12, Lno_preempt_underflow
1432	b		preempt_underflow
1433Lno_preempt_underflow:
1434
1435	/* Lower the preemption count. */
1436	sub		w12, w12, #1
1437
1438#if SCHED_HYGIENE_DEBUG
1439	/* Collect preemption disable measurement if necessary. */
1440
1441	/*
1442	 * Only collect measurement if this reenabled preemption,
1443	 * and SCHED_HYGIENE_MARKER is set.
1444	 */
1445	mov		x20, #SCHED_HYGIENE_MARKER
1446	cmp		w12, w20
1447	b.ne	Lskip_collect_measurement
1448
1449	/* Stash our return value and return reason. */
1450	mov		x20, x0
1451	mov		x21, x15
1452
1453	/* Collect measurement. */
1454	bl		EXT(_collect_preemption_disable_measurement)
1455
1456	/* Restore the return value and the return reason. */
1457	mov		x0, x20
1458	mov		x15, x21
1459	/* ... and w12, which is now 0. */
1460	mov		w12, #0
1461
1462	/* Restore the thread pointer into x10. */
1463	mrs		x10, TPIDR_EL1
1464
1465Lskip_collect_measurement:
1466#endif /* SCHED_HYGIENE_DEBUG */
1467
1468	/* Save the lowered preemption count. */
1469	str		w12, [x10, ACT_PREEMPT_CNT]
1470
	/* Skip ASTs if the preemption count is not zero. */
1472	cbnz	x12, Lppl_skip_ast_taken
1473
1474	/*
1475	 * Skip the AST check if interrupts were originally disabled.
1476	 * The original DAIF state prior to PPL entry is stored in the upper
1477	 * 32 bits of x15.
1478	 */
1479	tbnz		x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken
1480
	/* If there is no urgent AST, skip the AST. */
1482	ldr		x12, [x10, ACT_CPUDATAP]
1483	ldr		w14, [x12, CPU_PENDING_AST]
1484	tst		w14, AST_URGENT
1485	b.eq	Lppl_skip_ast_taken
1486
1487	/* Stash our return value and return reason. */
1488	mov		x20, x0
1489	mov		x21, x15
1490
1491	/* Handle the AST. */
1492	bl		EXT(ast_taken_kernel)
1493
1494	/* Restore the return value and the return reason. */
1495	mov		x15, x21
1496	mov		x0, x20
1497
1498Lppl_skip_ast_taken:
1499
1500	/* Extract caller DAIF from high-order bits of exit code */
1501	ubfx	x10, x15, #32, #32
1502	bfc		x15, #32, #32
1503	msr		DAIF, x10
1504
1505	/* Pop the stack frame. */
1506	ldp		x29, x30, [sp, #0x10]
1507	ldp		x20, x21, [sp], #0x20
1508
1509	/* Check to see if this was a bad request. */
1510	cmp		x15, #PPL_EXIT_BAD_CALL
1511	b.eq	Lppl_bad_call
1512
1513	/* Return. */
1514	ARM64_STACK_EPILOG
1515
1516	.align 2
1517Ljump_to_fleh_handler:
1518	br	x25
1519
1520	.align 2
1521Ljump_to_panic_trap_to_debugger:
1522	b		EXT(panic_trap_to_debugger)
1523
1524Lppl_bad_call:
1525	/* Panic. */
1526	adrp	x0, Lppl_bad_call_panic_str@page
1527	add		x0, x0, Lppl_bad_call_panic_str@pageoff
1528	b		EXT(panic)
1529
1530	.text
1531	.align 2
1532	.globl EXT(ppl_dispatch)
1533LEXT(ppl_dispatch)
1534	/*
1535	 * Save a couple of important registers (implementation detail; x12 has
1536	 * the PPL per-CPU data address; x13 is not actually interesting).
1537	 */
1538	stp		x12, x13, [sp, #-0x10]!
1539
1540	/*
1541	 * Restore the original AIF state, force D set to mask debug exceptions
1542	 * while PPL code runs.
1543	 */
1544	orr		x8, x20, DAIF_DEBUGF
1545	msr		DAIF, x8
1546
1547	/*
1548	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
1549	 * but the exception vectors will deal with this properly.
1550	 */
1551
1552	/* Invoke the PPL method. */
1553#ifdef HAS_APPLE_PAC
1554	blraa		x10, x9
1555#else
1556	blr		x10
1557#endif
1558
1559	/* Disable DAIF. */
1560	msr		DAIFSet, #(DAIFSC_ALL)
1561
1562	/* Restore those important registers. */
1563	ldp		x12, x13, [sp], #0x10
1564
1565	/* Mark this as a regular return, and hand off to the return path. */
1566	b		Lppl_dispatch_exit
1567
1568	.text
1569	.align 2
1570	.globl EXT(ppl_bootstrap_dispatch)
1571LEXT(ppl_bootstrap_dispatch)
1572	/* Verify the PPL request. */
1573	cmp		x15, PMAP_COUNT
1574	b.hs	Lppl_fail_bootstrap_dispatch
1575
1576	/* Get the requested PPL routine. */
1577	adrp	x9, EXT(ppl_handler_table)@page
1578	add		x9, x9, EXT(ppl_handler_table)@pageoff
1579	add		x9, x9, x15, lsl #3
1580	ldr		x10, [x9]
1581
1582	/* Invoke the requested PPL routine. */
1583#ifdef HAS_APPLE_PAC
1584	blraa		x10, x9
1585#else
1586	blr		x10
1587#endif
1588	LOAD_PMAP_CPU_DATA	x9, x10, x11
1589
1590	/* Clear the in-flight pmap pointer */
1591	add		x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
1592	stlr		xzr, [x9]
1593
1594	/* Stash off the return value */
1595	mov		x20, x0
1596	/* Drop the preemption count */
1597	bl		EXT(_enable_preemption)
1598	mov		x0, x20
1599
1600	/* Pop the stack frame. */
1601	ldp		x29, x30, [sp, #0x10]
1602	ldp		x20, x21, [sp], #0x20
1603#if __has_feature(ptrauth_returns)
1604	retab
1605#else
1606	ret
1607#endif
1608
1609Lppl_fail_bootstrap_dispatch:
1610	/* Pop our stack frame and panic. */
1611	ldp		x29, x30, [sp, #0x10]
1612	ldp		x20, x21, [sp], #0x20
1613#if __has_feature(ptrauth_returns)
1614	autibsp
1615#endif
1616	adrp	x0, Lppl_bad_call_panic_str@page
1617	add		x0, x0, Lppl_bad_call_panic_str@pageoff
1618	b		EXT(panic)
1619
1620	.text
1621	.align 2
1622	.globl EXT(ml_panic_trap_to_debugger)
1623LEXT(ml_panic_trap_to_debugger)
1624	mrs		x10, DAIF
1625	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)
1626
1627	adrp		x12, EXT(pmap_ppl_locked_down)@page
1628	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
1629	cbz		w12, Lnot_in_ppl_dispatch
1630
1631	LOAD_PMAP_CPU_DATA	x11, x12, x13
1632
1633	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
1634	cmp		w12, #PPL_STATE_DISPATCH
1635	b.ne		Lnot_in_ppl_dispatch
1636
1637	/* Indicate (for the PPL->kernel transition) that we are panicking. */
1638	mov		x15, #PPL_EXIT_PANIC_CALL
1639
1640	/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
1641	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
1642	mov		sp, x12
1643
1644	mrs		x10, DAIF
1645	mov		w13, #PPL_STATE_PANIC
1646	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
1647
1648	/* Now we are ready to exit the PPL. */
1649	b		ppl_return_to_kernel_mode
1650Lnot_in_ppl_dispatch:
1651	msr		DAIF, x10
1652	ret
1653
1654	.data
1655Lppl_bad_call_panic_str:
1656	.asciz "ppl_dispatch: failed due to bad arguments/state"
1657#else /* XNU_MONITOR */
1658	.text
1659	.align 2
1660	.globl EXT(ml_panic_trap_to_debugger)
1661LEXT(ml_panic_trap_to_debugger)
1662	ret
1663#endif /* XNU_MONITOR */
1664
1665
1666/* ARM64_TODO Is globals_asm.h needed? */
1667//#include	"globals_asm.h"
1668
1669/* vim: set ts=4: */
1670