xref: /xnu-12377.81.4/osfmk/arm64/sptm/start_sptm.s (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1/**
2 * Copyright (c) 2022-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/exception_asm.h>
31#include <arm64/dwarf_unwind.h>
32#include <sptm/sptm_xnu.h>
33
34/**
35 * XNU entry point.
36 *
37 * The SPTM jumps here as part of both the cold and warm boot paths, for all
38 * CPUs. This entry point is also jumped to when the SPTM wants to trigger the
39 * XNU panic path.
40 *
41 * @param x0 Sentinel value describing why we jumped to this entry point:
42 *           SPTM_CPU_BOOT_COLD: Cold boot path.
43 *           SPTM_CPU_BOOT_WARM: Warm boot path.
44 *           SPTM_CPU_BOOT_SECONDARY: Secondary CPU boot path.
45 *           SPTM_CPU_BOOT_HIB: Hibernation exit path.
46 *           SPTM_CPU_PANIC: A panic condition was triggered in SPTM/TXM/cL4.
47 *
48 * The possible values of the rest of the argument registers are dependent on
49 * the sentinel value in x0.
50 *
51 * If x0 is SPTM_CPU_PANIC:
52 * @param x1 A pointer to the panic string.
53 * @param x2 A boolean defining whether XNU should attempt a local coredump or
54 *           not. If this is false, then the SPTM is in a state that trying to
55 *           generate a coredump will most likely trigger more panics (seeing as
56 *           the NVMe driver will need to call into the SPTM).
57 *
58 * Otherwise:
59 * @param x1 iBoot boot arguments.
60 * @param x2 SPTM boot arguments.
61 *
62 * @note The SPTM initially only maps the __TEXT_BOOT_EXEC segment
63 *       as RX, and does not remap the rest of the code as RX until
64 *       after the XNU fixups phase has been completed. Since this
65 *       is the entry point, it must be made executable from the
66 *       very start.
67 */
68	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
69	.align 14
70	.globl EXT(_start)
71LEXT(_start)
72	ARM64_PROLOG
73	/**
74	 * When SPTM/TXM/cL4 panics, it jumps to the XNU entry point with a special
75	 * sentinel value placed into x0. Let's check for that and jump to the
76	 * standard panic function if so.
77	 *
78	 * x8 is scratch here; the sentinel cannot be encoded as a cmp immediate,
79	 * so it is materialized into a register first.
80	 */
78	mov		x8, #SPTM_CPU_PANIC
79	cmp		x0, x8
80	b.ne	start_boot_path
81
82	/**
83	 * Set global variable to tell panic path whether the SPTM supports
84	 * generating a local coredump. This can be disabled based on the SPTM's
85	 * build flags or determined at runtime.
	 *
	 * On the panic path, w2 holds the coredump-supported boolean (see the
	 * entry-point documentation above); only the low byte is stored.
86	 */
87	adrp	x8, EXT(sptm_supports_local_coredump)@page
88	strb	w2, [x8, EXT(sptm_supports_local_coredump)@pageoff]
89
90	/* The panic string is in x1, but the panic function expects it as the first argument. */
91	mov		x0, x1
92	b		EXT(panic_from_sptm)
93
94	/* Should never reach here as we should have panicked. */
95	b		.
96
97start_boot_path:
98	/* Clear thread pointers (kernel thread register and EL0 read-only thread register) */
99	msr		TPIDR_EL1, xzr
100	msr		TPIDRRO_EL0, xzr
101
102#if HAS_CLUSTER && !NO_CPU_OVRD
103	/* Unmask external IRQs if we're restarting from non-retention WFI */
104	mrs		x9, CPU_OVRD
105	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
106	msr		CPU_OVRD, x9
107#endif
108
109	/*
	 * Jump to the correct start routine. Only SPTM_CPU_BOOT_COLD takes the
	 * cold path; every other boot sentinel (WARM, SECONDARY, HIB) falls
	 * through to start_warm below.
	 */
110	mov		x20, #SPTM_CPU_BOOT_COLD
111	cmp		x0, x20
112	b.eq	start_cold
113	/*
114	 * Note that on hibernation resume, we take the warm boot path, as SPTM already
115	 * initialized VBAR_EL1 in sptm_init_cpu_registers, called from sptm_resume_cpu.
116	 */
117	b		start_warm
118
119/**
120 * Cold boot path.
 *
 * On entry (from _start): x1 = iBoot boot arguments, x2 = SPTM boot arguments.
 * These are stashed in x26/x27 so that they survive the calls below
 * (NOTE(review): x26/x27 are callee-saved under AAPCS64, presumably why they
 * were chosen — confirm the called routines honor that).
121 */
122start_cold:
123	/* Set up exception stack (SPSel=1 selects SP_EL1) */
124	msr		SPSel, #1
125	adrp	x10, EXT(excepstack_top)@page
126	add		x10, x10, EXT(excepstack_top)@pageoff
127	mov		sp, x10
128
129	/* Set up IRQ stack (SPSel=0 selects SP_EL0) */
130	msr		SPSel, #0
131	adrp	x10, EXT(intstack_top)@page
132	add		x10, x10, EXT(intstack_top)@pageoff
133	mov		sp, x10
134
135	/* Save off boot arguments */
136	mov x26, x1
137	mov x27, x2
138
139	/* Rebase and sign absolute addresses */
140	bl EXT(arm_slide_rebase_and_sign_image)
141
	/* Pass the saved iBoot boot arguments as the first parameter */
142	mov		x0, x26
143	bl EXT(arm_static_if_init)
144
145	/**
146	 * Now setup final XNU exception vectors. This is the closest we can do this
147	 * in XNU because after sending SPTM SPTM_FUNCTIONID_FIXUPS_COMPLETE, VBAR
148	 * will be validated and locked.
	 *
	 * The isb ensures the VBAR_EL1 write is visible before any exception can
	 * be taken through the new vectors.
149	 */
150	adrp	x9, EXT(ExceptionVectorsBase)@page
151	add		x9, x9, EXT(ExceptionVectorsBase)@pageoff
152	msr		VBAR_EL1, x9
153	isb
154
155	/**
156	 * Call into the SPTM for the first time. This function traps to GL2 to
157	 * signal the SPTM that the fixups phase has been completed.
158	 */
159	SPTM_LOAD_DISPATCH_ID SPTM_DOMAIN, SPTM_DISPATCH_TABLE_XNU_BOOTSTRAP, SPTM_FUNCTIONID_FIXUPS_COMPLETE
160	SPTM_DOMAIN_ENTER	x16
161
162	/**
163	 * At this point, the SPTM has retyped the RX region to SPTM_XNU_CODE.
164	 */
165
166	/* Jump to handler: x0 = iBoot boot args, x1 = SPTM boot args (tail call, never returns) */
167	mov		x0, x26
168	mov		x1, x27
169#if KASAN
170	b		EXT(arm_init_kasan)
171#else
172	b		EXT(arm_init)
173#endif /* KASAN */
174
175/**
176 * Warm boot path (taken for warm restarts, secondary CPU bring-up, and
 * hibernation resume — see the dispatch in _start above; only cold boot
 * branches elsewhere).
177 */
178start_warm:
179	/* Save the hibernation arguments pointer in x20 (callee-saved, so it survives the bl below) */
180	mov		x20, x3
181
182#if HAS_BP_RET
183	bl		EXT(set_bp_ret)
184#endif
185
186	/**
187	 * Search for the correct CPU Data entry.
188	 * This works by iterating over the per-CPU data array,
189	 * searching for the entry whose physical CPU ID matches
190	 * the physical ID extracted from this CPU's MPIDR_EL1.
191	 *
192	 * x1 is initially set to the first entry in the per-CPU data
193	 * array.
	 *
	 * Register roles for the loop below:
	 *   x0  = this CPU's physical ID (from MPIDR_EL1 affinity bits)
	 *   x1  = cursor into CpuDataEntries
	 *   x3  = one-past-the-end of the array (exclusive bound)
	 *   x19 = size of one entry (stride)
	 *   x21 = virtual address of the candidate CPU data record
194	 */
195
196	/* Get CPU physical ID (Aff1 is included only when clusters exist) */
197	mrs		x15, MPIDR_EL1
198#if HAS_CLUSTER
199	and		x0, x15, #(MPIDR_AFF0_MASK | MPIDR_AFF1_MASK)
200#else
201	and		x0, x15, #(MPIDR_AFF0_MASK)
202#endif
203
204	adrp	x1, EXT(CpuDataEntries)@page
205	add		x1, x1, EXT(CpuDataEntries)@pageoff
206
207	MOV64	x19, CPU_DATA_SIZE
208	mov		x4, MAX_CPUS
209
210	/* Set x3 to the end of the per-CPU data array (exclusive) */
211	mul		x3, x19, x4
212	add		x3, x1, x3
213
214	/**
215	 * Use x1 as the cursor, and stop when we have either found
216	 * an entry, or when we have finished traversing the array.
217	 */
218check_cpu_data_entry:
219	/* Load physical CPU data address; a NULL vaddr is unexpected, so spin in place */
220	ldr		x21, [x1, CPU_DATA_VADDR]
221	cbz		x21, .
222
223	/* Attempt to match the physical CPU ID */
224	ldr		w2, [x21, CPU_PHYS_ID]
225	cmp		x0, x2
226	b.eq	found_cpu_data_entry
227next_cpu_data_entry:
228	/* Move onto the next element in the array, if it exists */
229	add		x1, x1, x19
230	cmp		x1, x3
231	b.eq	cpu_data_entry_not_found
232	b		check_cpu_data_entry
233
234/* An entry was found: x21 = this CPU's cpu data record */
235found_cpu_data_entry:
236	/* Set up exception stack (SPSel=1 selects SP_EL1) from the per-CPU record */
237	msr		SPSel, #1
238	ldr		x10, [x21, CPU_EXCEPSTACK_TOP]
239	mov		sp, x10
240
241	/* Set up IRQ stack (SPSel=0 selects SP_EL0) from the per-CPU record */
242	msr		SPSel, #0
243	ldr		x10, [x21, CPU_INTSTACK_TOP]
244	mov		sp, x10
245
246	/* Set up input parameters to reset handler: x0 = cpu data, x1 = hibernation args saved in x20 */
247	mov		x0, x21
248	mov		x1, x20
249
250	/* Obtain reset handler; a NULL handler is treated the same as an invalid one */
251	ldr		x2, [x21, CPU_RESET_HANDLER]
252	cbz		x2, Lskip_cpu_reset_handler
253
254	/*
	 * Validate that our handler is one of the two expected ones. Note that on
	 * a match we branch directly to the known symbol rather than indirectly
	 * through x2, so control never transfers through the loaded pointer.
	 */
255	adrp	x3, EXT(arm_init_cpu)@page
256	add		x3, x3, EXT(arm_init_cpu)@pageoff
257	cmp		x2, x3
258	beq		1f
259
260	adrp	x3, EXT(arm_init_idle_cpu)@page
261	add		x3, x3, EXT(arm_init_idle_cpu)@pageoff
262	cmp		x2, x3
263	beq		2f
264
265	/* No valid handler was found */
266	b		Lskip_cpu_reset_handler
267
2681:
269	b		EXT(arm_init_cpu)
2702:
271	b		EXT(arm_init_idle_cpu)
272
273/**
274 * A valid reset handler was not found. This points to a bug in XNU.
275 * It is unsafe to continue, so just spin here.
 *
 * The sentinel placed in x0 distinguishes this hang from the
 * entry-not-found hang below when inspecting the stuck CPU.
276 */
277Lskip_cpu_reset_handler:
278	MOV64	x0, 0xDEADB001
279	b		.
280
281/**
282 * An entry was not found. This points to a bug in XNU.
283 * It is unsafe to continue, so just spin here.
 *
 * The sentinel placed in x0 distinguishes this hang from the
 * invalid-reset-handler hang above when inspecting the stuck CPU.
284 */
285cpu_data_entry_not_found:
286	MOV64	x0, 0xDEADB002
287	b		.
288
289/**
290 * This is a stub function that calls the XNU panic entry point.
291 * We push this frame onto the stack so that the LLDB unwinder
292 * understands that the stack pointer has been changed when
293 * unwinding a stack that has panicked in SPTM or TXM, for example.
294 *
295 * The SPTM_UNWIND_DIRECTIVES tell LLDB that the panic caller FP,
296 * LR, SP, and PC are in a data structure pointed to by X21, which
297 * is set by SPTM dispatch logic before handing control back to XNU
298 * during a panic.
 *
 * On entry (from _start): x0 = pointer to the panic string, which is
 * forwarded unchanged as panic()'s first argument.
299 */
300	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
301	.align 14
302	.globl EXT(panic_from_sptm)
303LEXT(panic_from_sptm)
304TRAP_UNWIND_PROLOGUE
305SPTM_UNWIND_DIRECTIVES
306	ARM64_STACK_PROLOG
307	PUSH_FRAME
	/* panic() does not return; the trailing branch-to-self is a safety net */
308	bl 		EXT(panic)
309	b .
310UNWIND_EPILOGUE
311