xref: /xnu-11215.81.4/osfmk/arm64/sptm/start_sptm.s (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1/**
2 * Copyright (c) 2022-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/exception_asm.h>
31#include <arm64/dwarf_unwind.h>
32#include <sptm/sptm_xnu.h>
33
34/**
35 * XNU entry point.
36 *
37 * The SPTM jumps here as part of both the cold and warm boot paths, for all
38 * CPUs. This entry point is also jumped to when the SPTM wants to trigger the
39 * XNU panic path.
40 *
41 * @param x0 Sentinel value describing why we jumped to this entry point:
42 *           SPTM_CPU_BOOT_COLD: Cold boot path.
43 *           SPTM_CPU_BOOT_WARM: Warm boot path.
44 *           SPTM_CPU_BOOT_SECONDARY: Secondary CPU boot path.
45 *           SPTM_CPU_BOOT_HIB: Hibernation exit path.
46 *           SPTM_CPU_PANIC: A panic condition was triggered in SPTM/TXM/cL4.
47 *
48 * The possible values of the rest of the argument registers are dependent on
49 * the sentinel value in x0.
50 *
51 * If x0 is SPTM_CPU_PANIC:
52 * @param x1 A pointer to the panic string.
53 * @param x2 A boolean defining whether XNU should attempt a local coredump or
54 *           not. If this is false, then the SPTM is in a state that trying to
55 *           generate a coredump will most likely trigger more panics (seeing as
56 *           the NVMe driver will need to call into the SPTM).
57 *
58 * Otherwise:
59 * @param x1 iBoot boot arguments.
60 * @param x2 SPTM boot arguments.
61 *
62 * @note The SPTM initially only maps the __TEXT_BOOT_EXEC segment
63 *       as RX, and does not remap the rest of the code as RX until
64 *       after the XNU fixups phase has been completed. Since this
65 *       is the entry point, it must be made executable from the
66 *       very start.
67 */
68	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
69	.align 14
70	.globl EXT(_start)
71LEXT(_start)
72	/**
73	 * When SPTM/TXM/cL4 panics, it jumps to the XNU entry point with a special
74	 * sentinel value placed into x0. Let's check for that and jump to the
75	 * standard panic function if so.
76	 */
	/* Stage the sentinel in x8 (caller-clobbered scratch); SPTM_CPU_PANIC may
	 * not be encodable as a cmp immediate, so compare register-to-register. */
77	mov		x8, #SPTM_CPU_PANIC
78	cmp		x0, x8
79	b.ne	start_boot_path

80
81	/**
82	 * Set global variable to tell panic path whether the SPTM supports
83	 * generating a local coredump. This can be disabled based on the SPTM's
84	 * build flags or determined at runtime.
85	 */
	/* w2 is the boolean argument described in the header above; a byte store
	 * is sufficient for the flag. */
86	adrp	x8, EXT(sptm_supports_local_coredump)@page
87	strb	w2, [x8, EXT(sptm_supports_local_coredump)@pageoff]
88
89	/* The panic string is in x1, but the panic function expects it as the first argument. */
90	mov		x0, x1
91	b		EXT(panic_from_sptm)
92
93	/* Should never reach here as we should have panicked. */
94	b		.
95
96start_boot_path:
	/*
	 * Common (non-panic) boot path for cold, warm, secondary, and
	 * hibernation-exit entries. x0 still holds the boot sentinel.
	 */
97	/* Clear thread pointers */
98	msr		TPIDR_EL1, xzr
99	msr		TPIDRRO_EL0, xzr
100
101#if HAS_CLUSTER && !NO_CPU_OVRD
102	/* Unmask external IRQs if we're restarting from non-retention WFI */
103	mrs		x9, CPU_OVRD
104	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
105	msr		CPU_OVRD, x9
106#endif
107
108	/* Jump to the correct start routine */
	/* Compare via a register: the sentinel may not fit a cmp immediate. */
109	mov		x20, #SPTM_CPU_BOOT_COLD
110	cmp		x0, x20
111	b.eq	start_cold
112	/*
113	 * Note that on hibernation resume, we take the warm boot path, as SPTM already
114	 * initialized VBAR_EL1 in sptm_init_cpu_registers, called from sptm_resume_cpu.
115	 */
	/* All non-cold sentinels (warm, secondary, hibernation) fall through here. */
116	b		start_warm
117
118/**
119 * Cold boot path.
120 *
121 * On entry (see _start header): x1 = iBoot boot arguments, x2 = SPTM boot
122 * arguments. Installs the exception vector, sets up the boot CPU's exception
123 * and IRQ stacks, performs image fixups, notifies the SPTM that fixups are
124 * complete, and tail-jumps to the C entry point.
125 */
126start_cold:
127	/* Install the exception vector */
128	adrp	x9, EXT(ExceptionVectorsBase)@page
129	add		x9, x9, EXT(ExceptionVectorsBase)@pageoff
130	msr		VBAR_EL1, x9
131	isb
132
133	/* Set up exception stack (SPSel=1 selects SP_EL1 as the active SP) */
134	msr		SPSel, #1
135	adrp	x10, EXT(excepstack_top)@page
136	add		x10, x10, EXT(excepstack_top)@pageoff
137	mov		sp, x10
138
139	/* Set up IRQ stack (SPSel=0 selects SP_EL0 as the active SP) */
140	msr		SPSel, #0
141	adrp	x10, EXT(intstack_top)@page
142	add		x10, x10, EXT(intstack_top)@pageoff
143	mov		sp, x10
144
145	/* Save off boot arguments */
	/* x26/x27 are callee-saved (AAPCS64), so they survive the bl below. */
146	mov x26, x1
147	mov x27, x2
148
149	/* Rebase and sign absolute addresses */
150	bl EXT(arm_slide_rebase_and_sign_image)
151
152	/**
153	 * Call into the SPTM for the first time. This function traps to GL2 to
154	 * signal the SPTM that the fixups phase has been completed.
155	 */
156	SPTM_LOAD_DISPATCH_ID SPTM_DOMAIN, SPTM_DISPATCH_TABLE_XNU_BOOTSTRAP, SPTM_FUNCTIONID_FIXUPS_COMPLETE
157	SPTM_DOMAIN_ENTER	x16
158
159	/**
160	 * At this point, the SPTM has retyped the RX region to SPTM_XNU_CODE.
161	 */
162
163	/* Jump to handler */
	/* Restore the saved boot arguments as the C entry point's arguments. */
164	mov		x0, x26
165	mov		x1, x27
166#if KASAN
167	b		EXT(arm_init_kasan)
168#else
169	b		EXT(arm_init)
170#endif /* KASAN */
166
167/**
168 * Warm boot / secondary CPU boot path (also taken on hibernation resume;
169 * see the note at the branch in start_boot_path).
170 */
171start_warm:
172	/* Save the hibernation arguments pointer in x20 */
	/* x20 is callee-saved, so the pointer survives the call below. */
173	mov		x20, x3
174
175#if HAS_BP_RET
176	bl		EXT(set_bp_ret)
177#endif
178
179	/**
180	 * Search for the correct CPU Data entry.
181	 * This works by iterating over the per-CPU data array,
182	 * searching for the entry whose physical CPU ID matches
183	 * the physical ID extracted from this CPU's MPIDR_EL1.
184	 *
185	 * x1 is initially set to the first entry in the per-CPU data
186	 * array.
187	 */
188
189	/* Get CPU physical ID */
190	mrs		x15, MPIDR_EL1
191#if HAS_CLUSTER
192	and		x0, x15, #(MPIDR_AFF0_MASK | MPIDR_AFF1_MASK)
193#else
194	and		x0, x15, #(MPIDR_AFF0_MASK)
195#endif
196
197	adrp	x1, EXT(CpuDataEntries)@page
198	add		x1, x1, EXT(CpuDataEntries)@pageoff
199
	/* x19 = per-entry stride, x4 = entry count. MOV64 is a project macro
	 * (presumably materializing a full 64-bit constant). */
200	MOV64	x19, CPU_DATA_SIZE
201	mov		x4, MAX_CPUS
202
203	/* Set x3 to the end of the per-CPU data array (exclusive) */
204	mul		x3, x19, x4
205	add		x3, x1, x3
206
207	/**
208	 * Use x1 as the cursor, and stop when we have either found
209	 * an entry, or when we have finished traversing the array.
210	 */
211check_cpu_data_entry:
212	/* Load physical CPU data address */
	/* Spin in place if the entry's vaddr is still NULL — presumably the boot
	 * CPU has not finished populating it yet; TODO confirm intent. */
213	ldr		x21, [x1, CPU_DATA_VADDR]
214	cbz		x21, .
215
216	/* Attempt to match the physical CPU ID */
	/* The 32-bit load zero-extends w2 into x2, and the affinity mask applied
	 * to x0 above keeps it within 32 bits, so a 64-bit compare is safe. */
217	ldr		w2, [x21, CPU_PHYS_ID]
218	cmp		x0, x2
219	b.eq	found_cpu_data_entry
220next_cpu_data_entry:
221	/* Move onto the next element in the array, if it exists */
222	add		x1, x1, x19
223	cmp		x1, x3
224	b.eq	cpu_data_entry_not_found
225	b		check_cpu_data_entry
226
227/* An entry was found; x21 = this CPU's cpu_data pointer */
228found_cpu_data_entry:
229	/* Set up exception stack (SPSel=1 selects SP_EL1 as the active SP) */
230	msr		SPSel, #1
231	ldr		x10, [x21, CPU_EXCEPSTACK_TOP]
232	mov		sp, x10
233
234	/* Set up IRQ stack (SPSel=0 selects SP_EL0 as the active SP) */
235	msr		SPSel, #0
236	ldr		x10, [x21, CPU_INTSTACK_TOP]
237	mov		sp, x10
238
239	/* Set up input parameters to reset handler: x0 = cpu_data, x1 = hib args */
240	mov		x0, x21
241	mov		x1, x20
242
243	/* Obtain reset handler */
244	ldr		x2, [x21, CPU_RESET_HANDLER]
245	cbz		x2, Lskip_cpu_reset_handler
246
	/*
	 * Validate that our handler is one of the two expected ones. Note that we
	 * branch to the known-good symbols directly rather than jumping through
	 * the loaded x2 pointer.
	 */
247	adrp	x3, EXT(arm_init_cpu)@page
248	add		x3, x3, EXT(arm_init_cpu)@pageoff
249	cmp		x2, x3
250	beq		1f
251
252	adrp	x3, EXT(arm_init_idle_cpu)@page
253	add		x3, x3, EXT(arm_init_idle_cpu)@pageoff
254	cmp		x2, x3
255	beq		2f
256
257	/* No valid handler was found */
258	b		Lskip_cpu_reset_handler
259
2601:
261	b		EXT(arm_init_cpu)
2622:
263	b		EXT(arm_init_idle_cpu)
264
265/**
266 * A valid reset handler was not found. This points to a bug in XNU.
267 * It is unsafe to continue, so just spin here.
268 */
269Lskip_cpu_reset_handler:
	/* Distinctive sentinel left in x0 — presumably so the failure mode is
	 * identifiable from a debugger while the CPU spins. */
270	MOV64	x0, 0xDEADB001
271	b		.
272
273/**
274 * An entry was not found. This points to a bug in XNU.
275 * It is unsafe to continue, so just spin here.
276 */
277cpu_data_entry_not_found:
	/* Distinctive sentinel (distinct from 0xDEADB001 above) left in x0 —
	 * presumably for debugger triage while the CPU spins. */
278	MOV64	x0, 0xDEADB002
279	b		.
280
281/**
282 * This is a stub function that calls the XNU panic entry point.
283 * We push this frame onto the stack so that the LLDB unwinder
284 * understands that the stack pointer has been changed when
285 * unwinding a stack that has panicked in SPTM or TXM, for example.
286 *
287 * The SPTM_UNWIND_DIRECTIVES tell LLDB that the panic caller FP,
288 * LR, SP, and PC are in a data structure pointed to by X21, which
289 * is set by SPTM dispatch logic before handing control back to XNU
290 * during a panic.
291 *
292 * On entry: x0 = panic string pointer (moved from x1 by _start).
293 */
294	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
295	.align 14
296	.globl EXT(panic_from_sptm)
297LEXT(panic_from_sptm)
298TRAP_UNWIND_PROLOGUE
299SPTM_UNWIND_DIRECTIVES
300	PUSH_FRAME
301	bl 		EXT(panic)
	/* panic() is not expected to return; spin defensively if it ever does. */
302	b .
303UNWIND_EPILOGUE
302