xref: /xnu-11215.41.3/osfmk/arm64/sptm/start_sptm.s (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
/**
 * Copyright (c) 2022-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/dwarf_unwind.h>
#include <sptm/sptm_xnu.h>
33
/**
 * XNU entry point.
 *
 * The SPTM jumps here as part of both the cold and warm boot paths, for all
 * CPUs. This entry point is also jumped to when the SPTM wants to trigger the
 * XNU panic path.
 *
 * @param x0 Sentinel value describing why we jumped to this entry point:
 *           SPTM_CPU_BOOT_COLD: Cold boot path.
 *           SPTM_CPU_BOOT_WARM: Warm boot path.
 *           SPTM_CPU_BOOT_SECONDARY: Secondary CPU boot path.
 *           SPTM_CPU_BOOT_HIB: Hibernation exit path.
 *           SPTM_CPU_PANIC: A panic condition was triggered in SPTM/TXM/cL4.
 *
 * The possible values of the rest of the argument registers are dependent on
 * the sentinel value in x0.
 *
 * If x0 is SPTM_CPU_PANIC:
 * @param x1 A pointer to the panic string.
 * @param x2 A boolean defining whether XNU should attempt a local coredump or
 *           not. If this is false, then the SPTM is in a state where trying to
 *           generate a coredump will most likely trigger more panics (seeing as
 *           the NVMe driver will need to call into the SPTM).
 *
 * Otherwise:
 * @param x1 iBoot boot arguments.
 * @param x2 SPTM boot arguments.
 *
 * @note The SPTM initially only maps the __TEXT_BOOT_EXEC segment
 *       as RX, and does not remap the rest of the code as RX until
 *       after the XNU fixups phase has been completed. Since this
 *       is the entry point, it must be made executable from the
 *       very start.
 */
	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
	.align 14
	.globl EXT(_start)
LEXT(_start)
	/**
	 * When SPTM/TXM/cL4 panics, it jumps to the XNU entry point with a special
	 * sentinel value placed into x0. Check for that sentinel first and divert
	 * to the panic stub if it matches; otherwise fall through to the normal
	 * boot dispatch in start_boot_path.
	 */
	mov		x8, #SPTM_CPU_PANIC
	cmp		x0, x8
	b.ne	start_boot_path

	/**
	 * Panic path (x1 = panic string, x2 = coredump-allowed boolean; see the
	 * entry-point documentation above).
	 *
	 * Set global variable to tell panic path whether the SPTM supports
	 * generating a local coredump. This can be disabled based on the SPTM's
	 * build flags or determined at runtime.
	 */
	adrp	x8, EXT(sptm_supports_local_coredump)@page
	strb	w2, [x8, EXT(sptm_supports_local_coredump)@pageoff]

	/* The panic string is in x1, but the panic function expects it as the first argument. */
	mov		x0, x1
	b		EXT(panic_from_sptm)

	/* Should never reach here as we should have panicked. */
	b		.
95
start_boot_path:
	/* Clear thread pointers */
	msr		TPIDR_EL1, xzr
	msr		TPIDRRO_EL0, xzr

#if HAS_CLUSTER && !NO_CPU_OVRD
	/* Unmask external IRQs if we're restarting from non-retention WFI */
	mrs		x9, CPU_OVRD
	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr		CPU_OVRD, x9
#endif

	/*
	 * Jump to the correct start routine. Only SPTM_CPU_BOOT_COLD takes the
	 * cold path; warm, secondary, and hibernation-exit boots all share
	 * start_warm.
	 */
	mov		x20, #SPTM_CPU_BOOT_COLD
	cmp		x0, x20
	b.eq	start_cold
	/*
	 * Note that on hibernation resume, we take the warm boot path, as SPTM already
	 * initialized VBAR_EL1 in sptm_init_cpu_registers, called from sptm_resume_cpu.
	 */
	b		start_warm
117
/**
 * Cold boot path.
 *
 * Taken when x0 == SPTM_CPU_BOOT_COLD. On entry, x1 holds the iBoot boot
 * arguments and x2 holds the SPTM boot arguments (see the entry-point
 * documentation above).
 */
start_cold:
	/* Install the exception vector */
	adrp	x9, EXT(ExceptionVectorsBase)@page
	add		x9, x9, EXT(ExceptionVectorsBase)@pageoff
	msr		VBAR_EL1, x9
	isb

	/* Set up exception stack (the SP selected while SPSel == 1) */
	msr		SPSel, #1
	adrp	x10, EXT(excepstack_top)@page
	add		x10, x10, EXT(excepstack_top)@pageoff
	mov		sp, x10

	/* Set up IRQ stack (SP_EL0, selected while SPSel == 0) */
	msr		SPSel, #0
	adrp	x10, EXT(intstack_top)@page
	add		x10, x10, EXT(intstack_top)@pageoff
	mov		sp, x10

	/*
	 * Save off boot arguments in callee-saved registers (AAPCS64: x19-x28)
	 * so they survive the calls below: x26 = iBoot args, x27 = SPTM args.
	 */
	mov x26, x1
	mov x27, x2

	/* Rebase and sign absolute addresses */
	bl EXT(arm_slide_rebase_and_sign_image)

	/**
	 * Call into the SPTM for the first time. This function traps to GL2 to
	 * signal the SPTM that the fixups phase has been completed.
	 */
	SPTM_LOAD_DISPATCH_ID SPTM_DOMAIN, SPTM_DISPATCH_TABLE_XNU_BOOTSTRAP, SPTM_FUNCTIONID_FIXUPS_COMPLETE
	SPTM_DOMAIN_ENTER	x16

	/**
	 * At this point, the SPTM has retyped the RX region to SPTM_XNU_CODE.
	 */

	/* Jump to handler, restoring the saved boot arguments as its arguments */
	mov		x0, x26
	mov		x1, x27
	b		EXT(arm_init)
162
/**
 * Warm boot path.
 *
 * Despite the historical name of this path, the dispatch in start_boot_path
 * sends every non-cold boot here: warm boots, secondary CPU boots, and
 * hibernation exit.
 */
start_warm:
	/* Save the hibernation arguments pointer (x3) in callee-saved x20 */
	mov		x20, x3

#if HAS_BP_RET
	bl		EXT(set_bp_ret)
#endif

	/**
	 * Search for the correct CPU Data entry.
	 * This works by iterating over the per-CPU data array,
	 * searching for the entry whose physical CPU ID matches
	 * the physical ID extracted from this CPU's MPIDR_EL1.
	 *
	 * x1 is initially set to the first entry in the per-CPU data
	 * array.
	 */

	/* Get CPU physical ID from the MPIDR_EL1 affinity fields */
	mrs		x15, MPIDR_EL1
#if HAS_CLUSTER
	/* Cluster systems identify a CPU by both Aff1 (cluster) and Aff0 (core) */
	and		x0, x15, #(MPIDR_AFF0_MASK | MPIDR_AFF1_MASK)
#else
	and		x0, x15, #(MPIDR_AFF0_MASK)
#endif

	adrp	x1, EXT(CpuDataEntries)@page
	add		x1, x1, EXT(CpuDataEntries)@pageoff

	MOV64	x19, CPU_DATA_SIZE
	mov		x4, MAX_CPUS

	/* Set x3 to the end of the per-CPU data array (exclusive) */
	mul		x3, x19, x4
	add		x3, x1, x3

	/**
	 * Use x1 as the cursor, and stop when we have either found
	 * an entry, or when we have finished traversing the array.
	 */
check_cpu_data_entry:
	/* Load physical CPU data address; park here forever if this slot was never populated */
	ldr		x21, [x1, CPU_DATA_VADDR]
	cbz		x21, .

	/* Attempt to match the physical CPU ID (32-bit load zero-extends into x2) */
	ldr		w2, [x21, CPU_PHYS_ID]
	cmp		x0, x2
	b.eq	found_cpu_data_entry
next_cpu_data_entry:
	/* Move onto the next element in the array, if it exists */
	add		x1, x1, x19
	cmp		x1, x3
	b.eq	cpu_data_entry_not_found
	b		check_cpu_data_entry
221
/* An entry was found; x21 holds this CPU's cpu_data pointer */
found_cpu_data_entry:
	/* Set up exception stack (the SP selected while SPSel == 1) */
	msr		SPSel, #1
	ldr		x10, [x21, CPU_EXCEPSTACK_TOP]
	mov		sp, x10

	/* Set up IRQ stack (SP_EL0, selected while SPSel == 0) */
	msr		SPSel, #0
	ldr		x10, [x21, CPU_INTSTACK_TOP]
	mov		sp, x10

	/* Set up input parameters to reset handler: x0 = cpu_data, x1 = hibernation args */
	mov		x0, x21
	mov		x1, x20

	/* Obtain reset handler */
	ldr		x2, [x21, CPU_RESET_HANDLER]
	cbz		x2, Lskip_cpu_reset_handler

	/*
	 * Validate that our handler is one of the two expected ones. Control is
	 * transferred via direct branches to the well-known labels below — never
	 * through the loaded pointer itself — so a corrupted CPU_RESET_HANDLER
	 * value can never redirect execution to an arbitrary address.
	 */
	adrp	x3, EXT(arm_init_cpu)@page
	add		x3, x3, EXT(arm_init_cpu)@pageoff
	cmp		x2, x3
	beq		1f

	adrp	x3, EXT(arm_init_idle_cpu)@page
	add		x3, x3, EXT(arm_init_idle_cpu)@pageoff
	cmp		x2, x3
	beq		2f

	/* No valid handler was found */
	b		Lskip_cpu_reset_handler

1:	/* Handler matched arm_init_cpu */
	b		EXT(arm_init_cpu)
2:	/* Handler matched arm_init_idle_cpu */
	b		EXT(arm_init_idle_cpu)
260
/**
 * A valid reset handler was not found. This points to a bug in XNU.
 * It is unsafe to continue, so park the CPU here with a recognizable
 * sentinel value (0xDEADB001) left in x0 for debugging.
 */
Lskip_cpu_reset_handler:
	MOV64	x0, 0xDEADB001
	b		.
268
/**
 * An entry was not found. This points to a bug in XNU.
 * It is unsafe to continue, so park the CPU here with a recognizable
 * sentinel value (0xDEADB002) left in x0 for debugging.
 */
cpu_data_entry_not_found:
	MOV64	x0, 0xDEADB002
	b		.
276
/**
 * This is a stub function that calls the XNU panic entry point.
 * We push this frame onto the stack so that the LLDB unwinder
 * understands that the stack pointer has been changed when
 * unwinding a stack that has panicked in SPTM or TXM, for example.
 *
 * The SPTM_UNWIND_DIRECTIVES tell LLDB that the panic caller FP,
 * LR, SP, and PC are in a data structure pointed to by X21, which
 * is set by SPTM dispatch logic before handing control back to XNU
 * during a panic.
 *
 * On entry, x0 is expected to hold the panic-string pointer (the _start
 * panic path moves it there), which is passed straight through as the
 * first argument to panic() — presumably its format/message string;
 * confirm against panic()'s declaration.
 */
	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
	.align 14
	.globl EXT(panic_from_sptm)
LEXT(panic_from_sptm)
TRAP_UNWIND_PROLOGUE
SPTM_UNWIND_DIRECTIVES
	PUSH_FRAME
	bl 		EXT(panic)
	/* panic() must not return; spin defensively if it somehow does */
	b .
UNWIND_EPILOGUE
298