xref: /xnu-10063.141.1/osfmk/arm64/sptm/start_sptm.s (revision d8b80295118ef25ac3a784134bcf95cd8e88109f)
/**
 * Copyright (c) 2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/dwarf_unwind.h>
#include <sptm/sptm_xnu.h>

/**
 * XNU entry point.
 *
 * The SPTM jumps here as part of both the cold and warm boot paths, for all
 * CPUs. This entry point is also jumped to when the SPTM wants to trigger the
 * XNU panic path.
 *
 * @param x0 Sentinel value describing why we jumped to this entry point:
 *           SPTM_CPU_BOOT_COLD: Cold boot path.
 *           SPTM_CPU_BOOT_WARM: Warm boot path.
 *           SPTM_CPU_BOOT_SECONDARY: Secondary CPU boot path.
 *           SPTM_CPU_PANIC: A panic condition was triggered in SPTM/TXM/cL4.
 *
 * The possible values of the rest of the argument registers are dependent on
 * the sentinel value in x0.
 *
 * If x0 is SPTM_CPU_PANIC:
 * @param x1 A pointer to the panic string.
 * @param x2 A boolean defining whether XNU should attempt a local coredump or
 *           not. If this is false, then the SPTM is in a state where trying to
 *           generate a coredump will most likely trigger more panics (seeing as
 *           the NVMe driver will need to call into the SPTM).
 *
 * Otherwise:
 * @param x1 iBoot boot arguments.
 * @param x2 SPTM boot arguments.
 *
 * @note The SPTM initially only maps the __TEXT_BOOT_EXEC segment
 *       as RX, and does not remap the rest of the code as RX until
 *       after the XNU fixups phase has been completed. Since this
 *       is the entry point, it must be made executable from the
 *       very start.
 */
	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
	.align 14
	.globl EXT(_start)
LEXT(_start)
	/**
	 * When SPTM/TXM/cL4 panics, it jumps to the XNU entry point with a special
	 * sentinel value placed into x0. Let's check for that and jump to the
	 * standard panic function if so.
	 */
	mov		x8, #SPTM_CPU_PANIC
	cmp		x0, x8
	b.ne	start_boot_path

	/**
	 * Set global variable to tell the panic path whether the SPTM supports
	 * generating a local coredump. This can be disabled based on the SPTM's
	 * build flags or determined at runtime.
	 */
	adrp	x8, EXT(sptm_supports_local_coredump)@page
	strb	w2, [x8, EXT(sptm_supports_local_coredump)@pageoff]

	/* The panic string is in x1, but the panic function expects it as the first argument. */
	mov		x0, x1
	b		EXT(panic_from_sptm)

	/* Should never reach here as we should have panicked. */
	b		.
94
/**
 * Common boot path, reached for every non-panic sentinel in x0.
 */
start_boot_path:
	/* Clear thread pointers */
	msr		TPIDR_EL1, xzr
	msr		TPIDRRO_EL0, xzr

#if HAS_CLUSTER && !NO_CPU_OVRD
	/* Unmask external IRQs if we're restarting from non-retention WFI */
	mrs		x9, CPU_OVRD
	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr		CPU_OVRD, x9
#endif

	/**
	 * Jump to the correct start routine. Only a cold boot takes start_cold;
	 * both SPTM_CPU_BOOT_WARM and SPTM_CPU_BOOT_SECONDARY fall through to
	 * start_warm.
	 */
	mov		x20, #SPTM_CPU_BOOT_COLD
	cmp		x0, x20
	b.eq	start_cold
	b		start_warm
112
/**
 * Cold boot path.
 *
 * Installs the exception vectors and boot stacks, performs the image fixups
 * handshake with the SPTM, and then enters C code via arm_init().
 */
start_cold:
	/* Install the exception vector */
	adrp	x9, EXT(ExceptionVectorsBase)@page
	add		x9, x9, EXT(ExceptionVectorsBase)@pageoff
	msr		VBAR_EL1, x9
	isb

	/* Set up exception stack */
	msr		SPSel, #1
	adrp	x10, EXT(excepstack_top)@page
	add		x10, x10, EXT(excepstack_top)@pageoff
	mov		sp, x10

	/* Set up IRQ stack */
	msr		SPSel, #0
	adrp	x10, EXT(intstack_top)@page
	add		x10, x10, EXT(intstack_top)@pageoff
	mov		sp, x10

	/* Save off boot arguments (x19-x28 are callee-saved, so they survive the calls below) */
	mov		x26, x1
	mov		x27, x2

	/* Rebase and sign absolute addresses */
	bl		EXT(arm_slide_rebase_and_sign_image)

	/**
	 * Call into the SPTM for the first time. This function traps to GL2 to
	 * signal the SPTM that the fixups phase has been completed.
	 */
	SPTM_LOAD_DISPATCH_ID SPTM_DOMAIN, SPTM_DISPATCH_TABLE_XNU_BOOTSTRAP, SPTM_FUNCTIONID_FIXUPS_COMPLETE
	SPTM_DOMAIN_ENTER	x16

	/**
	 * At this point, the SPTM has retyped the RX region to SPTM_XNU_CODE.
	 */

	/* Jump to handler; restore the saved iBoot/SPTM boot arguments as its arguments */
	mov		x0, x26
	mov		x1, x27
	b		EXT(arm_init)
157
/**
 * Warm boot and secondary CPU boot path.
 *
 * Reached for both SPTM_CPU_BOOT_WARM and SPTM_CPU_BOOT_SECONDARY (see
 * start_boot_path). Locates this CPU's per-CPU data entry before jumping
 * to the registered reset handler.
 */
start_warm:
#if HAS_BP_RET
	/* Apply the branch-predictor retention configuration (see set_bp_ret) */
	bl		EXT(set_bp_ret)
#endif

	/**
	 * Search for the correct CPU Data entry.
	 * This works by iterating over the per-CPU data array,
	 * searching for the entry whose physical CPU ID matches
	 * the physical ID extracted from this CPU's MPIDR_EL1.
	 *
	 * x1 is initially set to the first entry in the per-CPU data
	 * array.
	 */

	/* Get CPU physical ID (affinity fields of MPIDR_EL1) */
	mrs		x15, MPIDR_EL1
#if HAS_CLUSTER
	and		x0, x15, #(MPIDR_AFF0_MASK | MPIDR_AFF1_MASK)
#else
	and		x0, x15, #(MPIDR_AFF0_MASK)
#endif

	adrp	x1, EXT(CpuDataEntries)@page
	add		x1, x1, EXT(CpuDataEntries)@pageoff

	MOV64	x19, CPU_DATA_SIZE
	mov		x4, MAX_CPUS

	/* Set x3 to the end of the per-CPU data array (exclusive) */
	mul		x3, x19, x4
	add		x3, x1, x3

	/**
	 * Use x1 as the cursor, and stop when we have either found
	 * an entry, or when we have finished traversing the array.
	 */
check_cpu_data_entry:
	/* Load physical CPU data address; spin in place if this entry has no vaddr yet */
	ldr		x21, [x1, CPU_DATA_VADDR]
	cbz		x21, .

	/* Attempt to match the physical CPU ID */
	ldr		w2, [x21, CPU_PHYS_ID]
	cmp		x0, x2
	b.eq	found_cpu_data_entry
next_cpu_data_entry:
	/* Move onto the next element in the array, if it exists */
	add		x1, x1, x19
	cmp		x1, x3
	b.eq	cpu_data_entry_not_found
	b		check_cpu_data_entry
213
/**
 * An entry was found; x21 holds this CPU's cpu_data pointer.
 * Install the per-CPU stacks and dispatch to the registered reset handler.
 */
found_cpu_data_entry:
	/* Set up exception stack */
	msr		SPSel, #1
	ldr		x10, [x21, CPU_EXCEPSTACK_TOP]
	mov		sp, x10

	/* Set up IRQ stack */
	msr		SPSel, #0
	ldr		x10, [x21, CPU_INTSTACK_TOP]
	mov		sp, x10

	/* Set up input parameter to reset handler (the cpu_data pointer) */
	mov		x0, x21

	/* Obtain reset handler */
	ldr		x1, [x21, CPU_RESET_HANDLER]
	cbz		x1, Lskip_cpu_reset_handler

	/**
	 * Validate that our handler is one of the two expected ones. Jumping
	 * through a direct branch (rather than br x1) avoids transferring control
	 * to an arbitrary, possibly-corrupted pointer.
	 */
	adrp	x2, EXT(arm_init_cpu)@page
	add		x2, x2, EXT(arm_init_cpu)@pageoff
	cmp		x1, x2
	b.eq	1f

	adrp	x2, EXT(arm_init_idle_cpu)@page
	add		x2, x2, EXT(arm_init_idle_cpu)@pageoff
	cmp		x1, x2
	b.eq	2f

	/* No valid handler was found */
	b		Lskip_cpu_reset_handler

1:
	b		EXT(arm_init_cpu)
2:
	b		EXT(arm_init_idle_cpu)
251
/**
 * A valid reset handler was not found. This points to a bug in XNU.
 * It is unsafe to continue, so just spin here.
 *
 * 0xDEADB001 is left in x0 as a breadcrumb for debuggers.
 */
Lskip_cpu_reset_handler:
	MOV64	x0, 0xDEADB001
	b		.

/**
 * An entry was not found. This points to a bug in XNU.
 * It is unsafe to continue, so just spin here.
 *
 * 0xDEADB002 is left in x0 as a breadcrumb for debuggers.
 */
cpu_data_entry_not_found:
	MOV64	x0, 0xDEADB002
	b		.
267
/**
 * This is a stub function that calls the XNU panic entry point.
 * We push this frame onto the stack so that the LLDB unwinder
 * understands that the stack pointer has been changed when
 * unwinding a stack that has panicked in SPTM or TXM, for example.
 *
 * The SPTM_UNWIND_DIRECTIVES tell LLDB that the panic caller FP,
 * LR, SP, and PC are in a data structure pointed to by X21, which
 * is set by SPTM dispatch logic before handing control back to XNU
 * during a panic.
 *
 * @param x0 A pointer to the panic string (forwarded to panic()).
 */
	.section __TEXT_BOOT_EXEC, __bootcode, regular, pure_instructions
	.align 14
	.globl EXT(panic_from_sptm)
LEXT(panic_from_sptm)
UNWIND_PROLOGUE
SPTM_UNWIND_DIRECTIVES
	PUSH_FRAME
	bl		EXT(panic)
	/* panic() does not return; spin defensively */
	b		.
UNWIND_EPILOGUE
289