/*
 * Copyright (c) 2007-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include "assym.s"
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include <arm64/tunables/tunables.s>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */

.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x1, lr
	bl		EXT(pinst_set_vbar)
	mov		lr, x1
#else
	msr		VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x0, x1
	mov		x1, lr
	bl		EXT(pinst_set_tcr)
	mov		lr, x1
#else
	msr		TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x1, lr
	bl		EXT(pinst_set_ttbr1)
	mov		lr, x1
#else
	msr		TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x1, lr

	// This may abort, do so on SP1
	bl		EXT(pinst_spsel_1)

	bl		EXT(pinst_set_sctlr)
	msr		SPSel, #0									// Back to SP0
	mov		lr, x1
#else
	msr		SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro
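
/*
 * Illustrative sketch of the pattern shared by the four macros above (not
 * part of the build): on KTRR-enabled hardware the write is routed through a
 * pinst_* helper so that the MSR is issued from protected text, clobbering
 * x1 to preserve the link register across the call.
 *
 *	if (kernel_integrity_ktrr)
 *		pinst_set_vbar(val);			// MSR issued from locked-down code
 *	else
 *		__builtin_arm_wsr64("VBAR_EL1", val);	// plain MSR
 */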

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
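/*
 * C-like sketch of the entry lookup below (illustrative only; field names
 * mirror the CPU_DATA_PADDR/CPU_PHYS_ID offsets from assym.s):
 *
 *	for (i = 0; i < MAX_CPUS; i++) {
 *		cpu_data_t *cpu = cpu_data_entries[i].cpu_data_paddr;
 *		if (cpu != NULL && cpu->cpu_phys_id == (mpidr & affinity_mask))
 *			goto found;
 *	}
 *	hang();	// no matching entry
 */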
	.text
	.align 12
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here.  SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
	b		EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align	9
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.

	.align	7
	.globl EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr		OSLAR_EL1, xzr
	msr		DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr		VBAR_EL1, x0
#endif


	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page			// Get address of the reset handler data
	add		x19, x19, EXT(ResetHandlerData)@pageoff
	mrs		x15, MPIDR_EL1						// Load MPIDR to get CPU number
#if HAS_CLUSTER
	and		x0, x15, #0xFFFF					// CPU number in Affinity0, cluster ID in Affinity1
#else
	and		x0, x15, #0xFF						// CPU number is in MPIDR Affinity Level 0
#endif
	ldr		x1, [x19, CPU_DATA_ENTRIES]			// Load start of data entries
	add		x3, x1, MAX_CPUS * 16				// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr		x21, [x1, CPU_DATA_PADDR]			// Load physical CPU data address
	cbz		x21, Lnext_cpu_data_entry
	ldr		w2, [x21, CPU_PHYS_ID]				// Load the cpu phys id
	cmp		x0, x2								// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry				// Branch if match
Lnext_cpu_data_entry:
	add		x1, x1, #16							// Increment to the next cpu data entry
	cmp		x1, x3
	b.eq	Lskip_cpu_reset_handler				// Not found
	b		Lcheck_cpu_data_entry				// loop
Lfound_cpu_data_entry:

	adrp	x20, EXT(const_boot_args)@page
	add		x20, x20, EXT(const_boot_args)@pageoff
	ldr		x0, [x21, CPU_RESET_HANDLER]		// Load CPU reset handler
	cbz		x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add		x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp		x0, x2
	beq		1f
	adrp	x2, EXT(start_cpu)@page
	add		x2, x2, EXT(start_cpu)@pageoff
	cmp		x0, x2
	bne		Lskip_cpu_reset_handler
1:

#if HAS_BP_RET
	bl		EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr		x13, [x21, CPU_ACTIVE_THREAD]
	msr		TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR) */

	blr		x0									// Call the CPU reset handler
Lskip_cpu_reset_handler:
	b		.									// Hang if the handler is NULL or returns

	.align 3
	.global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global	EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl  EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space  (rhdSize_NUM),0		// (filled with 0s)
	.text


/*
 * The __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point.  Reset vector = (__start & ~0xfff)
 */
	.align	3
	.globl EXT(_start)
LEXT(_start)
	b	EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.align 12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP0 */
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL1 SP1 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL0 64 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL0 32 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
	.align 2
	.globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add		lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b		start_cpu

	.align 2
	.globl EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add		lr, lr, EXT(arm_init_cpu)@pageoff
	b		start_cpu

	.align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	// This is done right away in the reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr		x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr		x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr		x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr		x26, [x20, BA_BOOT_FLAGS]			// Get the kernel boot flags


	// Set TPIDR_EL0 with cached CPU info
	ldr		x0, [x21, CPU_TPIDR_EL0]
	msr		TPIDR_EL0, x0

	// Set TPIDRRO_EL0 to 0
	msr		TPIDRRO_EL0, xzr


	// Set the exception stack pointer
	ldr		x0, [x21, CPU_EXCEPSTACK_TOP]


	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov		x1, lr
	bl		EXT(pinst_spsel_1)
	mov		lr, x1
#else
	msr		SPSel, #1
#endif
	mov		sp, x0

	// Set the interrupt stack pointer
	ldr		x0, [x21, CPU_INTSTACK_TOP]
	msr		SPSel, #0
	mov		sp, x0

	// Convert lr to KVA
	add		lr, lr, x22
	sub		lr, lr, x23
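	// (phys-to-virt here and throughout: KVA = PA + virt_base - phys_base)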

	b		common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
	and		$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr		$3, $3, #(ARM_TT_L1_SHIFT)			// Get index in L1 table for L2 table
	lsl		$3, $3, #(TTE_SHIFT)				// Convert index into pointer offset
	add		$3, $1, $3							// Get L1 entry pointer
	mov		$4, #(ARM_TTE_BOOT_TABLE)			// Get L1 table entry template
	and		$5, $2, #(ARM_TTE_TABLE_MASK)		// Get address bits of L2 table
	orr		$5, $4, $5							// Create table entry for L2 table
	str		$5, [$3]							// Write entry to L1 table
.endmacro
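
/*
 * Illustrative C equivalent of create_l1_table_entry (not assembled;
 * l1_table viewed as a uint64_t * since TTE_SHIFT selects 8-byte entries):
 *
 *	uint64_t idx = (va & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT;
 *	l1_table[idx] = ARM_TTE_BOOT_TABLE | (l2_table_pa & ARM_TTE_TABLE_MASK);
 */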

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 */
.macro create_l2_block_entries
	and		$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr		$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)	// Get index in L2 table for block entry
	lsl		$4, $4, #(TTE_SHIFT)				// Convert index into pointer offset
	add		$4, $2, $4							// Get L2 entry pointer
	mov		$5, #(ARM_TTE_BOOT_BLOCK)			// Get L2 block entry template
	and		$6, $1, #(ARM_TTE_BLOCK_L2_MASK)	// Get address bits of block mapping
	orr		$6, $5, $6							// Create block entry for the first mapping
	mov		$5, $3								// Copy number of entries into a scratch counter
	mov		$7, #(ARM_TT_L2_SIZE)				// Bytes covered by one L2 block entry
1:
	str		$6, [$4], #(1 << TTE_SHIFT)			// Write entry to L2 table and advance
	add		$6, $6, $7							// Increment the output address
	subs	$5, $5, #1							// Decrement the number of entries
	b.ne	1b
.endmacro
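
/*
 * Illustrative C equivalent of create_l2_block_entries (not assembled):
 *
 *	uint64_t *tte = &l2_table[(va & ARM_TT_L2_INDEX_MASK) >> ARM_TTE_BLOCK_L2_SHIFT];
 *	uint64_t entry = ARM_TTE_BOOT_BLOCK | (pa & ARM_TTE_BLOCK_L2_MASK);
 *	for (uint64_t i = 0; i < count; i++, entry += ARM_TT_L2_SIZE)
 *		*tte++ = entry;
 */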

/*
 * create_bootstrap_mapping
 *
 *  arg0 - virtual start address
 *  arg1 - physical start address
 *  arg2 - number of entries to map
 *  arg3 - L1 table address
 *  arg4 - free space pointer
 *  arg5 - scratch (entries mapped per loop)
 *  arg6 - scratch
 *  arg7 - scratch
 *  arg8 - scratch
 *  arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and		$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr		$5, $5, #(ARM_TT_L2_SHIFT)
	mov		$6, #(TTE_PGENTRIES)
	sub		$5, $6, $5

	/* allocate an L2 table */
3:	add		$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map on this pass: the smaller of the
	 * entries remaining in this page and the total entries left */
	cmp		$2, $5
	csel	$5, $2, $5, lt

	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3, scratch4) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq		2f

	/* entries left to map - advance base pointers */
	add		$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add		$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov		$5, #(TTE_PGENTRIES)	/* subsequent passes map (up to) a whole L2 page */
	b		3b
2:
.endmacro
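
/*
 * Illustrative C equivalent of create_bootstrap_mapping (not assembled):
 *
 *	left = TTE_PGENTRIES - ((va & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
 *	while (count != 0) {
 *		l2 = (freeptr += PGBYTES);		// allocate an L2 table
 *		create_l1_table_entry(va, l1, l2);
 *		n = MIN(count, left);
 *		create_l2_block_entries(va, pa, l2, n);
 *		count -= n;
 *		va += n << ARM_TT_L2_SHIFT;
 *		pa += n << ARM_TT_L2_SHIFT;
 *		left = TTE_PGENTRIES;			// later passes start page-aligned
 *	}
 */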

/*
 * _start_first_cpu
 * Cold boot init routine.  Called from __start
 *   x0 - Boot args
 */
	.align 2
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr		OSLAR_EL1, xzr
	msr		DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts

	mov		x20, x0
	mov		x21, #0

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0


	// Get the kernel memory parameters from the boot args
	ldr		x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr		x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr		x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr		x26, [x20, BA_BOOT_FLAGS]			// Get the kernel boot flags

	// Clear the registers that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so these registers will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr		TPIDRRO_EL0, xzr
	msr		TPIDR_EL0, xzr

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page		// Load top of exception stack
	add		x0, x0, EXT(excepstack_top)@pageoff
	add		x0, x0, x22							// Convert to KVA
	sub		x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl		EXT(pinst_spsel_1)
#else
	msr		SPSel, #1
#endif

	mov		sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page			// Load top of irq stack
	add		x0, x0, EXT(intstack_top)@pageoff
	add		x0, x0, x22							// Convert to KVA
	sub		x0, x0, x23
	msr		SPSel, #0							// Set SP_EL0 to interrupt stack
	mov		sp, x0

	// Load the address of the C init routine into the link register
	adrp	lr, EXT(arm_init)@page
	add		lr, lr, EXT(arm_init)@pageoff
	add		lr, lr, x22							// Convert to KVA
	sub		lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */

	// Invalidate all entries in the bootstrap page tables
	mov		x0, #(ARM_TTE_EMPTY)				// Load invalid entry template
	mov		x1, x25								// Start at V=P pagetable root
	mov		x2, #(TTE_PGENTRIES)				// Load number of entries per page
	lsl		x2, x2, #2							// Multiply by 4: total entries across the four pages

Linvalidate_bootstrap:							// do {
	str		x0, [x1], #(1 << TTE_SHIFT)			//   Invalidate and advance
	subs	x2, x2, #1							//   entries--
	b.ne	Linvalidate_bootstrap				// } while (entries != 0)

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will only map from the
	 * kernelcache mach header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
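
	/*
	 * C-like sketch of the probe below (illustrative only; 0x18 is the
	 * offset of mach_header_64.flags, 0x20 and 0x38 are the offsets of the
	 * first load command's cmd and vmaddr fields):
	 *
	 *	if ((mh->flags & MH_DYLIB_IN_CACHE) == 0) {
	 *		kc_base = (uintptr_t)mh;	// kernel mach header is the base
	 *	} else {
	 *		// first load command must be an LC_SEGMENT_64
	 *		slide = (uintptr_t)mh - first_seg->vmaddr;
	 *		kc_base = VM_KERNEL_LINK_ADDRESS + slide;
	 *	}
	 */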
	adrp	x0, EXT(_mh_execute_header)@page	// address of kernel mach header
	add		x0, x0, EXT(_mh_execute_header)@pageoff
	ldr		w1, [x0, #0x18]						// load mach_header->flags
	tbz		w1, #0x1f, Lkernelcache_base_found	// if MH_DYLIB_IN_CACHE unset, base is kernel mach header
	ldr		w1, [x0, #0x20]						// load first segment cmd (offset sizeof(kernel_mach_header_t))
	cmp		w1, #0x19							// must be LC_SEGMENT_64
	bne		.
	ldr		x1, [x0, #0x38]						// load first segment vmaddr
	sub		x1, x0, x1							// compute slide
	MOV64	x0, VM_KERNEL_LINK_ADDRESS
	add		x0, x0, x1							// base is kernel link address + slide

Lkernelcache_base_found:
	/*
	 * Adjust physical and virtual base addresses to account for physical
	 * memory preceding the xnu Mach-O header
	 * x22 - Kernel virtual base
	 * x23 - Kernel physical base
	 * x24 - Physical memory size
	 */
	sub		x18, x0, x23
	sub		x24, x24, x18
	add		x22, x22, x18
	add		x23, x23, x18
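	/*
	 * i.e. (illustrative): delta = kc_base_pa - phys_base;
	 * mem_size -= delta; virt_base += delta; phys_base += delta;
	 */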

	/*
	 * x0  - V=P virtual cursor
	 * x4  - V=P physical cursor
	 * x14 - KVA virtual cursor
	 * x15 - KVA physical cursor
	 */
	mov		x4, x0
	mov		x14, x22
	mov		x15, x23

	/*
	 * Allocate L1 tables
	 * x1 - V=P L1 page
	 * x3 - KVA L1 page
	 * x2 - free mem pointer from which we allocate a variable number of L2
	 * pages. The maximum number of bootstrap page table pages is limited to
	 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
	 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
	 * 8 total pages for V=P and KVA.
	 */
	mov		x1, x25
	add		x3, x1, PGBYTES
	mov		x2, x3

	/*
	 * Set up the V=P bootstrap mapping
	 * x5 - total number of L2 entries to map
	 */
	lsr		x5,  x24, #(ARM_TT_L2_SHIFT)
	/* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
	create_bootstrap_mapping x0,  x4,  x5, x1, x2, x6, x10, x11, x12, x13

	/* Set up the KVA bootstrap mapping */
	lsr		x5,  x24, #(ARM_TT_L2_SHIFT)
	create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

	/* Ensure TTEs are visible */
	dsb		ish


	b		common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x25 - PA of the V=P pagetable root
 *	 lr - KVA of C init routine
 *	 sp - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
common_start:

#if HAS_NEX_PG
	mov		x19, lr
	bl		EXT(set_nex_pg)
	mov		lr, x19
#endif

	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page		// Load TCR value from the system register restore structure
	add		x0, x0, EXT(sysreg_restore)@pageoff
	ldr		x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and		x0, x25, #(TTBR_BADDR_MASK)
	mov		x19, lr
	bl		EXT(set_mmu_ttb)
	mov		lr, x19
	add		x0, x25, PGBYTES
	and		x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR: one 8-bit memory attribute per CACHE_ATTRINDX_* index
	// (writeback/writethrough/writecombined normal memory, device, and posted types)
	mov		x0, xzr
	mov		x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr		x0, x0, x1
	mov		x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_RESERVED))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr		x0, x0, x1
#if HAS_FEAT_XS
	mov		x1, #(MAIR_POSTED_XS << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_XS))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_COMBINED_REORDERED_XS << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED_XS))
	orr		x0, x0, x1
#else
	mov		x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr		x0, x0, x1
#endif /* HAS_FEAT_XS */
	msr		MAIR_EL1, x0
	isb
	tlbi	vmalle1
	dsb		ish
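
	/*
	 * Net effect (illustrative sketch, assuming MAIR_ATTR_SHIFT(i) == 8*i):
	 * a mapping's TTE attribute index selects one 8-bit MAIR field, e.g.
	 *
	 *	attr = (mair >> MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK)) & 0xff;
	 */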

#if defined(BCM2837)
	// Set up timer interrupt routing; must be done before the MMU is enabled
	mrs		x15, MPIDR_EL1						// Load MPIDR to get CPU number
	and		x15, x15, #0xFF						// CPU number is in MPIDR Affinity Level 0
	mov		x0, #0x4000
	lsl		x0, x0, #16
	add		x0, x0, #0x0040						// x0: 0x4000004X Core Timers interrupt control
	add		x0, x0, x15, lsl #2
	mov		w1, #0xF0							// w1: 0xF0	  Route to Core FIQs
	str		w1, [x0]
	isb		sy
#endif

#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic		iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path.  cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz		x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add		x0, x0, EXT(cpu_ttep)@pageoff
	ldr		x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it.  Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page			// Load exception vectors base address
	add		x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add		x0, x0, x22									// Convert exception vector address to KVA
	sub		x0, x0, x23
1:
	MSR_VBAR_EL1_X0

#if HAS_APPLE_PAC
	PAC_INIT_KEY_STATE tmp=x0, tmp2=x1
#endif /* HAS_APPLE_PAC */

	// Enable caches, MMU, ROP and JOP
	MOV64	x0, SCTLR_EL1_DEFAULT
	MSR_SCTLR_EL1_X0
	isb		sy

#if !VMAPPLE
	MOV64	x1, SCTLR_EL1_DEFAULT
	cmp		x0, x1
	bne		.
#endif /* !VMAPPLE */

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor it will set up CPACR_EL1 for us; touching
	 * it here would trap to EL3.
	 */

	// Enable NEON
	mov		x0, #(CPACR_FPEN_ENABLE)
	msr		CPACR_EL1, x0
#endif

	// Clear thread pointer
	msr		TPIDR_EL1, xzr


#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Initialization common to all non-virtual Apple targets
	mrs		x12, MDSCR_EL1
	orr		x12, x12, MDSCR_TDCC
	msr		MDSCR_EL1, x12
#endif	// APPLE_ARM64_ARCH_FAMILY

	// Read MIDR before start of per-SoC tunables
	mrs		x12, MIDR_EL1

	APPLY_TUNABLES x12, x13, x14

#if HAS_CLUSTER
	// Unmask external IRQs if we're restarting from non-retention WFI
	mrs		x9, CPU_OVRD
	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr		CPU_OVRD, x9
#endif

	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add		x0, x20, x22
	sub		x0, x0, x23

#if KASAN
	mov		x20, x0
	mov		x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs		x1, TTBR1_EL1
	bl		EXT(kasan_bootstrap)

	mov		x0, x20
	mov		lr, x21
#endif

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add		x0, x0, arm_init_tramp@pageoff
	add		x0, x0, x22
	sub		x0, x0, x23

	// Branch to the trampoline
	br		x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align 2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *         ...
	 *  |                 |
	 *  |  Primary Kernel |
	 *  |   Trans. Table  |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *         ...
	 *  +---Kernel Base---+
	 */


	mov		x19, lr
	// Convert CPU data PA to VA and set as first argument
	mov		x0, x21
	bl		EXT(phystokv)

	mov		lr, x19

	/* Return to arm_init() */
	ret

//#include	"globals_asm.h"

/* vim: set ts=4: */