/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <debug.h>
#include <mach_ldebug.h>
#include <mach_kdp.h>

#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/startup.h>
#include <kern/debug.h>
#include <prng/random.h>
#include <kern/ecc.h>
#include <machine/machine_routines.h>
#include <machine/commpage.h>
#include <machine/config.h>
#if HIBERNATION
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */
/* ARM64_TODO unify boot.h */
#if __arm64__
#include <pexpert/arm64/boot.h>
#else
#error Unsupported arch
#endif
#include <pexpert/arm/consistent_debug.h>
#include <pexpert/device_tree.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpuid_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <vm/vm_map.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/stack_protector.h>
#include <libkern/section_keywords.h>
#include <san/kasan.h>
#include <sys/kdebug.h>

#include <pexpert/pexpert.h>

#include <console/serial_protos.h>

#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if KPERF
#include <kperf/kptimer.h>
#endif /* KPERF */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#endif /* HIBERNATION */

extern void     patch_low_glo(void);
extern int      serial_init(void);
extern void sleep_token_buffer_init(void);

extern vm_offset_t intstack_top;
#if __arm64__
extern vm_offset_t excepstack_top;
#endif

extern const char version[];
extern const char version_variant[];
extern int      disableConsoleOutput;

int             pc_trace_buf[PC_TRACE_BUF_SIZE] = {0};
int             pc_trace_cnt = PC_TRACE_BUF_SIZE;
int             debug_task;

SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false;

#if HAS_BP_RET
/* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */
uint32_t bp_ret = 3;
extern void set_bp_ret(void);
#endif

#if SCHED_HYGIENE_DEBUG
boolean_t sched_hygiene_debug_pmc = 1;
#endif

#if SCHED_HYGIENE_DEBUG
TUNABLE_DT_WRITEABLE(sched_hygiene_mode_t, interrupt_masked_debug_mode,
    "machine-timeouts", "interrupt-masked-debug-mode",
    "interrupt-masked-debug-mode",
    SCHED_HYGIENE_MODE_PANIC,
    TUNABLE_DT_CHECK_CHOSEN);

MACHINE_TIMEOUT_DEV_WRITEABLE(interrupt_masked_timeout, "interrupt-masked",
    0xd0000, MACHINE_TIMEOUT_UNIT_TIMEBASE,  /* 35.499ms */
    NULL);
#if __arm64__
#define SSHOT_INTERRUPT_MASKED_TIMEOUT 0xf9999 /* 64-bit: 42.599ms */
#endif
MACHINE_TIMEOUT_DEV_WRITEABLE(stackshot_interrupt_masked_timeout, "sshot-interrupt-masked",
    SSHOT_INTERRUPT_MASKED_TIMEOUT, MACHINE_TIMEOUT_UNIT_TIMEBASE,
    NULL);
#undef SSHOT_INTERRUPT_MASKED_TIMEOUT
#endif

/*
 * A 6-second timeout will give the watchdog code a chance to run
 * before a panic is triggered by the xcall routine.
 */
#define XCALL_ACK_TIMEOUT_NS ((uint64_t) 6000000000)
uint64_t xcall_ack_timeout_abstime;

boot_args const_boot_args __attribute__((section("__DATA, __const")));
boot_args      *BootArgs __attribute__((section("__DATA, __const")));

TUNABLE(uint32_t, arm_diag, "diag", 0);
#ifdef  APPLETYPHOON
static unsigned cpus_defeatures = 0x0;
extern void cpu_defeatures_set(unsigned int);
#endif

#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
extern volatile boolean_t arm64_stall_sleep;
#endif

extern boolean_t force_immediate_debug_halt;

#if HAS_APPLE_PAC
SECURITY_READ_ONLY_LATE(boolean_t) diversify_user_jop = TRUE;
#endif

SECURITY_READ_ONLY_LATE(uint64_t) gDramBase;
SECURITY_READ_ONLY_LATE(uint64_t) gDramSize;

SECURITY_READ_ONLY_LATE(bool) serial_console_enabled = false;
#ifdef XNU_ENABLE_PROCESSOR_EXIT
SECURITY_READ_ONLY_LATE(bool) enable_processor_exit = true;
#else
SECURITY_READ_ONLY_LATE(bool) enable_processor_exit = false;
#endif

/*
 * Forward declaration
 */
void arm_init(boot_args * args);

#if __arm64__
unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */

extern void configure_misc_apple_boot_args(void);
extern void configure_misc_apple_regs(void);
extern void configure_timer_apple_regs(void);
extern void configure_late_apple_regs(void);
#endif /* __arm64__ */


/*
 * JOP rebasing
 */

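/* Stub out the logging macro used by dyld_kernel_fixups.h; nothing can print this early in boot. */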
#define dyldLogFunc(msg, ...)
#include <mach/dyld_kernel_fixups.h>

extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts");
extern uint32_t __thread_starts_sect_end[]   __asm("section$end$__TEXT$__thread_starts");
#if defined(HAS_APPLE_PAC)
extern void OSRuntimeSignStructors(kernel_mach_header_t * header);
extern void OSRuntimeSignStructorsInFileset(kernel_mach_header_t * header);
#endif /* defined(HAS_APPLE_PAC) */

extern vm_offset_t vm_kernel_slide;
extern vm_offset_t segLOWESTKC, segHIGHESTKC, segLOWESTROKC, segHIGHESTROKC;
extern vm_offset_t segLOWESTAuxKC, segHIGHESTAuxKC, segLOWESTROAuxKC, segHIGHESTROAuxKC;
extern vm_offset_t segLOWESTRXAuxKC, segHIGHESTRXAuxKC, segHIGHESTNLEAuxKC;

static void
arm_slide_rebase_and_sign_image(void)
{
	kernel_mach_header_t *k_mh, *kc_mh = NULL;
	kernel_segment_command_t *seg;
	uintptr_t slide;

	k_mh = &_mh_execute_header;
	if (kernel_mach_header_is_in_fileset(k_mh)) {
		/*
		 * The kernel is part of an MH_FILESET kernel collection; determine the
		 * slide from the first segment's mach-o vmaddr (this requires the first
		 * kernel load command to be LC_SEGMENT_64 of the __TEXT segment).
		 */
		seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
		assert(seg->cmd == LC_SEGMENT_KERNEL);
		slide = (uintptr_t)k_mh - seg->vmaddr;

		/*
		 * The kernel collection linker guarantees that the boot collection mach
		 * header vmaddr is the hardcoded kernel link address (as specified to
		 * ld64 when linking the kernel).
		 */
		kc_mh = (kernel_mach_header_t*)(VM_KERNEL_LINK_ADDRESS + slide);
		assert(kc_mh->filetype == MH_FILESET);

		/*
		 * rebase and sign jops
		 * Note that we can't call any functions before this point, so
		 * we have to hard-code the knowledge that the base of the KC
		 * is the KC's mach-o header. This would change if any
		 * segment's VA started *before* the text segment
		 * (as the HIB segment does on x86).
		 */
		const void *collection_base_pointers[KCNumKinds] = {[0] = kc_mh, };
		kernel_collection_slide((struct mach_header_64 *)kc_mh, collection_base_pointers);

		PE_set_kc_header(KCKindPrimary, kc_mh, slide);

		/*
		 * iBoot doesn't slide load command vmaddrs in an MH_FILESET kernel
		 * collection, so adjust them now, and determine the vmaddr range
		 * covered by read-only segments for the CTRR rorgn.
		 */
		kernel_collection_adjust_mh_addrs((struct mach_header_64 *)kc_mh, slide, false,
		    (uintptr_t *)&segLOWESTKC, (uintptr_t *)&segHIGHESTKC,
		    (uintptr_t *)&segLOWESTROKC, (uintptr_t *)&segHIGHESTROKC,
		    NULL, NULL, NULL);
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructorsInFileset(kc_mh);
#endif /* defined(HAS_APPLE_PAC) */
	} else {
		/*
		 * Static kernelcache: iBoot slid the kernel's mach-o vmaddrs, so
		 * determine the slide using the hardcoded kernel link address.
		 */
		slide = (uintptr_t)k_mh - VM_KERNEL_LINK_ADDRESS;

		/* rebase and sign jops */
		static_kernelcache = &__thread_starts_sect_end[0] != &__thread_starts_sect_start[0];
		if (static_kernelcache) {
			rebase_threaded_starts( &__thread_starts_sect_start[0],
			    &__thread_starts_sect_end[0],
			    (uintptr_t)k_mh, (uintptr_t)k_mh - slide, slide);
		}
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructors(&_mh_execute_header);
#endif /* defined(HAS_APPLE_PAC) */
	}


	/*
	 * Initialize slide global here to avoid duplicating this logic in
	 * arm_vm_init()
	 */
	vm_kernel_slide = slide;
}

void
arm_auxkc_init(void *mh, void *base)
{
	/*
	 * The kernel collection linker guarantees that the lowest vmaddr in an
	 * AuxKC collection is 0 (but note that the mach header is higher up since
	 * RW segments precede RO segments in the AuxKC).
	 */
	uintptr_t slide = (uintptr_t)base;
	kernel_mach_header_t *akc_mh = (kernel_mach_header_t*)mh;

	assert(akc_mh->filetype == MH_FILESET);
	PE_set_kc_header_and_base(KCKindAuxiliary, akc_mh, base, slide);

	/* rebase and sign jops */
	const void *collection_base_pointers[KCNumKinds];
	memcpy(collection_base_pointers, PE_get_kc_base_pointers(), sizeof(collection_base_pointers));
	kernel_collection_slide((struct mach_header_64 *)akc_mh, collection_base_pointers);

	kernel_collection_adjust_mh_addrs((struct mach_header_64 *)akc_mh, slide, false,
	    (uintptr_t *)&segLOWESTAuxKC, (uintptr_t *)&segHIGHESTAuxKC, (uintptr_t *)&segLOWESTROAuxKC,
	    (uintptr_t *)&segHIGHESTROAuxKC, (uintptr_t *)&segLOWESTRXAuxKC, (uintptr_t *)&segHIGHESTRXAuxKC,
	    (uintptr_t *)&segHIGHESTNLEAuxKC);
#if defined(HAS_APPLE_PAC)
	OSRuntimeSignStructorsInFileset(akc_mh);
#endif /* defined(HAS_APPLE_PAC) */
}

/*
 *	Routine:	arm_setup_pre_sign
 *	Function:	Perform HW initialization that must happen ahead of the first PAC sign
 *			operation.
 */
static void
arm_setup_pre_sign(void)
{
#if __arm64__
	/* DATA TBI, if enabled, affects the number of VA bits that contain the signature */
	arm_set_kernel_tbi();
#endif /* __arm64__ */
}

/*
 *		Routine:		arm_init
 *		Function:		Runs on the boot CPU, once, on entry from iBoot.
 */

__startup_func
void
arm_init(
	boot_args       *args)
{
	unsigned int    maxmem;
	uint32_t        memsize;
	uint64_t        xmaxmem;
	thread_t        thread;
	DTEntry chosen = NULL;
	unsigned int dt_entry_size = 0;

	arm_setup_pre_sign();

	arm_slide_rebase_and_sign_image();

	/* If kernel integrity is supported, use a constant copy of the boot args. */
	const_boot_args = *args;
	BootArgs = args = &const_boot_args;

	cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
	/* The bootstrap CPU's process-dependent key for the kernel has been loaded by start.s. */
	BootCpuData.rop_key = ml_default_rop_pid();
	BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */

	PE_init_platform(FALSE, args); /* Get platform expert set up */

#if __arm64__
	configure_timer_apple_regs();
	wfe_timeout_configure();
	wfe_timeout_init();

	configure_misc_apple_boot_args();
	configure_misc_apple_regs();

#if (DEVELOPMENT || DEBUG)
	unsigned long const *platform_stall_ptr = NULL;

	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}

	// TUNABLE cannot be used here because TUNABLEs are parsed at a later point.
	if (SecureDTGetProperty(chosen, "xnu_platform_stall", (void const **)&platform_stall_ptr,
	    &dt_entry_size) == kSuccess) {
		xnu_platform_stall_value = *platform_stall_ptr;
	}

	platform_stall_panic_or_spin(PLATFORM_STALL_XNU_LOCATION_ARM_INIT);

	chosen = NULL; // Force a re-lookup later on since VM addresses are not final at this point
	dt_entry_size = 0;
#endif


	{
		/*
		 * Select the advertised kernel page size.
		 */
		if (args->memSize > 1ULL * 1024 * 1024 * 1024) {
			/*
			 * arm64 device with > 1GB of RAM:
			 * kernel uses 16KB pages.
			 */
			PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
		} else {
			/*
			 * arm64 device with <= 1GB of RAM:
			 * kernel uses hardware page size
			 * (4KB for H6/H7, 16KB for H8+).
			 */
			PAGE_SHIFT_CONST = ARM_PGSHIFT;
		}

		/* 32-bit apps always see 16KB page size */
		page_shift_user32 = PAGE_MAX_SHIFT;
#ifdef  APPLETYPHOON
		if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) {
			if ((cpus_defeatures & 0xF) != 0) {
				cpu_defeatures_set(cpus_defeatures & 0xF);
			}
		}
#endif
	}
#endif

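	/* Discover the CPU and cluster topology from the device tree. */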
	ml_parse_cpu_topology();


	master_cpu = ml_get_boot_cpu_number();
	assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number());

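	/* Wire up the boot CPU's per-CPU data; the interrupt and exception stacks grow downward from their tops. */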
	BootCpuData.cpu_number = (unsigned short)master_cpu;
	BootCpuData.intstack_top = (vm_offset_t) &intstack_top;
	BootCpuData.istackptr = BootCpuData.intstack_top;
#if __arm64__
	BootCpuData.excepstack_top = (vm_offset_t) &excepstack_top;
	BootCpuData.excepstackptr = BootCpuData.excepstack_top;
#endif
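	/*
	 * Publish the boot CPU's data at both its virtual address and the
	 * corresponding physical address, derived from the static
	 * virtual-to-physical offset supplied in the boot args.
	 */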
	CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData;
	CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase)
	    + ((uintptr_t)&BootCpuData
	    - (uintptr_t)(args->virtBase)));

	thread = thread_bootstrap();
	thread->machine.CpuDatap = &BootCpuData;
	thread->machine.pcpu_data_base = (vm_offset_t)0;
	machine_set_current_thread(thread);

	/*
	 * Preemption is enabled for this thread so that it can lock mutexes without
	 * tripping the preemption check. In reality scheduling is not enabled until
	 * this thread completes, and there are no other threads to switch to, so
	 * preemption level is not really meaningful for the bootstrap thread.
	 */
	thread->machine.preemption_count = 0;
	cpu_bootstrap();

	rtclock_early_init();

	kernel_debug_string_early("kernel_startup_bootstrap");
	kernel_startup_bootstrap();

	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

	cpu_init();

	processor_bootstrap();

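	/*
	 * Determine the memory limit: a "maxmem" boot-arg is given in MB, the
	 * hw.memsize default is in bytes, and 0 means no limit.
	 */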
	if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		xmaxmem = (uint64_t) maxmem * (1024 * 1024);
	} else if (PE_get_default("hw.memsize", &memsize, sizeof(memsize))) {
		xmaxmem = (uint64_t) memsize;
	} else {
		xmaxmem = 0;
	}

#if SCHED_HYGIENE_DEBUG
	{
		int wdt_boot_arg = 0;
		bool const wdt_disabled = (PE_parse_boot_argn("wdt", &wdt_boot_arg, sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1));

		/* Disable if WDT is disabled */
		if (wdt_disabled || kern_feature_override(KF_INTERRUPT_MASKED_DEBUG_OVRD)) {
			interrupt_masked_debug_mode = SCHED_HYGIENE_MODE_OFF;
		} else if (kern_feature_override(KF_SCHED_HYGIENE_DEBUG_PMC_OVRD)) {
			/*
			 * The sched hygiene facility can, in addition to checking time, capture
			 * metrics provided by the cycle and instruction counters available in some
			 * systems. Check if we should enable this feature based on the validation
			 * overrides.
			 */
			sched_hygiene_debug_pmc = 0;
		}

		if (wdt_disabled || kern_feature_override(KF_PREEMPTION_DISABLED_DEBUG_OVRD)) {
			sched_preemption_disable_debug_mode = SCHED_HYGIENE_MODE_OFF;
		}
	}
#endif /* SCHED_HYGIENE_DEBUG */

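	/* Precompute the cross-call ack timeout in absolute-time (timebase) units. */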
	nanoseconds_to_absolutetime(XCALL_ACK_TIMEOUT_NS, &xcall_ack_timeout_abstime);

#if HAS_BP_RET
	PE_parse_boot_argn("bpret", &bp_ret, sizeof(bp_ret));
	set_bp_ret(); // Apply branch predictor retention settings to boot CPU
#endif

	PE_parse_boot_argn("immediate_NMI", &force_immediate_debug_halt, sizeof(force_immediate_debug_halt));

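	/*
	 * Where supported, set PAN (Privileged Access Never) so that kernel
	 * accesses to user-accessible memory fault by default.
	 */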
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */

	arm_vm_init(xmaxmem, args);

	if (debug_boot_arg) {
		patch_low_glo();
	}

#if __arm64__ && WITH_CLASSIC_S2R
	sleep_token_buffer_init();
#endif

	PE_consistent_debug_inherit();

	/* Setup debugging output. */
	const unsigned int serial_exists = serial_init();
	kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF);
	kprintf("kprintf initialized\n");

	serialmode = 0;
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* Do we want a serial keyboard and/or console? */
		kprintf("Serial mode specified: %08X\n", serialmode);
		disable_iolog_serial_output = (serialmode & SERIALMODE_NO_IOLOG) != 0;
		enable_dklog_serial_output = (serialmode & SERIALMODE_DKLOG) != 0;
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
					"WARNING: Forcing uart driver to output synchronously. "
					"printf()s/IOLogs will impact kernel performance.\n"
					"You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
		/* If on-demand is selected, disable serials until reception. */
		bool on_demand = !!(serialmode & SERIALMODE_ON_DEMAND);
		if (on_demand && !(serialmode & SERIALMODE_INPUT)) {
			kprintf(
				"WARNING: invalid serial boot-arg: ON_DEMAND (0x%x) flag "
				"requires INPUT (0x%x). Ignoring ON_DEMAND.\n",
				SERIALMODE_ON_DEMAND, SERIALMODE_INPUT
				);
			on_demand = false;
		}
		serial_set_on_demand(on_demand);
	}
	if (kern_feature_override(KF_SERIAL_OVRD)) {
		serialmode = 0;
	}

	/* Start serial if requested and a serial device was enumerated in serial_init(). */
	if ((serialmode & SERIALMODE_OUTPUT) && serial_exists) {
		serial_console_enabled = true;
		(void)switch_to_serial_console(); /* Switch into serial mode from video console */
		disableConsoleOutput = FALSE;     /* Allow printfs to happen */
	}
	PE_create_console();

	/* setup console output */
	PE_init_printf(FALSE);

#if __arm64__
#if DEBUG
	dump_kva_space();
#endif
#endif

	cpu_machine_idle_init(TRUE);

	PE_init_platform(TRUE, &BootCpuData);

#if __arm64__
	extern bool cpu_config_correct;
	if (!cpu_config_correct) {
		panic("The cpumask=N boot arg cannot be used together with cpus=N, and the boot CPU must be enabled");
	}

	ml_map_cpu_pio();

#if APPLE_ARM64_ARCH_FAMILY
	configure_late_apple_regs();
#endif

#endif

	cpu_timebase_init(TRUE);

#if KPERF
	/* kptimer_curcpu_up() must be called after cpu_timebase_init */
	kptimer_curcpu_up();
#endif /* KPERF */

	PE_init_cpu();
	fiq_context_init(TRUE);


#if HIBERNATION
	pal_hib_init();
#endif /* HIBERNATION */

	/*
	 * gPhysBase/Size only represent kernel-managed memory. These globals represent
	 * the actual DRAM base address and size as reported by iBoot through the
	 * device tree.
	 */
	unsigned long const *dram_base;
	unsigned long const *dram_size;

	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-base", (void const **)&dram_base, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-base' entry in the 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-size", (void const **)&dram_size, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-size' entry in the 'chosen' DT node", __FUNCTION__);
	}

	gDramBase = *dram_base;
	gDramSize = *dram_size;

	/*
	 * Initialize the stack protector for all future calls
	 * to C code. Since kernel_bootstrap() eventually
	 * switches stack context without returning through this
	 * function, we do not risk failing the check even though
	 * we mutate the guard word during execution.
	 */
	__stack_chk_guard = (unsigned long)early_random();
	/*
	 * Zero a byte of the protector to guard against
	 * string vulnerabilities.
	 */
	__stack_chk_guard &= ~(0xFFULL << 8);
	machine_startup(args);
}

/*
 * Routine:        arm_init_cpu
 * Function:
 *    Runs on S2R resume (all CPUs) and SMP boot (non-boot CPUs only).
 */

void
arm_init_cpu(
	cpu_data_t      *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif

#ifdef __arm64__
	configure_timer_apple_regs();
	configure_misc_apple_regs();
#endif

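	/* This CPU is awake again; clear its sleep flag. */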
	cpu_data_ptr->cpu_flags &= ~SleepState;


	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if APPLE_ARM64_ARCH_FAMILY
	configure_late_apple_regs();
#endif

#if HIBERNATION
	if ((cpu_data_ptr == &BootCpuData) && (gIOHibernateState == kIOHibernateStateWakingFromHibernate)) {
		// the "normal" S2R code captures wake_abstime too early, so on a hibernation resume we fix it up here
		extern uint64_t wake_abstime;
		wake_abstime = gIOHibernateCurrentHeader->lastHibAbsTime;

		// since the hw clock stops ticking across hibernation, we need to apply an offset;
		// iBoot computes this offset for us and passes it via the hibernation header
		extern uint64_t hwclock_conttime_offset;
		hwclock_conttime_offset = gIOHibernateCurrentHeader->hwClockOffset;

		// during hibernation, we captured the idle thread's state from inside the PPL context, so we have to
		// fix up its preemption count
		unsigned int expected_preemption_count = (gEnforcePlatformActionSafety ? 2 : 1);
		if (get_preemption_level_for_thread(cpu_data_ptr->cpu_active_thread) !=
		    expected_preemption_count) {
			panic("unexpected preemption count %u on boot cpu thread (should be %u)",
			    get_preemption_level_for_thread(cpu_data_ptr->cpu_active_thread),
			    expected_preemption_count);
		}
		cpu_data_ptr->cpu_active_thread->machine.preemption_count--;
	}
#endif /* HIBERNATION */

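	/* Drop any stale user address-space state: clear the user TTB and invalidate the TLBs. */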
#if __arm64__
	wfe_timeout_init();
	pmap_clear_user_ttb();
	flush_mmu_tlb();
#endif

	cpu_machine_idle_init(FALSE);

	cpu_init();

#ifdef  APPLETYPHOON
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif
	/* Initialize the timebase before serial_init, as some serial
	 * drivers use mach_absolute_time() to implement rate control
	 */
	cpu_timebase_init(FALSE);

#if KPERF
	/* kptimer_curcpu_up() must be called after cpu_timebase_init */
	kptimer_curcpu_up();
#endif /* KPERF */

	if (cpu_data_ptr == &BootCpuData) {
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Prevent CPUs from going into deep sleep until all
		 * CPUs are ready to do so.
		 */
		arm64_stall_sleep = TRUE;
#endif
		serial_init();
		PE_init_platform(TRUE, NULL);
		commpage_update_timebase();
	}
	PE_init_cpu();

	fiq_context_init(TRUE);
	cpu_data_ptr->rtcPop = EndOfAllTime;
	timer_resync_deadlines();

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
	bool should_kprintf = processor_should_kprintf(processor, true);

#if DEVELOPMENT || DEBUG
	PE_arm_debug_enable_trace(should_kprintf);
#endif /* DEVELOPMENT || DEBUG */


	if (should_kprintf) {
		kprintf("arm_init_cpu(): cpu %d online\n", cpu_data_ptr->cpu_number);
	}

	if (cpu_data_ptr == &BootCpuData) {
		if (kdebug_enable == 0) {
			__kdebug_only uint64_t elapsed = kdebug_wake();
			KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), mach_absolute_time() - elapsed);
		}

#if CONFIG_TELEMETRY
		bootprofile_wake_from_sleep();
#endif /* CONFIG_TELEMETRY */
	}
#if MONOTONIC && defined(__arm64__)
	mt_wake_per_core();
#endif /* MONOTONIC && defined(__arm64__) */

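	/* Record that this cluster's CTRR region is now locked and wake any thread waiting on the lock-down. */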
#if defined(KERNEL_INTEGRITY_CTRR)
	if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKED) {
		lck_spin_lock(&ctrr_cpu_start_lck);
		ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKED;
		thread_wakeup(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]);
		lck_spin_unlock(&ctrr_cpu_start_lck);
	}
#endif


	slave_main(NULL);
}

/*
 * Routine:		arm_init_idle_cpu
 * Function:	Resume from non-retention WFI.  Called from the reset vector.
 */
void __attribute__((noreturn))
arm_init_idle_cpu(
	cpu_data_t      *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if __arm64__
	wfe_timeout_init();
	pmap_clear_user_ttb();
	flush_mmu_tlb();
	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif

#ifdef  APPLETYPHOON
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif

	/*
	 * Update the active debug object to reflect that debug registers have been reset.
	 * This will force any thread with active debug state to resync the debug registers
	 * if it returns to userspace on this CPU.
	 */
	if (cpu_data_ptr->cpu_user_debug != NULL) {
		arm_debug_set(NULL);
	}

	fiq_context_init(FALSE);

	cpu_idle_exit(TRUE);
}
828