xref: /xnu-11417.140.69/pexpert/arm/pe_init.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3  *
4  *    arm platform expert initialization.
5  */
6 #include <sys/types.h>
7 #include <sys/kdebug.h>
8 #include <mach/vm_param.h>
9 #include <pexpert/protos.h>
10 #include <pexpert/pexpert.h>
11 #include <pexpert/boot.h>
12 #include <pexpert/device_tree.h>
13 #include <pexpert/pe_images.h>
14 #include <kern/sched_prim.h>
15 #include <kern/socd_client.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_routines.h>
18 #include <arm/caches_internal.h>
19 #include <kern/debug.h>
20 #include <libkern/section_keywords.h>
21 #include <os/overflow.h>
22 
23 #include <pexpert/arm64/board_config.h>
24 
25 #if CONFIG_SPTM
26 #include <arm64/sptm/sptm.h>
27 #endif
28 
/* extern references */
extern void     pe_identify_machine(boot_args *bootArgs);

/* static references */
static void     pe_prepare_images(void);

/* private globals */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;
TUNABLE_DT(uint32_t, PE_srd_fused, "/chosen", "research-enabled",
    "srd_fusing", 0, TUNABLE_DT_NONE);

/* Maximum length (bytes, incl. NUL) of the iBoot firmware version strings below */
#define FW_VERS_LEN 128

char iBoot_version[FW_VERS_LEN];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
char iBoot_Stage_2_version[FW_VERS_LEN];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __DATA_CONST
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_LATE(volatile uint32_t) debug_enabled = FALSE;

/*
 * This variable indicates the page protection security policy used by the system.
 * It is intended mostly for debugging purposes.
 */
SECURITY_READ_ONLY_LATE(ml_page_protection_t) page_protection_type;

uint8_t         gPlatformECID[8];       /* "unique-chip-id" from /chosen */
uint32_t        gPlatformMemoryID;      /* "dram-vendor-id" from /chosen */
#if defined(XNU_TARGET_OS_XR)
uint32_t        gPlatformChipRole = UINT32_MAX;  /* "chip-role" from /product */
#endif /* defined(XNU_TARGET_OS_XR) */
static boolean_t vc_progress_initialized = FALSE;
/* Breadcrumbs recording the most recent hardware access, for debug triage */
uint64_t    last_hwaccess_thread = 0;
uint8_t last_hwaccess_type = 0;
uint8_t last_hwaccess_size = 0;
uint64_t last_hwaccess_paddr = 0;
/* NUL-terminated copies of the DT "target-type" and "model" properties */
char     gTargetTypeBuffer[16];
char     gModelTypeBuffer[32];

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* Panic save region: mapped base, size, and header view (see check_for_panic_log) */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
107 
/*
 * Locate and map the iBoot-provided panic save region, then inspect any
 * content left there by a previous boot.  On success this initializes
 * gPanicBase, gPanicSize and panic_text_len, and points panic_info at
 * the embedded panic header at the start of the region.
 */
static void
check_for_panic_log(void)
{
#ifdef PLATFORM_PANIC_LOG_PADDR
	/* Fixed, platform-defined panic region: map it write-combined. */
	gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
	panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
	gPanicSize = PLATFORM_PANIC_LOG_SIZE;
#else
	DTEntry entry, chosen;
	unsigned int size;
	uintptr_t const *reg_prop;
	uint32_t const *panic_region_length;

	/*
	 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
	 *
	 * chosen {
	 *   embedded-panic-log-size = <0x00080000>;
	 *   [a bunch of other stuff]
	 * };
	 *
	 * pram {
	 *   reg = <0x00000008_fbc48000 0x00000000_000b4000>;
	 * };
	 *
	 * reg[0] is the physical address
	 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
	 * embedded-panic-log-size is the maximum amount of data to store in the buffer
	 */
	if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return;
	}

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) {
		return;
	}

	/* Map only embedded-panic-log-size bytes, not iBoot's full region. */
	gPanicBase = ml_io_map_wcomb(reg_prop[0], panic_region_length[0]);

	/* Deduct the size of the panic header from the panic region size */
	panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
	gPanicSize = panic_region_length[0];

#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
	if (PE_consistent_debug_enabled()) {
		uint64_t macos_panic_physbase = 0;
		uint64_t macos_panic_physlen = 0;
		/* Populate the macOS panic region data if it's present in consistent debug */
		if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
			macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
			mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
			macos_panic_size = macos_panic_physlen;
		}
	}
#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */

#endif
	panic_info = (struct embedded_panic_header *)gPanicBase;

	/* Check if a shared memory console is running in the panic buffer */
	if (panic_info->eph_magic == 'SHMC') {
		panic_console_available = TRUE;
		return;
	}

	/* Check if there's a boot profile in the panic buffer */
	if (panic_info->eph_magic == 'BTRC') {
		return;
	}

	/*
	 * Check to see if a panic (FUNK) is in VRAM from the last time
	 */
	if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
		printf("iBoot didn't extract panic log from previous session crash, this is bad\n");
	}

	/* Clear panic region */
	bzero((void *)gPanicBase, gPanicSize);
}
196 
197 int
PE_initialize_console(PE_Video * info,int op)198 PE_initialize_console(PE_Video * info, int op)
199 {
200 	static int last_console = -1;
201 
202 	if (info && (info != &PE_state.video)) {
203 		info->v_scale = PE_state.video.v_scale;
204 	}
205 
206 	switch (op) {
207 	case kPEDisableScreen:
208 		initialize_screen(info, op);
209 		last_console = switch_to_serial_console();
210 		kprintf("kPEDisableScreen %d\n", last_console);
211 		break;
212 
213 	case kPEEnableScreen:
214 		initialize_screen(info, op);
215 		if (info) {
216 			PE_state.video = *info;
217 		}
218 		kprintf("kPEEnableScreen %d\n", last_console);
219 		if (last_console != -1) {
220 			switch_to_old_console(last_console);
221 		}
222 		break;
223 
224 	case kPEReleaseScreen:
225 		/*
226 		 * we don't show the progress indicator on boot, but want to
227 		 * show it afterwards.
228 		 */
229 		if (!vc_progress_initialized) {
230 			default_progress.dx = 0;
231 			default_progress.dy = 0;
232 			vc_progress_initialize(&default_progress,
233 			    default_progress_data1x,
234 			    default_progress_data2x,
235 			    default_progress_data3x,
236 			    (unsigned char *) appleClut8);
237 			vc_progress_initialized = TRUE;
238 		}
239 		initialize_screen(info, op);
240 		break;
241 
242 	default:
243 		initialize_screen(info, op);
244 		break;
245 	}
246 
247 	return 0;
248 }
249 
/*
 * Second-stage platform bring-up: enable printf, pull boot imagery and
 * boot-timing data out of the device tree, optionally start the boot
 * progress indicator, then hand the device tree to IOKit.
 */
void
PE_init_iokit(void)
{
	DTEntry         entry;
	unsigned int    size, scale;
	unsigned long   display_size;
	void const * const *map;
	unsigned int    show_progress;
	int             *delta, image_size, flip;
	uint32_t        start_time_value = 0;
	uint32_t        debug_wait_start_value = 0;
	uint32_t        load_kernel_start_value = 0;
	uint32_t        populate_registry_time_value = 0;

	PE_init_printf(TRUE);

	printf("iBoot version: %s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
	printf("iBoot Stage 2 version: %s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

	/* iBoot passes the boot CLUT and failed-boot picture via /chosen/memory-map. */
	if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) {
		boot_progress_element const *bootPict;

		if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) {
			bcopy(map[0], appleClut8, sizeof(appleClut8));
		}

		if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) {
			bootPict = (boot_progress_element const *) map[0];
			default_noroot.width = bootPict->width;
			default_noroot.height = bootPict->height;
			default_noroot.dx = 0;
			default_noroot.dy = bootPict->yOffset;
			default_noroot_data = &bootPict->data[0];
		}
	}

	pe_prepare_images();

	scale = PE_state.video.v_scale;
	flip = 1;

	/* Progress indicator defaults to on for macOS (unless suppressed), opt-in elsewhere. */
#if defined(XNU_TARGET_OS_OSX)
	int notused;
	show_progress = TRUE;
	if (PE_parse_boot_argn("-restore", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
	if (PE_parse_boot_argn("-noprogress", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
#else
	show_progress = FALSE;
	PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
#endif /* XNU_TARGET_OS_OSX */
	if (show_progress) {
		/* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
		switch (PE_state.video.v_rotate) {
		case 2:
			flip = -1;
			OS_FALLTHROUGH;
		case 0:
			display_size = PE_state.video.v_height;
			image_size = default_progress.height;
			delta = &default_progress.dy;
			break;
		case 1:
			flip = -1;
			OS_FALLTHROUGH;
		case 3:
		default:
			display_size = PE_state.video.v_width;
			image_size = default_progress.width;
			delta = &default_progress.dx;
		}
		assert(*delta >= 0);
		/* Pull the image toward the center until it fits within half the display. */
		while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
			*delta -= 50 * scale;
			assert(*delta >= 0);
		}
		*delta *= flip;

		/* Check for DT-defined progress y delta */
		PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy));

		vc_progress_initialize(&default_progress,
		    default_progress_data1x,
		    default_progress_data2x,
		    default_progress_data3x,
		    (unsigned char *) appleClut8);
		vc_progress_initialized = TRUE;
	}

	if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
		/* Trace iBoot-provided timing information. */
		if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) {
			uint32_t const * value_ptr;

			/* Each property is only trusted when its size matches exactly. */
			if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(start_time_value)) {
					start_time_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(debug_wait_start_value)) {
					debug_wait_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(load_kernel_start_value)) {
					load_kernel_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(populate_registry_time_value)) {
					populate_registry_time_value = *value_ptr;
				}
			}
		}

		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
#if CONFIG_SPTM
		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 1), SPTMArgs->timestamp_sk_bootstrap, SPTMArgs->timestamp_xnu_bootstrap);
#endif
	}

	InitIOKit(PE_state.deviceTreeHead);
	ConfigureIOKit();
}
383 
void
PE_lockdown_iokit(void)
{
	/*
	 * machine_lockdown() is a hard security checkpoint on arm/arm64,
	 * especially on KTRR/CTRR-equipped targets: code running before it
	 * must be limited to trusted parts of the kernel and specially-
	 * entitled kexts.  General-purpose IOKit matching can execute
	 * untrusted / non-entitled kext code — code that may also parse
	 * attacker-controlled data such as network packets, dramatically
	 * widening the attack surface — so matching must not begin until
	 * the full set of available hardware protections is in effect.
	 */
	zalloc_iokit_lockdown();
	StartIOKitMatching();
}
402 
403 void
PE_slide_devicetree(vm_offset_t slide)404 PE_slide_devicetree(vm_offset_t slide)
405 {
406 	assert(PE_state.initialized);
407 	PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
408 	SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
409 }
410 
/*
 * Early platform initialization.  Called more than once: before the VM
 * is up (vm_initialized == FALSE) it captures boot_args / video state
 * and parses platform-identity properties from the device tree; after
 * (vm_initialized == TRUE) it initializes interrupts.
 */
void
PE_init_platform(boolean_t vm_initialized, void *args)
{
	DTEntry         entry;
	unsigned int    size;
	void * const    *prop;
	boot_args      *boot_args_ptr = (boot_args *) args;

	if (PE_state.initialized == FALSE) {
		page_protection_type = ml_page_protection_type();
		PE_state.initialized = TRUE;
		PE_state.bootArgs = boot_args_ptr;
		PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
		PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
		PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
		PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
		PE_state.video.v_width = boot_args_ptr->Video.v_width;
		PE_state.video.v_height = boot_args_ptr->Video.v_height;
		/* Video.v_depth packs depth/rotate/scale bitfields; unpack each. */
		PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
		PE_state.video.v_rotate = (
			((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) +    // rotation
			((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift)  & kBootVideoDepthMask) // add extra boot rotation
			) % 4;
		PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
		PE_state.video.v_display = boot_args_ptr->Video.v_display;
		strlcpy(PE_state.video.v_pixelFormat, "BBBBBBBBGGGGGGGGRRRRRRRR", sizeof(PE_state.video.v_pixelFormat));
	}
	if (!vm_initialized) {
		/*
		 * Setup the Device Tree routines
		 * so the console can be found and the right I/O space
		 * can be used..
		 */
		SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
		pe_identify_machine(boot_args_ptr);
	} else {
		pe_arm_init_interrupts(args);
	}

	if (!vm_initialized) {
		/* Cache the target-type identity string from the device tree. */
		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
			if (kSuccess == SecureDTGetProperty(entry, "target-type",
			    (void const **)&prop, &size)) {
				if (size > sizeof(gTargetTypeBuffer)) {
					size = sizeof(gTargetTypeBuffer);
				}
				/* NOTE(review): assumes the property is non-empty (size > 0); a zero-length property would index [size - 1] out of bounds — TODO confirm SecureDT guarantees */
				bcopy(prop, gTargetTypeBuffer, size);
				gTargetTypeBuffer[size - 1] = '\0';
			}
		}
		/* Cache the model identity string from the device tree. */
		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
			if (kSuccess == SecureDTGetProperty(entry, "model",
			    (void const **)&prop, &size)) {
				if (size > sizeof(gModelTypeBuffer)) {
					size = sizeof(gModelTypeBuffer);
				}
				bcopy(prop, gModelTypeBuffer, size);
				gModelTypeBuffer[size - 1] = '\0';
			}
		}
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			if (kSuccess == SecureDTGetProperty(entry, "debug-enabled",
			    (void const **) &prop, &size)) {
				/*
				 * We purposefully modify a constified variable as
				 * it will get locked down by a trusted monitor or
				 * via page table mappings. We don't want people easily
				 * modifying this variable...
				 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
				boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
				if (size > sizeof(uint32_t)) {
					size = sizeof(uint32_t);
				}
				bcopy(prop, modify_debug_enabled, size);
#pragma clang diagnostic pop
			}
			/* iBoot's own version string; printed later by PE_init_iokit. */
			if (kSuccess == SecureDTGetProperty(entry, "firmware-version", (void const **) &prop, &size)) {
				if (size > sizeof(iBoot_version)) {
					size = sizeof(iBoot_version);
				}
				bcopy(prop, iBoot_version, size);
				iBoot_version[size - 1] = '\0';
			}
#if defined(TARGET_OS_OSX) && defined(__arm64__)
			if (kSuccess == SecureDTGetProperty(entry, "system-firmware-version", (void const **) &prop, &size)) {
				if (size > sizeof(iBoot_Stage_2_version)) {
					size = sizeof(iBoot_Stage_2_version);
				}
				bcopy(prop, iBoot_Stage_2_version, size);
				iBoot_Stage_2_version[size - 1] = '\0';
			}
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
			if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformECID)) {
					size = sizeof(gPlatformECID);
				}
				bcopy(prop, gPlatformECID, size);
			}
			if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformMemoryID)) {
					size = sizeof(gPlatformMemoryID);
				}
				bcopy(prop, &gPlatformMemoryID, size);
			}
		}
#if defined(XNU_TARGET_OS_XR)
		if (kSuccess == SecureDTLookupEntry(NULL, "/product", &entry)) {
			if (kSuccess == SecureDTGetProperty(entry, "chip-role",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformChipRole)) {
					size = sizeof(gPlatformChipRole);
				}
				bcopy(prop, &gPlatformChipRole, size);
			}
		}
#endif /* defined(XNU_TARGET_OS_XR) */
		pe_init_debug();
	}
}
534 
535 void
PE_create_console(void)536 PE_create_console(void)
537 {
538 	/*
539 	 * Check the head of VRAM for a panic log saved on last panic.
540 	 * Do this before the VRAM is trashed.
541 	 */
542 	check_for_panic_log();
543 
544 	if (PE_state.video.v_display) {
545 		PE_initialize_console(&PE_state.video, kPEGraphicsMode);
546 	} else {
547 		PE_initialize_console(&PE_state.video, kPETextMode);
548 	}
549 }
550 
551 int
PE_current_console(PE_Video * info)552 PE_current_console(PE_Video * info)
553 {
554 	*info = PE_state.video;
555 	return 0;
556 }
557 
558 void
PE_display_icon(__unused unsigned int flags,__unused const char * name)559 PE_display_icon(__unused unsigned int flags, __unused const char *name)
560 {
561 	if (default_noroot_data) {
562 		vc_display_icon(&default_noroot, default_noroot_data);
563 	}
564 }
565 
566 extern          boolean_t
PE_get_hotkey(__unused unsigned char key)567 PE_get_hotkey(__unused unsigned char key)
568 {
569 	return FALSE;
570 }
571 
572 static timebase_callback_func gTimebaseCallback;
573 
574 void
PE_register_timebase_callback(timebase_callback_func callback)575 PE_register_timebase_callback(timebase_callback_func callback)
576 {
577 	gTimebaseCallback = callback;
578 
579 	PE_call_timebase_callback();
580 }
581 
582 void
PE_call_timebase_callback(void)583 PE_call_timebase_callback(void)
584 {
585 	struct timebase_freq_t timebase_freq;
586 
587 	timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
588 	timebase_freq.timebase_den = 1;
589 
590 	if (gTimebaseCallback) {
591 		gTimebaseCallback(&timebase_freq);
592 	}
593 }
594 
595 /*
596  * The default PE_poll_input handler.
597  */
598 int
PE_stub_poll_input(__unused unsigned int options,char * c)599 PE_stub_poll_input(__unused unsigned int options, char *c)
600 {
601 	*c = (char)uart_getc();
602 	return 0; /* 0 for success, 1 for unsupported */
603 }
604 
605 /*
606  * This routine will return 1 if you are running on a device with a variant
607  * of iBoot that allows debugging. This is typically not the case on production
608  * fused parts (even when running development variants of iBoot).
609  *
610  * The routine takes an optional argument of the flags passed to debug="" so
611  * kexts don't have to parse the boot arg themselves.
612  */
613 uint32_t
PE_i_can_has_debugger(uint32_t * debug_flags)614 PE_i_can_has_debugger(uint32_t *debug_flags)
615 {
616 	if (debug_flags) {
617 #if DEVELOPMENT || DEBUG
618 		assert(startup_phase >= STARTUP_SUB_TUNABLES);
619 #endif
620 		if (debug_enabled) {
621 			*debug_flags = debug_boot_arg;
622 		} else {
623 			*debug_flags = 0;
624 		}
625 	}
626 	return debug_enabled;
627 }
628 
629 /*
630  * This routine returns TRUE if the device is configured
631  * with panic debugging enabled.
632  */
633 boolean_t
PE_panic_debugging_enabled()634 PE_panic_debugging_enabled()
635 {
636 	return panicDebugging;
637 }
638 
639 void
PE_update_panic_crc(unsigned char * buf,unsigned int * size)640 PE_update_panic_crc(unsigned char *buf, unsigned int *size)
641 {
642 	if (!panic_info || !size) {
643 		return;
644 	}
645 
646 	if (!buf) {
647 		*size = panic_text_len;
648 		return;
649 	}
650 
651 	if (*size == 0) {
652 		return;
653 	}
654 
655 	*size = *size > panic_text_len ? panic_text_len : *size;
656 	if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
657 		// rdar://88696402 (PanicTest: test case for MAGIC check in PE_update_panic_crc)
658 		printf("Error!! Current Magic 0x%X, expected value 0x%x\n", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
659 	}
660 
661 	/* CRC everything after the CRC itself - starting with the panic header version */
662 	panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len +
663 	    sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
664 }
665 
/*
 * Translate a pointer into the mapped panic region into a byte offset
 * from gPanicBase.  The caller must pass a pointer inside the region.
 */
uint32_t
PE_get_offset_into_panic_region(char *location)
{
	assert(gPanicBase != 0);
	assert(location >= (char *) gPanicBase);
	/* NOTE(review): location - gPanicBase mixes pointer and integer
	 * arithmetic (pointer minus vm_offset_t yields a pointer whose
	 * numeric value is the offset); this relies on the flat kernel
	 * address space. */
	assert((unsigned int)(location - gPanicBase) < gPanicSize);

	return (uint32_t)(uintptr_t)(location - gPanicBase);
}
675 
676 void
PE_init_panicheader()677 PE_init_panicheader()
678 {
679 	if (!panic_info) {
680 		return;
681 	}
682 
683 	bzero(panic_info, sizeof(struct embedded_panic_header));
684 
685 	/*
686 	 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
687 	 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
688 	 */
689 	panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0;
690 
691 	panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
692 	panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
693 
694 	return;
695 }
696 
/*
 * Tries to update the panic header to keep it consistent on nested panics.
 *
 * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
 *       it is to update the panic header to make it consistent when we nest panics.
 */
void
PE_update_panicheader_nestedpanic()
{
	/*
	 * if the panic header pointer is bogus (e.g. someone stomped on it) then bail.
	 */
	if (!panic_info) {
		/* if this happens in development then blow up bigly */
		assert(panic_info);
		return;
	}

	/*
	 * If the panic log offset is not set, re-init the panic header
	 *
	 * note that this should not be possible unless someone stomped on the panic header to zero it out, since by the time
	 * we reach this location *someone* should have appended something to the log..
	 */
	if (panic_info->eph_panic_log_offset == 0) {
		PE_init_panicheader();
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
		return;
	}

	panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;

	/*
	 * If the panic log length is not set, set the end to
	 * the current location of the debug_buf_ptr to close it.
	 */
	if (panic_info->eph_panic_log_len == 0) {
		panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr);

		/* indicative of corruption in the panic region, consumer beware */
		if ((panic_info->eph_other_log_offset == 0) &&
		    (panic_info->eph_other_log_len == 0)) {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
		}
	}

	/* likely indicative of corruption in the panic region, consumer beware */
	/* (a stackshot should have either both offset and length set, or neither) */
	if (((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
	}

	/*
	 * If we haven't set up the other log yet, set the beginning of the other log
	 * to the current location of the debug_buf_ptr
	 */
	if (panic_info->eph_other_log_offset == 0) {
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);

		/* indicative of corruption in the panic region, consumer beware */
		if (panic_info->eph_other_log_len == 0) {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
		}
	}

	return;
}
763 
764 boolean_t
PE_reboot_on_panic(void)765 PE_reboot_on_panic(void)
766 {
767 	uint32_t debug_flags;
768 
769 	if (PE_i_can_has_debugger(&debug_flags)
770 	    && (debug_flags & DB_NMI)) {
771 		/* kernel debugging is active */
772 		return FALSE;
773 	} else {
774 		return TRUE;
775 	}
776 }
777 
778 void
PE_sync_panic_buffers(void)779 PE_sync_panic_buffers(void)
780 {
781 	/*
782 	 * rdar://problem/26453070:
783 	 * The iBoot panic region is write-combined on arm64.  We must flush dirty lines
784 	 * from L1/L2 as late as possible before reset, with no further reads of the panic
785 	 * region between the flush and the reset.  Some targets have an additional memcache (L3),
786 	 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
787 	 * be discarded on reset.  If we can make sure the lines are flushed to L3/DRAM,
788 	 * the platform reset handler will flush any L3.
789 	 */
790 	if (gPanicBase) {
791 		CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize);
792 	}
793 }
794 
795 static void
pe_prepare_images(void)796 pe_prepare_images(void)
797 {
798 	if ((1 & PE_state.video.v_rotate) != 0) {
799 		// Only square square images with radial symmetry are supported
800 		// No need to actually rotate the data
801 
802 		// Swap the dx and dy offsets
803 		uint32_t tmp = default_progress.dx;
804 		default_progress.dx = default_progress.dy;
805 		default_progress.dy = tmp;
806 	}
807 #if 0
808 	uint32_t cnt, cnt2, cnt3, cnt4;
809 	uint32_t tmp, width, height;
810 	uint8_t  data, *new_data;
811 	const uint8_t *old_data;
812 
813 	width  = default_progress.width;
814 	height = default_progress.height * default_progress.count;
815 
816 	// Scale images if the UI is being scaled
817 	if (PE_state.video.v_scale > 1) {
818 		new_data = kalloc(width * height * scale * scale);
819 		if (new_data != 0) {
820 			old_data = default_progress_data;
821 			default_progress_data = new_data;
822 			for (cnt = 0; cnt < height; cnt++) {
823 				for (cnt2 = 0; cnt2 < width; cnt2++) {
824 					data = *(old_data++);
825 					for (cnt3 = 0; cnt3 < scale; cnt3++) {
826 						for (cnt4 = 0; cnt4 < scale; cnt4++) {
827 							new_data[width * scale * cnt3 + cnt4] = data;
828 						}
829 					}
830 					new_data += scale;
831 				}
832 				new_data += width * scale * (scale - 1);
833 			}
834 			default_progress.width  *= scale;
835 			default_progress.height *= scale;
836 			default_progress.dx     *= scale;
837 			default_progress.dy     *= scale;
838 		}
839 	}
840 #endif
841 }
842 
843 void
PE_mark_hwaccess(uint64_t thread)844 PE_mark_hwaccess(uint64_t thread)
845 {
846 	last_hwaccess_thread = thread;
847 	__builtin_arm_dmb(DMB_ISH);
848 }
849 
850 void
PE_mark_hwaccess_data(uint8_t type,uint8_t size,uint64_t paddr)851 PE_mark_hwaccess_data(uint8_t type, uint8_t size, uint64_t paddr)
852 {
853 	last_hwaccess_type = type;
854 	last_hwaccess_size = size;
855 	last_hwaccess_paddr = paddr;
856 	__builtin_arm_dmb(DMB_ISH);
857 }
/*
 * Map the SoC debug (socd) trace RAM described by the device tree and
 * record its base/size.  Returns the mapped size, or 0 when the region
 * is absent from the device tree.
 */
__startup_func
vm_size_t
PE_init_socd_client(void)
{
	DTEntry entry;
	uintptr_t const *reg_prop;
	unsigned int size;

	if (kSuccess != SecureDTLookupEntry(0, "socd-trace-ram", &entry)) {
		return 0;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return 0;
	}

	/* reg[0] is the physical base, reg[1] the region length */
	socd_trace_ram_base = ml_io_map(reg_prop[0], (vm_size_t)reg_prop[1]);
	socd_trace_ram_size = (vm_size_t)reg_prop[1];

	return socd_trace_ram_size;
}
879 
880 /*
881  *  see comments in PE_write_socd_client_buffer
882  */
883 void
PE_read_socd_client_buffer(vm_offset_t offset,void * out_buff,vm_size_t size)884 PE_read_socd_client_buffer(vm_offset_t offset, void *out_buff, vm_size_t size)
885 {
886 	volatile uint32_t *client_buff = (volatile uint32_t *)(socd_trace_ram_base + offset);
887 	vm_size_t len = size / sizeof(client_buff[0]);
888 
889 	assert(out_buff);
890 	assert3u((offset + size), <=, socd_trace_ram_size);
891 
892 	/* Perform 4 byte aligned accesses */
893 	if ((offset % 4 != 0) || (size % 4 != 0)) {
894 		panic("unaligned read of 0x%lu bytes from socd trace ram address 0x%lu", size, offset);
895 	}
896 
897 	for (vm_size_t i = 0; i < len; i++) {
898 		((uint32_t *)out_buff)[i] = client_buff[i];
899 	}
900 }
901 
902 /*
903  * PE_write_socd_client_buffer solves two problems:
904  * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
905  * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
906  *    by a SRAM that must be written to only 4 bytes at a time.
907  */
908 void
PE_write_socd_client_buffer(vm_offset_t offset,const void * in_buff,vm_size_t size)909 PE_write_socd_client_buffer(vm_offset_t offset, const void *in_buff, vm_size_t size)
910 {
911 	volatile uint32_t *client_buff = (volatile uint32_t *)(socd_trace_ram_base + offset);
912 	vm_size_t len = size / sizeof(client_buff[0]);
913 
914 	assert(in_buff);
915 	assert3u((offset + size), <=, socd_trace_ram_size);
916 
917 	/* Perform 4 byte aligned accesses */
918 	if ((offset % 4 != 0) || (size % 4 != 0)) {
919 		panic("unaligned write of 0x%lu bytes to socd trace ram address 0x%lu", size, offset);
920 	}
921 
922 	for (vm_size_t i = 0; i < len; i++) {
923 		client_buff[i] = ((const uint32_t *)in_buff)[i];
924 	}
925 }
926