xref: /xnu-10063.121.3/pexpert/arm/pe_init.c (revision 2c2f96dc2b9a4408a43d3150ae9c105355ca3daa)
1 /*
2  * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3  *
4  *    arm platform expert initialization.
5  */
6 #include <sys/types.h>
7 #include <sys/kdebug.h>
8 #include <mach/vm_param.h>
9 #include <pexpert/protos.h>
10 #include <pexpert/pexpert.h>
11 #include <pexpert/boot.h>
12 #include <pexpert/device_tree.h>
13 #include <pexpert/pe_images.h>
14 #include <kern/sched_prim.h>
15 #include <kern/socd_client.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_routines.h>
18 #include <arm/caches_internal.h>
19 #include <kern/debug.h>
20 #include <libkern/section_keywords.h>
21 #include <os/overflow.h>
22 
23 #include <pexpert/arm64/board_config.h>
24 
25 #if CONFIG_SPTM
26 #include <arm64/sptm/sptm.h>
27 #endif
28 
/* extern references */
extern void     pe_identify_machine(boot_args *bootArgs);

/* static references */
static void     pe_prepare_images(void);

/* private globals */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;
/* Tunable populated from /chosen; nonzero presumably indicates SRD ("research") fusing — set by iBoot. */
TUNABLE_DT(uint32_t, PE_srd_fused, "/chosen", "research-enabled",
    "srd_fusing", 0, TUNABLE_DT_NONE);

/* Maximum firmware version string length, including the NUL terminator. */
#define FW_VERS_LEN 128

/* iBoot firmware version string, copied from /chosen in PE_init_platform(). */
char iBoot_version[FW_VERS_LEN];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
/* System (stage 2) firmware version string, copied from /chosen on macOS arm64. */
char iBoot_Stage_2_version[FW_VERS_LEN];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __DATA_CONST
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_LATE(volatile uint32_t) debug_enabled = FALSE;

/*
 * This variable indicates the page protection security policy used by the system.
 * It is intended mostly for debugging purposes.
 */
SECURITY_READ_ONLY_LATE(ml_page_protection_t) page_protection_type;

/* Unique chip id (ECID), copied from /chosen in PE_init_platform(). */
uint8_t         gPlatformECID[8];
/* DRAM vendor id, copied from /chosen in PE_init_platform(). */
uint32_t        gPlatformMemoryID;
/* TRUE once the boot progress indicator has been set up (see PE_init_iokit / PE_initialize_console). */
static boolean_t vc_progress_initialized = FALSE;
/* Last thread value recorded via PE_mark_hwaccess(). */
uint64_t    last_hwaccess_thread = 0;
/* Target and model name strings from the "device-tree" entry. */
char     gTargetTypeBuffer[16];
char     gModelTypeBuffer[32];

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* Virtual base and size of the iBoot panic region mapping (set in check_for_panic_log). */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
101 
/*
 * Locate and map the iBoot-provided panic region, record its geometry
 * (gPanicBase / gPanicSize / panic_text_len), and decide what to do with
 * any contents left over from the previous boot session.
 */
static void
check_for_panic_log(void)
{
#ifdef PLATFORM_PANIC_LOG_PADDR
	/* Fixed platform-defined panic region: map it write-combined. */
	gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
	panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
	gPanicSize = PLATFORM_PANIC_LOG_SIZE;
#else
	DTEntry entry, chosen;
	unsigned int size;
	uintptr_t const *reg_prop;
	uint32_t const *panic_region_length;

	/*
	 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
	 *
	 * chosen {
	 *   embedded-panic-log-size = <0x00080000>;
	 *   [a bunch of other stuff]
	 * };
	 *
	 * pram {
	 *   reg = <0x00000008_fbc48000 0x00000000_000b4000>;
	 * };
	 *
	 * reg[0] is the physical address
	 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
	 * embedded-panic-log-size is the maximum amount of data to store in the buffer
	 */
	if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return;
	}

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) {
		return;
	}

	gPanicBase = ml_io_map_wcomb(reg_prop[0], panic_region_length[0]);

	/* Deduct the size of the panic header from the panic region size */
	panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
	gPanicSize = panic_region_length[0];

#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
	if (PE_consistent_debug_enabled()) {
		uint64_t macos_panic_physbase = 0;
		uint64_t macos_panic_physlen = 0;
		/* Populate the macOS panic region data if it's present in consistent debug */
		if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
			macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
			mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
			macos_panic_size = macos_panic_physlen;
		}
	}
#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */

#endif
	panic_info = (struct embedded_panic_header *)gPanicBase;

	/* Check if a shared memory console is running in the panic buffer ('SHMC' magic) */
	if (panic_info->eph_magic == 'SHMC') {
		panic_console_available = TRUE;
		return;
	}

	/* Check if there's a boot profile in the panic buffer ('BTRC' magic); leave it intact */
	if (panic_info->eph_magic == 'BTRC') {
		return;
	}

	/*
	 * Check to see if a panic (FUNK) is in VRAM from the last time
	 */
	if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
		printf("iBoot didn't extract panic log from previous session crash, this is bad\n");
	}

	/* Clear panic region */
	bzero((void *)gPanicBase, gPanicSize);
}
190 
191 int
PE_initialize_console(PE_Video * info,int op)192 PE_initialize_console(PE_Video * info, int op)
193 {
194 	static int last_console = -1;
195 
196 	if (info && (info != &PE_state.video)) {
197 		info->v_scale = PE_state.video.v_scale;
198 	}
199 
200 	switch (op) {
201 	case kPEDisableScreen:
202 		initialize_screen(info, op);
203 		last_console = switch_to_serial_console();
204 		kprintf("kPEDisableScreen %d\n", last_console);
205 		break;
206 
207 	case kPEEnableScreen:
208 		initialize_screen(info, op);
209 		if (info) {
210 			PE_state.video = *info;
211 		}
212 		kprintf("kPEEnableScreen %d\n", last_console);
213 		if (last_console != -1) {
214 			switch_to_old_console(last_console);
215 		}
216 		break;
217 
218 	case kPEReleaseScreen:
219 		/*
220 		 * we don't show the progress indicator on boot, but want to
221 		 * show it afterwards.
222 		 */
223 		if (!vc_progress_initialized) {
224 			default_progress.dx = 0;
225 			default_progress.dy = 0;
226 			vc_progress_initialize(&default_progress,
227 			    default_progress_data1x,
228 			    default_progress_data2x,
229 			    default_progress_data3x,
230 			    (unsigned char *) appleClut8);
231 			vc_progress_initialized = TRUE;
232 		}
233 		initialize_screen(info, op);
234 		break;
235 
236 	default:
237 		initialize_screen(info, op);
238 		break;
239 	}
240 
241 	return 0;
242 }
243 
/*
 * Late (post-VM) platform bring-up: print firmware versions, pull boot
 * imagery out of the device tree, position/enable the boot progress
 * indicator, emit iBoot timing kdebug events, and start IOKit.
 */
void
PE_init_iokit(void)
{
	DTEntry         entry;
	unsigned int    size, scale;
	unsigned long   display_size;
	void const * const *map;
	unsigned int    show_progress;
	int             *delta, image_size, flip;
	uint32_t        start_time_value = 0;
	uint32_t        debug_wait_start_value = 0;
	uint32_t        load_kernel_start_value = 0;
	uint32_t        populate_registry_time_value = 0;

	PE_init_printf(TRUE);

	printf("iBoot version: %s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
	printf("iBoot Stage 2 version: %s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

	/* Boot CLUT and failed-boot picture are handed over by iBoot via the memory map. */
	if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) {
		boot_progress_element const *bootPict;

		if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) {
			bcopy(map[0], appleClut8, sizeof(appleClut8));
		}

		if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) {
			bootPict = (boot_progress_element const *) map[0];
			default_noroot.width = bootPict->width;
			default_noroot.height = bootPict->height;
			default_noroot.dx = 0;
			default_noroot.dy = bootPict->yOffset;
			default_noroot_data = &bootPict->data[0];
		}
	}

	pe_prepare_images();

	scale = PE_state.video.v_scale;
	flip = 1;

#if defined(XNU_TARGET_OS_OSX)
	/* On macOS the progress indicator defaults on; -restore / -noprogress disable it. */
	int notused;
	show_progress = TRUE;
	if (PE_parse_boot_argn("-restore", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
	if (PE_parse_boot_argn("-noprogress", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
#else
	/* Elsewhere it defaults off and must be opted into with -progress. */
	show_progress = FALSE;
	PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
#endif /* XNU_TARGET_OS_OSX */
	if (show_progress) {
		/* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
		switch (PE_state.video.v_rotate) {
		case 2:
			flip = -1;
			OS_FALLTHROUGH;
		case 0:
			display_size = PE_state.video.v_height;
			image_size = default_progress.height;
			delta = &default_progress.dy;
			break;
		case 1:
			flip = -1;
			OS_FALLTHROUGH;
		case 3:
		default:
			display_size = PE_state.video.v_width;
			image_size = default_progress.width;
			delta = &default_progress.dx;
		}
		assert(*delta >= 0);
		/* Pull the indicator inward until it fits within half the display extent. */
		while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
			*delta -= 50 * scale;
			assert(*delta >= 0);
		}
		*delta *= flip;

		/* Check for DT-defined progress y delta */
		PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy));

		vc_progress_initialize(&default_progress,
		    default_progress_data1x,
		    default_progress_data2x,
		    default_progress_data3x,
		    (unsigned char *) appleClut8);
		vc_progress_initialized = TRUE;
	}

	if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
		/* Trace iBoot-provided timing information. */
		if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) {
			uint32_t const * value_ptr;

			/* Each property is only accepted if it is exactly 4 bytes. */
			if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(start_time_value)) {
					start_time_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(debug_wait_start_value)) {
					debug_wait_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(load_kernel_start_value)) {
					load_kernel_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(populate_registry_time_value)) {
					populate_registry_time_value = *value_ptr;
				}
			}
		}

		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
#if CONFIG_SPTM
		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 1), SPTMArgs->timestamp_sk_bootstrap, SPTMArgs->timestamp_xnu_bootstrap);
#endif
	}

	InitIOKit(PE_state.deviceTreeHead);
	ConfigureIOKit();
}
377 
/*
 * Start general-purpose IOKit matching; called only after machine_lockdown().
 */
void
PE_lockdown_iokit(void)
{
	/*
	 * On arm/arm64 platforms, and especially those that employ KTRR/CTRR,
	 * machine_lockdown() is treated as a hard security checkpoint, such that
	 * code which executes prior to lockdown must be minimized and limited only to
	 * trusted parts of the kernel and specially-entitled kexts.  We therefore
	 * cannot start the general-purpose IOKit matching process until after lockdown,
	 * as it may involve execution of untrusted/non-entitled kext code.
	 * Furthermore, such kext code may process attacker controlled data (e.g.
	 * network packets), which dramatically increases the potential attack surface
	 * against a kernel which has not yet enabled the full set of available
	 * hardware protections.
	 */
	zalloc_iokit_lockdown();
	StartIOKitMatching();
}
396 
/*
 * Rebase the cached device tree pointer by `slide` bytes and re-initialize
 * the SecureDT accessors against the new address.
 */
void
PE_slide_devicetree(vm_offset_t slide)
{
	assert(PE_state.initialized);
	PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
	SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
}
404 
405 void
PE_init_platform(boolean_t vm_initialized,void * args)406 PE_init_platform(boolean_t vm_initialized, void *args)
407 {
408 	DTEntry         entry;
409 	unsigned int    size;
410 	void * const    *prop;
411 	boot_args      *boot_args_ptr = (boot_args *) args;
412 
413 	if (PE_state.initialized == FALSE) {
414 		page_protection_type = ml_page_protection_type();
415 		PE_state.initialized = TRUE;
416 		PE_state.bootArgs = boot_args_ptr;
417 		PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
418 		PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
419 		PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
420 		PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
421 		PE_state.video.v_width = boot_args_ptr->Video.v_width;
422 		PE_state.video.v_height = boot_args_ptr->Video.v_height;
423 		PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
424 		PE_state.video.v_rotate = (
425 			((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) +    // rotation
426 			((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift)  & kBootVideoDepthMask) // add extra boot rotation
427 			) % 4;
428 		PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
429 		PE_state.video.v_display = boot_args_ptr->Video.v_display;
430 		strlcpy(PE_state.video.v_pixelFormat, "BBBBBBBBGGGGGGGGRRRRRRRR", sizeof(PE_state.video.v_pixelFormat));
431 	}
432 	if (!vm_initialized) {
433 		/*
434 		 * Setup the Device Tree routines
435 		 * so the console can be found and the right I/O space
436 		 * can be used..
437 		 */
438 		SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
439 		pe_identify_machine(boot_args_ptr);
440 	} else {
441 		pe_arm_init_interrupts(args);
442 		pe_arm_init_debug(args);
443 	}
444 
445 	if (!vm_initialized) {
446 		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
447 			if (kSuccess == SecureDTGetProperty(entry, "target-type",
448 			    (void const **)&prop, &size)) {
449 				if (size > sizeof(gTargetTypeBuffer)) {
450 					size = sizeof(gTargetTypeBuffer);
451 				}
452 				bcopy(prop, gTargetTypeBuffer, size);
453 				gTargetTypeBuffer[size - 1] = '\0';
454 			}
455 		}
456 		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
457 			if (kSuccess == SecureDTGetProperty(entry, "model",
458 			    (void const **)&prop, &size)) {
459 				if (size > sizeof(gModelTypeBuffer)) {
460 					size = sizeof(gModelTypeBuffer);
461 				}
462 				bcopy(prop, gModelTypeBuffer, size);
463 				gModelTypeBuffer[size - 1] = '\0';
464 			}
465 		}
466 		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
467 			if (kSuccess == SecureDTGetProperty(entry, "debug-enabled",
468 			    (void const **) &prop, &size)) {
469 				/*
470 				 * We purposefully modify a constified variable as
471 				 * it will get locked down by a trusted monitor or
472 				 * via page table mappings. We don't want people easily
473 				 * modifying this variable...
474 				 */
475 #pragma clang diagnostic push
476 #pragma clang diagnostic ignored "-Wcast-qual"
477 				boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
478 				if (size > sizeof(uint32_t)) {
479 					size = sizeof(uint32_t);
480 				}
481 				bcopy(prop, modify_debug_enabled, size);
482 #pragma clang diagnostic pop
483 			}
484 			if (kSuccess == SecureDTGetProperty(entry, "firmware-version", (void const **) &prop, &size)) {
485 				if (size > sizeof(iBoot_version)) {
486 					size = sizeof(iBoot_version);
487 				}
488 				bcopy(prop, iBoot_version, size);
489 				iBoot_version[size - 1] = '\0';
490 			}
491 #if defined(TARGET_OS_OSX) && defined(__arm64__)
492 			if (kSuccess == SecureDTGetProperty(entry, "system-firmware-version", (void const **) &prop, &size)) {
493 				if (size > sizeof(iBoot_Stage_2_version)) {
494 					size = sizeof(iBoot_Stage_2_version);
495 				}
496 				bcopy(prop, iBoot_Stage_2_version, size);
497 				iBoot_Stage_2_version[size - 1] = '\0';
498 			}
499 #endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
500 			if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id",
501 			    (void const **) &prop, &size)) {
502 				if (size > sizeof(gPlatformECID)) {
503 					size = sizeof(gPlatformECID);
504 				}
505 				bcopy(prop, gPlatformECID, size);
506 			}
507 			if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id",
508 			    (void const **) &prop, &size)) {
509 				if (size > sizeof(gPlatformMemoryID)) {
510 					size = sizeof(gPlatformMemoryID);
511 				}
512 				bcopy(prop, &gPlatformMemoryID, size);
513 			}
514 		}
515 		pe_init_debug();
516 	}
517 }
518 
519 void
PE_create_console(void)520 PE_create_console(void)
521 {
522 	/*
523 	 * Check the head of VRAM for a panic log saved on last panic.
524 	 * Do this before the VRAM is trashed.
525 	 */
526 	check_for_panic_log();
527 
528 	if (PE_state.video.v_display) {
529 		PE_initialize_console(&PE_state.video, kPEGraphicsMode);
530 	} else {
531 		PE_initialize_console(&PE_state.video, kPETextMode);
532 	}
533 }
534 
/*
 * Copy the current console video configuration into *info.
 * Always returns 0.
 */
int
PE_current_console(PE_Video * info)
{
	*info = PE_state.video;
	return 0;
}
541 
/*
 * Display the failed-boot icon, if iBoot supplied one via Pict-FailedBoot
 * (see PE_init_iokit). flags and name are unused on this platform.
 */
void
PE_display_icon(__unused unsigned int flags, __unused const char *name)
{
	if (default_noroot_data) {
		vc_display_icon(&default_noroot, default_noroot_data);
	}
}
549 
/* Hotkeys are not supported on this platform; always reports FALSE. */
extern          boolean_t
PE_get_hotkey(__unused unsigned char key)
{
	return FALSE;
}
555 
/* Client-registered callback that receives timebase frequency updates. */
static timebase_callback_func gTimebaseCallback;

/*
 * Register a timebase callback and immediately deliver the current
 * timebase frequency to it.
 */
void
PE_register_timebase_callback(timebase_callback_func callback)
{
	gTimebaseCallback = callback;

	PE_call_timebase_callback();
}
565 
566 void
PE_call_timebase_callback(void)567 PE_call_timebase_callback(void)
568 {
569 	struct timebase_freq_t timebase_freq;
570 
571 	timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
572 	timebase_freq.timebase_den = 1;
573 
574 	if (gTimebaseCallback) {
575 		gTimebaseCallback(&timebase_freq);
576 	}
577 }
578 
/*
 * The default PE_poll_input handler: reads one character from the UART
 * into *c.
 */
int
PE_stub_poll_input(__unused unsigned int options, char *c)
{
	*c = (char)uart_getc();
	return 0; /* 0 for success, 1 for unsupported */
}
588 
589 /*
590  * This routine will return 1 if you are running on a device with a variant
591  * of iBoot that allows debugging. This is typically not the case on production
592  * fused parts (even when running development variants of iBoot).
593  *
594  * The routine takes an optional argument of the flags passed to debug="" so
595  * kexts don't have to parse the boot arg themselves.
596  */
597 uint32_t
PE_i_can_has_debugger(uint32_t * debug_flags)598 PE_i_can_has_debugger(uint32_t *debug_flags)
599 {
600 	if (debug_flags) {
601 #if DEVELOPMENT || DEBUG
602 		assert(startup_phase >= STARTUP_SUB_TUNABLES);
603 #endif
604 		if (debug_enabled) {
605 			*debug_flags = debug_boot_arg;
606 		} else {
607 			*debug_flags = 0;
608 		}
609 	}
610 	return debug_enabled;
611 }
612 
613 /*
614  * This routine returns TRUE if the device is configured
615  * with panic debugging enabled.
616  */
617 boolean_t
PE_panic_debugging_enabled()618 PE_panic_debugging_enabled()
619 {
620 	return panicDebugging;
621 }
622 
623 void
PE_update_panic_crc(unsigned char * buf,unsigned int * size)624 PE_update_panic_crc(unsigned char *buf, unsigned int *size)
625 {
626 	if (!panic_info || !size) {
627 		return;
628 	}
629 
630 	if (!buf) {
631 		*size = panic_text_len;
632 		return;
633 	}
634 
635 	if (*size == 0) {
636 		return;
637 	}
638 
639 	*size = *size > panic_text_len ? panic_text_len : *size;
640 	if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
641 		// rdar://88696402 (PanicTest: test case for MAGIC check in PE_update_panic_crc)
642 		printf("Error!! Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
643 	}
644 
645 	/* CRC everything after the CRC itself - starting with the panic header version */
646 	panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len +
647 	    sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
648 }
649 
650 uint32_t
PE_get_offset_into_panic_region(char * location)651 PE_get_offset_into_panic_region(char *location)
652 {
653 	assert(gPanicBase != 0);
654 	assert(location >= (char *) gPanicBase);
655 	assert((unsigned int)(location - gPanicBase) < gPanicSize);
656 
657 	return (uint32_t)(uintptr_t)(location - gPanicBase);
658 }
659 
660 void
PE_init_panicheader()661 PE_init_panicheader()
662 {
663 	if (!panic_info) {
664 		return;
665 	}
666 
667 	bzero(panic_info, sizeof(struct embedded_panic_header));
668 
669 	/*
670 	 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
671 	 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
672 	 */
673 	panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0;
674 
675 	panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
676 	panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
677 
678 	return;
679 }
680 
/*
 * Tries to update the panic header to keep it consistent on nested panics.
 *
 * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
 *       it is to update the panic header to make it consistent when we nest panics.
 */
void
PE_update_panicheader_nestedpanic()
{
	/*
	 * if the panic header pointer is bogus (e.g. someone stomped on it) then bail.
	 */
	if (!panic_info) {
		/* if this happens in development then blow up bigly */
		assert(panic_info);
		return;
	}

	/*
	 * If the panic log offset is not set, re-init the panic header
	 *
	 * note that this should not be possible unless someone stomped on the panic header to zero it out, since by the time
	 * we reach this location *someone* should have appended something to the log..
	 */
	if (panic_info->eph_panic_log_offset == 0) {
		PE_init_panicheader();
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
		return;
	}

	panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;

	/*
	 * If the panic log length is not set, set the end to
	 * the current location of the debug_buf_ptr to close it.
	 */
	if (panic_info->eph_panic_log_len == 0) {
		panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr);

		/* indicative of corruption in the panic region, consumer beware */
		if ((panic_info->eph_other_log_offset == 0) &&
		    (panic_info->eph_other_log_len == 0)) {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
		}
	}

	/*
	 * A stackshot offset/length pair should have exactly one field nonzero;
	 * both-zero or both-nonzero is likely indicative of corruption in the
	 * panic region, consumer beware.
	 */
	if (((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
	}

	/*
	 * If we haven't set up the other log yet, set the beginning of the other log
	 * to the current location of the debug_buf_ptr
	 */
	if (panic_info->eph_other_log_offset == 0) {
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);

		/* indicative of corruption in the panic region, consumer beware */
		if (panic_info->eph_other_log_len == 0) {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
		}
	}

	return;
}
747 
748 boolean_t
PE_reboot_on_panic(void)749 PE_reboot_on_panic(void)
750 {
751 	uint32_t debug_flags;
752 
753 	if (PE_i_can_has_debugger(&debug_flags)
754 	    && (debug_flags & DB_NMI)) {
755 		/* kernel debugging is active */
756 		return FALSE;
757 	} else {
758 		return TRUE;
759 	}
760 }
761 
/*
 * Flush the panic region out of the CPU caches so its contents survive reset.
 */
void
PE_sync_panic_buffers(void)
{
	/*
	 * rdar://problem/26453070:
	 * The iBoot panic region is write-combined on arm64.  We must flush dirty lines
	 * from L1/L2 as late as possible before reset, with no further reads of the panic
	 * region between the flush and the reset.  Some targets have an additional memcache (L3),
	 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
	 * be discarded on reset.  If we can make sure the lines are flushed to L3/DRAM,
	 * the platform reset handler will flush any L3.
	 */
	if (gPanicBase) {
		CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize);
	}
}
778 
/*
 * Adjust the boot progress image metadata for the current display rotation.
 * For 90/270-degree rotations only the dx/dy offsets need swapping; the
 * image data itself is square with radial symmetry and is left untouched.
 */
static void
pe_prepare_images(void)
{
	if ((1 & PE_state.video.v_rotate) != 0) {
		// Only square square images with radial symmetry are supported
		// No need to actually rotate the data

		// Swap the dx and dy offsets
		uint32_t tmp = default_progress.dx;
		default_progress.dx = default_progress.dy;
		default_progress.dy = tmp;
	}
#if 0
	uint32_t cnt, cnt2, cnt3, cnt4;
	uint32_t tmp, width, height;
	uint8_t  data, *new_data;
	const uint8_t *old_data;

	width  = default_progress.width;
	height = default_progress.height * default_progress.count;

	// Scale images if the UI is being scaled
	if (PE_state.video.v_scale > 1) {
		new_data = kalloc(width * height * scale * scale);
		if (new_data != 0) {
			old_data = default_progress_data;
			default_progress_data = new_data;
			for (cnt = 0; cnt < height; cnt++) {
				for (cnt2 = 0; cnt2 < width; cnt2++) {
					data = *(old_data++);
					for (cnt3 = 0; cnt3 < scale; cnt3++) {
						for (cnt4 = 0; cnt4 < scale; cnt4++) {
							new_data[width * scale * cnt3 + cnt4] = data;
						}
					}
					new_data += scale;
				}
				new_data += width * scale * (scale - 1);
			}
			default_progress.width  *= scale;
			default_progress.height *= scale;
			default_progress.dx     *= scale;
			default_progress.dy     *= scale;
		}
	}
#endif
}
826 
/*
 * Record the thread identifier of the most recent tracked hardware access;
 * the DMB orders the store before subsequent memory operations.
 */
void
PE_mark_hwaccess(uint64_t thread)
{
	last_hwaccess_thread = thread;
	__builtin_arm_dmb(DMB_ISH);
}
833 
/*
 * Map the SoC debug trace RAM described by the "socd-trace-ram" device tree
 * entry and record its base and size for PE_write_socd_client_buffer().
 * Returns the mapped size in bytes, or 0 if the region is absent.
 */
__startup_func
vm_size_t
PE_init_socd_client(void)
{
	DTEntry entry;
	uintptr_t const *reg_prop;
	unsigned int size;

	if (kSuccess != SecureDTLookupEntry(0, "socd-trace-ram", &entry)) {
		return 0;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return 0;
	}

	/* reg[0] is the physical base, reg[1] the length of the trace RAM. */
	socd_trace_ram_base = ml_io_map(reg_prop[0], (vm_size_t)reg_prop[1]);
	socd_trace_ram_size = (vm_size_t)reg_prop[1];

	return socd_trace_ram_size;
}
855 
856 /*
857  * PE_write_socd_client_buffer solves two problems:
858  * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
859  * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
860  *    by a SRAM that must be written to only 4 bytes at a time.
861  */
862 void
PE_write_socd_client_buffer(vm_offset_t offset,const void * buff,vm_size_t size)863 PE_write_socd_client_buffer(vm_offset_t offset, const void *buff, vm_size_t size)
864 {
865 	volatile uint32_t *dst = (volatile uint32_t *)(socd_trace_ram_base + offset);
866 	vm_size_t len = size / sizeof(dst[0]);
867 
868 	assert(offset + size <= socd_trace_ram_size);
869 
870 	/* Perform 4 byte aligned accesses */
871 	if ((offset % 4 != 0) || (size % 4 != 0)) {
872 		panic("unaligned acccess to socd trace ram");
873 	}
874 
875 	for (vm_size_t i = 0; i < len; i++) {
876 		dst[i] = ((const uint32_t *)buff)[i];
877 	}
878 }
879