xref: /xnu-8796.141.3/pexpert/arm/pe_init.c (revision 1b191cb58250d0705d8a51287127505aa4bc0789)
1 /*
2  * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3  *
4  *    arm platform expert initialization.
5  */
6 #include <sys/types.h>
7 #include <sys/kdebug.h>
8 #include <mach/vm_param.h>
9 #include <pexpert/protos.h>
10 #include <pexpert/pexpert.h>
11 #include <pexpert/boot.h>
12 #include <pexpert/device_tree.h>
13 #include <pexpert/pe_images.h>
14 #include <kern/sched_prim.h>
15 #include <kern/socd_client.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_routines.h>
18 #include <arm/caches_internal.h>
19 #include <kern/debug.h>
20 #include <libkern/section_keywords.h>
21 #include <os/overflow.h>
22 
23 #include <pexpert/arm64/board_config.h>
24 
25 
/* extern references */
extern void     pe_identify_machine(boot_args *bootArgs);

/* static references */
static void     pe_prepare_images(void);

/* private globals */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;
/* Security Research Device fusing state, read from /chosen ("research-enabled")
 * or the "srd_fusing" boot-arg; defaults to 0. */
TUNABLE_DT(uint32_t, PE_srd_fused, "/chosen", "research-enabled",
    "srd_fusing", 0, TUNABLE_DT_NONE);

/* Capacity (bytes, including NUL) of the iBoot version strings below. */
#define FW_VERS_LEN 128

/* "firmware-version" string from /chosen, copied in PE_init_platform(). */
char iBoot_version[FW_VERS_LEN];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
/* "system-firmware-version" string from /chosen (macOS arm64 only). */
char iBoot_Stage_2_version[FW_VERS_LEN];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __DATA_CONST
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_LATE(volatile uint32_t) debug_enabled = FALSE;

uint8_t         gPlatformECID[8];        /* "unique-chip-id" from /chosen */
uint32_t        gPlatformMemoryID;       /* "dram-vendor-id" from /chosen */
static boolean_t vc_progress_initialized = FALSE;  /* progress indicator set up yet? */
uint64_t    last_hwaccess_thread = 0;    /* updated by PE_mark_hwaccess() */
char     gTargetTypeBuffer[16];          /* "target-type" from the DT root */
char     gModelTypeBuffer[32];           /* "model" from the DT root */

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* iBoot panic-save region, mapped by check_for_panic_log(); 0/NULL if absent. */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
92 
/*
 * Locate and map the iBoot panic-save region (write-combined), then decide
 * what to do with its current contents: a shared-memory console ('SHMC') or
 * boot trace ('BTRC') is left in place, a stale panic log is reported, and
 * anything else is zeroed.  On DT-based targets, if the region cannot be
 * found, gPanicBase stays 0 and panic_info stays NULL.
 */
static void
check_for_panic_log(void)
{
#ifdef PLATFORM_PANIC_LOG_PADDR
	/* Board config provides a fixed physical address/size for the region. */
	gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
	panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
	gPanicSize = PLATFORM_PANIC_LOG_SIZE;
#else
	DTEntry entry, chosen;
	unsigned int size;
	uintptr_t const *reg_prop;
	uint32_t const *panic_region_length;

	/*
	 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
	 *
	 * chosen {
	 *   embedded-panic-log-size = <0x00080000>;
	 *   [a bunch of other stuff]
	 * };
	 *
	 * pram {
	 *   reg = <0x00000008_fbc48000 0x00000000_000b4000>;
	 * };
	 *
	 * reg[0] is the physical address
	 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
	 * embedded-panic-log-size is the maximum amount of data to store in the buffer
	 */
	if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return;
	}

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) {
		return;
	}

	gPanicBase = ml_io_map_wcomb(reg_prop[0], panic_region_length[0]);

	/* Deduct the size of the panic header from the panic region size */
	panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
	gPanicSize = panic_region_length[0];

#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
	if (PE_consistent_debug_enabled()) {
		uint64_t macos_panic_physbase = 0;
		uint64_t macos_panic_physlen = 0;
		/* Populate the macOS panic region data if it's present in consistent debug */
		if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
			macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
			mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
			macos_panic_size = macos_panic_physlen;
		}
	}
#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */

#endif
	panic_info = (struct embedded_panic_header *)gPanicBase;

	/* Check if a shared memory console is running in the panic buffer */
	if (panic_info->eph_magic == 'SHMC') {
		panic_console_available = TRUE;
		return;
	}

	/* Check if there's a boot profile in the panic buffer */
	if (panic_info->eph_magic == 'BTRC') {
		return;
	}

	/*
	 * Check to see if a panic (FUNK) is in VRAM from the last time
	 */
	if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
		printf("iBoot didn't extract panic log from previous session crash, this is bad\n");
	}

	/* Clear panic region */
	bzero((void *)gPanicBase, gPanicSize);
}
181 
/*
 * Console state-change entry point.  `op` selects the operation
 * (kPEDisableScreen / kPEEnableScreen / kPEReleaseScreen / other modes,
 * all forwarded to initialize_screen()).  Returns 0 always.
 */
int
PE_initialize_console(PE_Video * info, int op)
{
	/* Console index saved while the screen is disabled; -1 means none. */
	static int last_console = -1;

	/* Propagate the current UI scale into caller-provided video info. */
	if (info && (info != &PE_state.video)) {
		info->v_scale = PE_state.video.v_scale;
	}

	switch (op) {
	case kPEDisableScreen:
		initialize_screen(info, op);
		/* Route output to the serial console, remembering the old one. */
		last_console = switch_to_serial_console();
		kprintf("kPEDisableScreen %d\n", last_console);
		break;

	case kPEEnableScreen:
		initialize_screen(info, op);
		if (info) {
			PE_state.video = *info;
		}
		kprintf("kPEEnableScreen %d\n", last_console);
		if (last_console != -1) {
			switch_to_old_console(last_console);
		}
		break;

	case kPEReleaseScreen:
		/*
		 * we don't show the progress indicator on boot, but want to
		 * show it afterwards.
		 */
		if (!vc_progress_initialized) {
			default_progress.dx = 0;
			default_progress.dy = 0;
			vc_progress_initialize(&default_progress,
			    default_progress_data1x,
			    default_progress_data2x,
			    default_progress_data3x,
			    (unsigned char *) appleClut8);
			vc_progress_initialized = TRUE;
		}
		initialize_screen(info, op);
		break;

	default:
		initialize_screen(info, op);
		break;
	}

	return 0;
}
234 
/*
 * Prepare for IOKit startup: initialize printf, log iBoot version(s), pull
 * boot imagery (CLUT, failed-boot picture) from /chosen/memory-map, position
 * and optionally start the boot progress indicator, trace iBoot-provided
 * timing via kdebug, then initialize and configure IOKit.
 */
void
PE_init_iokit(void)
{
	DTEntry         entry;
	unsigned int    size, scale;
	unsigned long   display_size;
	void const * const *map;
	unsigned int    show_progress;
	int             *delta, image_size, flip;
	uint32_t        start_time_value = 0;
	uint32_t        debug_wait_start_value = 0;
	uint32_t        load_kernel_start_value = 0;
	uint32_t        populate_registry_time_value = 0;

	PE_init_printf(TRUE);

	printf("iBoot version: %s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
	printf("iBoot Stage 2 version: %s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

	if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) {
		boot_progress_element const *bootPict;

		/* Boot color lookup table provided by iBoot. */
		if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) {
			bcopy(map[0], appleClut8, sizeof(appleClut8));
		}

		/* Image displayed when boot fails (see PE_display_icon()). */
		if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) {
			bootPict = (boot_progress_element const *) map[0];
			default_noroot.width = bootPict->width;
			default_noroot.height = bootPict->height;
			default_noroot.dx = 0;
			default_noroot.dy = bootPict->yOffset;
			default_noroot_data = &bootPict->data[0];
		}
	}

	pe_prepare_images();

	scale = PE_state.video.v_scale;
	flip = 1;

	/* Progress indicator defaults to on for macOS, off elsewhere. */
#if defined(XNU_TARGET_OS_OSX)
	int notused;
	show_progress = TRUE;
	if (PE_parse_boot_argn("-restore", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
	if (PE_parse_boot_argn("-noprogress", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
#else
	show_progress = FALSE;
	PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
#endif /* XNU_TARGET_OS_OSX */
	if (show_progress) {
		/* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
		switch (PE_state.video.v_rotate) {
		case 2:
			flip = -1;
			OS_FALLTHROUGH;
		case 0:
			display_size = PE_state.video.v_height;
			image_size = default_progress.height;
			delta = &default_progress.dy;
			break;
		case 1:
			flip = -1;
			OS_FALLTHROUGH;
		case 3:
		default:
			display_size = PE_state.video.v_width;
			image_size = default_progress.width;
			delta = &default_progress.dx;
		}
		/* Pull the indicator toward center until it fits in half the display. */
		assert(*delta >= 0);
		while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
			*delta -= 50 * scale;
			assert(*delta >= 0);
		}
		*delta *= flip;

		/* Check for DT-defined progress y delta */
		PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy));

		vc_progress_initialize(&default_progress,
		    default_progress_data1x,
		    default_progress_data2x,
		    default_progress_data3x,
		    (unsigned char *) appleClut8);
		vc_progress_initialized = TRUE;
	}

	if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
		/* Trace iBoot-provided timing information. */
		if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) {
			uint32_t const * value_ptr;

			if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(start_time_value)) {
					start_time_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(debug_wait_start_value)) {
					debug_wait_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(load_kernel_start_value)) {
					load_kernel_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(populate_registry_time_value)) {
					populate_registry_time_value = *value_ptr;
				}
			}
		}

		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
	}

	InitIOKit(PE_state.deviceTreeHead);
	ConfigureIOKit();
}
365 
/*
 * Start general-purpose IOKit matching.  Must only be called after
 * machine_lockdown(); see the rationale below.
 */
void
PE_lockdown_iokit(void)
{
	/*
	 * On arm/arm64 platforms, and especially those that employ KTRR/CTRR,
	 * machine_lockdown() is treated as a hard security checkpoint, such that
	 * code which executes prior to lockdown must be minimized and limited only to
	 * trusted parts of the kernel and specially-entitled kexts.  We therefore
	 * cannot start the general-purpose IOKit matching process until after lockdown,
	 * as it may involve execution of untrusted/non-entitled kext code.
	 * Furthermore, such kext code may process attacker controlled data (e.g.
	 * network packets), which dramatically increases the potential attack surface
	 * against a kernel which has not yet enabled the full set of available
	 * hardware protections.
	 */
	zalloc_iokit_lockdown();
	StartIOKitMatching();
}
384 
385 void
PE_slide_devicetree(vm_offset_t slide)386 PE_slide_devicetree(vm_offset_t slide)
387 {
388 	assert(PE_state.initialized);
389 	PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
390 	SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
391 }
392 
393 void
PE_init_platform(boolean_t vm_initialized,void * args)394 PE_init_platform(boolean_t vm_initialized, void *args)
395 {
396 	DTEntry         entry;
397 	unsigned int    size;
398 	void * const    *prop;
399 	boot_args      *boot_args_ptr = (boot_args *) args;
400 
401 	if (PE_state.initialized == FALSE) {
402 		PE_state.initialized = TRUE;
403 		PE_state.bootArgs = boot_args_ptr;
404 		PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
405 		PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
406 		PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
407 		PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
408 		PE_state.video.v_width = boot_args_ptr->Video.v_width;
409 		PE_state.video.v_height = boot_args_ptr->Video.v_height;
410 		PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
411 		PE_state.video.v_rotate = (
412 			((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) +    // rotation
413 			((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift)  & kBootVideoDepthMask) // add extra boot rotation
414 			) % 4;
415 		PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
416 		PE_state.video.v_display = boot_args_ptr->Video.v_display;
417 		strlcpy(PE_state.video.v_pixelFormat, "BBBBBBBBGGGGGGGGRRRRRRRR", sizeof(PE_state.video.v_pixelFormat));
418 	}
419 	if (!vm_initialized) {
420 		/*
421 		 * Setup the Device Tree routines
422 		 * so the console can be found and the right I/O space
423 		 * can be used..
424 		 */
425 		SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
426 		pe_identify_machine(boot_args_ptr);
427 	} else {
428 		pe_arm_init_interrupts(args);
429 		pe_arm_init_debug(args);
430 	}
431 
432 	if (!vm_initialized) {
433 		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
434 			if (kSuccess == SecureDTGetProperty(entry, "target-type",
435 			    (void const **)&prop, &size)) {
436 				if (size > sizeof(gTargetTypeBuffer)) {
437 					size = sizeof(gTargetTypeBuffer);
438 				}
439 				bcopy(prop, gTargetTypeBuffer, size);
440 				gTargetTypeBuffer[size - 1] = '\0';
441 			}
442 		}
443 		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
444 			if (kSuccess == SecureDTGetProperty(entry, "model",
445 			    (void const **)&prop, &size)) {
446 				if (size > sizeof(gModelTypeBuffer)) {
447 					size = sizeof(gModelTypeBuffer);
448 				}
449 				bcopy(prop, gModelTypeBuffer, size);
450 				gModelTypeBuffer[size - 1] = '\0';
451 			}
452 		}
453 		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
454 			if (kSuccess == SecureDTGetProperty(entry, "debug-enabled",
455 			    (void const **) &prop, &size)) {
456 				/*
457 				 * We purposefully modify a constified variable as
458 				 * it will get locked down by a trusted monitor or
459 				 * via page table mappings. We don't want people easily
460 				 * modifying this variable...
461 				 */
462 #pragma clang diagnostic push
463 #pragma clang diagnostic ignored "-Wcast-qual"
464 				boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
465 				if (size > sizeof(uint32_t)) {
466 					size = sizeof(uint32_t);
467 				}
468 				bcopy(prop, modify_debug_enabled, size);
469 #pragma clang diagnostic pop
470 			}
471 			if (kSuccess == SecureDTGetProperty(entry, "firmware-version", (void const **) &prop, &size)) {
472 				if (size > sizeof(iBoot_version)) {
473 					size = sizeof(iBoot_version);
474 				}
475 				bcopy(prop, iBoot_version, size);
476 				iBoot_version[size - 1] = '\0';
477 			}
478 #if defined(TARGET_OS_OSX) && defined(__arm64__)
479 			if (kSuccess == SecureDTGetProperty(entry, "system-firmware-version", (void const **) &prop, &size)) {
480 				if (size > sizeof(iBoot_Stage_2_version)) {
481 					size = sizeof(iBoot_Stage_2_version);
482 				}
483 				bcopy(prop, iBoot_Stage_2_version, size);
484 				iBoot_Stage_2_version[size - 1] = '\0';
485 			}
486 #endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
487 			if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id",
488 			    (void const **) &prop, &size)) {
489 				if (size > sizeof(gPlatformECID)) {
490 					size = sizeof(gPlatformECID);
491 				}
492 				bcopy(prop, gPlatformECID, size);
493 			}
494 			if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id",
495 			    (void const **) &prop, &size)) {
496 				if (size > sizeof(gPlatformMemoryID)) {
497 					size = sizeof(gPlatformMemoryID);
498 				}
499 				bcopy(prop, &gPlatformMemoryID, size);
500 			}
501 		}
502 		pe_init_debug();
503 	}
504 }
505 
506 void
PE_create_console(void)507 PE_create_console(void)
508 {
509 	/*
510 	 * Check the head of VRAM for a panic log saved on last panic.
511 	 * Do this before the VRAM is trashed.
512 	 */
513 	check_for_panic_log();
514 
515 	if (PE_state.video.v_display) {
516 		PE_initialize_console(&PE_state.video, kPEGraphicsMode);
517 	} else {
518 		PE_initialize_console(&PE_state.video, kPETextMode);
519 	}
520 }
521 
/*
 * Copy the current console/video configuration into *info.
 * Returns 0 (always succeeds).
 */
int
PE_current_console(PE_Video * info)
{
	*info = PE_state.video;
	return 0;
}
528 
529 void
PE_display_icon(__unused unsigned int flags,__unused const char * name)530 PE_display_icon(__unused unsigned int flags, __unused const char *name)
531 {
532 	if (default_noroot_data) {
533 		vc_display_icon(&default_noroot, default_noroot_data);
534 	}
535 }
536 
/* Hotkey querying is not supported on this platform; always reports FALSE. */
extern          boolean_t
PE_get_hotkey(__unused unsigned char key)
{
	return FALSE;
}
542 
/* Registered consumer of timebase-frequency updates (at most one). */
static timebase_callback_func gTimebaseCallback;

/*
 * Register a callback to receive timebase frequency information, and invoke
 * it immediately with the current value.
 */
void
PE_register_timebase_callback(timebase_callback_func callback)
{
	gTimebaseCallback = callback;

	PE_call_timebase_callback();
}
552 
553 void
PE_call_timebase_callback(void)554 PE_call_timebase_callback(void)
555 {
556 	struct timebase_freq_t timebase_freq;
557 
558 	timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
559 	timebase_freq.timebase_den = 1;
560 
561 	if (gTimebaseCallback) {
562 		gTimebaseCallback(&timebase_freq);
563 	}
564 }
565 
/*
 * The default PE_poll_input handler: reads one character from the UART.
 */
int
PE_stub_poll_input(__unused unsigned int options, char *c)
{
	*c = (char)uart_getc();
	return 0; /* 0 for success, 1 for unsupported */
}
575 
576 /*
577  * This routine will return 1 if you are running on a device with a variant
578  * of iBoot that allows debugging. This is typically not the case on production
579  * fused parts (even when running development variants of iBoot).
580  *
581  * The routine takes an optional argument of the flags passed to debug="" so
582  * kexts don't have to parse the boot arg themselves.
583  */
584 uint32_t
PE_i_can_has_debugger(uint32_t * debug_flags)585 PE_i_can_has_debugger(uint32_t *debug_flags)
586 {
587 	if (debug_flags) {
588 #if DEVELOPMENT || DEBUG
589 		assert(startup_phase >= STARTUP_SUB_TUNABLES);
590 #endif
591 		if (debug_enabled) {
592 			*debug_flags = debug_boot_arg;
593 		} else {
594 			*debug_flags = 0;
595 		}
596 	}
597 	return debug_enabled;
598 }
599 
600 /*
601  * This routine returns TRUE if the device is configured
602  * with panic debugging enabled.
603  */
604 boolean_t
PE_panic_debugging_enabled()605 PE_panic_debugging_enabled()
606 {
607 	return panicDebugging;
608 }
609 
/*
 * Update the CRC stored in the embedded panic header, or report capacity.
 *
 * buf == NULL: query mode — *size is set to the maximum panic text length.
 * Otherwise, *size is clamped to that maximum and the CRC over everything
 * after the eph_crc field (starting at eph_version, through the full panic
 * text region) is recomputed and stored in the header.
 */
void
PE_update_panic_crc(unsigned char *buf, unsigned int *size)
{
	if (!panic_info || !size) {
		return;
	}

	if (!buf) {
		*size = panic_text_len;
		return;
	}

	if (*size == 0) {
		return;
	}

	/* Clamp the caller's size to the region capacity. */
	*size = *size > panic_text_len ? panic_text_len : *size;
	if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
		// rdar://88696402 (PanicTest: test case for MAGIC check in PE_update_panic_crc)
		printf("Error!! Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
	}

	/* CRC everything after the CRC itself - starting with the panic header version */
	panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len +
	    sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
}
636 
/*
 * Convert a pointer into the mapped panic region to a byte offset from its
 * base.  `location` must lie within [gPanicBase, gPanicBase + gPanicSize).
 */
uint32_t
PE_get_offset_into_panic_region(char *location)
{
	assert(gPanicBase != 0);
	assert(location >= (char *) gPanicBase);
	assert((unsigned int)(location - gPanicBase) < gPanicSize);

	return (uint32_t)(uintptr_t)(location - gPanicBase);
}
646 
647 void
PE_init_panicheader()648 PE_init_panicheader()
649 {
650 	if (!panic_info) {
651 		return;
652 	}
653 
654 	bzero(panic_info, sizeof(struct embedded_panic_header));
655 
656 	/*
657 	 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
658 	 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
659 	 */
660 	panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0;
661 
662 	panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
663 	panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
664 
665 	return;
666 }
667 
/*
 * Tries to update the panic header to keep it consistent on nested panics.
 *
 * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
 *       it is to update the panic header to make it consistent when we nest panics.
 */
void
PE_update_panicheader_nestedpanic()
{
	/*
	 * if the panic header pointer is bogus (e.g. someone stomped on it) then bail.
	 */
	if (!panic_info) {
		/* if this happens in development then blow up bigly */
		assert(panic_info);
		return;
	}

	/*
	 * If the panic log offset is not set, re-init the panic header
	 *
	 * note that this should not be possible unless someone stomped on the panic header to zero it out, since by the time
	 * we reach this location *someone* should have appended something to the log..
	 */
	if (panic_info->eph_panic_log_offset == 0) {
		PE_init_panicheader();
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
		return;
	}

	panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;

	/*
	 * If the panic log length is not set, set the end to
	 * the current location of the debug_buf_ptr to close it.
	 */
	if (panic_info->eph_panic_log_len == 0) {
		panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr);

		/* indicative of corruption in the panic region, consumer beware */
		if ((panic_info->eph_other_log_offset == 0) &&
		    (panic_info->eph_other_log_len == 0)) {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
		}
	}

	/* likely indicative of corruption in the panic region, consumer beware */
	if (((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
	}

	/*
	 * If we haven't set up the other log yet, set the beginning of the other log
	 * to the current location of the debug_buf_ptr
	 */
	if (panic_info->eph_other_log_offset == 0) {
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);

		/* indicative of corruption in the panic region, consumer beware */
		if (panic_info->eph_other_log_len == 0) {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
		}
	}

	return;
}
734 
735 boolean_t
PE_reboot_on_panic(void)736 PE_reboot_on_panic(void)
737 {
738 	uint32_t debug_flags;
739 
740 	if (PE_i_can_has_debugger(&debug_flags)
741 	    && (debug_flags & DB_NMI)) {
742 		/* kernel debugging is active */
743 		return FALSE;
744 	} else {
745 		return TRUE;
746 	}
747 }
748 
/*
 * Flush the panic region out of the CPU caches so its contents survive a
 * reset; see the rationale below.
 */
void
PE_sync_panic_buffers(void)
{
	/*
	 * rdar://problem/26453070:
	 * The iBoot panic region is write-combined on arm64.  We must flush dirty lines
	 * from L1/L2 as late as possible before reset, with no further reads of the panic
	 * region between the flush and the reset.  Some targets have an additional memcache (L3),
	 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
	 * be discarded on reset.  If we can make sure the lines are flushed to L3/DRAM,
	 * the platform reset handler will flush any L3.
	 */
	if (gPanicBase) {
		CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize);
	}
}
765 
/*
 * Adjust the boot progress image metadata for the current display rotation.
 * For 90/270-degree rotations (odd v_rotate) only the dx/dy offsets are
 * swapped; the image data itself is not rotated (see comment below).
 */
static void
pe_prepare_images(void)
{
	if ((1 & PE_state.video.v_rotate) != 0) {
		// Only square square images with radial symmetry are supported
		// No need to actually rotate the data

		// Swap the dx and dy offsets
		uint32_t tmp = default_progress.dx;
		default_progress.dx = default_progress.dy;
		default_progress.dy = tmp;
	}
	/* Disabled image-scaling path retained for reference. */
#if 0
	uint32_t cnt, cnt2, cnt3, cnt4;
	uint32_t tmp, width, height;
	uint8_t  data, *new_data;
	const uint8_t *old_data;

	width  = default_progress.width;
	height = default_progress.height * default_progress.count;

	// Scale images if the UI is being scaled
	if (PE_state.video.v_scale > 1) {
		new_data = kalloc(width * height * scale * scale);
		if (new_data != 0) {
			old_data = default_progress_data;
			default_progress_data = new_data;
			for (cnt = 0; cnt < height; cnt++) {
				for (cnt2 = 0; cnt2 < width; cnt2++) {
					data = *(old_data++);
					for (cnt3 = 0; cnt3 < scale; cnt3++) {
						for (cnt4 = 0; cnt4 < scale; cnt4++) {
							new_data[width * scale * cnt3 + cnt4] = data;
						}
					}
					new_data += scale;
				}
				new_data += width * scale * (scale - 1);
			}
			default_progress.width  *= scale;
			default_progress.height *= scale;
			default_progress.dx     *= scale;
			default_progress.dy     *= scale;
		}
	}
#endif
}
813 
/*
 * Record `thread` as the last thread to perform a (tracked) hardware access,
 * then issue an inner-shareable DMB so the store is visible to other
 * observers before any subsequent memory operations.
 */
void
PE_mark_hwaccess(uint64_t thread)
{
	last_hwaccess_thread = thread;
	__builtin_arm_dmb(DMB_ISH);
}
820 
/*
 * Map the SoC debug (socd) trace RAM described by the "socd-trace-ram"
 * device tree node; reg[0] is the physical base, reg[1] the size.
 * Returns the mapped size, or 0 if the node or property is absent.
 */
__startup_func
vm_size_t
PE_init_socd_client(void)
{
	DTEntry entry;
	uintptr_t const *reg_prop;
	unsigned int size;

	if (kSuccess != SecureDTLookupEntry(0, "socd-trace-ram", &entry)) {
		return 0;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return 0;
	}

	socd_trace_ram_base = ml_io_map(reg_prop[0], (vm_size_t)reg_prop[1]);
	socd_trace_ram_size = (vm_size_t)reg_prop[1];

	return socd_trace_ram_size;
}
842 
/*
 * PE_write_socd_client_buffer solves two problems:
 * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
 * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
 *    by a SRAM that must be written to only 4 bytes at a time.
 *
 * `offset` and `size` are in bytes and must both be 4-byte aligned; the
 * write must fit within the mapped trace RAM.
 */
void
PE_write_socd_client_buffer(vm_offset_t offset, const void *buff, vm_size_t size)
{
	/* volatile: force each 32-bit store to actually reach the SRAM. */
	volatile uint32_t *dst = (volatile uint32_t *)(socd_trace_ram_base + offset);
	vm_size_t len = size / sizeof(dst[0]);

	assert(offset + size <= socd_trace_ram_size);

	/* Perform 4 byte aligned accesses */
	if ((offset % 4 != 0) || (size % 4 != 0)) {
		panic("unaligned acccess to socd trace ram");
	}

	for (vm_size_t i = 0; i < len; i++) {
		dst[i] = ((const uint32_t *)buff)[i];
	}
}
866