/* xref: /xnu-8020.140.41/pexpert/arm/pe_init.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc) */
1 /*
2  * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3  *
4  *    arm platform expert initialization.
5  */
6 #include <sys/types.h>
7 #include <sys/kdebug.h>
8 #include <mach/vm_param.h>
9 #include <pexpert/protos.h>
10 #include <pexpert/pexpert.h>
11 #include <pexpert/boot.h>
12 #include <pexpert/device_tree.h>
13 #include <pexpert/pe_images.h>
14 #include <kern/sched_prim.h>
15 #include <kern/socd_client.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_routines.h>
18 #include <arm/caches_internal.h>
19 #include <kern/debug.h>
20 #include <libkern/section_keywords.h>
21 #include <os/overflow.h>
22 
23 #if defined __arm__
24 #include <pexpert/arm/board_config.h>
25 #elif defined __arm64__
26 #include <pexpert/arm64/board_config.h>
27 #endif
28 
29 
/* extern references */
extern void     pe_identify_machine(boot_args *bootArgs);

/* static references */
static void     pe_prepare_images(void);

/* private globals */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;
/* Maximum length of the iBoot firmware version string, including the NUL */
#define FW_VERS_LEN 128
char            firmware_version[FW_VERS_LEN];

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __TEXT
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_SPECIAL_SECTION(volatile uint32_t, "__TEXT,__const") debug_enabled = FALSE;

/* Unique chip ID, copied from the "unique-chip-id" DT property in PE_init_platform() */
uint8_t         gPlatformECID[8];
/* DRAM vendor ID, copied from the "dram-vendor-id" DT property in PE_init_platform() */
uint32_t        gPlatformMemoryID;
/* Set once the boot progress indicator has been handed to the video console */
static boolean_t vc_progress_initialized = FALSE;
/* Thread pointer recorded by PE_mark_hwaccess() for hardware-access debugging */
uint64_t    last_hwaccess_thread = 0;
/* Board target type ("target-type" DT property), NUL-terminated */
char     gTargetTypeBuffer[16];
/* Device model string ("model" DT property), NUL-terminated */
char     gModelTypeBuffer[32];

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* Write-combined mapping of the iBoot panic region; 0 when absent/unmapped */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
/*
 * Locate and map the iBoot panic region, then inspect whatever header the
 * previous session left behind.  On success this populates gPanicBase,
 * gPanicSize, panic_text_len and panic_info; on any device-tree lookup
 * failure it returns early, leaving gPanicBase == 0 and panic_info == NULL.
 */
static void
check_for_panic_log(void)
{
#ifdef PLATFORM_PANIC_LOG_PADDR
	/* Board config pins the panic region at a fixed physical address. */
	gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
	panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
	gPanicSize = PLATFORM_PANIC_LOG_SIZE;
#else
	DTEntry entry, chosen;
	unsigned int size;
	uintptr_t const *reg_prop;
	uint32_t const *panic_region_length;

	/*
	 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
	 *
	 * chosen {
	 *   embedded-panic-log-size = <0x00080000>;
	 *   [a bunch of other stuff]
	 * };
	 *
	 * pram {
	 *   reg = <0x00000008_fbc48000 0x00000000_000b4000>;
	 * };
	 *
	 * reg[0] is the physical address
	 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
	 * embedded-panic-log-size is the maximum amount of data to store in the buffer
	 */
	if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return;
	}

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) {
		return;
	}

	/* Map write-combined: PE_sync_panic_buffers() flushes dirty lines before reset. */
	gPanicBase = ml_io_map_wcomb(reg_prop[0], panic_region_length[0]);

	/* Deduct the size of the panic header from the panic region size */
	panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
	gPanicSize = panic_region_length[0];

#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
	if (PE_consistent_debug_enabled()) {
		uint64_t macos_panic_physbase = 0;
		uint64_t macos_panic_physlen = 0;
		/* Populate the macOS panic region data if it's present in consistent debug */
		if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
			macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
			mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
			macos_panic_size = macos_panic_physlen;
		}
	}
#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */

#endif
	/* NOTE(review): assumes the ml_io_map_wcomb() above succeeded — the
	 * dereferences below would fault on a NULL mapping; confirm mapping
	 * failure panics rather than returning 0. */
	panic_info = (struct embedded_panic_header *)gPanicBase;

	/* Check if a shared memory console is running in the panic buffer */
	if (panic_info->eph_magic == 'SHMC') {
		panic_console_available = TRUE;
		return;
	}

	/* Check if there's a boot profile in the panic buffer */
	if (panic_info->eph_magic == 'BTRC') {
		return;
	}

	/*
	 * Check to see if a panic (FUNK) is in VRAM from the last time
	 */
	if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
		printf("iBoot didn't extract panic log from previous session crash, this is bad\n");
	}

	/* Clear panic region */
	bzero((void *)gPanicBase, gPanicSize);
}
178 
179 int
PE_initialize_console(PE_Video * info,int op)180 PE_initialize_console(PE_Video * info, int op)
181 {
182 	static int last_console = -1;
183 
184 	if (info && (info != &PE_state.video)) {
185 		info->v_scale = PE_state.video.v_scale;
186 	}
187 
188 	switch (op) {
189 	case kPEDisableScreen:
190 		initialize_screen(info, op);
191 		last_console = switch_to_serial_console();
192 		kprintf("kPEDisableScreen %d\n", last_console);
193 		break;
194 
195 	case kPEEnableScreen:
196 		initialize_screen(info, op);
197 		if (info) {
198 			PE_state.video = *info;
199 		}
200 		kprintf("kPEEnableScreen %d\n", last_console);
201 		if (last_console != -1) {
202 			switch_to_old_console(last_console);
203 		}
204 		break;
205 
206 	case kPEReleaseScreen:
207 		/*
208 		 * we don't show the progress indicator on boot, but want to
209 		 * show it afterwards.
210 		 */
211 		if (!vc_progress_initialized) {
212 			default_progress.dx = 0;
213 			default_progress.dy = 0;
214 			vc_progress_initialize(&default_progress,
215 			    default_progress_data1x,
216 			    default_progress_data2x,
217 			    default_progress_data3x,
218 			    (unsigned char *) appleClut8);
219 			vc_progress_initialized = TRUE;
220 		}
221 		initialize_screen(info, op);
222 		break;
223 
224 	default:
225 		initialize_screen(info, op);
226 		break;
227 	}
228 
229 	return 0;
230 }
231 
/*
 * Prepare and start IOKit: pull boot imagery and timing data out of the
 * device tree, set up the boot progress indicator, trace iBoot timing into
 * kdebug, then hand the device tree to IOKit.
 */
void
PE_init_iokit(void)
{
	DTEntry         entry;
	unsigned int    size, scale;
	unsigned long   display_size;
	void const * const *map;
	unsigned int    show_progress;
	int             *delta, image_size, flip;
	uint32_t        start_time_value = 0;
	uint32_t        debug_wait_start_value = 0;
	uint32_t        load_kernel_start_value = 0;
	uint32_t        populate_registry_time_value = 0;

	PE_init_printf(TRUE);

	printf("iBoot version: %s\n", firmware_version);

	/* iBoot publishes the boot CLUT and failed-boot picture via the memory map. */
	if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) {
		boot_progress_element const *bootPict;

		if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) {
			bcopy(map[0], appleClut8, sizeof(appleClut8));
		}

		if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) {
			/* Cache the failed-boot icon geometry for PE_display_icon(). */
			bootPict = (boot_progress_element const *) map[0];
			default_noroot.width = bootPict->width;
			default_noroot.height = bootPict->height;
			default_noroot.dx = 0;
			default_noroot.dy = bootPict->yOffset;
			default_noroot_data = &bootPict->data[0];
		}
	}

	pe_prepare_images();

	scale = PE_state.video.v_scale;
	flip = 1;

	/* macOS shows progress unless restoring; embedded opts in via -progress. */
#if defined(XNU_TARGET_OS_OSX)
	int notused;
	show_progress = TRUE;
	if (PE_parse_boot_argn("-restore", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
	if (PE_parse_boot_argn("-noprogress", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
#else
	show_progress = FALSE;
	PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
#endif /* XNU_TARGET_OS_OSX */
	if (show_progress) {
		/* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
		switch (PE_state.video.v_rotate) {
		case 2:
			flip = -1;
			OS_FALLTHROUGH;
		case 0:
			display_size = PE_state.video.v_height;
			image_size = default_progress.height;
			delta = &default_progress.dy;
			break;
		case 1:
			flip = -1;
			OS_FALLTHROUGH;
		case 3:
		default:
			display_size = PE_state.video.v_width;
			image_size = default_progress.width;
			delta = &default_progress.dx;
		}
		/* Pull the indicator toward center until it fits in half the display. */
		assert(*delta >= 0);
		while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
			*delta -= 50 * scale;
			assert(*delta >= 0);
		}
		*delta *= flip;

		/* Check for DT-defined progress y delta */
		PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy));

		vc_progress_initialize(&default_progress,
		    default_progress_data1x,
		    default_progress_data2x,
		    default_progress_data3x,
		    (unsigned char *) appleClut8);
		vc_progress_initialized = TRUE;
	}

	if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
		/* Trace iBoot-provided timing information. */
		if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) {
			uint32_t const * value_ptr;

			/* Each property is only honored when exactly 4 bytes long. */
			if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(start_time_value)) {
					start_time_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(debug_wait_start_value)) {
					debug_wait_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(load_kernel_start_value)) {
					load_kernel_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(populate_registry_time_value)) {
					populate_registry_time_value = *value_ptr;
				}
			}
		}

		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
	}

	InitIOKit(PE_state.deviceTreeHead);
	ConfigureIOKit();
}
359 
/*
 * Kick off general-purpose IOKit driver matching.  Called only after
 * machine_lockdown(); see the rationale below.
 */
void
PE_lockdown_iokit(void)
{
	/*
	 * On arm/arm64 platforms, and especially those that employ KTRR/CTRR,
	 * machine_lockdown() is treated as a hard security checkpoint, such that
	 * code which executes prior to lockdown must be minimized and limited only to
	 * trusted parts of the kernel and specially-entitled kexts.  We therefore
	 * cannot start the general-purpose IOKit matching process until after lockdown,
	 * as it may involve execution of untrusted/non-entitled kext code.
	 * Furthermore, such kext code may process attacker controlled data (e.g.
	 * network packets), which dramatically increases the potential attack surface
	 * against a kernel which has not yet enabled the full set of available
	 * hardware protections.
	 */
	StartIOKitMatching();
}
377 
378 void
PE_slide_devicetree(vm_offset_t slide)379 PE_slide_devicetree(vm_offset_t slide)
380 {
381 	assert(PE_state.initialized);
382 	PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
383 	SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
384 }
385 
/*
 * Platform expert initialization.  Called twice during boot:
 *  - vm_initialized == FALSE: capture boot args / video config into PE_state,
 *    initialize the device tree, identify the machine and read platform
 *    identity properties (target type, model, ECID, firmware version, ...).
 *  - vm_initialized == TRUE: bring up interrupts and debug facilities.
 */
void
PE_init_platform(boolean_t vm_initialized, void *args)
{
	DTEntry         entry;
	unsigned int    size;
	void * const    *prop;
	boot_args      *boot_args_ptr = (boot_args *) args;

	/* First call: snapshot the boot-args-provided state into PE_state. */
	if (PE_state.initialized == FALSE) {
		PE_state.initialized = TRUE;
		PE_state.bootArgs = boot_args_ptr;
		PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
		PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
		PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
		PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
		PE_state.video.v_width = boot_args_ptr->Video.v_width;
		PE_state.video.v_height = boot_args_ptr->Video.v_height;
		/* v_depth packs depth/rotate/scale fields; unpack each one. */
		PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
		PE_state.video.v_rotate = (
			((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) +    // rotation
			((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift)  & kBootVideoDepthMask) // add extra boot rotation
			) % 4;
		PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
		PE_state.video.v_display = boot_args_ptr->Video.v_display;
		strlcpy(PE_state.video.v_pixelFormat, "BBBBBBBBGGGGGGGGRRRRRRRR", sizeof(PE_state.video.v_pixelFormat));
	}
	if (!vm_initialized) {
		/*
		 * Setup the Device Tree routines
		 * so the console can be found and the right I/O space
		 * can be used..
		 */
		SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
		pe_identify_machine(boot_args_ptr);
	} else {
		pe_arm_init_interrupts(args);
		pe_arm_init_debug(args);
	}

	if (!vm_initialized) {
		/* Copy the board target type, forcing NUL termination on truncation. */
		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
			if (kSuccess == SecureDTGetProperty(entry, "target-type",
			    (void const **)&prop, &size)) {
				if (size > sizeof(gTargetTypeBuffer)) {
					size = sizeof(gTargetTypeBuffer);
				}
				bcopy(prop, gTargetTypeBuffer, size);
				gTargetTypeBuffer[size - 1] = '\0';
			}
		}
		/* Same treatment for the model string. */
		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
			if (kSuccess == SecureDTGetProperty(entry, "model",
			    (void const **)&prop, &size)) {
				if (size > sizeof(gModelTypeBuffer)) {
					size = sizeof(gModelTypeBuffer);
				}
				bcopy(prop, gModelTypeBuffer, size);
				gModelTypeBuffer[size - 1] = '\0';
			}
		}
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			if (kSuccess == SecureDTGetProperty(entry, "debug-enabled",
			    (void const **) &prop, &size)) {
				/*
				 * We purposefully modify a constified variable as
				 * it will get locked down by a trusted monitor or
				 * via page table mappings. We don't want people easily
				 * modifying this variable...
				 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
				boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
				if (size > sizeof(uint32_t)) {
					size = sizeof(uint32_t);
				}
				bcopy(prop, modify_debug_enabled, size);
#pragma clang diagnostic pop
			}
			if (kSuccess == SecureDTGetProperty(entry, "firmware-version",
			    (void const **) &prop, &size)) {
				if (size > sizeof(firmware_version)) {
					size = sizeof(firmware_version);
				}
				bcopy(prop, firmware_version, size);
				firmware_version[size - 1] = '\0';
			}
			if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformECID)) {
					size = sizeof(gPlatformECID);
				}
				bcopy(prop, gPlatformECID, size);
			}
			if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformMemoryID)) {
					size = sizeof(gPlatformMemoryID);
				}
				bcopy(prop, &gPlatformMemoryID, size);
			}
		}
		pe_init_debug();
	}
}
490 
491 void
PE_create_console(void)492 PE_create_console(void)
493 {
494 	/*
495 	 * Check the head of VRAM for a panic log saved on last panic.
496 	 * Do this before the VRAM is trashed.
497 	 */
498 	check_for_panic_log();
499 
500 	if (PE_state.video.v_display) {
501 		PE_initialize_console(&PE_state.video, kPEGraphicsMode);
502 	} else {
503 		PE_initialize_console(&PE_state.video, kPETextMode);
504 	}
505 }
506 
/*
 * Copy the current console/video configuration into *info.
 * Always returns 0.
 */
int
PE_current_console(PE_Video * info)
{
	*info = PE_state.video;
	return 0;
}
513 
514 void
PE_display_icon(__unused unsigned int flags,__unused const char * name)515 PE_display_icon(__unused unsigned int flags, __unused const char *name)
516 {
517 	if (default_noroot_data) {
518 		vc_display_icon(&default_noroot, default_noroot_data);
519 	}
520 }
521 
/* Hotkeys are not supported on this platform; always reports FALSE. */
extern          boolean_t
PE_get_hotkey(__unused unsigned char key)
{
	return FALSE;
}
527 
/* The single registered consumer of timebase frequency information. */
static timebase_callback_func gTimebaseCallback;

/*
 * Register a callback to receive timebase frequency information and
 * invoke it immediately with the current values.
 */
void
PE_register_timebase_callback(timebase_callback_func callback)
{
	gTimebaseCallback = callback;

	PE_call_timebase_callback();
}
537 
538 void
PE_call_timebase_callback(void)539 PE_call_timebase_callback(void)
540 {
541 	struct timebase_freq_t timebase_freq;
542 
543 	timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
544 	timebase_freq.timebase_den = 1;
545 
546 	if (gTimebaseCallback) {
547 		gTimebaseCallback(&timebase_freq);
548 	}
549 }
550 
/*
 * The default PE_poll_input handler.
 *
 * Fetches one character from the UART into *c.  (Blocking vs. polling
 * behavior is determined by uart_getc — not visible here.)
 */
int
PE_stub_poll_input(__unused unsigned int options, char *c)
{
	*c = (char)uart_getc();
	return 0; /* 0 for success, 1 for unsupported */
}
560 
561 /*
562  * This routine will return 1 if you are running on a device with a variant
563  * of iBoot that allows debugging. This is typically not the case on production
564  * fused parts (even when running development variants of iBoot).
565  *
566  * The routine takes an optional argument of the flags passed to debug="" so
567  * kexts don't have to parse the boot arg themselves.
568  */
569 uint32_t
PE_i_can_has_debugger(uint32_t * debug_flags)570 PE_i_can_has_debugger(uint32_t *debug_flags)
571 {
572 	if (debug_flags) {
573 #if DEVELOPMENT || DEBUG
574 		assert(startup_phase >= STARTUP_SUB_TUNABLES);
575 #endif
576 		if (debug_enabled) {
577 			*debug_flags = debug_boot_arg;
578 		} else {
579 			*debug_flags = 0;
580 		}
581 	}
582 	return debug_enabled;
583 }
584 
585 /*
586  * This routine returns TRUE if the device is configured
587  * with panic debugging enabled.
588  */
589 boolean_t
PE_panic_debugging_enabled()590 PE_panic_debugging_enabled()
591 {
592 	return panicDebugging;
593 }
594 
/*
 * Query or finalize the VRAM panic log.
 *
 * buf == NULL: report the usable panic-log capacity via *size.
 * buf != NULL: clamp *size to that capacity and recompute the panic header
 *              CRC over the header tail plus the log area.
 *
 * NOTE(review): despite the name, this function never copies from buf — it
 * appears to assume the log text is already in the panic region; confirm
 * against callers in osfmk/kern/debug.c.
 */
void
PE_save_buffer_to_vram(unsigned char *buf, unsigned int *size)
{
	/* No panic region mapped, or no way to report a size: nothing to do. */
	if (!panic_info || !size) {
		return;
	}

	if (!buf) {
		*size = panic_text_len;
		return;
	}

	if (*size == 0) {
		return;
	}

	*size = *size > panic_text_len ? panic_text_len : *size;
	if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
		printf("Error!! Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
	}

	/* CRC everything after the CRC itself - starting with the panic header version */
	panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len +
	    sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
}
620 
621 uint32_t
PE_get_offset_into_panic_region(char * location)622 PE_get_offset_into_panic_region(char *location)
623 {
624 	assert(gPanicBase != 0);
625 	assert(location >= (char *) gPanicBase);
626 	assert((unsigned int)(location - gPanicBase) < gPanicSize);
627 
628 	return (uint32_t)(uintptr_t)(location - gPanicBase);
629 }
630 
631 void
PE_init_panicheader()632 PE_init_panicheader()
633 {
634 	if (!panic_info) {
635 		return;
636 	}
637 
638 	bzero(panic_info, sizeof(struct embedded_panic_header));
639 
640 	/*
641 	 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
642 	 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
643 	 */
644 	panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0;
645 
646 	panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
647 	panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
648 
649 	return;
650 }
651 
652 /*
653  * Tries to update the panic header to keep it consistent on nested panics.
654  *
655  * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
656  *       it is to update the panic header to make it consistent when we nest panics.
657  */
658 void
PE_update_panicheader_nestedpanic()659 PE_update_panicheader_nestedpanic()
660 {
661 	if (!panic_info) {
662 		return;
663 	}
664 
665 	/*
666 	 * If the panic log offset is not set, re-init the panic header
667 	 */
668 	if (panic_info->eph_panic_log_offset == 0) {
669 		PE_init_panicheader();
670 		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
671 		return;
672 	}
673 
674 	panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
675 
676 	/*
677 	 * If the panic log length is not set, set the end to
678 	 * the current location of the debug_buf_ptr to close it.
679 	 */
680 	if (panic_info->eph_panic_log_len == 0) {
681 		panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr);
682 
683 		/* If this assert fires, it's indicative of corruption in the panic region */
684 		assert(panic_info->eph_other_log_offset == panic_info->eph_other_log_len == 0);
685 	}
686 
687 	/* If this assert fires, it's likely indicative of corruption in the panic region */
688 	assert(((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) ||
689 	    ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0)));
690 
691 	/*
692 	 * If we haven't set up the other log yet, set the beginning of the other log
693 	 * to the current location of the debug_buf_ptr
694 	 */
695 	if (panic_info->eph_other_log_offset == 0) {
696 		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
697 
698 		/* If this assert fires, it's indicative of corruption in the panic region */
699 		assert(panic_info->eph_other_log_len == 0);
700 	}
701 
702 	return;
703 }
704 
705 boolean_t
PE_reboot_on_panic(void)706 PE_reboot_on_panic(void)
707 {
708 	uint32_t debug_flags;
709 
710 	if (PE_i_can_has_debugger(&debug_flags)
711 	    && (debug_flags & DB_NMI)) {
712 		/* kernel debugging is active */
713 		return FALSE;
714 	} else {
715 		return TRUE;
716 	}
717 }
718 
719 void
PE_sync_panic_buffers(void)720 PE_sync_panic_buffers(void)
721 {
722 	/*
723 	 * rdar://problem/26453070:
724 	 * The iBoot panic region is write-combined on arm64.  We must flush dirty lines
725 	 * from L1/L2 as late as possible before reset, with no further reads of the panic
726 	 * region between the flush and the reset.  Some targets have an additional memcache (L3),
727 	 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
728 	 * be discarded on reset.  If we can make sure the lines are flushed to L3/DRAM,
729 	 * the platform reset handler will flush any L3.
730 	 */
731 	if (gPanicBase) {
732 		CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize);
733 	}
734 }
735 
/*
 * Adjust the boot progress image metadata for the current display rotation.
 * For 90/270-degree rotations only the dx/dy offsets are swapped; the pixel
 * data itself is never rotated.
 */
static void
pe_prepare_images(void)
{
	if ((1 & PE_state.video.v_rotate) != 0) {
		// Only square images with radial symmetry are supported
		// No need to actually rotate the data

		// Swap the dx and dy offsets
		uint32_t tmp = default_progress.dx;
		default_progress.dx = default_progress.dy;
		default_progress.dy = tmp;
	}
	/* Disabled image-scaling path, retained for reference. */
#if 0
	uint32_t cnt, cnt2, cnt3, cnt4;
	uint32_t tmp, width, height;
	uint8_t  data, *new_data;
	const uint8_t *old_data;

	width  = default_progress.width;
	height = default_progress.height * default_progress.count;

	// Scale images if the UI is being scaled
	if (PE_state.video.v_scale > 1) {
		new_data = kalloc(width * height * scale * scale);
		if (new_data != 0) {
			old_data = default_progress_data;
			default_progress_data = new_data;
			for (cnt = 0; cnt < height; cnt++) {
				for (cnt2 = 0; cnt2 < width; cnt2++) {
					data = *(old_data++);
					for (cnt3 = 0; cnt3 < scale; cnt3++) {
						for (cnt4 = 0; cnt4 < scale; cnt4++) {
							new_data[width * scale * cnt3 + cnt4] = data;
						}
					}
					new_data += scale;
				}
				new_data += width * scale * (scale - 1);
			}
			default_progress.width  *= scale;
			default_progress.height *= scale;
			default_progress.dx     *= scale;
			default_progress.dy     *= scale;
		}
	}
#endif
}
783 
/*
 * Record the thread identifier of the most recent hardware access, for
 * post-mortem diagnosis.  The DMB makes the store visible (inner-shareable
 * domain) before execution continues.
 */
void
PE_mark_hwaccess(uint64_t thread)
{
	last_hwaccess_thread = thread;
	__builtin_arm_dmb(DMB_ISH);
}
790 
791 __startup_func
792 vm_size_t
PE_init_socd_client(void)793 PE_init_socd_client(void)
794 {
795 	DTEntry entry;
796 	uintptr_t const *reg_prop;
797 	unsigned int size;
798 
799 	if (kSuccess != SecureDTLookupEntry(0, "socd-trace-ram", &entry)) {
800 		return 0;
801 	}
802 
803 	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
804 		return 0;
805 	}
806 
807 	socd_trace_ram_base = ml_io_map(reg_prop[0], (vm_size_t)reg_prop[1]);
808 	socd_trace_ram_size = (vm_size_t)reg_prop[1];
809 
810 	return socd_trace_ram_size;
811 }
812 
813 /*
814  * PE_write_socd_client_buffer solves two problems:
815  * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
816  * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
817  *    by a SRAM that must be written to only 4 bytes at a time.
818  */
819 void
PE_write_socd_client_buffer(vm_offset_t offset,const void * buff,vm_size_t size)820 PE_write_socd_client_buffer(vm_offset_t offset, const void *buff, vm_size_t size)
821 {
822 	volatile uint32_t *dst = (volatile uint32_t *)(socd_trace_ram_base + offset);
823 	vm_size_t len = size / sizeof(dst[0]);
824 
825 	assert(offset + size <= socd_trace_ram_size);
826 
827 	/* Perform 4 byte aligned accesses */
828 	if ((offset % 4 != 0) || (size % 4 != 0)) {
829 		panic("unaligned acccess to socd trace ram");
830 	}
831 
832 	for (vm_size_t i = 0; i < len; i++) {
833 		dst[i] = ((const uint32_t *)buff)[i];
834 	}
835 }
836