1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * arm platform expert initialization.
5 */
6 #include <sys/types.h>
7 #include <sys/kdebug.h>
8 #include <mach/vm_param.h>
9 #include <pexpert/protos.h>
10 #include <pexpert/pexpert.h>
11 #include <pexpert/boot.h>
12 #include <pexpert/device_tree.h>
13 #include <pexpert/pe_images.h>
14 #include <kern/sched_prim.h>
15 #include <kern/socd_client.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_routines.h>
18 #include <arm/caches_internal.h>
19 #include <kern/debug.h>
20 #include <libkern/section_keywords.h>
21 #include <os/overflow.h>
22
23 #include <pexpert/arm64/board_config.h>
24
25
/* extern references */
extern void pe_identify_machine(boot_args *bootArgs);

/* static references */
static void pe_prepare_images(void);

/* private globals */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;
TUNABLE_DT(uint32_t, PE_srd_fused, "/chosen", "research-enabled",
    "srd_fusing", 0, TUNABLE_DT_NONE);

/* Maximum length of the iBoot firmware version strings below, in bytes. */
#define FW_VERS_LEN 128

char iBoot_version[FW_VERS_LEN];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
char iBoot_Stage_2_version[FW_VERS_LEN];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __DATA_CONST
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_LATE(volatile uint32_t) debug_enabled = FALSE;

/*
 * This variable indicates the page protection security policy used by the system.
 * It is intended mostly for debugging purposes.
 */
SECURITY_READ_ONLY_LATE(ml_page_protection_t) page_protection_type;

uint8_t gPlatformECID[8];          /* "unique-chip-id" from /chosen (see PE_init_platform) */
uint32_t gPlatformMemoryID;        /* "dram-vendor-id" from /chosen (see PE_init_platform) */
static boolean_t vc_progress_initialized = FALSE; /* progress UI brought up yet? */
uint64_t last_hwaccess_thread = 0; /* updated by PE_mark_hwaccess() */
char gTargetTypeBuffer[16];        /* "target-type" from the device tree */
char gModelTypeBuffer[32];         /* "model" from the device tree */

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* Persistent panic region mapping, set up by check_for_panic_log(). */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
98
/*
 * Map the persistent panic-log region (either at a platform-fixed physical
 * address or as described by the device tree) and inspect its header magic to
 * see whether it already holds live data from a prior session: a shared
 * memory console, a boot trace, or an unextracted panic log. If none of
 * those are present, the region is zeroed for reuse.
 */
static void
check_for_panic_log(void)
{
#ifdef PLATFORM_PANIC_LOG_PADDR
	/* Platform hard-codes the panic region; map it write-combined. */
	gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
	panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
	gPanicSize = PLATFORM_PANIC_LOG_SIZE;
#else
	DTEntry entry, chosen;
	unsigned int size;
	uintptr_t const *reg_prop;
	uint32_t const *panic_region_length;

	/*
	 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
	 *
	 * chosen {
	 *   embedded-panic-log-size = <0x00080000>;
	 *   [a bunch of other stuff]
	 * };
	 *
	 * pram {
	 *   reg = <0x00000008_fbc48000 0x00000000_000b4000>;
	 * };
	 *
	 * reg[0] is the physical address
	 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
	 * embedded-panic-log-size is the maximum amount of data to store in the buffer
	 */
	if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return;
	}

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) {
		return;
	}

	/* Write-combined mapping; see PE_sync_panic_buffers() for flush rules. */
	gPanicBase = ml_io_map_wcomb(reg_prop[0], panic_region_length[0]);

	/* Deduct the size of the panic header from the panic region size */
	panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
	gPanicSize = panic_region_length[0];

#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
	if (PE_consistent_debug_enabled()) {
		uint64_t macos_panic_physbase = 0;
		uint64_t macos_panic_physlen = 0;
		/* Populate the macOS panic region data if it's present in consistent debug */
		if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
			macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
			mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
			macos_panic_size = macos_panic_physlen;
		}
	}
#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */

#endif
	/*
	 * NOTE(review): assumes the ml_io_map_wcomb() above succeeded (non-zero
	 * gPanicBase) before the header is dereferenced below -- confirm the
	 * mapping routine panics rather than returning 0 on failure.
	 */
	panic_info = (struct embedded_panic_header *)gPanicBase;

	/* Check if a shared memory console is running in the panic buffer */
	if (panic_info->eph_magic == 'SHMC') {
		panic_console_available = TRUE;
		return;
	}

	/* Check if there's a boot profile in the panic buffer */
	if (panic_info->eph_magic == 'BTRC') {
		return;
	}

	/*
	 * Check to see if a panic (FUNK) is in VRAM from the last time
	 */
	if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
		printf("iBoot didn't extract panic log from previous session crash, this is bad\n");
	}

	/* Clear panic region */
	bzero((void *)gPanicBase, gPanicSize);
}
187
188 int
PE_initialize_console(PE_Video * info,int op)189 PE_initialize_console(PE_Video * info, int op)
190 {
191 static int last_console = -1;
192
193 if (info && (info != &PE_state.video)) {
194 info->v_scale = PE_state.video.v_scale;
195 }
196
197 switch (op) {
198 case kPEDisableScreen:
199 initialize_screen(info, op);
200 last_console = switch_to_serial_console();
201 kprintf("kPEDisableScreen %d\n", last_console);
202 break;
203
204 case kPEEnableScreen:
205 initialize_screen(info, op);
206 if (info) {
207 PE_state.video = *info;
208 }
209 kprintf("kPEEnableScreen %d\n", last_console);
210 if (last_console != -1) {
211 switch_to_old_console(last_console);
212 }
213 break;
214
215 case kPEReleaseScreen:
216 /*
217 * we don't show the progress indicator on boot, but want to
218 * show it afterwards.
219 */
220 if (!vc_progress_initialized) {
221 default_progress.dx = 0;
222 default_progress.dy = 0;
223 vc_progress_initialize(&default_progress,
224 default_progress_data1x,
225 default_progress_data2x,
226 default_progress_data3x,
227 (unsigned char *) appleClut8);
228 vc_progress_initialized = TRUE;
229 }
230 initialize_screen(info, op);
231 break;
232
233 default:
234 initialize_screen(info, op);
235 break;
236 }
237
238 return 0;
239 }
240
/*
 * Late platform-expert initialization on the way into IOKit: bring up
 * printf, pull boot images (CLUT, failed-boot picture) from the device
 * tree, configure and optionally display the boot progress indicator,
 * emit an iBoot timing tracepoint, then hand off to IOKit.
 */
void
PE_init_iokit(void)
{
	DTEntry entry;
	unsigned int size, scale;
	unsigned long display_size;
	void const * const *map;
	unsigned int show_progress;
	int *delta, image_size, flip;
	uint32_t start_time_value = 0;
	uint32_t debug_wait_start_value = 0;
	uint32_t load_kernel_start_value = 0;
	uint32_t populate_registry_time_value = 0;

	PE_init_printf(TRUE);

	printf("iBoot version: %s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
	printf("iBoot Stage 2 version: %s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

	if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) {
		boot_progress_element const *bootPict;

		/* Boot color lookup table provided by iBoot. */
		if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) {
			bcopy(map[0], appleClut8, sizeof(appleClut8));
		}

		/* Image shown when boot fails ("no root"). */
		if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) {
			bootPict = (boot_progress_element const *) map[0];
			default_noroot.width = bootPict->width;
			default_noroot.height = bootPict->height;
			default_noroot.dx = 0;
			default_noroot.dy = bootPict->yOffset;
			default_noroot_data = &bootPict->data[0];
		}
	}

	pe_prepare_images();

	scale = PE_state.video.v_scale;
	flip = 1;

#if defined(XNU_TARGET_OS_OSX)
	/* On macOS the progress UI is on by default, suppressible by boot-args. */
	int notused;
	show_progress = TRUE;
	if (PE_parse_boot_argn("-restore", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
	if (PE_parse_boot_argn("-noprogress", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
#else
	/* Elsewhere it is off by default and opt-in via "-progress". */
	show_progress = FALSE;
	PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
#endif /* XNU_TARGET_OS_OSX */
	if (show_progress) {
		/* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
		switch (PE_state.video.v_rotate) {
		case 2:
			flip = -1;
			OS_FALLTHROUGH;
		case 0:
			display_size = PE_state.video.v_height;
			image_size = default_progress.height;
			delta = &default_progress.dy;
			break;
		case 1:
			flip = -1;
			OS_FALLTHROUGH;
		case 3:
		default:
			display_size = PE_state.video.v_width;
			image_size = default_progress.width;
			delta = &default_progress.dx;
		}
		assert(*delta >= 0);
		/* Pull the image offset in until it fits within half the display. */
		while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
			*delta -= 50 * scale;
			assert(*delta >= 0);
		}
		*delta *= flip;

		/* Check for DT-defined progress y delta */
		PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy));

		vc_progress_initialize(&default_progress,
		    default_progress_data1x,
		    default_progress_data2x,
		    default_progress_data3x,
		    (unsigned char *) appleClut8);
		vc_progress_initialized = TRUE;
	}

	if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
		/* Trace iBoot-provided timing information. */
		if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) {
			uint32_t const * value_ptr;

			if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(start_time_value)) {
					start_time_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(debug_wait_start_value)) {
					debug_wait_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(load_kernel_start_value)) {
					load_kernel_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(populate_registry_time_value)) {
					populate_registry_time_value = *value_ptr;
				}
			}
		}

		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
	}

	InitIOKit(PE_state.deviceTreeHead);
	ConfigureIOKit();
}
371
/*
 * Kick off general-purpose IOKit matching, which must run only after
 * machine_lockdown().
 */
void
PE_lockdown_iokit(void)
{
	/*
	 * General-purpose IOKit matching may execute untrusted / non-entitled
	 * kext code, and such code may in turn process attacker-controlled data
	 * (e.g. network packets). On arm/arm64 -- especially with KTRR/CTRR --
	 * machine_lockdown() is treated as a hard security checkpoint: code that
	 * runs before it must be limited to trusted kernel code and specially
	 * entitled kexts. Matching is therefore deferred until after lockdown,
	 * when the full set of hardware protections is in effect.
	 */
	zalloc_iokit_lockdown();
	StartIOKitMatching();
}
390
391 void
PE_slide_devicetree(vm_offset_t slide)392 PE_slide_devicetree(vm_offset_t slide)
393 {
394 assert(PE_state.initialized);
395 PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
396 SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
397 }
398
/*
 * Two-phase platform-expert initialization.
 *
 * First call (vm_initialized == FALSE): capture boot args and video state
 * into PE_state, initialize the SecureDT layer, identify the machine, and
 * snapshot identity strings (target-type, model, firmware versions, ECID,
 * DRAM vendor id) plus the debug-enabled flag out of the device tree.
 *
 * Second call (vm_initialized == TRUE): bring up interrupts and debug
 * support now that the VM is available.
 */
void
PE_init_platform(boolean_t vm_initialized, void *args)
{
	DTEntry entry;
	unsigned int size;
	void * const *prop;
	boot_args *boot_args_ptr = (boot_args *) args;

	if (PE_state.initialized == FALSE) {
		page_protection_type = ml_page_protection_type();
		PE_state.initialized = TRUE;
		PE_state.bootArgs = boot_args_ptr;
		PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
		PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
		PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
		PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
		PE_state.video.v_width = boot_args_ptr->Video.v_width;
		PE_state.video.v_height = boot_args_ptr->Video.v_height;
		/* v_depth packs depth/rotation/scale bitfields; unpack each. */
		PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
		PE_state.video.v_rotate = (
			((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) +  // rotation
			((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift) & kBootVideoDepthMask) // add extra boot rotation
			) % 4;
		PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
		PE_state.video.v_display = boot_args_ptr->Video.v_display;
		strlcpy(PE_state.video.v_pixelFormat, "BBBBBBBBGGGGGGGGRRRRRRRR", sizeof(PE_state.video.v_pixelFormat));
	}
	if (!vm_initialized) {
		/*
		 * Setup the Device Tree routines
		 * so the console can be found and the right I/O space
		 * can be used..
		 */
		SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
		pe_identify_machine(boot_args_ptr);
	} else {
		pe_arm_init_interrupts(args);
		pe_arm_init_debug(args);
	}

	if (!vm_initialized) {
		/*
		 * NOTE(review): the `size - 1` NUL-termination below would underflow
		 * for a zero-length DT property; iBoot-populated string properties
		 * are presumably never empty -- confirm.
		 */
		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
			if (kSuccess == SecureDTGetProperty(entry, "target-type",
			    (void const **)&prop, &size)) {
				if (size > sizeof(gTargetTypeBuffer)) {
					size = sizeof(gTargetTypeBuffer);
				}
				bcopy(prop, gTargetTypeBuffer, size);
				gTargetTypeBuffer[size - 1] = '\0';
			}
		}
		if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
			if (kSuccess == SecureDTGetProperty(entry, "model",
			    (void const **)&prop, &size)) {
				if (size > sizeof(gModelTypeBuffer)) {
					size = sizeof(gModelTypeBuffer);
				}
				bcopy(prop, gModelTypeBuffer, size);
				gModelTypeBuffer[size - 1] = '\0';
			}
		}
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			if (kSuccess == SecureDTGetProperty(entry, "debug-enabled",
			    (void const **) &prop, &size)) {
				/*
				 * We purposefully modify a constified variable as
				 * it will get locked down by a trusted monitor or
				 * via page table mappings. We don't want people easily
				 * modifying this variable...
				 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
				boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
				if (size > sizeof(uint32_t)) {
					size = sizeof(uint32_t);
				}
				bcopy(prop, modify_debug_enabled, size);
#pragma clang diagnostic pop
			}
			if (kSuccess == SecureDTGetProperty(entry, "firmware-version", (void const **) &prop, &size)) {
				if (size > sizeof(iBoot_version)) {
					size = sizeof(iBoot_version);
				}
				bcopy(prop, iBoot_version, size);
				iBoot_version[size - 1] = '\0';
			}
#if defined(TARGET_OS_OSX) && defined(__arm64__)
			if (kSuccess == SecureDTGetProperty(entry, "system-firmware-version", (void const **) &prop, &size)) {
				if (size > sizeof(iBoot_Stage_2_version)) {
					size = sizeof(iBoot_Stage_2_version);
				}
				bcopy(prop, iBoot_Stage_2_version, size);
				iBoot_Stage_2_version[size - 1] = '\0';
			}
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
			if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformECID)) {
					size = sizeof(gPlatformECID);
				}
				bcopy(prop, gPlatformECID, size);
			}
			if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id",
			    (void const **) &prop, &size)) {
				if (size > sizeof(gPlatformMemoryID)) {
					size = sizeof(gPlatformMemoryID);
				}
				bcopy(prop, &gPlatformMemoryID, size);
			}
		}
		pe_init_debug();
	}
}
512
513 void
PE_create_console(void)514 PE_create_console(void)
515 {
516 /*
517 * Check the head of VRAM for a panic log saved on last panic.
518 * Do this before the VRAM is trashed.
519 */
520 check_for_panic_log();
521
522 if (PE_state.video.v_display) {
523 PE_initialize_console(&PE_state.video, kPEGraphicsMode);
524 } else {
525 PE_initialize_console(&PE_state.video, kPETextMode);
526 }
527 }
528
/*
 * Copy the current video/console configuration into *info.
 * Always returns 0.
 */
int
PE_current_console(PE_Video * info)
{
	*info = PE_state.video;
	return 0;
}
535
536 void
PE_display_icon(__unused unsigned int flags,__unused const char * name)537 PE_display_icon(__unused unsigned int flags, __unused const char *name)
538 {
539 if (default_noroot_data) {
540 vc_display_icon(&default_noroot, default_noroot_data);
541 }
542 }
543
544 extern boolean_t
PE_get_hotkey(__unused unsigned char key)545 PE_get_hotkey(__unused unsigned char key)
546 {
547 return FALSE;
548 }
549
/* Consumer registered via PE_register_timebase_callback(); may be NULL. */
static timebase_callback_func gTimebaseCallback;
551
552 void
PE_register_timebase_callback(timebase_callback_func callback)553 PE_register_timebase_callback(timebase_callback_func callback)
554 {
555 gTimebaseCallback = callback;
556
557 PE_call_timebase_callback();
558 }
559
560 void
PE_call_timebase_callback(void)561 PE_call_timebase_callback(void)
562 {
563 struct timebase_freq_t timebase_freq;
564
565 timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
566 timebase_freq.timebase_den = 1;
567
568 if (gTimebaseCallback) {
569 gTimebaseCallback(&timebase_freq);
570 }
571 }
572
573 /*
574 * The default PE_poll_input handler.
575 */
576 int
PE_stub_poll_input(__unused unsigned int options,char * c)577 PE_stub_poll_input(__unused unsigned int options, char *c)
578 {
579 *c = (char)uart_getc();
580 return 0; /* 0 for success, 1 for unsupported */
581 }
582
583 /*
584 * This routine will return 1 if you are running on a device with a variant
585 * of iBoot that allows debugging. This is typically not the case on production
586 * fused parts (even when running development variants of iBoot).
587 *
588 * The routine takes an optional argument of the flags passed to debug="" so
589 * kexts don't have to parse the boot arg themselves.
590 */
591 uint32_t
PE_i_can_has_debugger(uint32_t * debug_flags)592 PE_i_can_has_debugger(uint32_t *debug_flags)
593 {
594 if (debug_flags) {
595 #if DEVELOPMENT || DEBUG
596 assert(startup_phase >= STARTUP_SUB_TUNABLES);
597 #endif
598 if (debug_enabled) {
599 *debug_flags = debug_boot_arg;
600 } else {
601 *debug_flags = 0;
602 }
603 }
604 return debug_enabled;
605 }
606
607 /*
608 * This routine returns TRUE if the device is configured
609 * with panic debugging enabled.
610 */
611 boolean_t
PE_panic_debugging_enabled()612 PE_panic_debugging_enabled()
613 {
614 return panicDebugging;
615 }
616
617 void
PE_update_panic_crc(unsigned char * buf,unsigned int * size)618 PE_update_panic_crc(unsigned char *buf, unsigned int *size)
619 {
620 if (!panic_info || !size) {
621 return;
622 }
623
624 if (!buf) {
625 *size = panic_text_len;
626 return;
627 }
628
629 if (*size == 0) {
630 return;
631 }
632
633 *size = *size > panic_text_len ? panic_text_len : *size;
634 if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
635 // rdar://88696402 (PanicTest: test case for MAGIC check in PE_update_panic_crc)
636 printf("Error!! Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
637 }
638
639 /* CRC everything after the CRC itself - starting with the panic header version */
640 panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len +
641 sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
642 }
643
644 uint32_t
PE_get_offset_into_panic_region(char * location)645 PE_get_offset_into_panic_region(char *location)
646 {
647 assert(gPanicBase != 0);
648 assert(location >= (char *) gPanicBase);
649 assert((unsigned int)(location - gPanicBase) < gPanicSize);
650
651 return (uint32_t)(uintptr_t)(location - gPanicBase);
652 }
653
654 void
PE_init_panicheader()655 PE_init_panicheader()
656 {
657 if (!panic_info) {
658 return;
659 }
660
661 bzero(panic_info, sizeof(struct embedded_panic_header));
662
663 /*
664 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
665 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
666 */
667 panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0;
668
669 panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
670 panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
671
672 return;
673 }
674
675 /*
676 * Tries to update the panic header to keep it consistent on nested panics.
677 *
678 * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
679 * it is to update the panic header to make it consistent when we nest panics.
680 */
681 void
PE_update_panicheader_nestedpanic()682 PE_update_panicheader_nestedpanic()
683 {
684 /*
685 * if the panic header pointer is bogus (e.g. someone stomped on it) then bail.
686 */
687 if (!panic_info) {
688 /* if this happens in development then blow up bigly */
689 assert(panic_info);
690 return;
691 }
692
693 /*
694 * If the panic log offset is not set, re-init the panic header
695 *
696 * note that this should not be possible unless someone stomped on the panic header to zero it out, since by the time
697 * we reach this location *someone* should have appended something to the log..
698 */
699 if (panic_info->eph_panic_log_offset == 0) {
700 PE_init_panicheader();
701 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
702 return;
703 }
704
705 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
706
707 /*
708 * If the panic log length is not set, set the end to
709 * the current location of the debug_buf_ptr to close it.
710 */
711 if (panic_info->eph_panic_log_len == 0) {
712 panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr);
713
714 /* indicative of corruption in the panic region, consumer beware */
715 if ((panic_info->eph_other_log_offset == 0) &&
716 (panic_info->eph_other_log_len == 0)) {
717 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
718 }
719 }
720
721 /* likely indicative of corruption in the panic region, consumer beware */
722 if (((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))) {
723 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
724 }
725
726 /*
727 * If we haven't set up the other log yet, set the beginning of the other log
728 * to the current location of the debug_buf_ptr
729 */
730 if (panic_info->eph_other_log_offset == 0) {
731 panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
732
733 /* indicative of corruption in the panic region, consumer beware */
734 if (panic_info->eph_other_log_len == 0) {
735 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
736 }
737 }
738
739 return;
740 }
741
742 boolean_t
PE_reboot_on_panic(void)743 PE_reboot_on_panic(void)
744 {
745 uint32_t debug_flags;
746
747 if (PE_i_can_has_debugger(&debug_flags)
748 && (debug_flags & DB_NMI)) {
749 /* kernel debugging is active */
750 return FALSE;
751 } else {
752 return TRUE;
753 }
754 }
755
756 void
PE_sync_panic_buffers(void)757 PE_sync_panic_buffers(void)
758 {
759 /*
760 * rdar://problem/26453070:
761 * The iBoot panic region is write-combined on arm64. We must flush dirty lines
762 * from L1/L2 as late as possible before reset, with no further reads of the panic
763 * region between the flush and the reset. Some targets have an additional memcache (L3),
764 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
765 * be discarded on reset. If we can make sure the lines are flushed to L3/DRAM,
766 * the platform reset handler will flush any L3.
767 */
768 if (gPanicBase) {
769 CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize);
770 }
771 }
772
/*
 * Adjust boot progress image metadata for the current display rotation.
 * Odd rotations (90/270 degrees) only need the dx/dy offsets swapped; the
 * image data is radially symmetric, so the pixels themselves never rotate.
 */
static void
pe_prepare_images(void)
{
	if ((1 & PE_state.video.v_rotate) != 0) {
		// Only square images with radial symmetry are supported
		// No need to actually rotate the data

		// Swap the dx and dy offsets
		uint32_t tmp = default_progress.dx;
		default_progress.dx = default_progress.dy;
		default_progress.dy = tmp;
	}
	/* Disabled code below: scaling of progress images for scaled UIs. */
#if 0
	uint32_t cnt, cnt2, cnt3, cnt4;
	uint32_t tmp, width, height;
	uint8_t data, *new_data;
	const uint8_t *old_data;

	width = default_progress.width;
	height = default_progress.height * default_progress.count;

	// Scale images if the UI is being scaled
	if (PE_state.video.v_scale > 1) {
		new_data = kalloc(width * height * scale * scale);
		if (new_data != 0) {
			old_data = default_progress_data;
			default_progress_data = new_data;
			for (cnt = 0; cnt < height; cnt++) {
				for (cnt2 = 0; cnt2 < width; cnt2++) {
					data = *(old_data++);
					for (cnt3 = 0; cnt3 < scale; cnt3++) {
						for (cnt4 = 0; cnt4 < scale; cnt4++) {
							new_data[width * scale * cnt3 + cnt4] = data;
						}
					}
					new_data += scale;
				}
				new_data += width * scale * (scale - 1);
			}
			default_progress.width *= scale;
			default_progress.height *= scale;
			default_progress.dx *= scale;
			default_progress.dy *= scale;
		}
	}
#endif
}
820
/*
 * Record the id of the last thread to access hardware, then issue an
 * inner-shareable DMB so the store is ordered before subsequent accesses.
 */
void
PE_mark_hwaccess(uint64_t thread)
{
	last_hwaccess_thread = thread;
	__builtin_arm_dmb(DMB_ISH);
}
827
828 __startup_func
829 vm_size_t
PE_init_socd_client(void)830 PE_init_socd_client(void)
831 {
832 DTEntry entry;
833 uintptr_t const *reg_prop;
834 unsigned int size;
835
836 if (kSuccess != SecureDTLookupEntry(0, "socd-trace-ram", &entry)) {
837 return 0;
838 }
839
840 if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)®_prop, &size)) {
841 return 0;
842 }
843
844 socd_trace_ram_base = ml_io_map(reg_prop[0], (vm_size_t)reg_prop[1]);
845 socd_trace_ram_size = (vm_size_t)reg_prop[1];
846
847 return socd_trace_ram_size;
848 }
849
850 /*
851 * PE_write_socd_client_buffer solves two problems:
852 * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
853 * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
854 * by a SRAM that must be written to only 4 bytes at a time.
855 */
856 void
PE_write_socd_client_buffer(vm_offset_t offset,const void * buff,vm_size_t size)857 PE_write_socd_client_buffer(vm_offset_t offset, const void *buff, vm_size_t size)
858 {
859 volatile uint32_t *dst = (volatile uint32_t *)(socd_trace_ram_base + offset);
860 vm_size_t len = size / sizeof(dst[0]);
861
862 assert(offset + size <= socd_trace_ram_size);
863
864 /* Perform 4 byte aligned accesses */
865 if ((offset % 4 != 0) || (size % 4 != 0)) {
866 panic("unaligned acccess to socd trace ram");
867 }
868
869 for (vm_size_t i = 0; i < len; i++) {
870 dst[i] = ((const uint32_t *)buff)[i];
871 }
872 }
873