1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * arm platform expert initialization.
5 */
6 #include <sys/types.h>
7 #include <sys/kdebug.h>
8 #include <mach/vm_param.h>
9 #include <pexpert/protos.h>
10 #include <pexpert/pexpert.h>
11 #include <pexpert/boot.h>
12 #include <pexpert/device_tree.h>
13 #include <pexpert/pe_images.h>
14 #include <kern/sched_prim.h>
15 #include <kern/socd_client.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_routines.h>
18 #include <arm/caches_internal.h>
19 #include <kern/debug.h>
20 #include <libkern/section_keywords.h>
21 #include <os/overflow.h>
22
23 #include <pexpert/arm64/board_config.h>
24
25
/* extern references */
extern void pe_identify_machine(boot_args *bootArgs);

/* static references */
static void pe_prepare_images(void);

/* private globals */
/* Platform expert state: boot args, device tree pointer/size, video config. */
SECURITY_READ_ONLY_LATE(PE_state_t) PE_state;
/* Whether the device is fused as a Security Research Device (from /chosen). */
TUNABLE_DT(uint32_t, PE_srd_fused, "/chosen", "research-enabled",
    "srd_fusing", 0, TUNABLE_DT_NONE);

/* Maximum length of the iBoot firmware version strings, including the NUL. */
#define FW_VERS_LEN 128

/* iBoot version string, copied from /chosen "firmware-version". */
char iBoot_version[FW_VERS_LEN];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
/* iBoot stage-2 version, copied from /chosen "system-firmware-version". */
char iBoot_Stage_2_version[FW_VERS_LEN];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

/*
 * This variable is only modified once, when the BSP starts executing. We put it in __DATA_CONST
 * as page protections on kernel text early in startup are read-write. The kernel is
 * locked down later in start-up, said mappings become RO and thus this
 * variable becomes immutable.
 *
 * See osfmk/arm/arm_vm_init.c for more information.
 */
SECURITY_READ_ONLY_LATE(volatile uint32_t) debug_enabled = FALSE;

/* Unique chip ID (ECID), copied from /chosen "unique-chip-id". */
uint8_t gPlatformECID[8];
/* DRAM vendor ID, copied from /chosen "dram-vendor-id". */
uint32_t gPlatformMemoryID;
/* Set once the boot progress indicator has been initialized. */
static boolean_t vc_progress_initialized = FALSE;
/* Thread recorded by PE_mark_hwaccess(); last known hardware accessor. */
uint64_t last_hwaccess_thread = 0;
/* Target and model strings copied from the device-tree root node. */
char gTargetTypeBuffer[16];
char gModelTypeBuffer[32];

/* Clock Frequency Info */
clock_frequency_info_t gPEClockFrequencyInfo;

/* Panic log region, mapped write-combined in check_for_panic_log(). */
vm_offset_t gPanicBase = 0;
unsigned int gPanicSize;
struct embedded_panic_header *panic_info = NULL;

#if (DEVELOPMENT || DEBUG) && defined(XNU_TARGET_OS_BRIDGE)
/*
 * On DEVELOPMENT bridgeOS, we map the x86 panic region
 * so we can include this data in bridgeOS corefiles
 */
uint64_t macos_panic_base = 0;
unsigned int macos_panic_size = 0;

struct macos_panic_header *mac_panic_header = NULL;
#endif

/* Maximum size of panic log excluding headers, in bytes */
static unsigned int panic_text_len;

/* Whether a console is standing by for panic logging */
static boolean_t panic_console_available = FALSE;

/* socd trace ram attributes */
static SECURITY_READ_ONLY_LATE(vm_offset_t) socd_trace_ram_base = 0;
static SECURITY_READ_ONLY_LATE(vm_size_t) socd_trace_ram_size = 0;

extern uint32_t crc32(uint32_t crc, const void *buf, size_t size);

void PE_slide_devicetree(vm_offset_t);
92
/*
 * Locate and map the persistent panic region, recording its base and size
 * in gPanicBase/gPanicSize and the usable text capacity in panic_text_len.
 * If the region already holds live data (shared-memory console, boot trace,
 * or a stale panic log iBoot failed to collect), react accordingly;
 * otherwise zero the region for reuse.
 */
static void
check_for_panic_log(void)
{
#ifdef PLATFORM_PANIC_LOG_PADDR
	/* Board config supplies a fixed physical address for the panic log. */
	gPanicBase = ml_io_map_wcomb(PLATFORM_PANIC_LOG_PADDR, PLATFORM_PANIC_LOG_SIZE);
	panic_text_len = PLATFORM_PANIC_LOG_SIZE - sizeof(struct embedded_panic_header);
	gPanicSize = PLATFORM_PANIC_LOG_SIZE;
#else
	DTEntry entry, chosen;
	unsigned int size;
	uintptr_t const *reg_prop;
	uint32_t const *panic_region_length;

	/*
	 * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot:
	 *
	 * chosen {
	 *   embedded-panic-log-size = <0x00080000>;
	 *   [a bunch of other stuff]
	 * };
	 *
	 * pram {
	 *   reg = <0x00000008_fbc48000 0x00000000_000b4000>;
	 * };
	 *
	 * reg[0] is the physical address
	 * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used)
	 * embedded-panic-log-size is the maximum amount of data to store in the buffer
	 */
	if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)&reg_prop, &size)) {
		return;
	}

	if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) {
		return;
	}

	if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) {
		return;
	}

	/* Map write-combined; PE_sync_panic_buffers() flushes it before reset. */
	gPanicBase = ml_io_map_wcomb(reg_prop[0], panic_region_length[0]);

	/* Deduct the size of the panic header from the panic region size */
	panic_text_len = panic_region_length[0] - sizeof(struct embedded_panic_header);
	gPanicSize = panic_region_length[0];

#if DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE)
	if (PE_consistent_debug_enabled()) {
		uint64_t macos_panic_physbase = 0;
		uint64_t macos_panic_physlen = 0;
		/* Populate the macOS panic region data if it's present in consistent debug */
		if (PE_consistent_debug_lookup_entry(kDbgIdMacOSPanicRegion, &macos_panic_physbase, &macos_panic_physlen)) {
			macos_panic_base = ml_io_map_with_prot(macos_panic_physbase, macos_panic_physlen, VM_PROT_READ);
			mac_panic_header = (struct macos_panic_header *) ((void *) macos_panic_base);
			macos_panic_size = macos_panic_physlen;
		}
	}
#endif /* DEVELOPMENT && defined(XNU_TARGET_OS_BRIDGE) */

#endif
	/*
	 * NOTE(review): assumes ml_io_map_wcomb() above cannot return 0 —
	 * confirm; otherwise the dereferences below would fault on a null map.
	 */
	panic_info = (struct embedded_panic_header *)gPanicBase;

	/* Check if a shared memory console is running in the panic buffer */
	if (panic_info->eph_magic == 'SHMC') {
		panic_console_available = TRUE;
		return;
	}

	/* Check if there's a boot profile in the panic buffer */
	if (panic_info->eph_magic == 'BTRC') {
		return;
	}

	/*
	 * Check to see if a panic (FUNK) is in VRAM from the last time
	 */
	if (panic_info->eph_magic == EMBEDDED_PANIC_MAGIC) {
		printf("iBoot didn't extract panic log from previous session crash, this is bad\n");
	}

	/* Clear panic region */
	bzero((void *)gPanicBase, gPanicSize);
}
181
182 int
PE_initialize_console(PE_Video * info,int op)183 PE_initialize_console(PE_Video * info, int op)
184 {
185 static int last_console = -1;
186
187 if (info && (info != &PE_state.video)) {
188 info->v_scale = PE_state.video.v_scale;
189 }
190
191 switch (op) {
192 case kPEDisableScreen:
193 initialize_screen(info, op);
194 last_console = switch_to_serial_console();
195 kprintf("kPEDisableScreen %d\n", last_console);
196 break;
197
198 case kPEEnableScreen:
199 initialize_screen(info, op);
200 if (info) {
201 PE_state.video = *info;
202 }
203 kprintf("kPEEnableScreen %d\n", last_console);
204 if (last_console != -1) {
205 switch_to_old_console(last_console);
206 }
207 break;
208
209 case kPEReleaseScreen:
210 /*
211 * we don't show the progress indicator on boot, but want to
212 * show it afterwards.
213 */
214 if (!vc_progress_initialized) {
215 default_progress.dx = 0;
216 default_progress.dy = 0;
217 vc_progress_initialize(&default_progress,
218 default_progress_data1x,
219 default_progress_data2x,
220 default_progress_data3x,
221 (unsigned char *) appleClut8);
222 vc_progress_initialized = TRUE;
223 }
224 initialize_screen(info, op);
225 break;
226
227 default:
228 initialize_screen(info, op);
229 break;
230 }
231
232 return 0;
233 }
234
/*
 * Second-stage platform-expert initialization: bring up printf, pull the
 * boot CLUT and failed-boot picture from the device tree, optionally start
 * the boot progress indicator, emit iBoot timing kdebug events, then hand
 * control to IOKit (InitIOKit / ConfigureIOKit).
 */
void
PE_init_iokit(void)
{
	DTEntry entry;
	unsigned int size, scale;
	unsigned long display_size;
	void const * const *map;
	unsigned int show_progress;
	int *delta, image_size, flip;
	uint32_t start_time_value = 0;
	uint32_t debug_wait_start_value = 0;
	uint32_t load_kernel_start_value = 0;
	uint32_t populate_registry_time_value = 0;

	PE_init_printf(TRUE);

	printf("iBoot version: %s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
	printf("iBoot Stage 2 version: %s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

	/* iBoot publishes boot UI assets under /chosen/memory-map. */
	if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) {
		boot_progress_element const *bootPict;

		if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) {
			bcopy(map[0], appleClut8, sizeof(appleClut8));
		}

		if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) {
			bootPict = (boot_progress_element const *) map[0];
			default_noroot.width = bootPict->width;
			default_noroot.height = bootPict->height;
			default_noroot.dx = 0;
			default_noroot.dy = bootPict->yOffset;
			default_noroot_data = &bootPict->data[0];
		}
	}

	pe_prepare_images();

	scale = PE_state.video.v_scale;
	flip = 1;

	/* Decide whether to show the progress indicator on this boot. */
#if defined(XNU_TARGET_OS_OSX)
	int notused;
	show_progress = TRUE;
	if (PE_parse_boot_argn("-restore", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
	if (PE_parse_boot_argn("-noprogress", &notused, sizeof(notused))) {
		show_progress = FALSE;
	}
#else
	show_progress = FALSE;
	PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress));
#endif /* XNU_TARGET_OS_OSX */
	if (show_progress) {
		/* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
		switch (PE_state.video.v_rotate) {
		case 2:
			flip = -1;
			OS_FALLTHROUGH;
		case 0:
			display_size = PE_state.video.v_height;
			image_size = default_progress.height;
			delta = &default_progress.dy;
			break;
		case 1:
			flip = -1;
			OS_FALLTHROUGH;
		case 3:
		default:
			display_size = PE_state.video.v_width;
			image_size = default_progress.width;
			delta = &default_progress.dx;
		}
		assert(*delta >= 0);
		/* Pull the image inward until it fits within half of the axis. */
		while (((unsigned)(*delta + image_size)) >= (display_size / 2)) {
			*delta -= 50 * scale;
			assert(*delta >= 0);
		}
		*delta *= flip;

		/* Check for DT-defined progress y delta */
		PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy));

		vc_progress_initialize(&default_progress,
		    default_progress_data1x,
		    default_progress_data2x,
		    default_progress_data3x,
		    (unsigned char *) appleClut8);
		vc_progress_initialized = TRUE;
	}

	if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) {
		/* Trace iBoot-provided timing information. */
		if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) {
			uint32_t const * value_ptr;

			if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(start_time_value)) {
					start_time_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(debug_wait_start_value)) {
					debug_wait_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) {
				if (size == sizeof(load_kernel_start_value)) {
					load_kernel_start_value = *value_ptr;
				}
			}

			if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) {
				if (size == sizeof(populate_registry_time_value)) {
					populate_registry_time_value = *value_ptr;
				}
			}
		}

		KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value);
	}

	InitIOKit(PE_state.deviceTreeHead);
	ConfigureIOKit();
}
365
/*
 * Kick off general-purpose IOKit driver matching.
 *
 * On arm/arm64 -- especially with KTRR/CTRR -- machine_lockdown() is a hard
 * security checkpoint: only trusted kernel code and specially entitled kexts
 * may execute before it. General IOKit matching can run untrusted or
 * non-entitled kext code, which may even parse attacker-controlled data
 * (e.g. network packets), so matching must be deferred until after lockdown
 * when the full set of hardware protections is active.
 */
void
PE_lockdown_iokit(void)
{
	StartIOKitMatching();
}
383
384 void
PE_slide_devicetree(vm_offset_t slide)385 PE_slide_devicetree(vm_offset_t slide)
386 {
387 assert(PE_state.initialized);
388 PE_state.deviceTreeHead = (void *)((uintptr_t)PE_state.deviceTreeHead + slide);
389 SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
390 }
391
392 void
PE_init_platform(boolean_t vm_initialized,void * args)393 PE_init_platform(boolean_t vm_initialized, void *args)
394 {
395 DTEntry entry;
396 unsigned int size;
397 void * const *prop;
398 boot_args *boot_args_ptr = (boot_args *) args;
399
400 if (PE_state.initialized == FALSE) {
401 PE_state.initialized = TRUE;
402 PE_state.bootArgs = boot_args_ptr;
403 PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP;
404 PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength;
405 PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr;
406 PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes;
407 PE_state.video.v_width = boot_args_ptr->Video.v_width;
408 PE_state.video.v_height = boot_args_ptr->Video.v_height;
409 PE_state.video.v_depth = (boot_args_ptr->Video.v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
410 PE_state.video.v_rotate = (
411 ((boot_args_ptr->Video.v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask) + // rotation
412 ((boot_args_ptr->Video.v_depth >> kBootVideoDepthBootRotateShift) & kBootVideoDepthMask) // add extra boot rotation
413 ) % 4;
414 PE_state.video.v_scale = ((boot_args_ptr->Video.v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask) + 1;
415 PE_state.video.v_display = boot_args_ptr->Video.v_display;
416 strlcpy(PE_state.video.v_pixelFormat, "BBBBBBBBGGGGGGGGRRRRRRRR", sizeof(PE_state.video.v_pixelFormat));
417 }
418 if (!vm_initialized) {
419 /*
420 * Setup the Device Tree routines
421 * so the console can be found and the right I/O space
422 * can be used..
423 */
424 SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize);
425 pe_identify_machine(boot_args_ptr);
426 } else {
427 pe_arm_init_interrupts(args);
428 pe_arm_init_debug(args);
429 }
430
431 if (!vm_initialized) {
432 if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
433 if (kSuccess == SecureDTGetProperty(entry, "target-type",
434 (void const **)&prop, &size)) {
435 if (size > sizeof(gTargetTypeBuffer)) {
436 size = sizeof(gTargetTypeBuffer);
437 }
438 bcopy(prop, gTargetTypeBuffer, size);
439 gTargetTypeBuffer[size - 1] = '\0';
440 }
441 }
442 if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) {
443 if (kSuccess == SecureDTGetProperty(entry, "model",
444 (void const **)&prop, &size)) {
445 if (size > sizeof(gModelTypeBuffer)) {
446 size = sizeof(gModelTypeBuffer);
447 }
448 bcopy(prop, gModelTypeBuffer, size);
449 gModelTypeBuffer[size - 1] = '\0';
450 }
451 }
452 if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
453 if (kSuccess == SecureDTGetProperty(entry, "debug-enabled",
454 (void const **) &prop, &size)) {
455 /*
456 * We purposefully modify a constified variable as
457 * it will get locked down by a trusted monitor or
458 * via page table mappings. We don't want people easily
459 * modifying this variable...
460 */
461 #pragma clang diagnostic push
462 #pragma clang diagnostic ignored "-Wcast-qual"
463 boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled;
464 if (size > sizeof(uint32_t)) {
465 size = sizeof(uint32_t);
466 }
467 bcopy(prop, modify_debug_enabled, size);
468 #pragma clang diagnostic pop
469 }
470 if (kSuccess == SecureDTGetProperty(entry, "firmware-version", (void const **) &prop, &size)) {
471 if (size > sizeof(iBoot_version)) {
472 size = sizeof(iBoot_version);
473 }
474 bcopy(prop, iBoot_version, size);
475 iBoot_version[size - 1] = '\0';
476 }
477 #if defined(TARGET_OS_OSX) && defined(__arm64__)
478 if (kSuccess == SecureDTGetProperty(entry, "system-firmware-version", (void const **) &prop, &size)) {
479 if (size > sizeof(iBoot_Stage_2_version)) {
480 size = sizeof(iBoot_Stage_2_version);
481 }
482 bcopy(prop, iBoot_Stage_2_version, size);
483 iBoot_Stage_2_version[size - 1] = '\0';
484 }
485 #endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
486 if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id",
487 (void const **) &prop, &size)) {
488 if (size > sizeof(gPlatformECID)) {
489 size = sizeof(gPlatformECID);
490 }
491 bcopy(prop, gPlatformECID, size);
492 }
493 if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id",
494 (void const **) &prop, &size)) {
495 if (size > sizeof(gPlatformMemoryID)) {
496 size = sizeof(gPlatformMemoryID);
497 }
498 bcopy(prop, &gPlatformMemoryID, size);
499 }
500 }
501 pe_init_debug();
502 }
503 }
504
505 void
PE_create_console(void)506 PE_create_console(void)
507 {
508 /*
509 * Check the head of VRAM for a panic log saved on last panic.
510 * Do this before the VRAM is trashed.
511 */
512 check_for_panic_log();
513
514 if (PE_state.video.v_display) {
515 PE_initialize_console(&PE_state.video, kPEGraphicsMode);
516 } else {
517 PE_initialize_console(&PE_state.video, kPETextMode);
518 }
519 }
520
521 int
PE_current_console(PE_Video * info)522 PE_current_console(PE_Video * info)
523 {
524 *info = PE_state.video;
525 return 0;
526 }
527
528 void
PE_display_icon(__unused unsigned int flags,__unused const char * name)529 PE_display_icon(__unused unsigned int flags, __unused const char *name)
530 {
531 if (default_noroot_data) {
532 vc_display_icon(&default_noroot, default_noroot_data);
533 }
534 }
535
536 extern boolean_t
PE_get_hotkey(__unused unsigned char key)537 PE_get_hotkey(__unused unsigned char key)
538 {
539 return FALSE;
540 }
541
/* Registered consumer of timebase frequency updates; see PE_register_timebase_callback(). */
static timebase_callback_func gTimebaseCallback;
543
544 void
PE_register_timebase_callback(timebase_callback_func callback)545 PE_register_timebase_callback(timebase_callback_func callback)
546 {
547 gTimebaseCallback = callback;
548
549 PE_call_timebase_callback();
550 }
551
552 void
PE_call_timebase_callback(void)553 PE_call_timebase_callback(void)
554 {
555 struct timebase_freq_t timebase_freq;
556
557 timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz;
558 timebase_freq.timebase_den = 1;
559
560 if (gTimebaseCallback) {
561 gTimebaseCallback(&timebase_freq);
562 }
563 }
564
565 /*
566 * The default PE_poll_input handler.
567 */
568 int
PE_stub_poll_input(__unused unsigned int options,char * c)569 PE_stub_poll_input(__unused unsigned int options, char *c)
570 {
571 *c = (char)uart_getc();
572 return 0; /* 0 for success, 1 for unsupported */
573 }
574
575 /*
576 * This routine will return 1 if you are running on a device with a variant
577 * of iBoot that allows debugging. This is typically not the case on production
578 * fused parts (even when running development variants of iBoot).
579 *
580 * The routine takes an optional argument of the flags passed to debug="" so
581 * kexts don't have to parse the boot arg themselves.
582 */
583 uint32_t
PE_i_can_has_debugger(uint32_t * debug_flags)584 PE_i_can_has_debugger(uint32_t *debug_flags)
585 {
586 if (debug_flags) {
587 #if DEVELOPMENT || DEBUG
588 assert(startup_phase >= STARTUP_SUB_TUNABLES);
589 #endif
590 if (debug_enabled) {
591 *debug_flags = debug_boot_arg;
592 } else {
593 *debug_flags = 0;
594 }
595 }
596 return debug_enabled;
597 }
598
599 /*
600 * This routine returns TRUE if the device is configured
601 * with panic debugging enabled.
602 */
603 boolean_t
PE_panic_debugging_enabled()604 PE_panic_debugging_enabled()
605 {
606 return panicDebugging;
607 }
608
609 void
PE_update_panic_crc(unsigned char * buf,unsigned int * size)610 PE_update_panic_crc(unsigned char *buf, unsigned int *size)
611 {
612 if (!panic_info || !size) {
613 return;
614 }
615
616 if (!buf) {
617 *size = panic_text_len;
618 return;
619 }
620
621 if (*size == 0) {
622 return;
623 }
624
625 *size = *size > panic_text_len ? panic_text_len : *size;
626 if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) {
627 // rdar://88696402 (PanicTest: test case for MAGIC check in PE_update_panic_crc)
628 printf("Error!! Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC);
629 }
630
631 /* CRC everything after the CRC itself - starting with the panic header version */
632 panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len +
633 sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version)));
634 }
635
636 uint32_t
PE_get_offset_into_panic_region(char * location)637 PE_get_offset_into_panic_region(char *location)
638 {
639 assert(gPanicBase != 0);
640 assert(location >= (char *) gPanicBase);
641 assert((unsigned int)(location - gPanicBase) < gPanicSize);
642
643 return (uint32_t)(uintptr_t)(location - gPanicBase);
644 }
645
646 void
PE_init_panicheader()647 PE_init_panicheader()
648 {
649 if (!panic_info) {
650 return;
651 }
652
653 bzero(panic_info, sizeof(struct embedded_panic_header));
654
655 /*
656 * The panic log begins immediately after the panic header -- debugger synchronization and other functions
657 * may log into this region before we've become the exclusive panicking CPU and initialize the header here.
658 */
659 panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0;
660
661 panic_info->eph_magic = EMBEDDED_PANIC_MAGIC;
662 panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION;
663
664 return;
665 }
666
667 /*
668 * Tries to update the panic header to keep it consistent on nested panics.
669 *
670 * NOTE: The purpose of this function is NOT to detect/correct corruption in the panic region,
671 * it is to update the panic header to make it consistent when we nest panics.
672 */
673 void
PE_update_panicheader_nestedpanic()674 PE_update_panicheader_nestedpanic()
675 {
676 /*
677 * if the panic header pointer is bogus (e.g. someone stomped on it) then bail.
678 */
679 if (!panic_info) {
680 /* if this happens in development then blow up bigly */
681 assert(panic_info);
682 return;
683 }
684
685 /*
686 * If the panic log offset is not set, re-init the panic header
687 *
688 * note that this should not be possible unless someone stomped on the panic header to zero it out, since by the time
689 * we reach this location *someone* should have appended something to the log..
690 */
691 if (panic_info->eph_panic_log_offset == 0) {
692 PE_init_panicheader();
693 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
694 return;
695 }
696
697 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_NESTED_PANIC;
698
699 /*
700 * If the panic log length is not set, set the end to
701 * the current location of the debug_buf_ptr to close it.
702 */
703 if (panic_info->eph_panic_log_len == 0) {
704 panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr);
705
706 /* indicative of corruption in the panic region, consumer beware */
707 if ((panic_info->eph_other_log_offset == 0) &&
708 (panic_info->eph_other_log_len == 0)) {
709 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
710 }
711 }
712
713 /* likely indicative of corruption in the panic region, consumer beware */
714 if (((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))) {
715 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
716 }
717
718 /*
719 * If we haven't set up the other log yet, set the beginning of the other log
720 * to the current location of the debug_buf_ptr
721 */
722 if (panic_info->eph_other_log_offset == 0) {
723 panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
724
725 /* indicative of corruption in the panic region, consumer beware */
726 if (panic_info->eph_other_log_len == 0) {
727 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INCOHERENT_PANICLOG;
728 }
729 }
730
731 return;
732 }
733
734 boolean_t
PE_reboot_on_panic(void)735 PE_reboot_on_panic(void)
736 {
737 uint32_t debug_flags;
738
739 if (PE_i_can_has_debugger(&debug_flags)
740 && (debug_flags & DB_NMI)) {
741 /* kernel debugging is active */
742 return FALSE;
743 } else {
744 return TRUE;
745 }
746 }
747
748 void
PE_sync_panic_buffers(void)749 PE_sync_panic_buffers(void)
750 {
751 /*
752 * rdar://problem/26453070:
753 * The iBoot panic region is write-combined on arm64. We must flush dirty lines
754 * from L1/L2 as late as possible before reset, with no further reads of the panic
755 * region between the flush and the reset. Some targets have an additional memcache (L3),
756 * and a read may bring dirty lines out of L3 and back into L1/L2, causing the lines to
757 * be discarded on reset. If we can make sure the lines are flushed to L3/DRAM,
758 * the platform reset handler will flush any L3.
759 */
760 if (gPanicBase) {
761 CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize);
762 }
763 }
764
765 static void
pe_prepare_images(void)766 pe_prepare_images(void)
767 {
768 if ((1 & PE_state.video.v_rotate) != 0) {
769 // Only square square images with radial symmetry are supported
770 // No need to actually rotate the data
771
772 // Swap the dx and dy offsets
773 uint32_t tmp = default_progress.dx;
774 default_progress.dx = default_progress.dy;
775 default_progress.dy = tmp;
776 }
777 #if 0
778 uint32_t cnt, cnt2, cnt3, cnt4;
779 uint32_t tmp, width, height;
780 uint8_t data, *new_data;
781 const uint8_t *old_data;
782
783 width = default_progress.width;
784 height = default_progress.height * default_progress.count;
785
786 // Scale images if the UI is being scaled
787 if (PE_state.video.v_scale > 1) {
788 new_data = kalloc(width * height * scale * scale);
789 if (new_data != 0) {
790 old_data = default_progress_data;
791 default_progress_data = new_data;
792 for (cnt = 0; cnt < height; cnt++) {
793 for (cnt2 = 0; cnt2 < width; cnt2++) {
794 data = *(old_data++);
795 for (cnt3 = 0; cnt3 < scale; cnt3++) {
796 for (cnt4 = 0; cnt4 < scale; cnt4++) {
797 new_data[width * scale * cnt3 + cnt4] = data;
798 }
799 }
800 new_data += scale;
801 }
802 new_data += width * scale * (scale - 1);
803 }
804 default_progress.width *= scale;
805 default_progress.height *= scale;
806 default_progress.dx *= scale;
807 default_progress.dy *= scale;
808 }
809 }
810 #endif
811 }
812
/*
 * Record the ID of the thread performing a hardware access, then issue a
 * DMB so the store is observable before any subsequent access.
 * NOTE(review): the store-then-barrier order is deliberate; do not reorder.
 */
void
PE_mark_hwaccess(uint64_t thread)
{
	last_hwaccess_thread = thread;
	__builtin_arm_dmb(DMB_ISH);
}
819
820 __startup_func
821 vm_size_t
PE_init_socd_client(void)822 PE_init_socd_client(void)
823 {
824 DTEntry entry;
825 uintptr_t const *reg_prop;
826 unsigned int size;
827
828 if (kSuccess != SecureDTLookupEntry(0, "socd-trace-ram", &entry)) {
829 return 0;
830 }
831
832 if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)®_prop, &size)) {
833 return 0;
834 }
835
836 socd_trace_ram_base = ml_io_map(reg_prop[0], (vm_size_t)reg_prop[1]);
837 socd_trace_ram_size = (vm_size_t)reg_prop[1];
838
839 return socd_trace_ram_size;
840 }
841
842 /*
843 * PE_write_socd_client_buffer solves two problems:
844 * 1. Prevents accidentally trusting a value read from socd client buffer. socd client buffer is considered untrusted.
845 * 2. Ensures only 4 byte store instructions are used. On some platforms, socd client buffer is backed up
846 * by a SRAM that must be written to only 4 bytes at a time.
847 */
848 void
PE_write_socd_client_buffer(vm_offset_t offset,const void * buff,vm_size_t size)849 PE_write_socd_client_buffer(vm_offset_t offset, const void *buff, vm_size_t size)
850 {
851 volatile uint32_t *dst = (volatile uint32_t *)(socd_trace_ram_base + offset);
852 vm_size_t len = size / sizeof(dst[0]);
853
854 assert(offset + size <= socd_trace_ram_size);
855
856 /* Perform 4 byte aligned accesses */
857 if ((offset % 4 != 0) || (size % 4 != 0)) {
858 panic("unaligned acccess to socd trace ram");
859 }
860
861 for (vm_size_t i = 0; i < len; i++) {
862 dst[i] = ((const uint32_t *)buff)[i];
863 }
864 }
865