/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/percpu.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/socd_client.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_stackshot.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc_internal.h>
#include <kern/iotrace.h>
#include <pexpert/device_tree.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_compressor_xnu.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>
#include <IOKit/IOBSD.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <IOKit/IOBSD.h>

#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/lbr.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/machine_cpu.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>
#include <mach/resource_monitors.h>
#include <machine/machine_routines.h>
#include <sys/proc_require.h>

#include <os/log_private.h>

#include <kern/ext_paniclog.h>

#if defined(__arm64__)
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#include <san/kcov.h>

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#if CONFIG_EXCLAVES
#include <xnuproxy/panic.h>
#include "exclaves_panic.h"
#endif

#if CONFIG_SPTM
#include <arm64/sptm/sptm.h>
#include <arm64/sptm/pmap/pmap_data.h>
#endif /* CONFIG_SPTM */

extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );

unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
SECURITY_READ_ONLY_LATE(unsigned int) panicDebugging = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if DEVELOPMENT || DEBUG
unsigned int panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
unsigned int panic_test_action_count = 1;
unsigned int panic_test_case = PANIC_TEST_CASE_DISABLED;
#endif

#if defined(__arm64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm64__)
/*
 * Magic number; this should be identical to the armv7 encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()    pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop()    panic_spin_forever()
#endif

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
/*
 * More than enough for any typical format string passed to panic();
 * anything longer will be truncated but that's better than nothing.
 */
#define EARLY_PANIC_BUFLEN 256
#endif

struct debugger_state {
	uint64_t        db_panic_options;
	debugger_op     db_current_op;
	boolean_t       db_proceed_on_sync_failure;
	const char      *db_message;
	const char      *db_panic_str;
	va_list         *db_panic_args;
	void            *db_panic_data_ptr;
	unsigned long   db_panic_caller;
	const char      *db_panic_initiator;
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t        db_entry_count;
	kern_return_t   db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

#define CPUDEBUGGEROP     current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG    current_debugger_state()->db_message
#define CPUPANICSTR       current_debugger_state()->db_panic_str
#define CPUPANICARGS      current_debugger_state()->db_panic_args
#define CPUPANICOPTS      current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR   current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC   current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT  current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET    current_debugger_state()->db_op_return
#define CPUPANICCALLER    current_debugger_state()->db_panic_caller
#define CPUPANICINITIATOR current_debugger_state()->db_panic_initiator


/*
 * Usage:
 * panic_test_action_count is interpreted in the context of the other flags; e.g.
 * for IO errors it means "succeed this many times, then fail", and for nesting it
 * means "panic this many times, then succeed"
 * panic_test_failure_mode is a bit map of things to do
 * panic_test_case is what sort of test we are injecting
 *
 * For more details see definitions in debugger.h
 *
 * Note that not all combinations are sensible, but some actions can be combined, e.g.
 * - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
 * - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
 * - IO_ERR with action count = 15 will cause 14 successful IOs, then fail on the next one
 *
 * (An illustrative sketch of these knobs follows the macro definition below.)
 */
#if DEVELOPMENT || DEBUG
#define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
	MACRO_BEGIN \
	if ((panic_test_case & requested) && panic_test_action_count) { \
	        panic_test_action_count--; \
	        volatile int *panic_test_badpointer = (int *)4; \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while (panic_test_badpointer) {; } } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count + 1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count + 1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
	} \
	MACRO_END

#endif /* DEVELOPMENT || DEBUG */
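
/*
 * Illustrative sketch (hypothetical values, not a supported interface): a
 * DEVELOPMENT/DEBUG test harness wanting two nested bad-pointer panics
 * followed by a spin (the "panic->panic->spin" case described above) could
 * set the knobs as, e.g.
 *
 *	panic_test_case = PANIC_TEST_CASE_RECURPANIC_ENTRY;
 *	panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR | PANIC_TEST_FAILURE_MODE_SPIN;
 *	panic_test_action_count = 3;
 *
 * Each pass through INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY)
 * then injects a bad-pointer dereference until the count reaches zero, at
 * which point the SPIN mode takes over and the CPU spins forever.
 */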

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;
const char *debugger_panic_initiator = "";

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller, const char *panic_initiator) __dead2 __printflike(1, 0);
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
void panic_stackshot_release_lock(void);
extern void PE_panic_hook(const char*);
extern int sync_internal(void);

#define NESTEDDEBUGGERENTRYMAX 5
static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
    NESTEDDEBUGGERENTRYMAX);

SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
static bool PERCPU_DATA(hv_entry_detected); // = false
static void awl_set_scratch_reg_hv_bit(void);
void awl_mark_hv_entry(void);
static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);

#if !XNU_TARGET_OS_OSX && CONFIG_KDP_INTERACTIVE_DEBUGGING
static boolean_t device_corefile_valid_on_ephemeral(void);
#endif /* !XNU_TARGET_OS_OSX && CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGE_SIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif

/* Debugger state */
atomic_int debugger_cpu = DEBUGGER_NO_CPU;
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;


/*
 * By default we treat Debugger() the same as calls to panic(). Only when
 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *NOT* set is
 * return from Debugger() supported.
 *
 * Return from Debugger() is currently only implemented on x86.
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

#if (DEVELOPMENT || DEBUG)
uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
#endif

/*
 * Maintain the physically-contiguous carveouts for the carveout boot-args.
 */
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);

TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;


/*
 * Returns whether kernel debugging is expected to be restricted
 * on the device currently based on CSR or other platform restrictions.
 */
boolean_t
kernel_debugging_restricted(void)
{
#if XNU_TARGET_OS_OSX
#if CONFIG_CSR
	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
		return TRUE;
	}
#endif /* CONFIG_CSR */
	return FALSE;
#else /* XNU_TARGET_OS_OSX */
	return FALSE;
#endif /* XNU_TARGET_OS_OSX */
}

__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

#if defined(__arm64__)
	char kdpname[80];

	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(). Only when
	 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *NOT* set is
	 * return from Debugger() supported. This is because writing an on-device
	 * corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86.
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);

#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);

#if CONFIG_EXT_PANICLOG
	ext_paniclog_init();
#endif
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}

void
phys_carveout_init(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

#if __arm__ || __arm64__
#if DEVELOPMENT || DEBUG
#endif /* DEVELOPMENT || DEBUG */
#endif /* __arm__ || __arm64__ */

	struct carveout {
		const char *name;
		vm_offset_t *va;
		uint32_t requested_size;
		uintptr_t *pa;
		size_t *allocated_size;
		uint64_t present;
	} carveouts[] = {
		{
			"phys_carveout",
			&phys_carveout,
			phys_carveout_mb,
			&phys_carveout_pa,
			&phys_carveout_size,
			phys_carveout_mb != 0,
		}
	};

	for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
		if (carveouts[i].present) {
			size_t temp_carveout_size = 0;
			if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
				panic("%s_mb size overflowed (%uMB)",
				    carveouts[i].name, carveouts[i].requested_size);
				return;
			}

			kmem_alloc_contig(kernel_map, carveouts[i].va,
			    temp_carveout_size, PAGE_MASK, 0, 0,
			    KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
			    VM_KERN_MEMORY_DIAG);

			*carveouts[i].pa = kvtophys(*carveouts[i].va);
			*carveouts[i].allocated_size = temp_carveout_size;
		}
	}

#if __arm64__ && (DEVELOPMENT || DEBUG)
	/* likely panic_trace boot-arg is also set so check and enable tracing if necessary into new carveout */
	PE_arm_debug_enable_trace(true);
#endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
}

boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)
{
	return phys_carveout_size && va >= phys_carveout &&
	       va < (phys_carveout + phys_carveout_size);
}

boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}

static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}

static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}

static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
	mp_kdp_enter(proceed_on_failure, is_stackshot);
	return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}

__printflike(3, 0)
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char *db_panic_initiator)
{
	CPUDEBUGGEROP = db_op;

	/*
	 * Note:
	 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
	 *
	 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
	 */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
		CPUPANICINITIATOR = db_panic_initiator;

#if CONFIG_EXCLAVES
		char *panic_str;
		if (exclaves_panic_get_string(&panic_str) == KERN_SUCCESS) {
			CPUPANICSTR = panic_str;
		}
#endif
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	// follow up in rdar://88497308 (nested panics should not clobber panic flags)
	CPUPANICOPTS = db_panic_options;

	return;
}

/*
 * Save the requested debugger state/action into the current processor's
 * percpu state and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char* db_panic_initiator)
{
	kern_return_t ret;

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
	if (!PE_arm_debug_and_trace_initialized()) {
		/*
		 * In practice this can only happen if we panicked very early,
		 * when only the boot CPU is online and before it has finished
		 * initializing the debug and trace infrastructure. We're going
		 * to hang soon, so let's at least make sure the message passed
		 * to panic() is actually logged.
		 */
		char buf[EARLY_PANIC_BUFLEN];
		vsnprintf(buf, EARLY_PANIC_BUFLEN, db_panic_str, *db_panic_args);
		paniclog_append_noflush("%s\n", buf);
	}
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller, db_panic_initiator);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	ret = CPUDEBUGGERRET;

	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0, NULL);

	return ret;
}

void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}

boolean_t
debug_is_current_cpu_in_panic_state(void)
{
	return current_debugger_state()->db_entry_count > 0;
}

/*
 * Check if we are in a nested panic, report findings, and take evasive action where necessary.
 *
 * see also PE_update_panicheader_nestedpanic
 */
static void
check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
{
	if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
		// Note: this is the first indication in the panic log or serial that we are off the rails...
		//
		// if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
		// if we panic *after* the log is finalized then we will only see it in the serial log
		//
		paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
		paniclog_flush();

		// print the *new* panic string to the console, we might not get it by other means...
		// TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
		// rdar://87846117 (NestedPanic: output panic string to paniclog)
		if (db_panic_str) {
			printf("Nested panic string:\n");
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
#pragma clang diagnostic pop
			printf("\n<end nested panic string>\n");
		}
	}

	// Stage 1 bailout
	//
	// Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog. If this fails with another nested
	// panic then we will land in Stage 2 below...
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
		uint32_t panic_details = 0;

		// if this is a force-reset panic then capture a log and reboot immediately.
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}

		// normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
		// there. To be safe send another notification, the function called below will only send kPEPanicBegin if it has not yet been sent.
		//
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
		PE_update_panicheader_nestedpanic();
		paniclog_flush();

		if (!panicDebugging) {
			// note that this will also send kPEPanicEnd
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		// prints to console
		paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 2 bailout
	//
	// Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path. Try to issue
	// a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
		if (!panicDebugging) {
			// note that:
			// - this code path should be audited for prints, as that is a common cause of nested panics
			// - this code path should take the fastest route to the actual reset, and not call any un-necessary code
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
		}

		// prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
		paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 3 bailout
	//
	// We are done here, we were unable to reset the platform without another nested panic. Spin until the watchdog kicks in.
	//
	if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
		kdp_machine_reboot_type(kPEHangCPU, 0);
	}
}

void
Debugger(const char *message)
{
	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
}

/*
 * Enter the Debugger
 *
 * This is similar to, but not the same as, a panic
 *
 * Key differences:
 * - we get here from a debugger entry action (e.g. NMI)
 * - the system is resumable on x86 (in theory, however it is not clear if this is tested)
 * - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
 *
 */
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(message);

	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0, "");
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0, "");
	} else {
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0, NULL);
	}

	/* resume from the debugger */

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}

static struct kdp_callout {
	struct kdp_callout      *callout_next;
	kdp_callout_fn_t        callout_fn;
	boolean_t               callout_in_progress;
	void                    *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
	struct kdp_callout *kcp;
	struct kdp_callout *list_head;

	kcp = zalloc_permanent_type(struct kdp_callout);

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
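
/*
 * Illustrative sketch (hypothetical callout, not part of this file): a driver
 * that needs to quiesce hardware around debugger entry could register
 *
 *	static void
 *	my_debug_callout(void *arg, kdp_event_t event)
 *	{
 *		struct my_softc *sc = arg;
 *		if (event == KDP_EVENT_ENTER) {
 *			my_hw_quiesce(sc);
 *		} else if (event == KDP_EVENT_EXIT) {
 *			my_hw_resume(sc);
 *		}
 *	}
 *
 *	kdp_register_callout(my_debug_callout, sc);
 *
 * Note there is no unregistration path: records are allocated with
 * zalloc_permanent_type() and the list only grows, so fn/arg must remain
 * valid for the lifetime of the system.
 */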

static void
kdp_callouts(kdp_event_t event)
{
	struct kdp_callout *kcp = kdp_callout_list;

	while (kcp) {
		if (!kcp->callout_in_progress) {
			kcp->callout_in_progress = TRUE;
			kcp->callout_fn(kcp->callout_arg, event);
			kcp->callout_in_progress = FALSE;
		}
		kcp = kcp->callout_next;
	}
}

#if defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
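
/*
 * Illustrative sketch (hypothetical caller): a subsystem would typically
 * register a statically-allocated buffer once at init time, e.g.
 *
 *	static char my_panic_data[ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN];
 *
 *	register_additional_panic_data_buffer("my_subsystem", my_panic_data,
 *	    sizeof(my_panic_data));
 *
 * Static storage satisfies the requirement above that producer_name and buf
 * are never de-allocated, since both are referenced at panic time.
 */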
#endif /* defined(__arm64__) */

/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
	va_end(panic_str_args);
}

void
panic_with_data(uuid_t uuid, void *addr, uint32_t len, const char *str, ...)
{
	va_list panic_str_args;

	ext_paniclog_panic_with_data(uuid, addr, len);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
	va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
	va_end(panic_str_args);
}
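
/*
 * Illustrative sketch (hypothetical caller): a subsystem that wants the
 * paniclog written but no local corefile taken could call, e.g.
 *
 *	panic_with_options(0, NULL, DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP,
 *	    "widget %u wedged (state 0x%x)", widget_id, widget_state);
 *
 * The reason/ctx pair is normally 0/NULL unless we are panicking from a
 * trap handler; bits in DEBUGGER_INTERNAL_OPTIONS_MASK are reserved for
 * internal use and masked off above.
 */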

void
panic_with_options_and_initiator(const char* initiator, unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0), initiator);
	va_end(panic_str_args);
}

boolean_t
panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
{
	if (ptr == NULL) {
		paniclog_append_noflush("NULL %s pointer\n", what);
		return false;
	}

	if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
		paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
		    what, ptr, (uint32_t)size);
		return false;
	}

	return true;
}
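
/*
 * Illustrative sketch: panic-context code should validate pointers with
 * panic_validate_ptr() (or the PANIC_VALIDATE_PTR() wrapper used below)
 * before dereferencing them, since faulting here would nest the panic, e.g.
 *
 *	struct widget *w = ...;     (hypothetical type)
 *	if (panic_validate_ptr(w, sizeof(*w), "widget")) {
 *		paniclog_append_noflush("widget state: %d\n", w->state);
 *	}
 */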

boolean_t
panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
{
	if (!PANIC_VALIDATE_PTR(thread)) {
		return false;
	}

	if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
		return false;
	}

	if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
		return false;
	}

	if (task) {
		*task = thread->t_tro->tro_task;
	}

	/* mirror the task guard above: only fill in proc if the caller asked for it */
	if (proc) {
		if (!panic_validate_ptr(thread->t_tro->tro_proc,
		    sizeof(struct proc *), "bsd_info")) {
			*proc = NULL;
		} else {
			*proc = thread->t_tro->tro_proc;
		}
	}

	return true;
}

#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0), "");

	va_end(panic_str_args);
}
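
/*
 * Illustrative sketch (hypothetical caller, x86 only): a watchdog that wants
 * the paniclog to backtrace a victim thread rather than the panicking one
 * could, while holding an explicit reference on that thread, call
 *
 *	thread_reference(victim_thread);
 *	panic_with_thread_context(0, NULL, 0, victim_thread,
 *	    "thread %p appears hung", victim_thread);
 *
 * The explicit caller-held reference required by the NOTE above guarantees
 * the thread is still valid when the function takes its own reference for
 * the backtrace.
 */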
#endif /* defined (__x86_64__) */

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller, const char *panic_initiator)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif

	/* Optionally call sync to reduce lost logs on restart; avoid on recursive panic. Unsafe due to unbounded sync() duration. */
	if ((panic_options_mask & DEBUGGER_OPTION_SYNC_ON_PANIC_UNSAFE) && (CPUDEBUGGERCOUNT == 0)) {
		sync_internal();
	}

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller, panic_initiator);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* If we're in a stackshot, signal that we've started panicking and wait for other CPUs to coalesce and spin before proceeding */
	stackshot_cpu_signal_panic();

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if CONFIG_SPTM
	/*
	 * If the SPTM has not itself already panicked, trigger a panic lockdown. This
	 * check is necessary since attempting to re-enter the SPTM after it calls
	 * panic will lead to a hang, which harms kernel field debuggability.
	 *
	 * Whether or not this check can be subverted is murky. This doesn't really
	 * matter, however, because any security-critical panic events will have
	 * already initiated lockdown before calling panic. Thus, lockdown from
	 * panic itself is merely a "best effort".
	 */
	libsptm_error_t sptm_error = LIBSPTM_SUCCESS;
	bool sptm_has_panicked = false;
	if (((sptm_error = sptm_triggered_panic(&sptm_has_panicked)) == LIBSPTM_SUCCESS) &&
	    !sptm_has_panicked) {
		sptm_xnu_panic_begin();
	}
#endif /* CONFIG_SPTM */

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__) && !APPLEVIRTUALPLATFORM
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("hint #0x45");
#endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}

void
panic_spin_forever(void)
{
	for (;;) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return. But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}

void
panic_stackshot_release_lock(void)
{
	assert(!not_in_kdp);
	DebuggerUnlock();
}

static void
kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
{
	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
	} else {
		PEHaltRestart(type);
	}
	halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}

static __attribute__((unused)) void
panic_debugger_log(const char *string, ...)
{
	va_list panic_debugger_log_args;

	va_start(panic_debugger_log_args, string);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	_doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
#pragma clang diagnostic pop
	va_end(panic_debugger_log_args);

#if defined(__arm64__)
	paniclog_flush();
#endif
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try not to break core dump path by sanitizer. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		/* if this is a force-reset panic then capture a log and reboot immediately. */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		}

		if (CPUDEBUGGERCOUNT > 1) {
			/*
			 * we are in a nested panic. Record the nested bit in panic flags and do some housekeeping
			 */
			PE_update_panicheader_nestedpanic();
			paniclog_flush();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options, debugger_panic_initiator);

#if DEVELOPMENT || DEBUG
		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
#if CONFIG_SPTM
		/* We want to skip taking a local core dump if this is a panic from SPTM/TXM/cL4. */
		extern uint8_t sptm_supports_local_coredump;
		bool sptm_interrupted = false;
		pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
		sptm_get_cpu_state(sptm_pcpu->sptm_cpu_id, CPUSTATE_SPTM_INTERRUPTED, &sptm_interrupted);
#endif
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
#if defined(__arm64__)
				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
				}
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
					}
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if (pmap_get_cpu_data()->ppl_state != PPL_STATE_KERNEL) {
			paniclog_append_noflush("skipping local kernel core because the PPL is not in KERNEL state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#elif CONFIG_SPTM
		else if (!sptm_supports_local_coredump) {
			paniclog_append_noflush("skipping local kernel core because the SPTM is in PANIC state and can't support core dump generation\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		} else if (sptm_interrupted) {
			paniclog_append_noflush("skipping local kernel core because the SPTM is in INTERRUPTED state and can't support core dump generation\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				vm_memtag_disable_checking();
				ret = kern_dump(KERN_DUMP_DISK);
				vm_memtag_enable_checking();
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is successfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/*
		 * Print panic string at the end of serial output
		 * to make panic more obvious when someone connects a debugger
		 */
		if (debugger_panic_str) {
			panic_debugger_log("Original panic string:\n");
			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
			panic_debugger_log("\n");
		}

		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
	panic_spin_forever();
}

#if SCHED_HYGIENE_DEBUG
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
1597 #else
1598 # define DEBUGGER_TRAP_TIMESTAMP(i)
1599 #endif /* SCHED_HYGIENE_DEBUG */
1600
1601 void
1602 handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
1603 {
1604 unsigned int initial_not_in_kdp = not_in_kdp;
1605 kern_return_t ret = KERN_SUCCESS;
1606 debugger_op db_prev_op = debugger_current_op;
1607
1608 DEBUGGER_TRAP_TIMESTAMP(0);
1609
1610 DebuggerLock();
1611 ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));
1612
1613 DEBUGGER_TRAP_TIMESTAMP(1);
1614
1615 #if SCHED_HYGIENE_DEBUG
1616 if (serialmode & SERIALMODE_OUTPUT) {
1617 ml_spin_debug_reset(current_thread());
1618 }
1619 #endif /* SCHED_HYGIENE_DEBUG */
1620 if (ret != KERN_SUCCESS) {
1621 CPUDEBUGGERRET = ret;
1622 DebuggerUnlock();
1623 return;
1624 }
1625
1626 /* Update the global panic/debugger nested entry level */
1627 kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
1628 if (kernel_debugger_entry_count > 0) {
1629 console_suspend();
1630 }
1631
1632 /*
1633 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
1634 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
1635 */
1636
1637 if (CPUDEBUGGEROP == DBOP_NONE) {
1638 /* If there was no debugger context setup, we trapped due to a software breakpoint */
1639 debugger_current_op = DBOP_BREAKPOINT;
1640 } else {
1641 /* Not safe to return from a nested panic/debugger call */
1642 if (debugger_current_op == DBOP_PANIC ||
1643 debugger_current_op == DBOP_DEBUGGER) {
1644 debugger_safe_to_return = FALSE;
1645 }
1646
1647 debugger_current_op = CPUDEBUGGEROP;
1648
1649 /* Only overwrite the panic message if there is none already - save the data from the first call */
1650 if (debugger_panic_str == NULL) {
1651 debugger_panic_str = CPUPANICSTR;
1652 debugger_panic_args = CPUPANICARGS;
1653 debugger_panic_data = CPUPANICDATAPTR;
1654 debugger_message = CPUDEBUGGERMSG;
1655 debugger_panic_caller = CPUPANICCALLER;
1656 debugger_panic_initiator = CPUPANICINITIATOR;
1657 }
1658
1659 debugger_panic_options = CPUPANICOPTS;
1660 }
1661
1662 /*
1663 * Clear the op from the processor debugger context so we can handle
1664 * breakpoints in the debugger
1665 */
1666 CPUDEBUGGEROP = DBOP_NONE;
1667
1668 DEBUGGER_TRAP_TIMESTAMP(2);
1669
1670 kdp_callouts(KDP_EVENT_ENTER);
1671 not_in_kdp = 0;
1672
1673 DEBUGGER_TRAP_TIMESTAMP(3);
1674
1675 #if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
1676 shmem_mark_as_busy();
1677 #endif
1678
1679 if (debugger_current_op == DBOP_BREAKPOINT) {
1680 kdp_raise_exception(exception, code, subcode, state);
1681 } else if (debugger_current_op == DBOP_STACKSHOT) {
1682 CPUDEBUGGERRET = do_stackshot(NULL);
1683 #if PGO
1684 } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
1685 CPUDEBUGGERRET = do_pgo_reset_counters();
1686 #endif
1687 } else {
1688 /* note: this is the panic path... */
1689 #if CONFIG_SPTM
1690 /*
1691 * Debug trap panics do not go through the standard panic flows so we
1692 * have to notify the SPTM that we're going down now. This is not so
1693 * much for security (critical cases are handled elsewhere) but rather
1694 * to just keep the SPTM bit in sync with the actual XNU state.
1695 */
1696 bool sptm_has_panicked = false;
1697 if (sptm_triggered_panic(&sptm_has_panicked) == LIBSPTM_SUCCESS &&
1698 !sptm_has_panicked) {
1699 sptm_xnu_panic_begin();
1700 }
1701 #endif /* CONFIG_SPTM */
1702 #if defined(__arm64__) && (DEBUG || DEVELOPMENT)
1703 if (!PE_arm_debug_and_trace_initialized()) {
1704 paniclog_append_noflush("kernel panicked before debug and trace infrastructure initialized!\n"
1705 "spinning forever...\n");
1706 panic_spin_forever();
1707 }
1708 #endif
1709 debugger_collect_diagnostics(exception, code, subcode, state);
1710 }
1711
1712 #if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
1713 shmem_unmark_as_busy();
1714 #endif
1715
1716 DEBUGGER_TRAP_TIMESTAMP(4);
1717
1718 not_in_kdp = initial_not_in_kdp;
1719 kdp_callouts(KDP_EVENT_EXIT);
1720
1721 DEBUGGER_TRAP_TIMESTAMP(5);
1722
1723 if (debugger_current_op != DBOP_BREAKPOINT) {
1724 debugger_panic_str = NULL;
1725 debugger_panic_args = NULL;
1726 debugger_panic_data = NULL;
1727 debugger_panic_options = 0;
1728 debugger_message = NULL;
1729 }
1730
1731 /* Restore the previous debugger state */
1732 debugger_current_op = db_prev_op;
1733
1734 DEBUGGER_TRAP_TIMESTAMP(6);
1735
1736 DebuggerResumeOtherCores();
1737
1738 DEBUGGER_TRAP_TIMESTAMP(7);
1739
1740 DebuggerUnlock();
1741
1742 DEBUGGER_TRAP_TIMESTAMP(8);
1743
1744 return;
1745 }
1746
1747 __attribute__((noinline, not_tail_called))
1748 void
1749 log(__unused int level, char *fmt, ...)
1750 {
1751 void *caller = __builtin_return_address(0);
1752 va_list listp;
1753 va_list listp2;
1754
1755
1756 #ifdef lint
1757 level++;
1758 #endif /* lint */
1759 #ifdef MACH_BSD
1760 va_start(listp, fmt);
1761 va_copy(listp2, listp);
1762
1763 disable_preemption();
1764 _doprnt(fmt, &listp, cons_putc_locked, 0);
1765 enable_preemption();
1766
1767 va_end(listp);
1768
1769 #pragma clang diagnostic push
1770 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1771 os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
1772 #pragma clang diagnostic pop
1773 va_end(listp2);
1774 #endif
1775 }
1776
1777 /*
1778 * Per <rdar://problem/24974766>, skip appending log messages to
1779 * the new logging infrastructure in contexts where safety is
1780 * uncertain. These contexts include:
1781 * - When we're in the debugger
1782 * - We're in a panic
1783 * - Interrupts are disabled
1784 * - Or Pre-emption is disabled
1785 * In all the above cases, it is potentially unsafe to log messages.
1786 */
1787
1788 boolean_t
1789 oslog_is_safe(void)
1790 {
1791 return kernel_debugger_entry_count == 0 &&
1792 not_in_kdp == 1 &&
1793 get_preemption_level() == 0 &&
1794 ml_get_interrupts_enabled() == TRUE;
1795 }
1796
1797 boolean_t
1798 debug_mode_active(void)
1799 {
1800 return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
1801 }
1802
1803 void
1804 debug_putc(char c)
1805 {
1806 if ((debug_buf_size != 0) &&
1807 ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size) &&
1808 (!is_debug_ptr_in_ext_paniclog())) {
1809 *debug_buf_ptr = c;
1810 debug_buf_ptr++;
1811 }
1812 }
1813
1814 #if defined (__x86_64__)
1815 struct pasc {
1816 unsigned a: 7;
1817 unsigned b: 7;
1818 unsigned c: 7;
1819 unsigned d: 7;
1820 unsigned e: 7;
1821 unsigned f: 7;
1822 unsigned g: 7;
1823 unsigned h: 7;
1824 } __attribute__((packed));
1825
1826 typedef struct pasc pasc_t;
1827
1828 /*
1829 * In-place packing routines -- inefficient, but they're called at most once.
1830 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
1831 */
1832 int
1833 packA(char *inbuf, uint32_t length, uint32_t buflen)
1834 {
1835 unsigned int i, j = 0;
1836 pasc_t pack;
1837
1838 length = MIN(((length + 7) & ~7), buflen);
1839
1840 for (i = 0; i < length; i += 8) {
1841 pack.a = inbuf[i];
1842 pack.b = inbuf[i + 1];
1843 pack.c = inbuf[i + 2];
1844 pack.d = inbuf[i + 3];
1845 pack.e = inbuf[i + 4];
1846 pack.f = inbuf[i + 5];
1847 pack.g = inbuf[i + 6];
1848 pack.h = inbuf[i + 7];
1849 bcopy((char *) &pack, inbuf + j, 7);
1850 j += 7;
1851 }
1852 return j;
1853 }
1854
1855 void
1856 unpackA(char *inbuf, uint32_t length)
1857 {
1858 pasc_t packs;
1859 unsigned i = 0;
1860 length = (length * 8) / 7;
1861
1862 while (i < length) {
1863 packs = *(pasc_t *)&inbuf[i];
1864 bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
1865 inbuf[i++] = packs.a;
1866 inbuf[i++] = packs.b;
1867 inbuf[i++] = packs.c;
1868 inbuf[i++] = packs.d;
1869 inbuf[i++] = packs.e;
1870 inbuf[i++] = packs.f;
1871 inbuf[i++] = packs.g;
1872 inbuf[i++] = packs.h;
1873 }
1874 }
1875 #endif /* defined (__x86_64__) */
1876
1877 extern char *proc_name_address(void *);
1878 extern char *proc_longname_address(void *);
1879
1880 __private_extern__ void
1881 panic_display_process_name(void)
1882 {
1883 proc_name_t proc_name = {};
1884 struct proc *cbsd_info = NULL;
1885 task_t ctask = NULL;
1886 vm_size_t size;
1887
1888 if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
1889 goto out;
1890 }
1891
1892 if (cbsd_info == NULL) {
1893 goto out;
1894 }
1895
1896 size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
1897 (vm_offset_t)&proc_name, sizeof(proc_name));
1898
1899 if (size == 0 || proc_name[0] == '\0') {
1900 size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
1901 (vm_offset_t)&proc_name,
1902 MIN(sizeof(command_t), sizeof(proc_name)));
1903 if (size > 0) {
1904 proc_name[size - 1] = '\0';
1905 }
1906 }
1907
1908 out:
1909 proc_name[sizeof(proc_name) - 1] = '\0';
1910 paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
1911 current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
1912 }
1913
1914 unsigned
1915 panic_active(void)
1916 {
1917 return debugger_current_op == DBOP_PANIC ||
1918 (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1919 }
1920
1921 void
1922 populate_model_name(char *model_string)
1923 {
1924 strlcpy(model_name, model_string, sizeof(model_name));
1925 }
1926
1927 void
1928 panic_display_model_name(void)
1929 {
1930 char tmp_model_name[sizeof(model_name)];
1931
1932 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1933 return;
1934 }
1935
1936 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1937
1938 if (tmp_model_name[0] != 0) {
1939 paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1940 }
1941 }
1942
1943 void
1944 panic_display_kernel_uuid(void)
1945 {
1946 char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1947
1948 if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1949 return;
1950 }
1951
1952 if (tmp_kernel_uuid[0] != '\0') {
1953 paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
1954 }
1955 }
1956
1957 #if CONFIG_SPTM
1958 static void
1959 panic_display_component_uuid(char const *component_name, void *component_address)
1960 {
1961 uuid_t *component_uuid;
1962 unsigned long component_uuid_len = 0;
1963 uuid_string_t component_uuid_string;
1964
1965 component_uuid = getuuidfromheader((kernel_mach_header_t *)component_address, &component_uuid_len);
1966
1967 if (component_uuid != NULL && component_uuid_len == sizeof(uuid_t)) {
1968 uuid_unparse_upper(*component_uuid, component_uuid_string);
1969 paniclog_append_noflush("%s UUID: %s\n", component_name, component_uuid_string);
1970 }
1971 }
1972 #endif /* CONFIG_SPTM */
1973
1974 void
1975 panic_display_kernel_aslr(void)
1976 {
1977 #if CONFIG_SPTM
1978 {
1979 struct debug_header const *dh = SPTMArgs->debug_header;
1980
1981 paniclog_append_noflush("Debug Header address: %p\n", dh);
1982
1983 if (dh != NULL) {
1984 void *component_address;
1985
1986 paniclog_append_noflush("Debug Header entry count: %d\n", dh->count);
1987
1988 switch (dh->count) {
1989 default: // 3 or more
1990 component_address = dh->image[DEBUG_HEADER_ENTRY_TXM];
1991 paniclog_append_noflush("TXM load address: %p\n", component_address);
1992
1993 panic_display_component_uuid("TXM", component_address);
1994 OS_FALLTHROUGH;
1995 case 2:
1996 component_address = dh->image[DEBUG_HEADER_ENTRY_XNU];
1997 paniclog_append_noflush("Debug Header kernelcache load address: %p\n", component_address);
1998
1999 panic_display_component_uuid("Debug Header kernelcache", component_address);
2000 OS_FALLTHROUGH;
2001 case 1:
2002 component_address = dh->image[DEBUG_HEADER_ENTRY_SPTM];
2003 paniclog_append_noflush("SPTM load address: %p\n", component_address);
2004
2005 panic_display_component_uuid("SPTM", component_address);
2006 OS_FALLTHROUGH;
2007 case 0:
2008 ; // nothing to print
2009 }
2010 }
2011 }
2012 #endif /* CONFIG_SPTM */
2013
2014 kc_format_t kc_format;
2015
2016 PE_get_primary_kc_format(&kc_format);
2017
2018 if (kc_format == KCFormatFileset) {
2019 void *kch = PE_get_kc_header(KCKindPrimary);
2020 paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
2021 paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
2022 paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
2023 paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
2024 #if defined(__arm64__)
2025 extern vm_offset_t segTEXTEXECB;
2026 paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
2027 paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
2028 #endif /* defined(__arm64__) */
2029 } else if (vm_kernel_slide) {
2030 paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
2031 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
2032 } else {
2033 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
2034 }
2035 }
2036
2037 void
2038 panic_display_hibb(void)
2039 {
2040 #if defined(__i386__) || defined (__x86_64__)
2041 paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
2042 #endif
2043 }
2044
2045 #if CONFIG_ECC_LOGGING
2046 __private_extern__ void
2047 panic_display_ecc_errors(void)
2048 {
2049 uint32_t count = ecc_log_get_correction_count();
2050
2051 if (count > 0) {
2052 paniclog_append_noflush("ECC Corrections:%u\n", count);
2053 }
2054 }
2055 #endif /* CONFIG_ECC_LOGGING */
2056
2057 #if CONFIG_FREEZE
2058 extern bool freezer_incore_cseg_acct;
2059 extern int32_t c_segment_pages_compressed_incore;
2060 #endif
2061
2062 extern uint32_t c_segment_pages_compressed;
2063 extern uint32_t c_segment_count;
2064 extern uint32_t c_segments_limit;
2065 extern uint32_t c_segment_pages_compressed_limit;
2066 extern uint32_t c_segment_pages_compressed_nearing_limit;
2067 extern uint32_t c_segments_nearing_limit;
2068 extern int vm_num_swap_files;
2069
2070 void
2071 panic_display_compressor_stats(void)
2072 {
2073 int isswaplow = vm_swap_low_on_space();
2074 #if CONFIG_FREEZE
2075 uint32_t incore_seg_count;
2076 uint32_t incore_compressed_pages;
2077 if (freezer_incore_cseg_acct) {
2078 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
2079 incore_compressed_pages = c_segment_pages_compressed_incore;
2080 } else {
2081 incore_seg_count = c_segment_count;
2082 incore_compressed_pages = c_segment_pages_compressed;
2083 }
2084
2085 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
2086 (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
2087 (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
2088 (incore_seg_count * 100) / c_segments_limit,
2089 (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
2090 vm_num_swap_files,
2091 isswaplow ? "LOW":"OK");
2092 #else /* CONFIG_FREEZE */
2093 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
2094 (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
2095 (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
2096 (c_segment_count * 100) / c_segments_limit,
2097 (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
2098 vm_num_swap_files,
2099 isswaplow ? "LOW":"OK");
2100 #endif /* CONFIG_FREEZE */
2101 }
2102
2103 #if !CONFIG_TELEMETRY
2104 int
2105 telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
2106 {
2107 return KERN_NOT_SUPPORTED;
2108 }
2109 #endif
2110
2111 #include <machine/machine_cpu.h>
2112
2113 TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
2114
2115 boolean_t
2116 kern_feature_override(uint32_t fmask)
2117 {
2118 return (kern_feature_overrides & fmask) == fmask;
2119 }
2120
2121 #if !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING
2122 static boolean_t
2123 device_corefile_valid_on_ephemeral(void)
2124 {
2125 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
2126 DTEntry node;
2127 const uint32_t *value = NULL;
2128 unsigned int size = 0;
2129 if (kSuccess != SecureDTLookupEntry(NULL, "/product", &node)) {
2130 return TRUE;
2131 }
2132 if (kSuccess != SecureDTGetProperty(node, "ephemeral-data-mode", (void const **) &value, &size)) {
2133 return TRUE;
2134 }
2135
2136 if (size != sizeof(uint32_t)) {
2137 return TRUE;
2138 }
2139
2140 if ((*value) && (kern_dump_should_enforce_encryption() == true)) {
2141 return FALSE;
2142 }
2143 #endif /* ifdef CONFIG_KDP_COREDUMP_ENCRYPTION */
2144
2145 return TRUE;
2146 }
2147 #endif /* !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING */
2148
2149 boolean_t
2150 on_device_corefile_enabled(void)
2151 {
2152 assert(startup_phase >= STARTUP_SUB_TUNABLES);
2153 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
2154 if (debug_boot_arg == 0) {
2155 return FALSE;
2156 }
2157 if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
2158 return FALSE;
2159 }
2160 #if !XNU_TARGET_OS_OSX
2161 if (device_corefile_valid_on_ephemeral() == FALSE) {
2162 return FALSE;
2163 }
2164 /*
2165 * outside of macOS, if there's a debug boot-arg set and local
2166 * cores aren't explicitly disabled, we always write a corefile.
2167 */
2168 return TRUE;
2169 #else /* !XNU_TARGET_OS_OSX */
2170 /*
2171 * on macOS, if corefiles on panic are requested and local cores
2172 * aren't disabled we write a local core.
2173 */
2174 if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
2175 return TRUE;
2176 }
2177 #endif /* !XNU_TARGET_OS_OSX */
2178 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
2179 return FALSE;
2180 }
2181
2182 boolean_t
2183 panic_stackshot_to_disk_enabled(void)
2184 {
2185 assert(startup_phase >= STARTUP_SUB_TUNABLES);
2186 #if defined(__x86_64__)
2187 if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
2188 /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
2189 if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
2190 return FALSE;
2191 }
2192
2193 return TRUE;
2194 }
2195 #endif
2196 return FALSE;
2197 }
2198
2199 const char *
2200 sysctl_debug_get_preoslog(size_t *size)
2201 {
2202 int result = 0;
2203 void *preoslog_pa = NULL;
2204 int preoslog_size = 0;
2205
2206 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2207 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2208 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2209 *size = 0;
2210 return NULL;
2211 }
2212
2213 /*
2214 * Beware:
2215 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
2216 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
2217 */
2218 *size = preoslog_size;
2219 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
2220 }
2221
2222 void
2223 sysctl_debug_free_preoslog(void)
2224 {
2225 #if RELEASE
2226 int result = 0;
2227 void *preoslog_pa = NULL;
2228 int preoslog_size = 0;
2229
2230 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2231 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2232 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2233 return;
2234 }
2235
2236 IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
2237 #else
2238 /* On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
2239 #endif // RELEASE
2240 }
2241
2242
2243 #if (DEVELOPMENT || DEBUG)
2244
2245 void
2246 platform_stall_panic_or_spin(uint32_t req)
2247 {
2248 if (xnu_platform_stall_value & req) {
2249 if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
2250 panic("Platform stall: User requested panic");
2251 } else {
2252 paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
2253 panic_spin_forever();
2254 }
2255 }
2256 }
2257 #endif
2258
2259
2260 #define AWL_HV_ENTRY_FLAG (0x1)
2261
2262 static inline void
2263 awl_set_scratch_reg_hv_bit(void)
2264 {
2265 #if defined(__arm64__)
2266 #define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
2267 uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
2268 awl_diag0 |= AWL_HV_ENTRY_FLAG;
2269 __builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
2270 #endif // defined(__arm64__)
2271 }
2272
2273 void
2274 awl_mark_hv_entry(void)
2275 {
2276 if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
2277 return;
2278 }
2279 *PERCPU_GET(hv_entry_detected) = true;
2280
2281 awl_set_scratch_reg_hv_bit();
2282 }
2283
2284 /*
2285 * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2286 * so restore it manually.
2287 */
2288 static bool
2289 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2290 {
2291 if (event == CPU_BOOTED) {
2292 if (*PERCPU_GET(hv_entry_detected)) {
2293 awl_set_scratch_reg_hv_bit();
2294 }
2295 }
2296
2297 return true;
2298 }
2299
2300 /*
2301 * Identifies and sets a flag if AWL Scratch0/1 exists in the system, subscribes
2302 * for a callback to restore register after hibernation
2303 */
2304 __startup_func
2305 static void
2306 set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
2307 {
2308 DTEntry base = NULL;
2309
2310 if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
2311 return;
2312 }
2313 const uint8_t *data = NULL;
2314 unsigned int data_size = sizeof(uint8_t);
2315
2316 if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
2317 for (unsigned int i = 0; i < data_size; i++) {
2318 if (data[i] != 0) {
2319 awl_scratch_reg_supported = true;
2320 cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
2321 break;
2322 }
2323 }
2324 }
2325 }
2326 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2327