1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59 #include <kdp/kdp.h>
60 #include <kdp/kdp_core.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_callout.h>
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/percpu.h>
66 #include <kern/spl.h>
67 #include <kern/thread.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/socd_client.h>
71 #include <kern/misc_protos.h>
72 #include <kern/clock.h>
73 #include <kern/telemetry.h>
74 #include <kern/ecc.h>
75 #include <kern/kern_cdata.h>
76 #include <kern/zalloc_internal.h>
77 #include <kern/iotrace.h>
78 #include <pexpert/device_tree.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_map.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_compressor.h>
83 #include <stdarg.h>
84 #include <stdatomic.h>
85 #include <sys/pgo.h>
86 #include <console/serial_protos.h>
87 #include <IOKit/IOBSD.h>
88
89 #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
90 #include <kdp/kdp_udp.h>
91 #endif
92 #include <kern/processor.h>
93
94 #if defined(__i386__) || defined(__x86_64__)
95 #include <IOKit/IOBSD.h>
96
97 #include <i386/cpu_threads.h>
98 #include <i386/pmCPU.h>
99 #include <i386/lbr.h>
100 #endif
101
102 #include <IOKit/IOPlatformExpert.h>
103 #include <machine/machine_cpu.h>
104 #include <machine/pal_routines.h>
105
106 #include <sys/kdebug.h>
107 #include <libkern/OSKextLibPrivate.h>
108 #include <libkern/OSAtomic.h>
109 #include <libkern/kernel_mach_header.h>
110 #include <libkern/section_keywords.h>
111 #include <uuid/uuid.h>
112 #include <mach_debug/zone_info.h>
113 #include <mach/resource_monitors.h>
114 #include <machine/machine_routines.h>
115 #include <sys/proc_require.h>
116
117 #include <os/log_private.h>
118
119 #include <kern/ext_paniclog.h>
120
121 #if defined(__arm64__)
122 #include <pexpert/pexpert.h> /* For gPanicBase */
123 #include <arm/caches_internal.h>
124 #include <arm/misc_protos.h>
125 extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
126 #endif
127
128 #include <san/kcov.h>
129
130 #if CONFIG_XNUPOST
131 #include <tests/xnupost.h>
132 extern int vsnprintf(char *, size_t, const char *, va_list);
133 #endif
134
135 #if CONFIG_CSR
136 #include <sys/csr.h>
137 #endif
138
139
/* iBoot/DeviceTree loader-info accessors, implemented in the platform expert. */
extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );

/* Non-zero when the DB_HALT debug boot-arg requests an early stop in the debugger (see panic_init). */
unsigned int halt_in_debugger = 0;
/* Selected debugger backend; managed outside this chunk. */
unsigned int current_debugger = 0;
/* Non-zero while a debugger session is active; managed outside this chunk. */
unsigned int active_debugger = 0;
/* TRUE when on-device debugging of panics is permitted (derived from debug boot-args in panic_init). */
SECURITY_READ_ONLY_LATE(unsigned int) panicDebugging = FALSE;
/* Global count of debugger entries; distinct from the per-CPU db_entry_count. */
unsigned int kernel_debugger_entry_count = 0;

#if DEVELOPMENT || DEBUG
/* Panic-injection test knobs; see the usage notes above INJECT_NESTED_PANIC_IF_REQUESTED. */
unsigned int panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
unsigned int panic_test_action_count = 1;
unsigned int panic_test_case = PANIC_TEST_CASE_DISABLED;
#endif

#if defined(__arm64__)
/* Optional chain of additional data buffers appended to the panic log on arm64. */
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif
158
#if defined(__arm64__)
/*
 * Magic number; this should be identical to the armv7 encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
/* Software breakpoint; routes into the kernel's INT3/trap handling. */
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
/* Final stop after a panic: halt via the power-management layer. */
#define panic_stop() pmCPUHalt(PM_HALT_PANIC)
#else
/* No architectural halt path here: spin forever (see panic_spin_forever). */
#define panic_stop() panic_spin_forever()
#endif

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
/*
 * More than enough for any typical format string passed to panic();
 * anything longer will be truncated but that's better than nothing.
 */
#define EARLY_PANIC_BUFLEN 256
#endif
183
/* Per-CPU debugger/panic bookkeeping, captured on entry to Debugger()/panic(). */
struct debugger_state {
	uint64_t db_panic_options;      /* DEBUGGER_OPTION_* flags for this entry */
	debugger_op db_current_op;      /* requested operation (DBOP_*) */
	boolean_t db_proceed_on_sync_failure; /* keep going if halting other cores fails */
	const char *db_message;         /* message passed to Debugger() */
	const char *db_panic_str;       /* panic format string */
	va_list *db_panic_args;         /* arguments matching db_panic_str */
	void *db_panic_data_ptr;        /* opaque data pointer supplied by the panic caller */
	unsigned long db_panic_caller;  /* return address of the panic/Debugger caller */
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t db_entry_count;
	kern_return_t db_op_return;     /* result handed back to DebuggerTrapWithState() */
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

/* Convenience accessors for the current CPU's debugger_state fields. */
#define CPUDEBUGGEROP current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG current_debugger_state()->db_message
#define CPUPANICSTR current_debugger_state()->db_panic_str
#define CPUPANICARGS current_debugger_state()->db_panic_args
#define CPUPANICOPTS current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET current_debugger_state()->db_op_return
#define CPUPANICCALLER current_debugger_state()->db_panic_caller
216
217
/*
 * Usage:
 * panic_test_action_count is in the context of other flags, e.g. for IO errors it is "succeed this many times then fail" and for nesting it is "panic this many times then succeed"
 * panic_test_failure_mode is a bit map of things to do
 * panic_test_case is what sort of test we are injecting
 *
 * For more details see definitions in debugger.h
 *
 * Note that not all combinations are sensible, but some actions can be combined, e.g.
 * - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
 * - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
 * - IO_ERR with action 15 will cause 14 successful IOs, then fail on the next one
 */
#if DEVELOPMENT || DEBUG
/*
 * Deliberately inject a nested failure (spin, bad-pointer store, or explicit
 * panic) when the requested panic_test_case bit is armed.  The pointer is
 * address 4 so a dereference reliably faults; it is volatile so the compiler
 * cannot optimize the access (or the spin loop's load) away.
 */
#define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
	MACRO_BEGIN \
	if ((panic_test_case & requested) && panic_test_action_count) { \
		panic_test_action_count--; \
		volatile int *panic_test_badpointer = (int *)4; \
		if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while(panic_test_badpointer); } \
		if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count+1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
		if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count+1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
	} \
	MACRO_END

#endif /* DEVELOPMENT || DEBUG */
244
/*
 * Globally visible debugger state; presumably mirrors the panicking CPU's
 * per-CPU record for consumers outside this file (maintained by code not
 * visible in this chunk — confirm against handle_debugger_trap).
 */
debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

/* Forward declarations for the panic entry points implemented below / elsewhere. */
void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2 __printflike(1, 0);
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);
extern int sync(proc_t p, void *, void *);

/*
 * Per-CPU cap on nested panic/debugger entries before the bailout stages in
 * check_and_handle_nested_panic() kick in; tunable via "nested_panic_max".
 */
#define NESTEDDEBUGGERENTRYMAX 5
static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
    NESTEDDEBUGGERENTRYMAX);

/* AWL scratch-register / hypervisor-entry tracking (expansion of "awl" not shown in this chunk). */
SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
static bool PERCPU_DATA(hv_entry_detected); // = false
static void awl_set_scratch_reg_hv_bit(void);
void awl_mark_hv_entry(void);
static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
271
#if defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

/* TRUE when the kdp_match_name boot-arg explicitly requested KDP (set in panic_init). */
SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm64__) */
/* Three pages of panic data plus the macOS panic header that precedes it. */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
/* The panic header lives at the start of debug_buf; log data follows at mph_data. */
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the end of the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

/* TRUE once extended_debug_log_init() has switched to the larger heap buffer. */
boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif
306
/* Debugger state */
atomic_int debugger_cpu = DEBUGGER_NO_CPU; /* cpu number that owns the debugger lock, or DEBUGGER_NO_CPU */
boolean_t debugger_allcpus_halted = FALSE; /* TRUE while the other cores are held halted */
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid; /* points into the kernel Mach-O header (set in panic_init) */

/* UUIDs of the boot / pageable / aux kext collections, published for panic reporting. */
boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;


/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
/* Kext assertion checking defaults on only for DEVELOPMENT/DEBUG kernels. */
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

#if (DEVELOPMENT || DEBUG)
uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
#endif

/*
 * Maintain the physically-contiguous carveouts for the carveout bootargs.
 */
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);

TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;   /* kernel VA of the carveout */
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;  /* physical address of the carveout */
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;   /* allocated size in bytes (0 = none) */
368
369
370 /*
371 * Returns whether kernel debugging is expected to be restricted
372 * on the device currently based on CSR or other platform restrictions.
373 */
374 boolean_t
kernel_debugging_restricted(void)375 kernel_debugging_restricted(void)
376 {
377 #if XNU_TARGET_OS_OSX
378 #if CONFIG_CSR
379 if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
380 return TRUE;
381 }
382 #endif /* CONFIG_CSR */
383 return FALSE;
384 #else /* XNU_TARGET_OS_OSX */
385 return FALSE;
386 #endif /* XNU_TARGET_OS_OSX */
387 }
388
/*
 * One-time startup configuration of the panic/debugger subsystem:
 * records the kernel UUID, interprets the "debug" boot-arg, and decides
 * whether Debugger() behaves like panic().  Registered via STARTUP below.
 */
__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	/* Record the running kernel's UUID (from its Mach-O header) for panic reporting. */
	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		/* On arm64, panic debugging additionally requires DB_NMI. */
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

#if defined(__arm64__)
	char kdpname[80];

	/* KDP counts as explicitly requested only when a kdp_match_name target was given. */
	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
443
#if defined (__x86_64__)
/*
 * Switch the panic log to a larger heap-allocated buffer with room for the
 * panic-time stackshot, then free the standalone stackshot buffer set up by
 * debug_log_init() (it is only needed until this point).  Called once the
 * platform is up far enough to know the extended log is supported.
 */
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */
476
/*
 * Wire up the panic log buffer.  On arm64 the buffer lives inside the
 * iBoot-provided panic region (gPanicBase/gPanicSize); on other platforms it
 * is the static debug_buf, plus a separately allocated buffer reserved for a
 * panic-time stackshot.
 */
void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);

#if CONFIG_EXT_PANICLOG
	ext_paniclog_init();
#endif
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}
515
/*
 * Allocate the physically-contiguous debug carveouts requested via boot-args
 * (currently just "phys_carveout_mb").  Only honored on devices where
 * debugging is allowed (PE_i_can_has_debugger).
 */
void
phys_carveout_init(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

#if __arm__ || __arm64__
#if DEVELOPMENT || DEBUG
#endif /* DEVELOPMENT || DEBUG */
#endif /* __arm__ || __arm64__ */

	/* Table of requested carveouts; 'present' gates whether each is allocated. */
	struct carveout {
		const char *name;
		vm_offset_t *va;
		uint32_t requested_size;
		uintptr_t *pa;
		size_t *allocated_size;
		uint64_t present;
	} carveouts[] = {
		{
			"phys_carveout",
			&phys_carveout,
			phys_carveout_mb,
			&phys_carveout_pa,
			&phys_carveout_size,
			phys_carveout_mb != 0,
		}
	};

	for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
		if (carveouts[i].present) {
			size_t temp_carveout_size = 0;
			/* Convert MB -> bytes, panicking if the multiply overflows. */
			if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
				panic("%s_mb size overflowed (%uMB)",
				    carveouts[i].name, carveouts[i].requested_size);
				return;
			}

			/* KMA_NOFAIL: the allocator is expected not to return failure here. */
			kmem_alloc_contig(kernel_map, carveouts[i].va,
			    temp_carveout_size, PAGE_MASK, 0, 0,
			    KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
			    VM_KERN_MEMORY_DIAG);

			*carveouts[i].pa = kvtophys(*carveouts[i].va);
			*carveouts[i].allocated_size = temp_carveout_size;
		}
	}

#if __arm64__ && (DEVELOPMENT || DEBUG)
	/* likely panic_trace boot-arg is also set so check and enable tracing if necessary into new carveout */
	PE_arm_debug_enable_trace(true);
#endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
}
570
571 boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)572 debug_is_in_phys_carveout(vm_map_offset_t va)
573 {
574 return phys_carveout_size && va >= phys_carveout &&
575 va < (phys_carveout + phys_carveout_size);
576 }
577
/* Whether the phys carveout may be included in an on-device coredump (phys_carveout_core boot-arg, default on). */
boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}
583
584 static void
DebuggerLock(void)585 DebuggerLock(void)
586 {
587 int my_cpu = cpu_number();
588 int debugger_exp_cpu = DEBUGGER_NO_CPU;
589 assert(ml_get_interrupts_enabled() == FALSE);
590
591 if (atomic_load(&debugger_cpu) == my_cpu) {
592 return;
593 }
594
595 while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
596 debugger_exp_cpu = DEBUGGER_NO_CPU;
597 }
598
599 return;
600 }
601
602 static void
DebuggerUnlock(void)603 DebuggerUnlock(void)
604 {
605 assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());
606
607 /*
608 * We don't do an atomic exchange here in case
609 * there's another CPU spinning to acquire the debugger_lock
610 * and we never get a chance to update it. We already have the
611 * lock so we can simply store DEBUGGER_NO_CPU and follow with
612 * a barrier.
613 */
614 atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
615 OSMemoryBarrier();
616
617 return;
618 }
619
/*
 * Halt the other CPUs so the debugger owns the machine.  On arm64 this
 * cross-calls the other cores and can report failure; on x86 mp_kdp_enter
 * has no failure return, so KERN_SUCCESS is always reported.
 */
static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
#pragma unused(is_stackshot)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}
632
/* Resume the CPUs halted by DebuggerHaltOtherCores(). */
static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}
642
643 __printflike(3, 0)
644 static void
DebuggerSaveState(debugger_op db_op,const char * db_message,const char * db_panic_str,va_list * db_panic_args,uint64_t db_panic_options,void * db_panic_data_ptr,boolean_t db_proceed_on_sync_failure,unsigned long db_panic_caller)645 DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
646 va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
647 boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
648 {
649 CPUDEBUGGEROP = db_op;
650
651 /*
652 * Note:
653 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
654 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
655 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
656 *
657 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
658 */
659 if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
660 CPUDEBUGGERMSG = db_message;
661 CPUPANICSTR = db_panic_str;
662 CPUPANICARGS = db_panic_args;
663 CPUPANICDATAPTR = db_panic_data_ptr;
664 CPUPANICCALLER = db_panic_caller;
665 }
666
667 CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
668 CPUDEBUGGERRET = KERN_SUCCESS;
669
670 /* Reset these on any nested panics */
671 // follow up in rdar://88497308 (nested panics should not clobber panic flags)
672 CPUPANICOPTS = db_panic_options;
673
674 return;
675 }
676
677 /*
678 * Save the requested debugger state/action into the current processor's
679 * percu state and trap to the debugger.
680 */
/*
 * Save the requested debugger state into the current CPU's per-CPU record,
 * then trap into the debugger.  Returns the result that the trap handler
 * stored in the per-CPU state.  Interrupts must already be disabled.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
	if (!PE_arm_debug_and_trace_initialized()) {
		/*
		 * In practice this can only happen if we panicked very early,
		 * when only the boot CPU is online and before it has finished
		 * initializing the debug and trace infrastructure. We're going
		 * to hang soon, so let's at least make sure the message passed
		 * to panic() is actually logged.
		 */
		char buf[EARLY_PANIC_BUFLEN];
		vsnprintf(buf, EARLY_PANIC_BUFLEN, db_panic_str, *db_panic_args);
		paniclog_append_noflush("%s\n", buf);
	}
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	/* The trap handler deposits its result in the per-CPU state before returning here. */
	ret = CPUDEBUGGERRET;

	/* Clear the saved state now that the operation has completed. */
	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}
723
/*
 * Back end of the kernel assert() macro.  Panics on assertion failure,
 * unless CONFIG_NONFATAL_ASSERTS kernels have disabled fatal assertions
 * via the "assertions" tunable, in which case the failure is only logged.
 */
void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
742
743 boolean_t
debug_is_current_cpu_in_panic_state(void)744 debug_is_current_cpu_in_panic_state(void)
745 {
746 return current_debugger_state()->db_entry_count > 0;
747 }
748
749 /*
750 * check if we are in a nested panic, report findings, take evasive action where necessary
751 *
752 * see also PE_update_panicheader_nestedpanic
753 */
754 static void
check_and_handle_nested_panic(uint64_t panic_options_mask,unsigned long panic_caller,const char * db_panic_str,va_list * db_panic_args)755 check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
756 {
757 if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
758 // Note: this is the first indication in the panic log or serial that we are off the rails...
759 //
760 // if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
761 // if we panic *after* the log is finalized then we will only see it in the serial log
762 //
763 paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
764 paniclog_flush();
765
766 // print the *new* panic string to the console, we might not get it by other means...
767 // TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
768 // rdar://87846117 (NestedPanic: output panic string to paniclog)
769 if (db_panic_str) {
770 printf("Nested panic string:\n");
771 #pragma clang diagnostic push
772 #pragma clang diagnostic ignored "-Wformat-nonliteral"
773 _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
774 #pragma clang diagnostic pop
775 printf("\n<end nested panic string>\n");
776 }
777 }
778
779 // Stage 1 bailout
780 //
781 // Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog. If this fails with another nested
782 // panic then we will land in Stage 2 below...
783 //
784 if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
785 uint32_t panic_details = 0;
786
787 // if this is a force-reset panic then capture a log and reboot immediately.
788 if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
789 panic_details |= kPanicDetailsForcePowerOff;
790 }
791
792 // normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
793 // there. To be safe send another notification, the function called below will only send kPEPanicBegin if it has not yet been sent.
794 //
795 PEHaltRestartInternal(kPEPanicBegin, panic_details);
796
797 paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
798 PE_update_panicheader_nestedpanic();
799 paniclog_flush();
800
801 if (!panicDebugging) {
802 // note that this will also send kPEPanicEnd
803 kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
804 }
805
806 // prints to console
807 paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
808 panic_spin_forever();
809 }
810
811 // Stage 2 bailout
812 //
813 // Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path. Try to issue
814 // a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
815 //
816 if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
817 if (!panicDebugging) {
818 // note that:
819 // - this code path should be audited for prints, as that is a common cause of nested panics
820 // - this code path should take the fastest route to the actual reset, and not call any un-necessary code
821 kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
822 }
823
824 // prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
825 paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
826 panic_spin_forever();
827 }
828
829 // Stage 3 bailout
830 //
831 // We are done here, we were unable to reset the platform without another nested panic. Spin until the watchdog kicks in.
832 //
833 if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
834 kdp_machine_reboot_type(kPEHangCPU, 0);
835 }
836 }
837
/* Public Debugger() entry: no machine context; record our caller for the log. */
void
Debugger(const char *message)
{
	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
}
843
844 /*
845 * Enter the Debugger
846 *
847 * This is similar to, but not the same as a panic
848 *
849 * Key differences:
850 * - we get here from a debugger entry action (e.g. NMI)
851 * - the system is resumable on x86 (in theory, however it is not clear if this is tested)
852 * - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
853 *
854 */
/*
 * Enter the debugger, optionally with a saved machine context (e.g. from an
 * NMI).  Disables interrupts and preemption, bumps this CPU's nesting count,
 * performs nested-panic checks, and either traps to the debugger or calls the
 * trap handler directly when a context is supplied.  On x86 the function can
 * return and restore the previous interrupt/preemption state.
 */
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	/* read_lbr(): presumably snapshots last-branch records before debugger entry — confirm in lbr.h */
	read_lbr();
#endif
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(message);

	/* Show raw pointer values in any output produced while in the debugger. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/* A machine context was supplied: invoke the trap handler directly. */
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		/* No context: raise a debugger trap to capture one. */
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	/* resume from the debugger */

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}
906
/*
 * Singly-linked list of registered kdp event callouts. Nodes are allocated
 * from permanent zone memory (never freed); insertion is lock-free via CAS
 * in kdp_register_callout(). callout_in_progress prevents re-entry of a
 * callout if the debugger path recurses.
 */
static struct kdp_callout {
	struct kdp_callout * callout_next;
	kdp_callout_fn_t callout_fn;
	boolean_t callout_in_progress;
	void * callout_arg;
} * kdp_callout_list = NULL;
913
914 /*
915 * Called from kernel context to register a kdp event callout.
916 */
/*
 * Register a kdp event callout, invoked on debugger entry/exit/paniclog
 * events (see kdp_callouts()).
 *
 * fn  - callout function; receives arg and the kdp_event_t.
 * arg - opaque context passed through to fn.
 *
 * The node comes from permanent zone memory, so it is never freed and the
 * lock-free push below cannot suffer ABA on the list head.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void * arg)
{
	struct kdp_callout * kcp;
	struct kdp_callout * list_head;

	kcp = zalloc_permanent_type(struct kdp_callout);

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
935
936 static void
kdp_callouts(kdp_event_t event)937 kdp_callouts(kdp_event_t event)
938 {
939 struct kdp_callout *kcp = kdp_callout_list;
940
941 while (kcp) {
942 if (!kcp->callout_in_progress) {
943 kcp->callout_in_progress = TRUE;
944 kcp->callout_fn(kcp->callout_arg, event);
945 kcp->callout_in_progress = FALSE;
946 }
947 kcp = kcp->callout_next;
948 }
949 }
950
951 #if defined(__arm64__)
952 /*
953 * Register an additional buffer with data to include in the panic log
954 *
955 * <rdar://problem/50137705> tracks supporting more than one buffer
956 *
957 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
958 */
959 void
register_additional_panic_data_buffer(const char * producer_name,void * buf,int len)960 register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
961 {
962 if (panic_data_buffers != NULL) {
963 panic("register_additional_panic_data_buffer called with buffer already registered");
964 }
965
966 if (producer_name == NULL || (strlen(producer_name) == 0)) {
967 panic("register_additional_panic_data_buffer called with invalid producer_name");
968 }
969
970 if (buf == NULL) {
971 panic("register_additional_panic_data_buffer called with invalid buffer pointer");
972 }
973
974 if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
975 panic("register_additional_panic_data_buffer called with invalid length");
976 }
977
978 struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
979 new_panic_data_buffer->producer_name = producer_name;
980 new_panic_data_buffer->buf = buf;
981 new_panic_data_buffer->len = len;
982
983 if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
984 panic("register_additional_panic_data_buffer called with buffer already registered");
985 }
986
987 return;
988 }
989 #endif /* defined(__arm64__) */
990
991 /*
992 * An overview of the xnu panic path:
993 *
994 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
995 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
996 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
997 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
998 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
999 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
1000 * according to the device's boot-args.
1001 */
1002 #undef panic
1003 void
panic(const char * str,...)1004 panic(const char *str, ...)
1005 {
1006 va_list panic_str_args;
1007
1008 va_start(panic_str_args, str);
1009 panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
1010 va_end(panic_str_args);
1011 }
1012
1013 void
panic_with_data(uuid_t uuid,void * addr,uint32_t len,const char * str,...)1014 panic_with_data(uuid_t uuid, void *addr, uint32_t len, const char *str, ...)
1015 {
1016 va_list panic_str_args;
1017
1018 ext_paniclog_panic_with_data(uuid, addr, len);
1019
1020 va_start(panic_str_args, str);
1021 panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
1022 va_end(panic_str_args);
1023 }
1024
1025 void
panic_with_options(unsigned int reason,void * ctx,uint64_t debugger_options_mask,const char * str,...)1026 panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
1027 {
1028 va_list panic_str_args;
1029
1030 va_start(panic_str_args, str);
1031 panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1032 NULL, (unsigned long)(char *)__builtin_return_address(0));
1033 va_end(panic_str_args);
1034 }
1035
1036 boolean_t
panic_validate_ptr(void * ptr,vm_size_t size,const char * what)1037 panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
1038 {
1039 if (ptr == NULL) {
1040 paniclog_append_noflush("NULL %s pointer\n", what);
1041 return false;
1042 }
1043
1044 if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
1045 paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
1046 what, ptr, (uint32_t)size);
1047 return false;
1048 }
1049
1050 return true;
1051 }
1052
1053 boolean_t
panic_get_thread_proc_task(struct thread * thread,struct task ** task,struct proc ** proc)1054 panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
1055 {
1056 if (!PANIC_VALIDATE_PTR(thread)) {
1057 return false;
1058 }
1059
1060 if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
1061 return false;
1062 }
1063
1064 if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
1065 return false;
1066 }
1067
1068 if (task) {
1069 *task = thread->t_tro->tro_task;
1070 }
1071
1072 if (!panic_validate_ptr(thread->t_tro->tro_proc,
1073 sizeof(struct proc *), "bsd_info")) {
1074 *proc = NULL;
1075 } else {
1076 *proc = thread->t_tro->tro_proc;
1077 }
1078
1079 return true;
1080 }
1081
1082 #if defined (__x86_64__)
1083 /*
1084 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
1085 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
1086 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
1087 * thread when writing the panic log.
1088 *
1089 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
1090 */
1091 void
panic_with_thread_context(unsigned int reason,void * ctx,uint64_t debugger_options_mask,thread_t thread,const char * str,...)1092 panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
1093 {
1094 va_list panic_str_args;
1095 __assert_only os_ref_count_t th_ref_count;
1096
1097 assert_thread_magic(thread);
1098 th_ref_count = os_ref_get_count_raw(&thread->ref_count);
1099 assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);
1100
1101 /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
1102 thread_reference(thread);
1103
1104 va_start(panic_str_args, str);
1105 panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
1106 thread, (unsigned long)(char *)__builtin_return_address(0));
1107
1108 va_end(panic_str_args);
1109 }
1110 #endif /* defined (__x86_64__) */
1111
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
/*
 * panic_trap_to_debugger: the common funnel for all panic() variants.
 * Records the panic state on this CPU and traps into the debugger path
 * (handle_debugger_trap), which writes the paniclog/corefile and reboots
 * or spins per boot-args. Never returns.
 *
 * panic_format_str / panic_args - printf-style panic string and its varargs.
 * reason / ctx                  - exception reason and saved machine state;
 *                                 non-NULL ctx means we arrived from a trap
 *                                 and can call handle_debugger_trap directly.
 * panic_options_mask            - DEBUGGER_OPTION_* flags.
 * panic_data_ptr                - optional payload recorded with the panic.
 * panic_caller                  - return address of the panic() caller.
 */
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif

	/* optionally call sync, to reduce lost logs on restart, avoid on recursive panic. Unsafe due to unbounded sync() duration */
	if ((panic_options_mask & DEBUGGER_OPTION_SYNC_ON_PANIC_UNSAFE) && (CPUDEBUGGERCOUNT == 0)) {
		sync(NULL, NULL, NULL);
	}

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	/* Dump the kdebug trace to file before we lose the machine (first entry only). */
	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__) && !APPLEVIRTUALPLATFORM
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__		volatile ("hint #0x45");
#endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}
1202
/*
 * Spin indefinitely, as the terminal state of an unrecoverable panic path.
 * Uses WFE on arm to keep the core quiescent; cpu_pause() elsewhere.
 */
void
panic_spin_forever(void)
{
	while (1) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return. But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}
1215
1216 static void
kdp_machine_reboot_type(unsigned int type,uint64_t debugger_flags)1217 kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
1218 {
1219 if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
1220 PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1221 } else {
1222 PEHaltRestart(type);
1223 }
1224 halt_all_cpus(TRUE);
1225 }
1226
1227 void
kdp_machine_reboot(void)1228 kdp_machine_reboot(void)
1229 {
1230 kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
1231 }
1232
1233 static __attribute__((unused)) void
panic_debugger_log(const char * string,...)1234 panic_debugger_log(const char *string, ...)
1235 {
1236 va_list panic_debugger_log_args;
1237
1238 va_start(panic_debugger_log_args, string);
1239 #pragma clang diagnostic push
1240 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1241 _doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
1242 #pragma clang diagnostic pop
1243 va_end(panic_debugger_log_args);
1244
1245 #if defined(__arm64__)
1246 paniclog_flush();
1247 #endif
1248 }
1249
1250 /*
1251 * Gather and save diagnostic information about a panic (or Debugger call).
1252 *
1253 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
1254 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e no
1255 * paniclog is written and no core is written unless we request a core on NMI.
1256 *
1257 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
1258 * and calling out to any other functions we have for collecting diagnostic info.
1259 */
/*
 * Gather and save diagnostic information about a panic (or Debugger call):
 * paniclog, panic stackshot, optional local corefile, then reboot, hand
 * off to KDP, or spin, depending on boot-args and panic options.
 *
 * exception / code / subcode / state - trap information forwarded to
 *                                      kdp_raise_exception when KDP runs.
 *
 * Only returns when entered via Debugger() and it is safe to resume
 * (debugger_safe_to_return, not a nested entry, not debugger_is_panic).
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try not to break core dump path by sanitizer. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		/* if this is a force-reset panic then capture a log and reboot immediately. */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		}

		if (CPUDEBUGGERCOUNT > 1) {
			/*
			 * we are in a nested panic.  Record the nested bit in panic flags and do some housekeeping
			 */
			PE_update_panicheader_nestedpanic();
			paniclog_flush();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
#if defined(__arm64__)
				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
				}
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
					}
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if (pmap_get_cpu_data()->ppl_state != PPL_STATE_KERNEL) {
			paniclog_append_noflush("skipping local kernel core because the PPL is not in KERNEL state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is sucessfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/*
		 * Print panic string at the end of serial output
		 * to make panic more obvious when someone connects a debugger
		 */
		if (debugger_panic_str) {
			panic_debugger_log("Original panic string:\n");
			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
			panic_debugger_log("\n");
		}

		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
	panic_spin_forever();
}
1510
1511 #if SCHED_HYGIENE_DEBUG
1512 uint64_t debugger_trap_timestamps[9];
1513 # define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
1514 #else
1515 # define DEBUGGER_TRAP_TIMESTAMP(i)
1516 #endif /* SCHED_HYGIENE_DEBUG */
1517
/*
 * handle_debugger_trap: the trap-side entry of the debugger/panic machinery.
 * Acquires the global debugger lock (winning CPU halts the others), copies
 * the per-CPU debugger request into the global debugger_* state, dispatches
 * on the operation (breakpoint / stackshot / panic diagnostics), and then
 * restores state, resumes other cores, and releases the lock.
 *
 * exception / code / subcode / state - trap information, forwarded to
 *                                      kdp_raise_exception or diagnostics.
 */
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	/* Saved so nested entries can restore the outer value on exit. */
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	/* Stackshots proceed even if some cores fail to halt (second arg). */
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

	DEBUGGER_TRAP_TIMESTAMP(1);

#if SCHED_HYGIENE_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif /* SCHED_HYGIENE_DEBUG */
	/* Could not halt the other cores: report failure and bail out. */
	if (ret != KERN_SUCCESS) {
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_mark_as_busy();
#endif

	/* Dispatch on the requested debugger operation. */
	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		/* note: this is the panic path... */
#if defined(__arm64__) && (DEBUG || DEVELOPMENT)
		if (!PE_arm_debug_and_trace_initialized()) {
			paniclog_append_noflush("kernel panicked before debug and trace infrastructure initialized!\n"
			    "spinning forever...\n");
			panic_spin_forever();
		}
#endif
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_unmark_as_busy();
#endif

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	/* Breakpoints keep the saved panic state; everything else clears it. */
	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}
1649
/*
 * log: legacy BSD kernel logging entry point. Writes the formatted message
 * to the console (with preemption disabled around the non-reentrant
 * cons_putc_locked path) and forwards the same format/args to os_log,
 * attributing the record to our caller's return address.
 *
 * level - historical syslog level; unused here (see lint stanza below).
 * fmt   - printf-style format; consumed twice via va_copy.
 */
__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef  MACH_BSD
	va_start(listp, fmt);
	/* Second copy for os_log_with_args; the first is consumed by _doprnt. */
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
#pragma clang diagnostic pop
	va_end(listp2);
#endif
}
1679
1680 /*
1681 * Per <rdar://problem/24974766>, skip appending log messages to
1682 * the new logging infrastructure in contexts where safety is
1683 * uncertain. These contexts include:
1684 * - When we're in the debugger
1685 * - We're in a panic
1686 * - Interrupts are disabled
1687 * - Or Pre-emption is disabled
1688 * In all the above cases, it is potentially unsafe to log messages.
1689 */
1690
1691 boolean_t
1692 oslog_is_safe(void)
1693 {
1694 return kernel_debugger_entry_count == 0 &&
1695 not_in_kdp == 1 &&
1696 get_preemption_level() == 0 &&
1697 ml_get_interrupts_enabled() == TRUE;
1698 }
1699
1700 boolean_t
1701 debug_mode_active(void)
1702 {
1703 return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
1704 }
1705
1706 void
1707 debug_putc(char c)
1708 {
1709 if ((debug_buf_size != 0) &&
1710 ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size) &&
1711 (!is_debug_ptr_in_ext_paniclog())) {
1712 *debug_buf_ptr = c;
1713 debug_buf_ptr++;
1714 }
1715 }
1716
1717 #if defined (__x86_64__)
/*
 * Eight 7-bit fields packed into 7 bytes — the unit of the ASCII
 * compression used by packA()/unpackA() for x86 paniclogs. Each field
 * holds one 7-bit character; the packed struct is copied out 7 bytes
 * at a time.
 */
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;
1730
1731 /*
1732 * In-place packing routines -- inefficient, but they're called at most once.
1733 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
1734 */
1735 int
1736 packA(char *inbuf, uint32_t length, uint32_t buflen)
1737 {
1738 unsigned int i, j = 0;
1739 pasc_t pack;
1740
1741 length = MIN(((length + 7) & ~7), buflen);
1742
1743 for (i = 0; i < length; i += 8) {
1744 pack.a = inbuf[i];
1745 pack.b = inbuf[i + 1];
1746 pack.c = inbuf[i + 2];
1747 pack.d = inbuf[i + 3];
1748 pack.e = inbuf[i + 4];
1749 pack.f = inbuf[i + 5];
1750 pack.g = inbuf[i + 6];
1751 pack.h = inbuf[i + 7];
1752 bcopy((char *) &pack, inbuf + j, 7);
1753 j += 7;
1754 }
1755 return j;
1756 }
1757
/*
 * In-place unpack: inverse of packA(). `length` is the packed size; it is
 * scaled back up (length * 8 / 7) to the unpacked size. Each iteration
 * reads one 7-byte pasc_t, shifts the remaining packed tail right by one
 * byte to make room (bcopy here relies on overlap-safe copy semantics),
 * then expands the eight 7-bit fields into the freed 8 bytes.
 */
void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8) / 7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		/* Shift the unread tail one byte right; MAX guards the final chunk. */
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
1778 #endif /* defined (__x86_64__) */
1779
1780 extern char *proc_name_address(void *);
1781 extern char *proc_longname_address(void *);
1782
/*
 * Append the name of the process owning the current thread to the paniclog.
 * All reads of the proc structure go through ml_nofault_copy() so that a
 * corrupt or unmapped proc cannot fault while we are already panicking.
 */
__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	struct proc *cbsd_info = NULL;
	task_t ctask = NULL;
	vm_size_t size;

	/* Resolve the current thread to its task/proc; print "Unknown" on failure. */
	if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	/* First try the long process name. */
	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	/* Fall back to the short command name if the long name is absent/empty. */
	if (size == 0 || proc_name[0] == '\0') {
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			/* Terminate at however many bytes were actually copied. */
			proc_name[size - 1] = '\0';
		}
	}

out:
	/* Guarantee NUL termination regardless of which path was taken. */
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
	    current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
}
1816
1817 unsigned
1818 panic_active(void)
1819 {
1820 return debugger_current_op == DBOP_PANIC ||
1821 (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1822 }
1823
/*
 * Capture the platform model string into the global model_name buffer
 * (truncated to sizeof(model_name)) for later use by
 * panic_display_model_name().
 */
void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}
1829
1830 void
1831 panic_display_model_name(void)
1832 {
1833 char tmp_model_name[sizeof(model_name)];
1834
1835 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1836 return;
1837 }
1838
1839 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1840
1841 if (tmp_model_name[0] != 0) {
1842 paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1843 }
1844 }
1845
1846 void
1847 panic_display_kernel_uuid(void)
1848 {
1849 char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1850
1851 if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1852 return;
1853 }
1854
1855 if (tmp_kernel_uuid[0] != '\0') {
1856 paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
1857 }
1858 }
1859
1860
1861 void
1862 panic_display_kernel_aslr(void)
1863 {
1864
1865 kc_format_t kc_format;
1866
1867 PE_get_primary_kc_format(&kc_format);
1868
1869 if (kc_format == KCFormatFileset) {
1870 void *kch = PE_get_kc_header(KCKindPrimary);
1871 paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
1872 paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
1873 paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
1874 paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
1875 #if defined(__arm64__)
1876 extern vm_offset_t segTEXTEXECB;
1877 paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
1878 paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
1879 #endif /* defined(__arm64__) */
1880 } else if (vm_kernel_slide) {
1881 paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
1882 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
1883 } else {
1884 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
1885 }
1886 }
1887
/*
 * Report the __HIB segment text base address in the paniclog (Intel
 * platforms only; no-op elsewhere).
 */
void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
	paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
#endif
}
1895
1896 #if CONFIG_ECC_LOGGING
1897 __private_extern__ void
1898 panic_display_ecc_errors(void)
1899 {
1900 uint32_t count = ecc_log_get_correction_count();
1901
1902 if (count > 0) {
1903 paniclog_append_noflush("ECC Corrections:%u\n", count);
1904 }
1905 }
1906 #endif /* CONFIG_ECC_LOGGING */
1907
1908 #if CONFIG_FREEZE
1909 extern bool freezer_incore_cseg_acct;
1910 extern int32_t c_segment_pages_compressed_incore;
1911 #endif
1912
1913 extern uint32_t c_segment_pages_compressed;
1914 extern uint32_t c_segment_count;
1915 extern uint32_t c_segments_limit;
1916 extern uint32_t c_segment_pages_compressed_limit;
1917 extern uint32_t c_segment_pages_compressed_nearing_limit;
1918 extern uint32_t c_segments_nearing_limit;
1919 extern int vm_num_swap_files;
1920
/*
 * Summarize VM compressor utilization in the paniclog: percentage of the
 * compressed-page and segment limits in use, whether each is past its
 * "nearing limit" threshold, the swapfile count, and swap-space pressure.
 *
 * NOTE(review): the percentage math assumes c_segment_pages_compressed_limit
 * and c_segments_limit are nonzero by the time a panic occurs — confirm.
 */
void
panic_display_compressor_stats(void)
{
	int isswaplow = vm_swap_low_on_space();
#if CONFIG_FREEZE
	uint32_t incore_seg_count;
	uint32_t incore_compressed_pages;
	/*
	 * With freezer in-core accounting enabled, exclude swapped-out
	 * segments so the figures reflect what is actually resident.
	 */
	if (freezer_incore_cseg_acct) {
		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
		incore_compressed_pages = c_segment_pages_compressed_incore;
	} else {
		incore_seg_count = c_segment_count;
		incore_compressed_pages = c_segment_pages_compressed;
	}

	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
	    (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (incore_seg_count * 100) / c_segments_limit,
	    (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#else /* CONFIG_FREEZE */
	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
	    (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (c_segment_count * 100) / c_segments_limit,
	    (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#endif /* CONFIG_FREEZE */
}
1953
1954 #if !CONFIG_TELEMETRY
/*
 * Stub for kernels built without CONFIG_TELEMETRY: keeps the symbol
 * available to callers while reporting the facility as unsupported.
 */
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
1960 #endif
1961
1962 #include <machine/machine_cpu.h>
1963
/* Feature-override bitmask read from the "validation_disables" boot-arg (default 0); queried via kern_feature_override(). */
TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
1965
1966 boolean_t
1967 kern_feature_override(uint32_t fmask)
1968 {
1969 return (kern_feature_overrides & fmask) == fmask;
1970 }
1971
/*
 * Decide whether a local (on-device) kernel corefile should be written.
 * Requires interactive-debugging support, a nonzero debug boot-arg, and
 * local cores not explicitly disabled; on macOS additionally requires an
 * explicit dump-on-NMI or dump-on-panic request.
 */
boolean_t
on_device_corefile_enabled(void)
{
	/* debug_boot_arg must already be populated from tunables. */
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	if (debug_boot_arg == 0) {
		return FALSE;
	}
	if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
		return FALSE;
	}
#if !XNU_TARGET_OS_OSX
	/*
	 * outside of macOS, if there's a debug boot-arg set and local
	 * cores aren't explicitly disabled, we always write a corefile.
	 */
	return TRUE;
#else /* !XNU_TARGET_OS_OSX */
	/*
	 * on macOS, if corefiles on panic are requested and local cores
	 * aren't disabled we write a local core.
	 */
	if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
		return TRUE;
	}
#endif /* !XNU_TARGET_OS_OSX */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
	return FALSE;
}
2001
/*
 * Decide whether a panic stackshot should be written to disk.  Only
 * enabled on x86 machines whose coprocessor predates version 2
 * ("pre-Gibraltar"), and only when not explicitly disabled via the
 * debug boot-arg; FALSE everywhere else.
 */
boolean_t
panic_stackshot_to_disk_enabled(void)
{
	/* debug_boot_arg must already be populated from tunables. */
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if defined(__x86_64__)
	if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
		/* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
		if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
			return FALSE;
		}

		return TRUE;
	}
#endif
	return FALSE;
}
2018
2019 const char *
2020 sysctl_debug_get_preoslog(size_t *size)
2021 {
2022 int result = 0;
2023 void *preoslog_pa = NULL;
2024 int preoslog_size = 0;
2025
2026 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2027 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2028 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2029 *size = 0;
2030 return NULL;
2031 }
2032
2033 /*
2034 * Beware:
2035 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
2036 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
2037 */
2038 *size = preoslog_size;
2039 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
2040 }
2041
/*
 * On RELEASE kernels, hand the bootloader "preoslog" region back to the
 * system.  Development & Debug kernels deliberately keep the buffer alive
 * so it can be extracted from coredumps.
 */
void
sysctl_debug_free_preoslog(void)
{
#if RELEASE
	void *phys_addr = NULL;
	int region_len = 0;
	int err;

	err = IODTGetLoaderInfo("preoslog", &phys_addr, &region_len);
	if (err != 0 || phys_addr == NULL || region_len == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", err, phys_addr, region_len);
		return;
	}

	IODTFreeLoaderInfo("preoslog", phys_addr, region_len);
#else
	/* On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
#endif // RELEASE
}
2061
2062
2063 #if (DEVELOPMENT || DEBUG)
2064
2065 void
2066 platform_stall_panic_or_spin(uint32_t req)
2067 {
2068 if (xnu_platform_stall_value & req) {
2069 if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
2070 panic("Platform stall: User requested panic");
2071 } else {
2072 paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
2073 panic_spin_forever();
2074 }
2075 }
2076 }
2077 #endif
2078
2079
/* Flag ORed into the AWL WATCHDOG_DIAG0 scratch register by awl_set_scratch_reg_hv_bit() to record hypervisor entry. */
#define AWL_HV_ENTRY_FLAG (0x1)
2081
/*
 * Set the hypervisor-entry flag in the AWL watchdog diagnostic scratch
 * register via a read-modify-write of the S3_5_c15_c2_6 system register.
 * arm64 only; a no-op on other architectures.
 */
static inline void
awl_set_scratch_reg_hv_bit(void)
{
#if defined(__arm64__)
#define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
	uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
	awl_diag0 |= AWL_HV_ENTRY_FLAG;
	__builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
#endif // defined(__arm64__)
}
2092
/*
 * Record the first hypervisor entry on this CPU: set the per-CPU marker
 * and mirror it into the AWL scratch register.  The fast path (already
 * marked, or scratch register unsupported) returns immediately.
 */
void
awl_mark_hv_entry(void)
{
	if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
		return;
	}
	*PERCPU_GET(hv_entry_detected) = true;

	awl_set_scratch_reg_hv_bit();
}
2103
2104 /*
2105 * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2106 * so restore it manually.
2107 */
2108 static bool
2109 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2110 {
2111 if (event == CPU_BOOTED) {
2112 if (*PERCPU_GET(hv_entry_detected)) {
2113 awl_set_scratch_reg_hv_bit();
2114 }
2115 }
2116
2117 return true;
2118 }
2119
2120 /*
2121 * Identifies and sets a flag if AWL Scratch0/1 exists in the system, subscribes
2122 * for a callback to restore register after hibernation
2123 */
2124 __startup_func
2125 static void
2126 set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
2127 {
2128 DTEntry base = NULL;
2129
2130 if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
2131 return;
2132 }
2133 const uint8_t *data = NULL;
2134 unsigned int data_size = sizeof(uint8_t);
2135
2136 if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
2137 for (unsigned int i = 0; i < data_size; i++) {
2138 if (data[i] != 0) {
2139 awl_scratch_reg_supported = true;
2140 cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
2141 break;
2142 }
2143 }
2144 }
2145 }
2146 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2147