1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59 #include <kdp/kdp.h>
60 #include <kdp/kdp_core.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_callout.h>
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/percpu.h>
66 #include <kern/spl.h>
67 #include <kern/thread.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/socd_client.h>
71 #include <kern/misc_protos.h>
72 #include <kern/clock.h>
73 #include <kern/telemetry.h>
74 #include <kern/ecc.h>
75 #include <kern/kern_cdata.h>
76 #include <kern/zalloc_internal.h>
77 #include <kern/iotrace.h>
78 #include <pexpert/device_tree.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_map.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_compressor.h>
83 #include <stdarg.h>
84 #include <stdatomic.h>
85 #include <sys/pgo.h>
86 #include <console/serial_protos.h>
87 #include <IOKit/IOBSD.h>
88
89 #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
90 #include <kdp/kdp_udp.h>
91 #endif
92 #include <kern/processor.h>
93
94 #if defined(__i386__) || defined(__x86_64__)
95 #include <IOKit/IOBSD.h>
96
97 #include <i386/cpu_threads.h>
98 #include <i386/pmCPU.h>
99 #include <i386/lbr.h>
100 #endif
101
102 #include <IOKit/IOPlatformExpert.h>
103 #include <machine/machine_cpu.h>
104 #include <machine/pal_routines.h>
105
106 #include <sys/kdebug.h>
107 #include <libkern/OSKextLibPrivate.h>
108 #include <libkern/OSAtomic.h>
109 #include <libkern/kernel_mach_header.h>
110 #include <libkern/section_keywords.h>
111 #include <uuid/uuid.h>
112 #include <mach_debug/zone_info.h>
113 #include <mach/resource_monitors.h>
114 #include <machine/machine_routines.h>
115
116 #include <os/log_private.h>
117
118 #if defined(__arm64__)
119 #include <pexpert/pexpert.h> /* For gPanicBase */
120 #include <arm/caches_internal.h>
121 #include <arm/misc_protos.h>
122 extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
123 #endif
124
125 #include <san/kcov.h>
126
127 #if CONFIG_XNUPOST
128 #include <tests/xnupost.h>
129 extern int vsnprintf(char *, size_t, const char *, va_list);
130 #endif
131
132 #if CONFIG_CSR
133 #include <sys/csr.h>
134 #endif
135
136
137 extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
138 extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );
139
unsigned int halt_in_debugger = 0;            /* set by the DB_HALT debug boot-arg: stop in the debugger at boot */
unsigned int current_debugger = 0;            /* which debugger backend is currently configured */
unsigned int active_debugger = 0;             /* non-zero while a debugger session is active */
unsigned int panicDebugging = FALSE;          /* TRUE when debugging (rather than rebooting) on panic is allowed */
unsigned int kernel_debugger_entry_count = 0; /* global count of debugger entries */

#if DEVELOPMENT || DEBUG
/* Panic-injection test knobs; see the usage comment above INJECT_NESTED_PANIC_IF_REQUESTED. */
unsigned int panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
unsigned int panic_test_action_count = 1;
unsigned int panic_test_case = PANIC_TEST_CASE_DISABLED;
#endif

#if defined(__arm64__)
/* Head of the list of extra data buffers appended to the panic log (arm64 only). */
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif
155
#if defined(__arm64__)
/*
 * Magic number; this should be identical to the armv7 encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
/* Breakpoint instruction; traps into the debugger/panic path. */
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
/* On Intel, halt the CPU via power management when a panic completes. */
#define panic_stop() pmCPUHalt(PM_HALT_PANIC)
#else
/* Elsewhere, spin forever (the watchdog will eventually reset the machine). */
#define panic_stop() panic_spin_forever()
#endif
172
/*
 * Per-CPU debugger/panic state.  The requesting CPU records its operation and
 * panic data here before trapping; the trap handler acts on it.
 */
struct debugger_state {
	uint64_t db_panic_options;            /* DEBUGGER_OPTION_* flags for this entry */
	debugger_op db_current_op;            /* requested operation (DBOP_*) */
	boolean_t db_proceed_on_sync_failure; /* continue even if other cores fail to halt */
	const char *db_message;               /* message passed to Debugger() */
	const char *db_panic_str;             /* panic format string */
	va_list *db_panic_args;               /* arguments for the panic format string */
	void *db_panic_data_ptr;              /* opaque payload supplied by the panic caller */
	unsigned long db_panic_caller;        /* return address of the panic/Debugger caller */
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t db_entry_count;
	kern_return_t db_op_return;           /* result handed back to DebuggerTrapWithState() */
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

/* Convenience accessors for the current CPU's debugger_state fields. */
#define CPUDEBUGGEROP current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG current_debugger_state()->db_message
#define CPUPANICSTR current_debugger_state()->db_panic_str
#define CPUPANICARGS current_debugger_state()->db_panic_args
#define CPUPANICOPTS current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET current_debugger_state()->db_op_return
#define CPUPANICCALLER current_debugger_state()->db_panic_caller
205
206
/*
 * Usage:
 * panic_test_action_count is in the context of other flags, e.g. for IO errors it is "succeed this many times then fail" and for nesting it is "panic this many times then succeed"
 * panic_test_failure_mode is a bit map of things to do
 * panic_test_case is what sort of test we are injecting
 *
 * For more details see definitions in debugger.h
 *
 * Note that not all combinations are sensible, but some actions can be combined, e.g.
 * - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
 * - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
 * - IO_ERR with action 15 will cause 14 successful IOs, then fail on the next one
 */
#if DEVELOPMENT || DEBUG
/* Deliberately inject a nested failure (spin, bad-pointer dereference, or explicit
 * panic) when the armed test case matches `requested`; a no-op otherwise. */
#define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
	MACRO_BEGIN \
	if ((panic_test_case & requested) && panic_test_action_count) { \
	        panic_test_action_count--; \
	        volatile int *panic_test_badpointer = (int *)4; \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while(panic_test_badpointer); } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count+1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count+1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
	} \
	MACRO_END

#endif /* DEVELOPMENT || DEBUG */
233
/* Global (not per-CPU) debugger state; owned by the CPU holding the debugger lock. */
debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2 __printflike(1, 0);
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);

/* Default cap on nested panic/debugger entries before we bail out and reset. */
#define NESTEDDEBUGGERENTRYMAX 5
/* Runtime limit; may be overridden by the nested_panic_max boot-arg (see panic_init). */
static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;

SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
static bool PERCPU_DATA(hv_entry_detected); // = false
static void awl_set_scratch_reg_hv_bit(void);
void awl_mark_hv_entry(void);
static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
258
#if defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;       /* current write position within the panic log buffer */
unsigned int debug_buf_size = 0;

/* TRUE only when the kdp_match_name boot-arg explicitly requested KDP. */
SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

/* Set once extended_debug_log_init() has switched to the larger heap buffer. */
boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif
293
/* Debugger state */
atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU); /* CPU currently holding the debugger lock */
boolean_t debugger_allcpus_halted = FALSE;  /* TRUE once the other CPUs have been halted */
boolean_t debugger_safe_to_return = TRUE;   /* FALSE when resuming from the debugger would be unsafe */
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

/* UUIDs of the boot/pageable/aux kext collections, reported in the panic log. */
boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;


/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
/* Kext assertions default on for DEVELOPMENT/DEBUG kernels, off for RELEASE. */
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

#if (DEVELOPMENT || DEBUG)
uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
#endif

/*
 * Maintain the physically-contiguous carveouts for the carveout bootargs.
 */
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);

TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
355
356
357 /*
358 * Returns whether kernel debugging is expected to be restricted
359 * on the device currently based on CSR or other platform restrictions.
360 */
361 boolean_t
kernel_debugging_restricted(void)362 kernel_debugging_restricted(void)
363 {
364 #if XNU_TARGET_OS_OSX
365 #if CONFIG_CSR
366 if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
367 return TRUE;
368 }
369 #endif /* CONFIG_CSR */
370 return FALSE;
371 #else /* XNU_TARGET_OS_OSX */
372 return FALSE;
373 #endif /* XNU_TARGET_OS_OSX */
374 }
375
/*
 * Early (startup-time) panic subsystem initialization: capture the kernel
 * UUID, interpret the debug boot-arg, and pick up debugging-related tunables.
 */
__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	/* Record the running kernel's UUID so it can be reported at panic time. */
	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		/* On arm64, panic debugging additionally requires DB_NMI. */
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

	/* Allow the nested-panic bailout threshold to be tuned via boot-arg. */
	if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) {
		max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;
	}

#if defined(__arm64__)
	char kdpname[80];

	/* KDP counts as explicitly requested only when kdp_match_name was supplied. */
	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
434
#if defined (__x86_64__)
/*
 * Switch the panic log to a larger heap-allocated buffer that also has room
 * for the panic-time stackshot, and release the boot-time stackshot buffer
 * which is then no longer needed.  x86 co-processor systems only.
 */
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */
467
/*
 * Wire up the panic/debug log buffer.  On arm64 the buffer lives in the
 * iBoot-provided panic region; on other platforms it is the static debug_buf,
 * plus a preallocated buffer for a panic-time stackshot.
 */
void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}
502
503 void
phys_carveout_init(void)504 phys_carveout_init(void)
505 {
506 if (!PE_i_can_has_debugger(NULL)) {
507 return;
508 }
509
510 struct carveout {
511 const char *name;
512 vm_offset_t *va;
513 uint32_t requested_size;
514 uintptr_t *pa;
515 size_t *allocated_size;
516 uint64_t present;
517 } carveouts[] = {
518 {
519 "phys_carveout",
520 &phys_carveout,
521 phys_carveout_mb,
522 &phys_carveout_pa,
523 &phys_carveout_size,
524 phys_carveout_mb != 0,
525 }
526 };
527
528 for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
529 if (carveouts[i].present) {
530 size_t temp_carveout_size = 0;
531 if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
532 panic("%s_mb size overflowed (%uMB)",
533 carveouts[i].name, carveouts[i].requested_size);
534 return;
535 }
536
537 kmem_alloc_contig(kernel_map, carveouts[i].va,
538 temp_carveout_size, PAGE_MASK, 0, 0,
539 KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
540 VM_KERN_MEMORY_DIAG);
541
542 *carveouts[i].pa = kvtophys(*carveouts[i].va);
543 *carveouts[i].allocated_size = temp_carveout_size;
544 }
545 }
546
547 #if __arm64__ && (DEVELOPMENT || DEBUG)
548 /* likely panic_trace boot-arg is also set so check and enable tracing if necessary into new carveout */
549 PE_arm_debug_enable_trace(true);
550 #endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
551 }
552
553 boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)554 debug_is_in_phys_carveout(vm_map_offset_t va)
555 {
556 return phys_carveout_size && va >= phys_carveout &&
557 va < (phys_carveout + phys_carveout_size);
558 }
559
/*
 * Whether the phys_carveout region may be included in on-device coredumps;
 * controlled by the phys_carveout_core boot-arg (enabled by default).
 */
boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}
565
/*
 * Acquire exclusive ownership of the debugger for this CPU, spinning until
 * any current owner releases it.  Re-entrant: returns immediately if this
 * CPU already owns it.  Must be called with interrupts disabled.
 */
static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	/* Already ours: nothing to do. */
	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	/* Spin until we CAS the owner from DEBUGGER_NO_CPU to ourselves. */
	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		/* A failed CAS stores the observed owner into the expected
		 * value; reset it before retrying. */
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}
583
/*
 * Release debugger ownership.  Must be called by the CPU that holds it
 * (asserted below).
 */
static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}
601
/*
 * Halt every CPU except this one so the debugger/panic flow runs
 * single-threaded.  arm64 uses an IPI-based cross-call (which can fail);
 * x86 uses the MP KDP rendezvous and always reports KERN_SUCCESS.
 */
static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
#pragma unused(is_stackshot)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}
614
/*
 * Counterpart of DebuggerHaltOtherCores(): let the halted CPUs resume.
 */
static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}
624
/*
 * Record the requested debugger operation and panic data into this CPU's
 * per-CPU debugger_state, where the debugger trap handler will find it.
 */
__printflike(3, 0)
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	CPUDEBUGGEROP = db_op;

	/*
	 * Note:
	 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
	 *
	 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
	 */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	// follow up in rdar://88497308 (nested panics should not clobber panic flags)
	CPUPANICOPTS = db_panic_options;

	return;
}
658
/*
 * Save the requested debugger state/action into the current processor's
 * percu state and trap to the debugger.
 *
 * Returns the kern_return_t stored by the trap handler in per-CPU state.
 * Must be called with interrupts disabled.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	/* The trap handler left its result in our per-CPU state. */
	ret = CPUDEBUGGERRET;

	/* Clear the saved state now that the operation has completed. */
	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}
690
/*
 * Handler for failed assert()s (see kern/assert.h).
 *
 * With CONFIG_NONFATAL_ASSERTS, the "assertions" boot-arg can demote
 * assertion failures to kprintf logging; otherwise (and by default) an
 * assertion failure panics with the file, line and failed expression.
 */
void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
709
710 boolean_t
debug_is_current_cpu_in_panic_state(void)711 debug_is_current_cpu_in_panic_state(void)
712 {
713 return current_debugger_state()->db_entry_count > 0;
714 }
715
716 /*
717 * check if we are in a nested panic, report findings, take evasive action where necessary
718 *
719 * see also PE_update_panicheader_nestedpanic
720 */
721 static void
check_and_handle_nested_panic(uint64_t panic_options_mask,unsigned long panic_caller,const char * db_panic_str,va_list * db_panic_args)722 check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
723 {
724 if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
725 // Note: this is the first indication in the panic log or serial that we are off the rails...
726 //
727 // if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
728 // if we panic *after* the log is finalized then we will only see it in the serial log
729 //
730 paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
731 paniclog_flush();
732
733 // print the *new* panic string to the console, we might not get it by other means...
734 // TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
735 // rdar://87846117 (NestedPanic: output panic string to paniclog)
736 if (db_panic_str) {
737 printf("Nested panic string:\n");
738 #pragma clang diagnostic push
739 #pragma clang diagnostic ignored "-Wformat-nonliteral"
740 _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
741 #pragma clang diagnostic pop
742 printf("\n<end nested panic string>\n");
743 }
744 }
745
746 // Stage 1 bailout
747 //
748 // Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog. If this fails with another nested
749 // panic then we will land in Stage 2 below...
750 //
751 if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
752 uint32_t panic_details = 0;
753
754 // if this is a force-reset panic then capture a log and reboot immediately.
755 if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
756 panic_details |= kPanicDetailsForcePowerOff;
757 }
758
759 // normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
760 // there. To be safe send another notification, the function called below will only send kPEPanicBegin if it has not yet been sent.
761 //
762 PEHaltRestartInternal(kPEPanicBegin, panic_details);
763
764 paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
765 PE_update_panicheader_nestedpanic();
766 paniclog_flush();
767
768 if (!panicDebugging) {
769 // note that this will also send kPEPanicEnd
770 kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
771 }
772
773 // prints to console
774 paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
775 panic_spin_forever();
776 }
777
778 // Stage 2 bailout
779 //
780 // Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path. Try to issue
781 // a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
782 //
783 if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
784 if (!panicDebugging) {
785 // note that:
786 // - this code path should be audited for prints, as that is a common cause of nested panics
787 // - this code path should take the fastest route to the actual reset, and not call any un-necessary code
788 kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
789 }
790
791 // prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
792 paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
793 panic_spin_forever();
794 }
795
796 // Stage 3 bailout
797 //
798 // We are done here, we were unable to reset the platform without another nested panic. Spin until the watchdog kicks in.
799 //
800 if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
801 kdp_machine_reboot_type(kPEHangCPU, 0);
802 }
803 }
804
805 void
Debugger(const char * message)806 Debugger(const char *message)
807 {
808 DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
809 }
810
/*
 * Enter the Debugger
 *
 * This is similar to, but not the same as a panic
 *
 * Key differences:
 *  - we get here from a debugger entry action (e.g. NMI)
 *  - the system is resumable on x86 (in theory, however it is not clear if this is tested)
 *  - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
 *
 */
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(message);

	/* Show raw pointer values in debugger/panic output. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/* We already have a trap context (e.g. from an NMI), so handle
		 * the debugger trap directly rather than raising a new one. */
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		/* No context available: trap into the debugger to obtain one. */
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	/* resume from the debugger */

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}
873
/*
 * Registered KDP event callouts, kept in a singly linked list that is
 * built lock-free (see kdp_register_callout).  callout_in_progress lets
 * kdp_callouts() skip an entry that is already running.
 */
static struct kdp_callout {
	struct kdp_callout * callout_next;   /* next registration, or NULL */
	kdp_callout_fn_t callout_fn;         /* function invoked on KDP events */
	boolean_t callout_in_progress;       /* reentry guard for this entry */
	void * callout_arg;                  /* opaque argument passed to callout_fn */
} * kdp_callout_list = NULL;
880
881 /*
882 * Called from kernel context to register a kdp event callout.
883 */
/*
 * Called from kernel context to register a kdp event callout.
 *
 * The entry comes from a permanent zone, so registrations are never
 * freed; insertion is at the head of the list via compare-and-swap so no
 * lock is needed even if registrations race.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void * arg)
{
	struct kdp_callout * kcp;
	struct kdp_callout * list_head;

	kcp = zalloc_permanent_type(struct kdp_callout);

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
902
903 static void
kdp_callouts(kdp_event_t event)904 kdp_callouts(kdp_event_t event)
905 {
906 struct kdp_callout *kcp = kdp_callout_list;
907
908 while (kcp) {
909 if (!kcp->callout_in_progress) {
910 kcp->callout_in_progress = TRUE;
911 kcp->callout_fn(kcp->callout_arg, event);
912 kcp->callout_in_progress = FALSE;
913 }
914 kcp = kcp->callout_next;
915 }
916 }
917
918 #if defined(__arm64__)
919 /*
920 * Register an additional buffer with data to include in the panic log
921 *
922 * <rdar://problem/50137705> tracks supporting more than one buffer
923 *
924 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
925 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	/* Only a single additional buffer is currently supported. */
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	/* Permanent allocation: the registration lives for the rest of boot. */
	struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	/* CAS publish; fails (and panics) if someone raced us to register. */
	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
956 #endif /* defined(__arm64__) */
957
958 /*
959 * An overview of the xnu panic path:
960 *
961 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
962 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
963 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
964 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
965 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
966 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
967 * according to the device's boot-args.
968 */
969 #undef panic
/*
 * Main panic entry point; takes a printf-style format string.  Funnels
 * into panic_trap_to_debugger() with no reason/context/options, recording
 * the immediate caller's return address for the paniclog header.
 */
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
979
/*
 * Panic with an explicit reason code, optional machine context and
 * DEBUGGER_OPTION_* flags.  Internal-only option bits are stripped so
 * external callers cannot set them.
 */
void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
990
991 boolean_t
panic_validate_ptr(void * ptr,vm_size_t size,const char * what)992 panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
993 {
994 if (ptr == NULL) {
995 paniclog_append_noflush("NULL %s pointer\n", what);
996 return false;
997 }
998
999 if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
1000 paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
1001 what, ptr, (uint32_t)size);
1002 return false;
1003 }
1004
1005 return true;
1006 }
1007
1008 boolean_t
panic_get_thread_proc_task(struct thread * thread,struct task ** task,struct proc ** proc)1009 panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
1010 {
1011 if (!PANIC_VALIDATE_PTR(thread)) {
1012 return false;
1013 }
1014
1015 if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
1016 return false;
1017 }
1018
1019 if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
1020 return false;
1021 }
1022
1023 if (task) {
1024 *task = thread->t_tro->tro_task;
1025 }
1026
1027 if (!panic_validate_ptr(thread->t_tro->tro_proc,
1028 sizeof(struct proc *), "bsd_info")) {
1029 *proc = NULL;
1030 } else {
1031 *proc = thread->t_tro->tro_proc;
1032 }
1033
1034 return true;
1035 }
1036
1037 #if defined (__x86_64__)
1038 /*
1039 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
1040 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
1041 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
1042 * thread when writing the panic log.
1043 *
1044 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
1045 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	/* sanity-check the thread before we pin it for backtracing */
	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	/*
	 * Strip internal option bits from the caller's mask, then request the
	 * thread-backtrace internal option so the paniclog walks `thread`
	 * instead of the current thread.
	 */
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
1065 #endif /* defined (__x86_64__) */
1066
1067 #pragma clang diagnostic push
1068 #pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	/* capture last-branch records before they are clobbered */
	read_lbr();
#endif

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	/* Flush kdebug trace to disk first, while it is still safe to do so. */
	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__)
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__		volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}
1152
/*
 * Terminal spin loop for unrecoverable states; never returns.  Uses a
 * low-power wait where the architecture provides one.
 */
void
panic_spin_forever(void)
{
	for (;;) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return. But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}
1165
1166 static void
kdp_machine_reboot_type(unsigned int type,uint64_t debugger_flags)1167 kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
1168 {
1169 if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
1170 PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1171 } else {
1172 PEHaltRestart(type);
1173 }
1174 halt_all_cpus(TRUE);
1175 }
1176
/* Convenience wrapper: panic-CPU restart with no debugger flags. */
void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}
1182
/*
 * printf-style logging to the console/paniclog from the panic-debugger
 * path (the `16` flag selects consdebug output in _doprnt).  On arm64 the
 * paniclog is flushed immediately so the text survives a hang.
 */
static __attribute__((unused)) void
panic_debugger_log(const char *string, ...)
{
	va_list panic_debugger_log_args;

	va_start(panic_debugger_log_args, string);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	_doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
#pragma clang diagnostic pop
	va_end(panic_debugger_log_args);

#if defined(__arm64__)
	paniclog_flush();
#endif
}
1199
1200 /*
1201 * Gather and save diagnostic information about a panic (or Debugger call).
1202 *
1203 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
1204 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e no
1205 * paniclog is written and no core is written unless we request a core on NMI.
1206 *
1207 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
1208 * and calling out to any other functions we have for collecting diagnostic info.
1209 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try not to break core dump path by sanitizer. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		/* if this is a force-reset panic then capture a log and reboot immediately. */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		}

		if (CPUDEBUGGERCOUNT > 1) {
			/*
			 * we are in a nested panic.  Record the nested bit in panic flags and do some housekeeping
			 */
			PE_update_panicheader_nestedpanic();
			paniclog_flush();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			/* corefile infrastructure never came up; record why in the paniclog */
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
#if defined(__arm64__)
				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
				}
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
					}
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
			paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is sucessfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	/* signal that diagnostics (paniclog + corefile) are complete */
	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/*
		 * Print panic string at the end of serial output
		 * to make panic more obvious when someone connects a debugger
		 */
		if (debugger_panic_str) {
			panic_debugger_log("Original panic string:\n");
			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
			panic_debugger_log("\n");
		}

		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
	panic_spin_forever();
}
1460
#if SCHED_HYGIENE_DEBUG
/* Timestamps recorded at each stage of handle_debugger_trap(), for latency analysis. */
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
/* compiles away entirely on non-debug scheduler-hygiene builds */
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif /* SCHED_HYGIENE_DEBUG */
1467
/*
 * Common debugger/panic trap handler, reached on the CPU that trapped.
 * Serializes entry (DebuggerLock/DebuggerHaltOtherCores), promotes this
 * CPU's saved debugger state to the globals, dispatches on the requested
 * operation (breakpoint, stackshot, PGO reset, or panic diagnostics),
 * then restores the previous state and releases the other cores.
 */
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

	DEBUGGER_TRAP_TIMESTAMP(1);

#if SCHED_HYGIENE_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif /* SCHED_HYGIENE_DEBUG */
	if (ret != KERN_SUCCESS) {
		/* couldn't halt the other cores; report the error and bail */
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_mark_as_busy();
#endif

	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		/* note: this is the panic path... */
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_unmark_as_busy();
#endif

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	/* Drop the saved panic data once we're no longer in the panic path. */
	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}
1592
/*
 * BSD-style kernel log: prints to the console (with preemption disabled
 * around the console write) and forwards the same message to os_log with
 * the caller's return address for attribution.  `level` is ignored.
 */
__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef MACH_BSD
	va_start(listp, fmt);
	/* second copy needed because _doprnt consumes the first */
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
#pragma clang diagnostic pop
	va_end(listp2);
#endif
}
1622
1623 /*
1624 * Per <rdar://problem/24974766>, skip appending log messages to
1625 * the new logging infrastructure in contexts where safety is
1626 * uncertain. These contexts include:
1627 * - When we're in the debugger
1628 * - We're in a panic
1629 * - Interrupts are disabled
1630 * - Or Pre-emption is disabled
1631 * In all the above cases, it is potentially unsafe to log messages.
1632 */
1633
1634 boolean_t
1635 oslog_is_safe(void)
1636 {
1637 return kernel_debugger_entry_count == 0 &&
1638 not_in_kdp == 1 &&
1639 get_preemption_level() == 0 &&
1640 ml_get_interrupts_enabled() == TRUE;
1641 }
1642
1643 boolean_t
1644 debug_mode_active(void)
1645 {
1646 return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
1647 }
1648
1649 void
1650 debug_putc(char c)
1651 {
1652 if ((debug_buf_size != 0) &&
1653 ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
1654 *debug_buf_ptr = c;
1655 debug_buf_ptr++;
1656 }
1657 }
1658
1659 #if defined (__x86_64__)
/*
 * Eight 7-bit fields packed into 7 bytes: one group of 8 ASCII characters
 * with the high bit of each stripped.  Used by packA()/unpackA() to
 * compress paniclogs on x86.
 */
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;
1672
1673 /*
1674 * In-place packing routines -- inefficient, but they're called at most once.
1675 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
1676 */
1677 int
1678 packA(char *inbuf, uint32_t length, uint32_t buflen)
1679 {
1680 unsigned int i, j = 0;
1681 pasc_t pack;
1682
1683 length = MIN(((length + 7) & ~7), buflen);
1684
1685 for (i = 0; i < length; i += 8) {
1686 pack.a = inbuf[i];
1687 pack.b = inbuf[i + 1];
1688 pack.c = inbuf[i + 2];
1689 pack.d = inbuf[i + 3];
1690 pack.e = inbuf[i + 4];
1691 pack.f = inbuf[i + 5];
1692 pack.g = inbuf[i + 6];
1693 pack.h = inbuf[i + 7];
1694 bcopy((char *) &pack, inbuf + j, 7);
1695 j += 7;
1696 }
1697 return j;
1698 }
1699
/*
 * Inverse of packA(): expand 7-byte groups back to 8 bytes in place.
 * `length` is the packed byte count; after scaling it is the unpacked
 * count.  The bcopy shifts the still-packed tail right by one byte each
 * iteration to make room (regions overlap; bcopy handles overlap).
 */
void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	/* convert packed length to unpacked length (8 out for every 7 in) */
	length = (length * 8) / 7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		/* shift the remaining packed data one byte right to open a gap */
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
1720 #endif /* defined (__x86_64__) */
1721
1722 extern char *proc_name_address(void *);
1723 extern char *proc_longname_address(void *);
1724
/*
 * Append the current thread's process name to the paniclog.  All reads
 * go through validated/nofault copies so a corrupted proc cannot fault
 * while panicking; falls back to "Unknown" when nothing can be read.
 */
__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	struct proc *cbsd_info = NULL;
	task_t ctask = NULL;
	vm_size_t size;

	if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	/* prefer the long name; fall back to the short name if it's empty */
	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	if (size == 0 || proc_name[0] == '\0') {
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			proc_name[size - 1] = '\0';
		}
	}

out:
	/* always NUL-terminate before printing, whatever path we took */
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
	    current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
}
1758
1759 unsigned
1760 panic_active(void)
1761 {
1762 return debugger_current_op == DBOP_PANIC ||
1763 (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1764 }
1765
/* Record the hardware model string for panic_display_model_name(). */
void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}
1771
1772 void
1773 panic_display_model_name(void)
1774 {
1775 char tmp_model_name[sizeof(model_name)];
1776
1777 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1778 return;
1779 }
1780
1781 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1782
1783 if (tmp_model_name[0] != 0) {
1784 paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1785 }
1786 }
1787
1788 void
1789 panic_display_kernel_uuid(void)
1790 {
1791 char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1792
1793 if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1794 return;
1795 }
1796
1797 if (tmp_kernel_uuid[0] != '\0') {
1798 paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
1799 }
1800 }
1801
1802
/*
 * Append kernel slide/base information to the paniclog so addresses can
 * be un-slid during triage.  Fileset kernelcaches report both the
 * kernelcache-wide slide and the per-kernel slide; otherwise only the
 * kernel slide (if any) and text base are printed.
 */
void
panic_display_kernel_aslr(void)
{
	kc_format_t kc_format;

	PE_get_primary_kc_format(&kc_format);

	if (kc_format == KCFormatFileset) {
		void *kch = PE_get_kc_header(KCKindPrimary);
		paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("KernelCache base:  %p\n", (void*) kch);
		paniclog_append_noflush("Kernel slide:      0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text base:  %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
		extern vm_offset_t segTEXTEXECB;
		paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text exec base:  0x%016lx\n", (unsigned long)segTEXTEXECB);
#endif /* defined(__arm64__) */
	} else if (vm_kernel_slide) {
		paniclog_append_noflush("Kernel slide:     0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
	} else {
		/* unslid kernel: only the static text base is meaningful */
		paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
	}
}
1829
/*
 * Log the base address of the __HIB segment on Intel platforms.
 * No-op on other architectures.
 */
void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
	paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
#endif
}
1837
#if CONFIG_ECC_LOGGING
/* Report the count of corrected ECC errors in the panic log, if any. */
__private_extern__ void
panic_display_ecc_errors(void)
{
	uint32_t corrections = ecc_log_get_correction_count();

	if (corrections != 0) {
		paniclog_append_noflush("ECC Corrections:%u\n", corrections);
	}
}
#endif /* CONFIG_ECC_LOGGING */
1849
#if CONFIG_FREEZE
extern bool freezer_incore_cseg_acct;
extern int32_t c_segment_pages_compressed_incore;
#endif

/* VM compressor counters/limits consulted by panic_display_compressor_stats(). */
extern uint32_t c_segment_pages_compressed;
extern uint32_t c_segment_count;
extern uint32_t c_segments_limit;
extern uint32_t c_segment_pages_compressed_limit;
extern uint32_t c_segment_pages_compressed_nearing_limit;
extern uint32_t c_segments_nearing_limit;
extern int vm_num_swap_files;
1862
1863 void
1864 panic_display_compressor_stats(void)
1865 {
1866 int isswaplow = vm_swap_low_on_space();
1867 #if CONFIG_FREEZE
1868 uint32_t incore_seg_count;
1869 uint32_t incore_compressed_pages;
1870 if (freezer_incore_cseg_acct) {
1871 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
1872 incore_compressed_pages = c_segment_pages_compressed_incore;
1873 } else {
1874 incore_seg_count = c_segment_count;
1875 incore_compressed_pages = c_segment_pages_compressed;
1876 }
1877
1878 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
1879 (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
1880 (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
1881 (incore_seg_count * 100) / c_segments_limit,
1882 (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
1883 vm_num_swap_files,
1884 isswaplow ? "LOW":"OK");
1885 #else /* CONFIG_FREEZE */
1886 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
1887 (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
1888 (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
1889 (c_segment_count * 100) / c_segments_limit,
1890 (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
1891 vm_num_swap_files,
1892 isswaplow ? "LOW":"OK");
1893 #endif /* CONFIG_FREEZE */
1894 }
1895
#if !CONFIG_TELEMETRY
/*
 * Stub for kernels built without telemetry support: always reports
 * KERN_NOT_SUPPORTED and gathers no data.
 */
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
#endif
1903
#include <machine/machine_cpu.h>

/* Boot-arg "validation_disables": bitmask consulted by kern_feature_override(). */
TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
1907
1908 boolean_t
1909 kern_feature_override(uint32_t fmask)
1910 {
1911 return (kern_feature_overrides & fmask) == fmask;
1912 }
1913
/*
 * Returns TRUE when a local (on-device) kernel corefile should be written.
 * Only valid once tunables are initialized, so debug_boot_arg is parsed.
 */
boolean_t
on_device_corefile_enabled(void)
{
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/* No debug boot-arg at all: never write a local core. */
	if (debug_boot_arg == 0) {
		return FALSE;
	}
	/* Local cores explicitly disabled. */
	if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
		return FALSE;
	}
#if !XNU_TARGET_OS_OSX
	/*
	 * outside of macOS, if there's a debug boot-arg set and local
	 * cores aren't explicitly disabled, we always write a corefile.
	 */
	return TRUE;
#else /* !XNU_TARGET_OS_OSX */
	/*
	 * on macOS, if corefiles on panic are requested and local cores
	 * aren't disabled we write a local core.
	 */
	if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
		return TRUE;
	}
#endif /* !XNU_TARGET_OS_OSX */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
	return FALSE;
}
1943
1944 boolean_t
1945 panic_stackshot_to_disk_enabled(void)
1946 {
1947 assert(startup_phase >= STARTUP_SUB_TUNABLES);
1948 #if defined(__x86_64__)
1949 if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
1950 /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
1951 if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
1952 return FALSE;
1953 }
1954
1955 return TRUE;
1956 }
1957 #endif
1958 return FALSE;
1959 }
1960
1961 const char *
1962 sysctl_debug_get_preoslog(size_t *size)
1963 {
1964 int result = 0;
1965 void *preoslog_pa = NULL;
1966 int preoslog_size = 0;
1967
1968 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
1969 if (result || preoslog_pa == NULL || preoslog_size == 0) {
1970 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
1971 *size = 0;
1972 return NULL;
1973 }
1974
1975 /*
1976 * Beware:
1977 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
1978 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
1979 */
1980 *size = preoslog_size;
1981 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
1982 }
1983
/*
 * On RELEASE kernels, return the bootloader-provided "preoslog" region to
 * the system. Development and Debug kernels deliberately keep the buffer
 * resident so it can be extracted from coredumps.
 */
void
sysctl_debug_free_preoslog(void)
{
#if RELEASE
	void *phys_addr = NULL;
	int region_size = 0;
	int err = IODTGetLoaderInfo("preoslog", &phys_addr, &region_size);

	if (err != 0 || phys_addr == NULL || region_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", err, phys_addr, region_size);
		return;
	}

	IODTFreeLoaderInfo("preoslog", phys_addr, region_size);
#endif // RELEASE
}
2003
#if (DEVELOPMENT || DEBUG)

/*
 * Honor a user-requested platform stall for the given request bit: either
 * panic immediately or log the stall code and spin forever, depending on
 * the configured stall action.
 */
void
platform_stall_panic_or_spin(uint32_t req)
{
	if ((xnu_platform_stall_value & req) == 0) {
		/* This stall point was not requested. */
		return;
	}

	if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
		panic("Platform stall: User requested panic");
	} else {
		paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
		panic_spin_forever();
	}
}
#endif
2019
/* Bit set in the AWL watchdog scratch register to record hypervisor entry. */
#define AWL_HV_ENTRY_FLAG (0x1)

/*
 * Read-modify-write the AWL watchdog diagnostic register to set the
 * HV-entry flag. No-op on non-arm64 builds.
 */
static inline void
awl_set_scratch_reg_hv_bit(void)
{
#if defined(__arm64__)
	/* NOTE(review): S3_5_c15_c2_6 appears to encode the AWL WatchdogDiag0
	 * implementation-defined system register — confirm against platform docs. */
#define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
	uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
	awl_diag0 |= AWL_HV_ENTRY_FLAG;
	__builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
#endif // defined(__arm64__)
}
2032
2033 void
2034 awl_mark_hv_entry(void)
2035 {
2036 if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
2037 return;
2038 }
2039 *PERCPU_GET(hv_entry_detected) = true;
2040
2041 awl_set_scratch_reg_hv_bit();
2042 }
2043
2044 /*
2045 * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2046 * so restore it manually.
2047 */
2048 static bool
2049 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2050 {
2051 if (event == CPU_BOOTED) {
2052 if (*PERCPU_GET(hv_entry_detected)) {
2053 awl_set_scratch_reg_hv_bit();
2054 }
2055 }
2056
2057 return true;
2058 }
2059
2060 /*
2061 * Identifies and sets a flag if AWL Scratch0/1 exists in the system, subscribes
2062 * for a callback to restore register after hibernation
2063 */
2064 __startup_func
2065 static void
2066 set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
2067 {
2068 DTEntry base = NULL;
2069
2070 if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
2071 return;
2072 }
2073 const uint8_t *data = NULL;
2074 unsigned int data_size = sizeof(uint8_t);
2075
2076 if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
2077 for (unsigned int i = 0; i < data_size; i++) {
2078 if (data[i] != 0) {
2079 awl_scratch_reg_supported = true;
2080 cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
2081 break;
2082 }
2083 }
2084 }
2085 }
2086 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2087