1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59 #include <kdp/kdp.h>
60 #include <kdp/kdp_core.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_callout.h>
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/percpu.h>
66 #include <kern/spl.h>
67 #include <kern/thread.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/socd_client.h>
71 #include <kern/misc_protos.h>
72 #include <kern/clock.h>
73 #include <kern/telemetry.h>
74 #include <kern/ecc.h>
75 #include <kern/kern_cdata.h>
76 #include <kern/zalloc_internal.h>
77 #include <kern/iotrace.h>
78 #include <pexpert/device_tree.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_map.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_compressor.h>
83 #include <stdarg.h>
84 #include <stdatomic.h>
85 #include <sys/pgo.h>
86 #include <console/serial_protos.h>
87 #include <IOKit/IOBSD.h>
88
89 #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
90 #include <kdp/kdp_udp.h>
91 #endif
92 #include <kern/processor.h>
93
94 #if defined(__i386__) || defined(__x86_64__)
95 #include <IOKit/IOBSD.h>
96
97 #include <i386/cpu_threads.h>
98 #include <i386/pmCPU.h>
99 #include <i386/lbr.h>
100 #endif
101
102 #include <IOKit/IOPlatformExpert.h>
103 #include <machine/machine_cpu.h>
104 #include <machine/pal_routines.h>
105
106 #include <sys/kdebug.h>
107 #include <libkern/OSKextLibPrivate.h>
108 #include <libkern/OSAtomic.h>
109 #include <libkern/kernel_mach_header.h>
110 #include <libkern/section_keywords.h>
111 #include <uuid/uuid.h>
112 #include <mach_debug/zone_info.h>
113 #include <mach/resource_monitors.h>
114 #include <machine/machine_routines.h>
115
116 #include <os/log_private.h>
117
118 #if defined(__arm64__)
119 #include <pexpert/pexpert.h> /* For gPanicBase */
120 #include <arm/caches_internal.h>
121 #include <arm/misc_protos.h>
122 extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
123 #endif
124
125 #include <san/kcov.h>
126
127 #if CONFIG_XNUPOST
128 #include <tests/xnupost.h>
129 extern int vsnprintf(char *, size_t, const char *, va_list);
130 #endif
131
132 #if CONFIG_CSR
133 #include <sys/csr.h>
134 #endif
135
136
137 extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
138 extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );
139
140 unsigned int halt_in_debugger = 0;
141 unsigned int current_debugger = 0;
142 unsigned int active_debugger = 0;
143 SECURITY_READ_ONLY_LATE(unsigned int) panicDebugging = FALSE;
144 unsigned int kernel_debugger_entry_count = 0;
145
146 #if DEVELOPMENT || DEBUG
147 unsigned int panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
148 unsigned int panic_test_action_count = 1;
149 unsigned int panic_test_case = PANIC_TEST_CASE_DISABLED;
150 #endif
151
152 #if defined(__arm64__)
153 struct additional_panic_data_buffer *panic_data_buffers = NULL;
154 #endif
155
156 #if defined(__arm64__)
157 /*
158 * Magic number; this should be identical to the armv7 encoding for trap.
159 */
160 #define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
161 #elif defined (__x86_64__)
162 #define TRAP_DEBUGGER __asm__("int3")
163 #else
164 #error No TRAP_DEBUGGER for this architecture
165 #endif
166
167 #if defined(__i386__) || defined(__x86_64__)
168 #define panic_stop() pmCPUHalt(PM_HALT_PANIC)
169 #else
170 #define panic_stop() panic_spin_forever()
171 #endif
172
173 struct debugger_state {
174 uint64_t db_panic_options;
175 debugger_op db_current_op;
176 boolean_t db_proceed_on_sync_failure;
177 const char *db_message;
178 const char *db_panic_str;
179 va_list *db_panic_args;
180 void *db_panic_data_ptr;
181 unsigned long db_panic_caller;
182 /* incremented whenever we panic or call Debugger (current CPU panic level) */
183 uint32_t db_entry_count;
184 kern_return_t db_op_return;
185 };
186 static struct debugger_state PERCPU_DATA(debugger_state);
187
/* __pure2 is correct if this function is called with preemption disabled */
/*
 * Return this CPU's debugger/panic state record. Callers must have preemption
 * disabled (all call sites are in panic/debugger context) or the result may
 * describe a CPU the thread has since migrated away from.
 */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}
194
195 #define CPUDEBUGGEROP current_debugger_state()->db_current_op
196 #define CPUDEBUGGERMSG current_debugger_state()->db_message
197 #define CPUPANICSTR current_debugger_state()->db_panic_str
198 #define CPUPANICARGS current_debugger_state()->db_panic_args
199 #define CPUPANICOPTS current_debugger_state()->db_panic_options
200 #define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr
201 #define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure
202 #define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
203 #define CPUDEBUGGERRET current_debugger_state()->db_op_return
204 #define CPUPANICCALLER current_debugger_state()->db_panic_caller
205
206
207 /*
208 * Usage:
209 * panic_test_action_count is in the context of other flags, e.g. for IO errors it is "succeed this many times then fail" and for nesting it is "panic this many times then succeed"
210 * panic_test_failure_mode is a bit map of things to do
211 * panic_test_case is what sort of test we are injecting
212 *
213 * For more details see definitions in debugger.h
214 *
215 * Note that not all combinations are sensible, but some actions can be combined, e.g.
216 * - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
217 * - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
218 * - IO_ERR with action 15 will cause 14 successful IOs, then fail on the next one
219 */
220 #if DEVELOPMENT || DEBUG
221 #define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
222 MACRO_BEGIN \
223 if ((panic_test_case & requested) && panic_test_action_count) { \
224 panic_test_action_count--; \
225 volatile int *panic_test_badpointer = (int *)4; \
226 if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while(panic_test_badpointer); } \
227 if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count+1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
228 if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count+1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
229 } \
230 MACRO_END
231
232 #endif /* DEVELOPMENT || DEBUG */
233
234 debugger_op debugger_current_op = DBOP_NONE;
235 const char *debugger_panic_str = NULL;
236 va_list *debugger_panic_args = NULL;
237 void *debugger_panic_data = NULL;
238 uint64_t debugger_panic_options = 0;
239 const char *debugger_message = NULL;
240 unsigned long debugger_panic_caller = 0;
241
242 void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
243 unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
244 unsigned long panic_caller) __dead2 __printflike(1, 0);
245 static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
246 void panic_spin_forever(void) __dead2;
247 extern kern_return_t do_stackshot(void);
248 extern void PE_panic_hook(const char*);
249
250 #define NESTEDDEBUGGERENTRYMAX 5
251 static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
252 NESTEDDEBUGGERENTRYMAX);
253
254 SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
255 static bool PERCPU_DATA(hv_entry_detected); // = false
256 static void awl_set_scratch_reg_hv_bit(void);
257 void awl_mark_hv_entry(void);
258 static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
259
260 #if defined(__arm64__)
261 #define DEBUG_BUF_SIZE (4096)
262
263 /* debug_buf is directly linked with iBoot panic region for arm targets */
264 char *debug_buf_base = NULL;
265 char *debug_buf_ptr = NULL;
266 unsigned int debug_buf_size = 0;
267
268 SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
269 #else /* defined(__arm64__) */
270 #define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
271 /* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
272 static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");
273
274 char debug_buf[DEBUG_BUF_SIZE];
275 struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
276 char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
277 char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));
278
279 /*
280 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from
 * the end of the log because we only support writing (3*PAGESIZE) bytes.
283 */
284 unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));
285
286 boolean_t extended_debug_log_enabled = FALSE;
287 #endif /* defined(__arm64__) */
288
289 #if defined(XNU_TARGET_OS_OSX)
290 #define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
291 #else
292 #define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
293 #endif
294
295 /* Debugger state */
296 atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
297 boolean_t debugger_allcpus_halted = FALSE;
298 boolean_t debugger_safe_to_return = TRUE;
299 unsigned int debugger_context = 0;
300
301 static char model_name[64];
302 unsigned char *kernel_uuid;
303
304 boolean_t kernelcache_uuid_valid = FALSE;
305 uuid_t kernelcache_uuid;
306 uuid_string_t kernelcache_uuid_string;
307
308 boolean_t pageablekc_uuid_valid = FALSE;
309 uuid_t pageablekc_uuid;
310 uuid_string_t pageablekc_uuid_string;
311
312 boolean_t auxkc_uuid_valid = FALSE;
313 uuid_t auxkc_uuid;
314 uuid_string_t auxkc_uuid_string;
315
316
317 /*
318 * By default we treat Debugger() the same as calls to panic(), unless
319 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
320 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
321 *
322 * Return from Debugger() is currently only implemented on x86
323 */
324 static boolean_t debugger_is_panic = TRUE;
325
326 TUNABLE(unsigned int, debug_boot_arg, "debug", 0);
327
328 TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);
329
330 char kernel_uuid_string[37]; /* uuid_string_t */
331 char kernelcache_uuid_string[37]; /* uuid_string_t */
332 char panic_disk_error_description[512];
333 size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);
334
335 extern unsigned int write_trace_on_panic;
336 int kext_assertions_enable =
337 #if DEBUG || DEVELOPMENT
338 TRUE;
339 #else
340 FALSE;
341 #endif
342
343 #if (DEVELOPMENT || DEBUG)
344 uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
345 #endif
346
347 /*
348 * Maintain the physically-contiguous carveouts for the carveout bootargs.
349 */
350 TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);
351
352 TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
353 SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
354 SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
355 SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
356
357
358 /*
359 * Returns whether kernel debugging is expected to be restricted
360 * on the device currently based on CSR or other platform restrictions.
361 */
362 boolean_t
kernel_debugging_restricted(void)363 kernel_debugging_restricted(void)
364 {
365 #if XNU_TARGET_OS_OSX
366 #if CONFIG_CSR
367 if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
368 return TRUE;
369 }
370 #endif /* CONFIG_CSR */
371 return FALSE;
372 #else /* XNU_TARGET_OS_OSX */
373 return FALSE;
374 #endif /* XNU_TARGET_OS_OSX */
375 }
376
/*
 * Startup-time panic subsystem initialization: captures the kernel UUID for
 * panic reports and applies the "debug" boot-arg policy (halt-in-debugger,
 * whether debugging past a panic is allowed, and x86 Debugger() semantics).
 */
__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	/* Pull the kernel's LC_UUID from the Mach-O header for panic reporting */
	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		/* arm64 only permits post-panic debugging when DB_NMI is set */
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

#if defined(__arm64__)
	char kdpname[80];

	/* KDP on arm64 is only explicitly requested via a kdp_match_name boot-arg */
	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
431
#if defined (__x86_64__)
/*
 * Switch the panic log over to an extended, heap-allocated buffer that also
 * has room for the panic-time stackshot at its end, then release the smaller
 * preallocated stackshot buffer that is no longer needed.
 *
 * Only valid on systems with a co-processor paniclog flush routine.
 */
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */
464
/*
 * Initialize the panic log buffer pointers. On arm64 the buffer lives inside
 * the iBoot-provided panic region (gPanicBase); elsewhere it is the static
 * debug_buf, and a separate stackshot buffer is preallocated in case this
 * platform cannot use the extended debug log (see extended_debug_log_init).
 */
void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}
499
500 void
phys_carveout_init(void)501 phys_carveout_init(void)
502 {
503 if (!PE_i_can_has_debugger(NULL)) {
504 return;
505 }
506
507 struct carveout {
508 const char *name;
509 vm_offset_t *va;
510 uint32_t requested_size;
511 uintptr_t *pa;
512 size_t *allocated_size;
513 uint64_t present;
514 } carveouts[] = {
515 {
516 "phys_carveout",
517 &phys_carveout,
518 phys_carveout_mb,
519 &phys_carveout_pa,
520 &phys_carveout_size,
521 phys_carveout_mb != 0,
522 }
523 };
524
525 for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
526 if (carveouts[i].present) {
527 size_t temp_carveout_size = 0;
528 if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
529 panic("%s_mb size overflowed (%uMB)",
530 carveouts[i].name, carveouts[i].requested_size);
531 return;
532 }
533
534 kmem_alloc_contig(kernel_map, carveouts[i].va,
535 temp_carveout_size, PAGE_MASK, 0, 0,
536 KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
537 VM_KERN_MEMORY_DIAG);
538
539 *carveouts[i].pa = kvtophys(*carveouts[i].va);
540 *carveouts[i].allocated_size = temp_carveout_size;
541 }
542 }
543
544 #if __arm64__ && (DEVELOPMENT || DEBUG)
545 /* likely panic_trace boot-arg is also set so check and enable tracing if necessary into new carveout */
546 PE_arm_debug_enable_trace(true);
547 #endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
548 }
549
550 boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)551 debug_is_in_phys_carveout(vm_map_offset_t va)
552 {
553 return phys_carveout_size && va >= phys_carveout &&
554 va < (phys_carveout + phys_carveout_size);
555 }
556
557 boolean_t
debug_can_coredump_phys_carveout(void)558 debug_can_coredump_phys_carveout(void)
559 {
560 return phys_carveout_core;
561 }
562
563 static void
DebuggerLock(void)564 DebuggerLock(void)
565 {
566 int my_cpu = cpu_number();
567 int debugger_exp_cpu = DEBUGGER_NO_CPU;
568 assert(ml_get_interrupts_enabled() == FALSE);
569
570 if (atomic_load(&debugger_cpu) == my_cpu) {
571 return;
572 }
573
574 while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
575 debugger_exp_cpu = DEBUGGER_NO_CPU;
576 }
577
578 return;
579 }
580
/*
 * Release the debugger lock taken by DebuggerLock. Only the owning CPU
 * (asserted below) may call this.
 */
static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}
598
/*
 * Halt every CPU other than the caller's so the debugger/panic flow can run
 * single-threaded. On arm64 this is a cross-call that can fail (the caller
 * decides via proceed_on_failure whether that is fatal); on x86 mp_kdp_enter
 * is used and always reports success.
 */
static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
#pragma unused(is_stackshot)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}
611
/* Resume the CPUs halted by DebuggerHaltOtherCores. */
static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}
621
622 __printflike(3, 0)
623 static void
DebuggerSaveState(debugger_op db_op,const char * db_message,const char * db_panic_str,va_list * db_panic_args,uint64_t db_panic_options,void * db_panic_data_ptr,boolean_t db_proceed_on_sync_failure,unsigned long db_panic_caller)624 DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
625 va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
626 boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
627 {
628 CPUDEBUGGEROP = db_op;
629
630 /*
631 * Note:
632 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
633 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
634 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
635 *
636 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
637 */
638 if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
639 CPUDEBUGGERMSG = db_message;
640 CPUPANICSTR = db_panic_str;
641 CPUPANICARGS = db_panic_args;
642 CPUPANICDATAPTR = db_panic_data_ptr;
643 CPUPANICCALLER = db_panic_caller;
644 }
645
646 CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
647 CPUDEBUGGERRET = KERN_SUCCESS;
648
649 /* Reset these on any nested panics */
650 // follow up in rdar://88497308 (nested panics should not clobber panic flags)
651 CPUPANICOPTS = db_panic_options;
652
653 return;
654 }
655
/*
 * Save the requested debugger state/action into the current processor's
 * percpu state and trap to the debugger.
 *
 * Returns the kern_return_t stashed by the trap handler (db_op_return);
 * the percpu state is cleared again before returning.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	/* Read the result before wiping the state so it is not lost */
	ret = CPUDEBUGGERRET;

	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}
687
/*
 * Backing implementation for the kernel assert() macro: panics with the
 * failing file/line/expression. With CONFIG_NONFATAL_ASSERTS, the
 * "assertions=0" boot-arg downgrades failures to a kprintf and continue.
 */
void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
706
707 boolean_t
debug_is_current_cpu_in_panic_state(void)708 debug_is_current_cpu_in_panic_state(void)
709 {
710 return current_debugger_state()->db_entry_count > 0;
711 }
712
/*
 * check if we are in a nested panic, report findings, take evasive action where necessary
 *
 * Behavior by nesting depth (CPUDEBUGGERCOUNT):
 *   1                      : normal panic, nothing to do here
 *   2 .. max-1             : report the nesting to the paniclog/console and continue
 *   == max                 : Stage 1 bailout - flush the log, then reboot or spin
 *   == max + 1             : Stage 2 bailout - reboot as fast as possible
 *   >  max + 1             : Stage 3 bailout - hang and wait for the watchdog
 *
 * see also PE_update_panicheader_nestedpanic
 */
static void
check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
{
	if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
		// Note: this is the first indication in the panic log or serial that we are off the rails...
		//
		// if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
		// if we panic *after* the log is finalized then we will only see it in the serial log
		//
		paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
		paniclog_flush();

		// print the *new* panic string to the console, we might not get it by other means...
		// TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
		// rdar://87846117 (NestedPanic: output panic string to paniclog)
		if (db_panic_str) {
			printf("Nested panic string:\n");
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
#pragma clang diagnostic pop
			printf("\n<end nested panic string>\n");
		}
	}

	// Stage 1 bailout
	//
	// Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog. If this fails with another nested
	// panic then we will land in Stage 2 below...
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
		uint32_t panic_details = 0;

		// if this is a force-reset panic then capture a log and reboot immediately.
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}

		// normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
		// there. To be safe send another notification, the function called below will only send kPEPanicBegin if it has not yet been sent.
		//
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
		PE_update_panicheader_nestedpanic();
		paniclog_flush();

		if (!panicDebugging) {
			// note that this will also send kPEPanicEnd
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		// prints to console
		paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 2 bailout
	//
	// Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path. Try to issue
	// a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
		if (!panicDebugging) {
			// note that:
			// - this code path should be audited for prints, as that is a common cause of nested panics
			// - this code path should take the fastest route to the actual reset, and not call any un-necessary code
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
		}

		// prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
		paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 3 bailout
	//
	// We are done here, we were unable to reset the platform without another nested panic. Spin until the watchdog kicks in.
	//
	if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
		kdp_machine_reboot_type(kPEHangCPU, 0);
	}
}
801
802 void
Debugger(const char * message)803 Debugger(const char *message)
804 {
805 DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
806 }
807
/*
 * Enter the Debugger
 *
 * This is similar to, but not the same as a panic
 *
 * Key differences:
 * - we get here from a debugger entry action (e.g. NMI)
 * - the system is resumable on x86 (in theory, however it is not clear if this is tested)
 * - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
 *
 * If a trap context is supplied the debugger trap handler is invoked
 * directly; otherwise we trap via DebuggerTrapWithState. On resume, the
 * saved interrupt/preemption/pointer-hiding state is restored.
 */
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(message);

	/* Show real pointers in the debugger/panic output */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	/* resume from the debugger */

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}
870
/*
 * Registered KDP event callouts, kept in a lock-less singly-linked list
 * (pushed at the head by kdp_register_callout()). Entries are allocated
 * from permanent memory and are never removed.
 */
static struct kdp_callout {
	struct kdp_callout * callout_next;   /* next registered callout, or NULL */
	kdp_callout_fn_t callout_fn;         /* function invoked on KDP events */
	boolean_t callout_in_progress;       /* set while callout_fn is running to avoid re-entry */
	void * callout_arg;                  /* opaque argument passed back to callout_fn */
} * kdp_callout_list = NULL;
877
878 /*
879 * Called from kernel context to register a kdp event callout.
880 */
881 void
kdp_register_callout(kdp_callout_fn_t fn,void * arg)882 kdp_register_callout(kdp_callout_fn_t fn, void * arg)
883 {
884 struct kdp_callout * kcp;
885 struct kdp_callout * list_head;
886
887 kcp = zalloc_permanent_type(struct kdp_callout);
888
889 kcp->callout_fn = fn;
890 kcp->callout_arg = arg;
891 kcp->callout_in_progress = FALSE;
892
893 /* Lock-less list insertion using compare and exchange. */
894 do {
895 list_head = kdp_callout_list;
896 kcp->callout_next = list_head;
897 } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
898 }
899
900 static void
kdp_callouts(kdp_event_t event)901 kdp_callouts(kdp_event_t event)
902 {
903 struct kdp_callout *kcp = kdp_callout_list;
904
905 while (kcp) {
906 if (!kcp->callout_in_progress) {
907 kcp->callout_in_progress = TRUE;
908 kcp->callout_fn(kcp->callout_arg, event);
909 kcp->callout_in_progress = FALSE;
910 }
911 kcp = kcp->callout_next;
912 }
913 }
914
915 #if defined(__arm64__)
916 /*
917 * Register an additional buffer with data to include in the panic log
918 *
919 * <rdar://problem/50137705> tracks supporting more than one buffer
920 *
921 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
922 */
923 void
register_additional_panic_data_buffer(const char * producer_name,void * buf,int len)924 register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
925 {
926 if (panic_data_buffers != NULL) {
927 panic("register_additional_panic_data_buffer called with buffer already registered");
928 }
929
930 if (producer_name == NULL || (strlen(producer_name) == 0)) {
931 panic("register_additional_panic_data_buffer called with invalid producer_name");
932 }
933
934 if (buf == NULL) {
935 panic("register_additional_panic_data_buffer called with invalid buffer pointer");
936 }
937
938 if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
939 panic("register_additional_panic_data_buffer called with invalid length");
940 }
941
942 struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
943 new_panic_data_buffer->producer_name = producer_name;
944 new_panic_data_buffer->buf = buf;
945 new_panic_data_buffer->len = len;
946
947 if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
948 panic("register_additional_panic_data_buffer called with buffer already registered");
949 }
950
951 return;
952 }
953 #endif /* defined(__arm64__) */
954
955 /*
956 * An overview of the xnu panic path:
957 *
958 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
959 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
960 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
961 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
962 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
963 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
964 * according to the device's boot-args.
965 */
966 #undef panic
967 void
panic(const char * str,...)968 panic(const char *str, ...)
969 {
970 va_list panic_str_args;
971
972 va_start(panic_str_args, str);
973 panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
974 va_end(panic_str_args);
975 }
976
977 void
panic_with_options(unsigned int reason,void * ctx,uint64_t debugger_options_mask,const char * str,...)978 panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
979 {
980 va_list panic_str_args;
981
982 va_start(panic_str_args, str);
983 panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
984 NULL, (unsigned long)(char *)__builtin_return_address(0));
985 va_end(panic_str_args);
986 }
987
988 boolean_t
panic_validate_ptr(void * ptr,vm_size_t size,const char * what)989 panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
990 {
991 if (ptr == NULL) {
992 paniclog_append_noflush("NULL %s pointer\n", what);
993 return false;
994 }
995
996 if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
997 paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
998 what, ptr, (uint32_t)size);
999 return false;
1000 }
1001
1002 return true;
1003 }
1004
1005 boolean_t
panic_get_thread_proc_task(struct thread * thread,struct task ** task,struct proc ** proc)1006 panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
1007 {
1008 if (!PANIC_VALIDATE_PTR(thread)) {
1009 return false;
1010 }
1011
1012 if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
1013 return false;
1014 }
1015
1016 if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
1017 return false;
1018 }
1019
1020 if (task) {
1021 *task = thread->t_tro->tro_task;
1022 }
1023
1024 if (!panic_validate_ptr(thread->t_tro->tro_proc,
1025 sizeof(struct proc *), "bsd_info")) {
1026 *proc = NULL;
1027 } else {
1028 *proc = thread->t_tro->tro_proc;
1029 }
1030
1031 return true;
1032 }
1033
1034 #if defined (__x86_64__)
1035 /*
1036 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
1037 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
1038 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
1039 * thread when writing the panic log.
1040 *
1041 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
1042 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	/* The caller must already hold a reference (see header comment above); a zero refcount means the thread could be freed under us. */
	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	/* DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE tells the panic path to backtrace 'thread' (passed as panic_data_ptr) instead of the current thread. */
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
1062 #endif /* defined (__x86_64__) */
1063
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
/*
 * Common funnel for all panic entry points (panic(), panic_with_options(),
 * panic_with_thread_context()). Saves the panic state on the current CPU and
 * traps to the debugger; does not return (see the overview comment above).
 */
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	/* Dump the kdebug trace to a file before we lose interrupts, if requested and still safe to do so. */
	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__)
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}
1149
/* Terminal state: spin forever using the lowest-power wait available. */
void
panic_spin_forever(void)
{
	for (;;) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return. But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}
1162
1163 static void
kdp_machine_reboot_type(unsigned int type,uint64_t debugger_flags)1164 kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
1165 {
1166 if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
1167 PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1168 } else {
1169 PEHaltRestart(type);
1170 }
1171 halt_all_cpus(TRUE);
1172 }
1173
/* Reboot from KDP with the default panic-restart behavior and no option flags. */
void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}
1179
/*
 * printf-style logging to the console/debug output at panic time.
 * On arm64 the paniclog is flushed afterwards so the text is persisted.
 */
static __attribute__((unused)) void
panic_debugger_log(const char *string, ...)
{
	va_list panic_debugger_log_args;

	va_start(panic_debugger_log_args, string);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	_doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
#pragma clang diagnostic pop
	va_end(panic_debugger_log_args);

#if defined(__arm64__)
	paniclog_flush();
#endif
}
1196
1197 /*
1198 * Gather and save diagnostic information about a panic (or Debugger call).
1199 *
1200 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
1201 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e no
1202 * paniclog is written and no core is written unless we request a core on NMI.
1203 *
1204 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
1205 * and calling out to any other functions we have for collecting diagnostic info.
1206 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try not to break core dump path by sanitizer. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		/* if this is a force-reset panic then capture a log and reboot immediately. */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		}

		if (CPUDEBUGGERCOUNT > 1) {
			/*
			 * we are in a nested panic. Record the nested bit in panic flags and do some housekeeping
			 */
			PE_update_panicheader_nestedpanic();
			paniclog_flush();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		/* Give registered KDP callouts a chance to contribute before the log is saved. */
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			/* Corefile infrastructure never came up; record why in the paniclog. */
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
#if defined(__arm64__)
				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
				}
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
					}
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
			paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is sucessfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	/* Diagnostics collection is complete; notify the platform layer. */
	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/*
		 * Print panic string at the end of serial output
		 * to make panic more obvious when someone connects a debugger
		 */
		if (debugger_panic_str) {
			panic_debugger_log("Original panic string:\n");
			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
			panic_debugger_log("\n");
		}

		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	/* No debugger allowed/attached: reboot rather than sit in the debugger. */
	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
	panic_spin_forever();
}
1457
#if SCHED_HYGIENE_DEBUG
/*
 * Timestamps for the numbered checkpoints inside handle_debugger_trap(),
 * recorded with mach_absolute_time() for post-hoc timing analysis of
 * debugger entry/exit. Compiled out unless SCHED_HYGIENE_DEBUG is set.
 */
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif /* SCHED_HYGIENE_DEBUG */
1464
/*
 * Entry point after trapping into the debugger: acquire the debugger lock,
 * halt the other cores, dispatch on the saved per-CPU operation (breakpoint,
 * stackshot, or the panic/diagnostics path), then restore state and resume.
 */
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	/* Saved so nested entries can restore the outer operation on exit. */
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

	DEBUGGER_TRAP_TIMESTAMP(1);

#if SCHED_HYGIENE_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif /* SCHED_HYGIENE_DEBUG */
	/* Could not halt the other cores: report the failure and bail out. */
	if (ret != KERN_SUCCESS) {
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_mark_as_busy();
#endif

	/* Dispatch on the requested operation. */
	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		/* note: this is the panic path... */
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_unmark_as_busy();
#endif

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	/* Clear the saved panic state unless we trapped for a breakpoint. */
	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}
1589
/*
 * Legacy BSD-style kernel log: writes to the console and forwards to os_log.
 * The 'level' argument is ignored (kept for interface compatibility).
 */
__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef MACH_BSD
	va_start(listp, fmt);
	/* Two va_lists: _doprnt consumes one, os_log_with_args the copy. */
	va_copy(listp2, listp);

	/* Console output is serialized by disabling preemption around the locked putc. */
	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
#pragma clang diagnostic pop
	va_end(listp2);
#endif
}
1619
1620 /*
1621 * Per <rdar://problem/24974766>, skip appending log messages to
1622 * the new logging infrastructure in contexts where safety is
1623 * uncertain. These contexts include:
1624 * - When we're in the debugger
1625 * - We're in a panic
1626 * - Interrupts are disabled
1627 * - Or Pre-emption is disabled
1628 * In all the above cases, it is potentially unsafe to log messages.
1629 */
1630
1631 boolean_t
1632 oslog_is_safe(void)
1633 {
1634 return kernel_debugger_entry_count == 0 &&
1635 not_in_kdp == 1 &&
1636 get_preemption_level() == 0 &&
1637 ml_get_interrupts_enabled() == TRUE;
1638 }
1639
1640 boolean_t
1641 debug_mode_active(void)
1642 {
1643 return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
1644 }
1645
1646 void
1647 debug_putc(char c)
1648 {
1649 if ((debug_buf_size != 0) &&
1650 ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
1651 *debug_buf_ptr = c;
1652 debug_buf_ptr++;
1653 }
1654 }
1655
#if defined (__x86_64__)
/*
 * Eight 7-bit fields packed into 7 bytes: the unit of the 8-to-7 byte
 * compression used for x86 paniclogs (see packA()/unpackA() below).
 */
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;
1669
1670 /*
1671 * In-place packing routines -- inefficient, but they're called at most once.
1672 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
1673 */
1674 int
1675 packA(char *inbuf, uint32_t length, uint32_t buflen)
1676 {
1677 unsigned int i, j = 0;
1678 pasc_t pack;
1679
1680 length = MIN(((length + 7) & ~7), buflen);
1681
1682 for (i = 0; i < length; i += 8) {
1683 pack.a = inbuf[i];
1684 pack.b = inbuf[i + 1];
1685 pack.c = inbuf[i + 2];
1686 pack.d = inbuf[i + 3];
1687 pack.e = inbuf[i + 4];
1688 pack.f = inbuf[i + 5];
1689 pack.g = inbuf[i + 6];
1690 pack.h = inbuf[i + 7];
1691 bcopy((char *) &pack, inbuf + j, 7);
1692 j += 7;
1693 }
1694 return j;
1695 }
1696
/*
 * Reverse of packA(): expand each 7-byte group back into 8 bytes of 7-bit
 * characters, in place. 'length' is the packed length in bytes on entry.
 */
void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	/* Convert the packed length into the unpacked length we will produce. */
	length = (length * 8) / 7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		/* Shift the remaining packed data right by one byte to make room for the 8th output byte (overlapping copy; bcopy handles overlap). */
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
1717 #endif /* defined (__x86_64__) */
1718
1719 extern char *proc_name_address(void *);
1720 extern char *proc_longname_address(void *);
1721
/*
 * Append the current thread's process name to the paniclog, validating every
 * pointer along the way so a corrupt thread/proc cannot fault the panic path.
 */
__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	struct proc *cbsd_info = NULL;
	task_t ctask = NULL;
	vm_size_t size;

	/* Validate the thread/task/proc chain before dereferencing anything. */
	if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	/* Prefer the long process name; fall back to the short name if empty. */
	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	if (size == 0 || proc_name[0] == '\0') {
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			proc_name[size - 1] = '\0';
		}
	}

out:
	/* Guarantee NUL termination even after a partial nofault copy. */
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
	    current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
}
1755
1756 unsigned
1757 panic_active(void)
1758 {
1759 return debugger_current_op == DBOP_PANIC ||
1760 (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1761 }
1762
/* Record the platform model string (for later inclusion in panic logs). */
void
populate_model_name(char *model_string)
{
	/* strlcpy guarantees NUL termination of the global model_name. */
	strlcpy(model_name, model_string, sizeof(model_name));
}
1768
1769 void
1770 panic_display_model_name(void)
1771 {
1772 char tmp_model_name[sizeof(model_name)];
1773
1774 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1775 return;
1776 }
1777
1778 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1779
1780 if (tmp_model_name[0] != 0) {
1781 paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1782 }
1783 }
1784
1785 void
1786 panic_display_kernel_uuid(void)
1787 {
1788 char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1789
1790 if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1791 return;
1792 }
1793
1794 if (tmp_kernel_uuid[0] != '\0') {
1795 paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
1796 }
1797 }
1798
1799
/*
 * Append kernel slide/base information to the paniclog so addresses in the
 * log can be un-slid. The fileset kernelcache case reports both the cache
 * slide and the per-kernel slide within it.
 */
void
panic_display_kernel_aslr(void)
{

	kc_format_t kc_format;

	PE_get_primary_kc_format(&kc_format);

	if (kc_format == KCFormatFileset) {
		void *kch = PE_get_kc_header(KCKindPrimary);
		paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("KernelCache base:  %p\n", (void*) kch);
		paniclog_append_noflush("Kernel slide:      0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text base:  %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
		extern vm_offset_t segTEXTEXECB;
		paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text exec base:  0x%016lx\n", (unsigned long)segTEXTEXECB);
#endif /* defined(__arm64__) */
	} else if (vm_kernel_slide) {
		/* Non-fileset kernel with a nonzero slide. */
		paniclog_append_noflush("Kernel slide:     0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
	} else {
		/* No slide in effect; report the text base only. */
		paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
	}
}
1826
1827 void
1828 panic_display_hibb(void)
1829 {
1830 #if defined(__i386__) || defined (__x86_64__)
1831 paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
1832 #endif
1833 }
1834
#if CONFIG_ECC_LOGGING
/*
 * Append the count of corrected ECC errors to the panic log, when any
 * corrections have been recorded.
 */
__private_extern__ void
panic_display_ecc_errors(void)
{
	uint32_t corrections = ecc_log_get_correction_count();

	if (corrections == 0) {
		return;
	}
	paniclog_append_noflush("ECC Corrections:%u\n", corrections);
}
#endif /* CONFIG_ECC_LOGGING */
1846
1847 #if CONFIG_FREEZE
1848 extern bool freezer_incore_cseg_acct;
1849 extern int32_t c_segment_pages_compressed_incore;
1850 #endif
1851
1852 extern uint32_t c_segment_pages_compressed;
1853 extern uint32_t c_segment_count;
1854 extern uint32_t c_segments_limit;
1855 extern uint32_t c_segment_pages_compressed_limit;
1856 extern uint32_t c_segment_pages_compressed_nearing_limit;
1857 extern uint32_t c_segments_nearing_limit;
1858 extern int vm_num_swap_files;
1859
1860 void
1861 panic_display_compressor_stats(void)
1862 {
1863 int isswaplow = vm_swap_low_on_space();
1864 #if CONFIG_FREEZE
1865 uint32_t incore_seg_count;
1866 uint32_t incore_compressed_pages;
1867 if (freezer_incore_cseg_acct) {
1868 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
1869 incore_compressed_pages = c_segment_pages_compressed_incore;
1870 } else {
1871 incore_seg_count = c_segment_count;
1872 incore_compressed_pages = c_segment_pages_compressed;
1873 }
1874
1875 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
1876 (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
1877 (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
1878 (incore_seg_count * 100) / c_segments_limit,
1879 (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
1880 vm_num_swap_files,
1881 isswaplow ? "LOW":"OK");
1882 #else /* CONFIG_FREEZE */
1883 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
1884 (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
1885 (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
1886 (c_segment_count * 100) / c_segments_limit,
1887 (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
1888 vm_num_swap_files,
1889 isswaplow ? "LOW":"OK");
1890 #endif /* CONFIG_FREEZE */
1891 }
1892
#if !CONFIG_TELEMETRY
/*
 * Stub for kernels built without telemetry: always reports that the
 * operation is unsupported, so callers need no compile-time guard.
 */
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
#endif
1900
1901 #include <machine/machine_cpu.h>
1902
1903 TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
1904
1905 boolean_t
1906 kern_feature_override(uint32_t fmask)
1907 {
1908 return (kern_feature_overrides & fmask) == fmask;
1909 }
1910
1911 boolean_t
1912 on_device_corefile_enabled(void)
1913 {
1914 assert(startup_phase >= STARTUP_SUB_TUNABLES);
1915 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
1916 if (debug_boot_arg == 0) {
1917 return FALSE;
1918 }
1919 if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
1920 return FALSE;
1921 }
1922 #if !XNU_TARGET_OS_OSX
1923 /*
1924 * outside of macOS, if there's a debug boot-arg set and local
1925 * cores aren't explicitly disabled, we always write a corefile.
1926 */
1927 return TRUE;
1928 #else /* !XNU_TARGET_OS_OSX */
1929 /*
1930 * on macOS, if corefiles on panic are requested and local cores
1931 * aren't disabled we write a local core.
1932 */
1933 if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
1934 return TRUE;
1935 }
1936 #endif /* !XNU_TARGET_OS_OSX */
1937 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1938 return FALSE;
1939 }
1940
1941 boolean_t
1942 panic_stackshot_to_disk_enabled(void)
1943 {
1944 assert(startup_phase >= STARTUP_SUB_TUNABLES);
1945 #if defined(__x86_64__)
1946 if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
1947 /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
1948 if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
1949 return FALSE;
1950 }
1951
1952 return TRUE;
1953 }
1954 #endif
1955 return FALSE;
1956 }
1957
1958 const char *
1959 sysctl_debug_get_preoslog(size_t *size)
1960 {
1961 int result = 0;
1962 void *preoslog_pa = NULL;
1963 int preoslog_size = 0;
1964
1965 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
1966 if (result || preoslog_pa == NULL || preoslog_size == 0) {
1967 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
1968 *size = 0;
1969 return NULL;
1970 }
1971
1972 /*
1973 * Beware:
1974 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
1975 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
1976 */
1977 *size = preoslog_size;
1978 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
1979 }
1980
/*
 * Release the bootloader "preoslog" region on RELEASE kernels. On
 * Development & Debug builds the buffer is retained so it can be
 * extracted from coredumps.
 */
void
sysctl_debug_free_preoslog(void)
{
#if RELEASE
	void *phys_addr = NULL;
	int region_size = 0;
	int err = IODTGetLoaderInfo("preoslog", &phys_addr, &region_size);

	if (err != 0 || phys_addr == NULL || region_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", err, phys_addr, region_size);
		return;
	}

	IODTFreeLoaderInfo("preoslog", phys_addr, region_size);
#endif // RELEASE
}
2000
#if (DEVELOPMENT || DEBUG)

/*
 * Honor a user-requested platform stall for the given stall point: panic
 * when the panic action bit is set, otherwise log and spin forever.
 */
void
platform_stall_panic_or_spin(uint32_t req)
{
	if ((xnu_platform_stall_value & req) == 0) {
		return;
	}
	if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
		panic("Platform stall: User requested panic");
	} else {
		paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
		panic_spin_forever();
	}
}
#endif
2016
/* Flag recorded in the AWL watchdog scratch register on hypervisor entry. */
#define AWL_HV_ENTRY_FLAG (0x1)

/*
 * Set the HV-entry flag in the AWL watchdog diagnostic register via a
 * read-modify-write of the system register (arm64 only; no-op elsewhere).
 */
static inline void
awl_set_scratch_reg_hv_bit(void)
{
#if defined(__arm64__)
#define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
	__builtin_arm_wsr64(WATCHDOG_DIAG0, __builtin_arm_rsr64(WATCHDOG_DIAG0) | AWL_HV_ENTRY_FLAG);
#endif // defined(__arm64__)
}
2029
2030 void
2031 awl_mark_hv_entry(void)
2032 {
2033 if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
2034 return;
2035 }
2036 *PERCPU_GET(hv_entry_detected) = true;
2037
2038 awl_set_scratch_reg_hv_bit();
2039 }
2040
2041 /*
2042 * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2043 * so restore it manually.
2044 */
2045 static bool
2046 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2047 {
2048 if (event == CPU_BOOTED) {
2049 if (*PERCPU_GET(hv_entry_detected)) {
2050 awl_set_scratch_reg_hv_bit();
2051 }
2052 }
2053
2054 return true;
2055 }
2056
2057 /*
2058 * Identifies and sets a flag if AWL Scratch0/1 exists in the system, subscribes
2059 * for a callback to restore register after hibernation
2060 */
2061 __startup_func
2062 static void
2063 set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
2064 {
2065 DTEntry base = NULL;
2066
2067 if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
2068 return;
2069 }
2070 const uint8_t *data = NULL;
2071 unsigned int data_size = sizeof(uint8_t);
2072
2073 if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
2074 for (unsigned int i = 0; i < data_size; i++) {
2075 if (data[i] != 0) {
2076 awl_scratch_reg_supported = true;
2077 cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
2078 break;
2079 }
2080 }
2081 }
2082 }
2083 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2084