/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/percpu.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/socd_client.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc_internal.h>
#include <kern/iotrace.h>
#include <pexpert/device_tree.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_compressor.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>
#include <IOKit/IOBSD.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <IOKit/IOBSD.h>

#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/lbr.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/machine_cpu.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>
#include <mach/resource_monitors.h>
#include <machine/machine_routines.h>
#include <sys/proc_require.h>

#include <os/log_private.h>

#include <kern/ext_paniclog.h>

#if defined(__arm64__)
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#include <san/kcov.h>

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif


extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );

unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
SECURITY_READ_ONLY_LATE(unsigned int) panicDebugging = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if DEVELOPMENT || DEBUG
unsigned int panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
unsigned int panic_test_action_count = 1;
unsigned int panic_test_case = PANIC_TEST_CASE_DISABLED;
#endif

#if defined(__arm64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm64__)
/*
 * Magic number; this should be identical to the armv7 encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop() pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop() panic_spin_forever()
#endif

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
/*
 * More than enough for any typical format string passed to panic();
 * anything longer will be truncated but that's better than nothing.
 */
#define EARLY_PANIC_BUFLEN 256
#endif

struct debugger_state {
	uint64_t db_panic_options;
	debugger_op db_current_op;
	boolean_t db_proceed_on_sync_failure;
	const char *db_message;
	const char *db_panic_str;
	va_list *db_panic_args;
	void *db_panic_data_ptr;
	unsigned long db_panic_caller;
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t db_entry_count;
	kern_return_t db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

#define CPUDEBUGGEROP    current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG   current_debugger_state()->db_message
#define CPUPANICSTR      current_debugger_state()->db_panic_str
#define CPUPANICARGS     current_debugger_state()->db_panic_args
#define CPUPANICOPTS     current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR  current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC  current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET   current_debugger_state()->db_op_return
#define CPUPANICCALLER   current_debugger_state()->db_panic_caller


/*
 * Usage:
 * panic_test_action_count is interpreted in the context of the other flags, e.g.
 *   for IO errors it means "succeed this many times, then fail"; for nesting it
 *   means "panic this many times, then succeed"
 * panic_test_failure_mode is a bitmap of failure actions to take
 * panic_test_case selects what sort of test we are injecting
 *
 * For more details see the definitions in debugger.h
 *
 * Note that not all combinations are sensible, but some actions can be combined, e.g.
 * - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
 * - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
 * - IO_ERR with action count = 15 will cause 14 successful IOs, then fail on the next one
 */
#if DEVELOPMENT || DEBUG
#define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
	MACRO_BEGIN \
	if ((panic_test_case & requested) && panic_test_action_count) { \
	        panic_test_action_count--; \
	        volatile int *panic_test_badpointer = (int *)4; \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while (panic_test_badpointer) {; } } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count + 1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count + 1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
	} \
	MACRO_END

#endif /* DEVELOPMENT || DEBUG */
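
/*
 * Illustrative sketch (not part of the shipping code): a panic test is armed by
 * setting the three knobs above before triggering the initial panic. For example,
 * to request nested bad-pointer dereferences at debugger entry:
 *
 *	panic_test_case = PANIC_TEST_CASE_RECURPANIC_ENTRY;
 *	panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
 *	panic_test_action_count = 2;
 *	panic("start nested panic test");
 *
 * Each time INJECT_NESTED_PANIC_IF_REQUESTED() is reached with a matching test
 * case and a nonzero action count, it decrements the count and injects the
 * requested failure(s).
 */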

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2 __printflike(1, 0);
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);
extern int sync(proc_t p, void *, void *);

#define NESTEDDEBUGGERENTRYMAX 5
static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
    NESTEDDEBUGGERENTRYMAX);
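
/*
 * Example (a sketch, assuming a kernel configuration that honors boot-args):
 * booting with `nested_panic_max=8` raises the nested-entry limit consumed by
 * check_and_handle_nested_panic() below from the default of 5.
 */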

SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
static bool PERCPU_DATA(hv_entry_detected); // = false
static void awl_set_scratch_reg_hv_bit(void);
void awl_mark_hv_entry(void);
static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);

#if defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif

/* Debugger state */
atomic_int debugger_cpu = DEBUGGER_NO_CPU;
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;


/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
 * In that case, returning from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86.
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

#if (DEVELOPMENT || DEBUG)
uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
#endif

/*
 * Maintain the physically-contiguous carveouts for the carveout boot-args.
 */
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);

TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;


/*
 * Returns whether kernel debugging is expected to be restricted
 * on the device currently, based on CSR or other platform restrictions.
 */
boolean_t
kernel_debugging_restricted(void)
{
#if XNU_TARGET_OS_OSX
#if CONFIG_CSR
	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
		return TRUE;
	}
#endif /* CONFIG_CSR */
	return FALSE;
#else /* XNU_TARGET_OS_OSX */
	return FALSE;
#endif /* XNU_TARGET_OS_OSX */
}

__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

#if defined(__arm64__)
	char kdpname[80];

	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
	 * In that case, returning from Debugger() is supported. The default exists
	 * because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86.
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
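
/*
 * Example (a sketch; the DB_* bit definitions are assumed to live in
 * <kern/debug.h>): booting a kernel that allows debugging with a `debug`
 * boot-arg value that includes DB_HALT makes the boot CPU wait in the debugger
 * early in boot, and on arm64 a value that includes DB_NMI is what enables
 * panicDebugging above.
 */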

#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);

#if CONFIG_EXT_PANICLOG
	ext_paniclog_init();
#endif
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}

void
phys_carveout_init(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

#if __arm__ || __arm64__
#if DEVELOPMENT || DEBUG
#endif /* DEVELOPMENT || DEBUG */
#endif /* __arm__ || __arm64__ */

	struct carveout {
		const char *name;
		vm_offset_t *va;
		uint32_t requested_size;
		uintptr_t *pa;
		size_t *allocated_size;
		uint64_t present;
	} carveouts[] = {
		{
			"phys_carveout",
			&phys_carveout,
			phys_carveout_mb,
			&phys_carveout_pa,
			&phys_carveout_size,
			phys_carveout_mb != 0,
		}
	};

	for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
		if (carveouts[i].present) {
			size_t temp_carveout_size = 0;
			if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
				panic("%s_mb size overflowed (%uMB)",
				    carveouts[i].name, carveouts[i].requested_size);
				return;
			}

			kmem_alloc_contig(kernel_map, carveouts[i].va,
			    temp_carveout_size, PAGE_MASK, 0, 0,
			    KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
			    VM_KERN_MEMORY_DIAG);

			*carveouts[i].pa = kvtophys(*carveouts[i].va);
			*carveouts[i].allocated_size = temp_carveout_size;
		}
	}

#if __arm64__ && (DEVELOPMENT || DEBUG)
	/* The panic_trace boot-arg is likely also set, so check and, if necessary, enable tracing into the new carveout */
	PE_arm_debug_enable_trace(true);
#endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
}

boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)
{
	return phys_carveout_size && va >= phys_carveout &&
	       va < (phys_carveout + phys_carveout_size);
}

boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}
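
/*
 * Example (a sketch; the boot-arg names come from the TUNABLEs above): booting
 * with `phys_carveout_mb=32` reserves a 32MB physically-contiguous region,
 * after which:
 *
 *	phys_carveout      - kernel VA of the carveout
 *	phys_carveout_pa   - its physical address (kvtophys() of the VA)
 *	phys_carveout_size - 32 * 1024 * 1024
 *
 * Adding `phys_carveout_core=0` additionally excludes the region from
 * on-device coredumps via debug_can_coredump_phys_carveout().
 */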

static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}

static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}

static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
#pragma unused(is_stackshot)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}

__printflike(3, 0)
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	CPUDEBUGGEROP = db_op;

	/*
	 * Note:
	 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
	 *
	 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
	 */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	// follow up in rdar://88497308 (nested panics should not clobber panic flags)
	CPUPANICOPTS = db_panic_options;

	return;
}

/*
 * Save the requested debugger state/action into the current processor's
 * per-CPU state and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
	if (!PE_arm_debug_and_trace_initialized()) {
		/*
		 * In practice this can only happen if we panicked very early,
		 * when only the boot CPU is online and before it has finished
		 * initializing the debug and trace infrastructure. We're going
		 * to hang soon, so let's at least make sure the message passed
		 * to panic() is actually logged.
		 */
		char buf[EARLY_PANIC_BUFLEN];
		vsnprintf(buf, EARLY_PANIC_BUFLEN, db_panic_str, *db_panic_args);
		paniclog_append_noflush("%s\n", buf);
	}
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	ret = CPUDEBUGGERRET;

	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}
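
/*
 * Illustrative call (a sketch modeled on the stackshot path, which runs its
 * collection in the debugger trap context):
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	kern_return_t kr = DebuggerTrapWithState(DBOP_STACKSHOT, NULL, NULL,
 *	    NULL, 0, NULL, FALSE, 0);
 *	ml_set_interrupts_enabled(istate);
 *
 * The request is serviced by handle_debugger_trap() (see the DBOP_STACKSHOT
 * case there) and the result comes back through the per-CPU db_op_return.
 */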

void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}

boolean_t
debug_is_current_cpu_in_panic_state(void)
{
	return current_debugger_state()->db_entry_count > 0;
}

/*
 * Check whether we are in a nested panic, report findings, and take evasive
 * action where necessary.
 *
 * See also PE_update_panicheader_nestedpanic.
 */
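/*
 * In summary (derived from the stages below, with N = max_debugger_entry_count,
 * default NESTEDDEBUGGERENTRYMAX):
 *
 *	1 < count < N  - report the nesting, then continue the normal panic flow
 *	count == N     - Stage 1: flush the paniclog, then reset (or spin)
 *	count == N + 1 - Stage 2: reset as directly as possible (or spin)
 *	count  > N + 1 - Stage 3: hang and wait for the watchdog
 */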
static void
check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
{
	if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
		// Note: this is the first indication in the panic log or serial that we are off the rails...
		//
		// if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
		// if we panic *after* the log is finalized then we will only see it in the serial log
		//
		paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
		paniclog_flush();

		// print the *new* panic string to the console, we might not get it by other means...
		// TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panic string in the ips file is <mysterious>
		// rdar://87846117 (NestedPanic: output panic string to paniclog)
		if (db_panic_str) {
			printf("Nested panic string:\n");
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
#pragma clang diagnostic pop
			printf("\n<end nested panic string>\n");
		}
	}

	// Stage 1 bailout
	//
	// Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog. If this fails with another nested
	// panic then we will land in Stage 2 below...
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
		uint32_t panic_details = 0;

		// if this is a force-reset panic then capture a log and reboot immediately.
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}

		// normally kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
		// there. To be safe, send another notification; the function called below will only send kPEPanicBegin if it has not yet been sent.
		//
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
		PE_update_panicheader_nestedpanic();
		paniclog_flush();

		if (!panicDebugging) {
			// note that this will also send kPEPanicEnd
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		// prints to console
		paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 2 bailout
	//
	// Things are severely hosed; we have nested to the point of bailout and then nested again during the bailout path. Try to issue
	// a chipreset as quickly as possible; hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
		if (!panicDebugging) {
			// note that:
			// - this code path should be audited for prints, as that is a common cause of nested panics
			// - this code path should take the fastest route to the actual reset, and not call any unnecessary code
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
		}

		// prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
		paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 3 bailout
	//
	// We are done here; we were unable to reset the platform without another nested panic. Spin until the watchdog kicks in.
	//
	if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
		kdp_machine_reboot_type(kPEHangCPU, 0);
	}
}

void
Debugger(const char *message)
{
	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
}

/*
 * Enter the Debugger
 *
 * This is similar to, but not the same as, a panic.
 *
 * Key differences:
 * - we get here from a debugger entry action (e.g. NMI)
 * - the system is resumable on x86 (in theory; however, it is not clear whether this is tested)
 * - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
 *
 */
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(message);

	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	/* resume from the debugger */

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}

static struct kdp_callout {
	struct kdp_callout *callout_next;
	kdp_callout_fn_t callout_fn;
	boolean_t callout_in_progress;
	void *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
	struct kdp_callout *kcp;
	struct kdp_callout *list_head;

	kcp = zalloc_permanent_type(struct kdp_callout);

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
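
/*
 * Hypothetical registration sketch (the callout signature is inferred from the
 * invocation in kdp_callouts() below): callouts fire around debugger entry/exit
 * and paniclog writing, so they must be panic-safe -- no allocation, no locks
 * that might be held by a halted core.
 *
 *	static void
 *	my_debug_callout(void *arg, kdp_event_t event)
 *	{
 *		switch (event) {
 *		case KDP_EVENT_ENTER:    // quiesce hardware owned by `arg`
 *			break;
 *		case KDP_EVENT_PANICLOG: // contribute diagnostics
 *			break;
 *		case KDP_EVENT_EXIT:     // resume
 *			break;
 *		}
 *	}
 *
 *	kdp_register_callout(my_debug_callout, my_state);
 */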

static void
kdp_callouts(kdp_event_t event)
{
	struct kdp_callout *kcp = kdp_callout_list;

	while (kcp) {
		if (!kcp->callout_in_progress) {
			kcp->callout_in_progress = TRUE;
			kcp->callout_fn(kcp->callout_arg, event);
			kcp->callout_in_progress = FALSE;
		}
		kcp = kcp->callout_next;
	}
}

#if defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be deallocated, as we reference them during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
#endif /* defined(__arm64__) */
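
/*
 * Hypothetical usage sketch (arm64 only; names are illustrative): a subsystem
 * donates a long-lived buffer whose contents get snapshotted into the panic log.
 *
 *	static char my_panic_data[128];  // never freed; referenced at panic time
 *
 *	register_additional_panic_data_buffer("my_subsystem",
 *	    my_panic_data, sizeof(my_panic_data));
 *
 * Only a single buffer may currently be registered, and len must be in
 * (0, ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN].
 */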

/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
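/*
 * Condensed flow for a normal, non-nested panic (on the CPU that wins the race):
 *
 *	panic()
 *	  -> panic_trap_to_debugger()        // saves per-CPU panic state
 *	       -> TRAP_DEBUGGER              // int3 on x86 / trap instruction on arm64
 *	            -> handle_debugger_trap()
 *	                 -> DebuggerLock() + DebuggerHaltOtherCores()
 *	                      -> debugger_collect_diagnostics()
 *	                           -> paniclog, corefile, KDP, then reboot or spin
 */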
#undef panic
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}

void
panic_with_data(uuid_t uuid, void *addr, uint32_t len, const char *str, ...)
{
	va_list panic_str_args;

	ext_paniclog_panic_with_data(uuid, addr, len);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
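
/*
 * Example call (a sketch; the mask takes the public DEBUGGER_OPTION_* flags
 * used elsewhere in this file):
 *
 *	panic_with_options(0, NULL, DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP,
 *	    "widget state invalid: 0x%x", widget_state);
 *
 * Note that internal-only bits are masked off above, so callers cannot pass
 * DEBUGGER_INTERNAL_OPTIONS_MASK bits through this interface.
 */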

boolean_t
panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
{
	if (ptr == NULL) {
		paniclog_append_noflush("NULL %s pointer\n", what);
		return false;
	}

	if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
		paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
		    what, ptr, (uint32_t)size);
		return false;
	}

	return true;
}

boolean_t
panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
{
	if (!PANIC_VALIDATE_PTR(thread)) {
		return false;
	}

	if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
		return false;
	}

	if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
		return false;
	}

	if (task) {
		*task = thread->t_tro->tro_task;
	}

	if (!panic_validate_ptr(thread->t_tro->tro_proc,
	    sizeof(struct proc *), "bsd_info")) {
		*proc = NULL;
	} else {
		*proc = thread->t_tro->tro_proc;
	}

	return true;
}
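
/*
 * Illustrative use from a paniclog writer (a sketch): at panic time nothing can
 * be trusted to be mapped, so every dereference is preceded by a nofault check.
 *
 *	struct task *task = NULL;
 *	struct proc *proc = NULL;
 *
 *	if (panic_get_thread_proc_task(thread, &task, &proc)) {
 *		// task is safe to dereference here; proc may still be NULL if
 *		// only the proc pointer failed validation
 *	}
 */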

#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */
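
/*
 * Hypothetical call (x86 only, a sketch): backtrace a different thread in the
 * paniclog. Per the NOTE above, the caller holds an explicit reference; the
 * leak is irrelevant because this call does not return.
 *
 *	thread_reference(victim);
 *	panic_with_thread_context(0, NULL, 0, victim, "thread %p is wedged", victim);
 */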

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif


	/*
	 * Optionally call sync() to reduce the chance of losing logs across a restart;
	 * skip it on recursive panic. This is unsafe in general because the duration of
	 * sync() is unbounded.
	 */
	if ((panic_options_mask & DEBUGGER_OPTION_SYNC_ON_PANIC_UNSAFE) && (CPUDEBUGGERCOUNT == 0)) {
		sync(NULL, NULL, NULL);
	}

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__) && !APPLEVIRTUALPLATFORM
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("hint #0x45");
#endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}

void
panic_spin_forever(void)
{
	for (;;) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return. But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}

static void
kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
{
	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
	} else {
		PEHaltRestart(type);
	}
	halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}

static __attribute__((unused)) void
panic_debugger_log(const char *string, ...)
{
	va_list panic_debugger_log_args;

	va_start(panic_debugger_log_args, string);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	_doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
#pragma clang diagnostic pop
	va_end(panic_debugger_log_args);

#if defined(__arm64__)
	paniclog_flush();
#endif
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup; we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try to keep the sanitizer from breaking the core dump path. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		/* if this is a force-reset panic then capture a log and reboot immediately. */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		}

		if (CPUDEBUGGERCOUNT > 1) {
			/*
			 * We are in a nested panic. Record the nested bit in panic flags and do some housekeeping.
			 */
			PE_update_panicheader_nestedpanic();
			paniclog_flush();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
#if defined(__arm64__)
				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
				}
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
					}
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if (pmap_get_cpu_data()->ppl_state != PPL_STATE_KERNEL) {
			paniclog_append_noflush("skipping local kernel core because the PPL is not in KERNEL state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that cannot be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if the coredump was successfully saved
			 * or if the option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/*
		 * Print panic string at the end of serial output
		 * to make panic more obvious when someone connects a debugger
		 */
		if (debugger_panic_str) {
			panic_debugger_log("Original panic string:\n");
			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
			panic_debugger_log("\n");
		}

		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
	panic_spin_forever();
}
1511
1512 #if SCHED_HYGIENE_DEBUG
1513 uint64_t debugger_trap_timestamps[9];
1514 # define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
1515 #else
1516 # define DEBUGGER_TRAP_TIMESTAMP(i)
1517 #endif /* SCHED_HYGIENE_DEBUG */
1518
1519 void
1520 handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
1521 {
1522 unsigned int initial_not_in_kdp = not_in_kdp;
1523 kern_return_t ret;
1524 debugger_op db_prev_op = debugger_current_op;
1525
1526 DEBUGGER_TRAP_TIMESTAMP(0);
1527
1528 DebuggerLock();
1529 ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));
1530
1531 DEBUGGER_TRAP_TIMESTAMP(1);
1532
1533 #if SCHED_HYGIENE_DEBUG
1534 if (serialmode & SERIALMODE_OUTPUT) {
1535 ml_spin_debug_reset(current_thread());
1536 }
1537 #endif /* SCHED_HYGIENE_DEBUG */
1538 if (ret != KERN_SUCCESS) {
1539 CPUDEBUGGERRET = ret;
1540 DebuggerUnlock();
1541 return;
1542 }
1543
1544 /* Update the global panic/debugger nested entry level */
1545 kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
1546 if (kernel_debugger_entry_count > 0) {
1547 console_suspend();
1548 }
1549
1550 /*
1551 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
1552 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
1553 */
1554
1555 if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If no debugger context was set up, we trapped due to a software breakpoint */
1557 debugger_current_op = DBOP_BREAKPOINT;
1558 } else {
1559 /* Not safe to return from a nested panic/debugger call */
1560 if (debugger_current_op == DBOP_PANIC ||
1561 debugger_current_op == DBOP_DEBUGGER) {
1562 debugger_safe_to_return = FALSE;
1563 }
1564
1565 debugger_current_op = CPUDEBUGGEROP;
1566
1567 /* Only overwrite the panic message if there is none already - save the data from the first call */
1568 if (debugger_panic_str == NULL) {
1569 debugger_panic_str = CPUPANICSTR;
1570 debugger_panic_args = CPUPANICARGS;
1571 debugger_panic_data = CPUPANICDATAPTR;
1572 debugger_message = CPUDEBUGGERMSG;
1573 debugger_panic_caller = CPUPANICCALLER;
1574 }
1575
1576 debugger_panic_options = CPUPANICOPTS;
1577 }
1578
1579 /*
1580 * Clear the op from the processor debugger context so we can handle
1581 * breakpoints in the debugger
1582 */
1583 CPUDEBUGGEROP = DBOP_NONE;
1584
1585 DEBUGGER_TRAP_TIMESTAMP(2);
1586
1587 kdp_callouts(KDP_EVENT_ENTER);
1588 not_in_kdp = 0;
1589
1590 DEBUGGER_TRAP_TIMESTAMP(3);
1591
1592 #if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
1593 shmem_mark_as_busy();
1594 #endif
1595
1596 if (debugger_current_op == DBOP_BREAKPOINT) {
1597 kdp_raise_exception(exception, code, subcode, state);
1598 } else if (debugger_current_op == DBOP_STACKSHOT) {
1599 CPUDEBUGGERRET = do_stackshot();
1600 #if PGO
1601 } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
1602 CPUDEBUGGERRET = do_pgo_reset_counters();
1603 #endif
1604 } else {
1605 /* note: this is the panic path... */
1606 #if defined(__arm64__) && (DEBUG || DEVELOPMENT)
1607 if (!PE_arm_debug_and_trace_initialized()) {
1608 paniclog_append_noflush("kernel panicked before debug and trace infrastructure initialized!\n"
1609 "spinning forever...\n");
1610 panic_spin_forever();
1611 }
1612 #endif
1613 debugger_collect_diagnostics(exception, code, subcode, state);
1614 }
1615
1616 #if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
1617 shmem_unmark_as_busy();
1618 #endif
1619
1620 DEBUGGER_TRAP_TIMESTAMP(4);
1621
1622 not_in_kdp = initial_not_in_kdp;
1623 kdp_callouts(KDP_EVENT_EXIT);
1624
1625 DEBUGGER_TRAP_TIMESTAMP(5);
1626
1627 if (debugger_current_op != DBOP_BREAKPOINT) {
1628 debugger_panic_str = NULL;
1629 debugger_panic_args = NULL;
1630 debugger_panic_data = NULL;
1631 debugger_panic_options = 0;
1632 debugger_message = NULL;
1633 }
1634
1635 /* Restore the previous debugger state */
1636 debugger_current_op = db_prev_op;
1637
1638 DEBUGGER_TRAP_TIMESTAMP(6);
1639
1640 DebuggerResumeOtherCores();
1641
1642 DEBUGGER_TRAP_TIMESTAMP(7);
1643
1644 DebuggerUnlock();
1645
1646 DEBUGGER_TRAP_TIMESTAMP(8);
1647
1648 return;
1649 }
1650
1651 __attribute__((noinline, not_tail_called))
1652 void
1653 log(__unused int level, char *fmt, ...)
1654 {
1655 void *caller = __builtin_return_address(0);
1656 va_list listp;
1657 va_list listp2;
1658
1659
1660 #ifdef lint
1661 level++;
1662 #endif /* lint */
1663 #ifdef MACH_BSD
1664 va_start(listp, fmt);
1665 va_copy(listp2, listp);
1666
1667 disable_preemption();
1668 _doprnt(fmt, &listp, cons_putc_locked, 0);
1669 enable_preemption();
1670
1671 va_end(listp);
1672
1673 #pragma clang diagnostic push
1674 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1675 os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
1676 #pragma clang diagnostic pop
1677 va_end(listp2);
1678 #endif
1679 }
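
/*
 * Example usage (illustrative): log() mirrors its output to both the
 * console and os_log, so call sites look like ordinary printf-style
 * logging; the level argument is accepted for BSD syslog compatibility
 * but is not interpreted here:
 *
 *	log(LOG_WARNING, "%s: ioctl failed: %d\n", __func__, error);
 */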
1680
1681 /*
1682 * Per <rdar://problem/24974766>, skip appending log messages to
1683 * the new logging infrastructure in contexts where safety is
1684 * uncertain. These contexts include:
 * - We're in the debugger
 * - We're in a panic
 * - Interrupts are disabled
 * - Preemption is disabled
1689 * In all the above cases, it is potentially unsafe to log messages.
1690 */
1691
1692 boolean_t
1693 oslog_is_safe(void)
1694 {
1695 return kernel_debugger_entry_count == 0 &&
1696 not_in_kdp == 1 &&
1697 get_preemption_level() == 0 &&
1698 ml_get_interrupts_enabled() == TRUE;
1699 }
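
/*
 * Example usage (illustrative): a caller that prefers os_log but falls
 * back to the synchronous console path when the logging infrastructure
 * cannot be trusted:
 *
 *	if (oslog_is_safe()) {
 *		os_log(OS_LOG_DEFAULT, "device reset");
 *	} else {
 *		printf("device reset\n");
 *	}
 */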
1700
1701 boolean_t
1702 debug_mode_active(void)
1703 {
	return (0 != kernel_debugger_entry_count) || (0 == not_in_kdp);
1705 }
1706
1707 void
1708 debug_putc(char c)
1709 {
1710 if ((debug_buf_size != 0) &&
1711 ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size) &&
1712 (!is_debug_ptr_in_ext_paniclog())) {
1713 *debug_buf_ptr = c;
1714 debug_buf_ptr++;
1715 }
1716 }
1717
1718 #if defined (__x86_64__)
1719 struct pasc {
1720 unsigned a: 7;
1721 unsigned b: 7;
1722 unsigned c: 7;
1723 unsigned d: 7;
1724 unsigned e: 7;
1725 unsigned f: 7;
1726 unsigned g: 7;
1727 unsigned h: 7;
1728 } __attribute__((packed));
1729
1730 typedef struct pasc pasc_t;
1731
1732 /*
 * In-place packing routines -- inefficient, but they're called at most once.
 * packA() packs each group of eight 7-bit ASCII bytes into seven bytes and
 * unpackA() reverses it. Assumes "buflen" is a multiple of 8. Used for
 * compressing paniclogs on x86.
1735 */
1736 int
1737 packA(char *inbuf, uint32_t length, uint32_t buflen)
1738 {
1739 unsigned int i, j = 0;
1740 pasc_t pack;
1741
1742 length = MIN(((length + 7) & ~7), buflen);
1743
1744 for (i = 0; i < length; i += 8) {
1745 pack.a = inbuf[i];
1746 pack.b = inbuf[i + 1];
1747 pack.c = inbuf[i + 2];
1748 pack.d = inbuf[i + 3];
1749 pack.e = inbuf[i + 4];
1750 pack.f = inbuf[i + 5];
1751 pack.g = inbuf[i + 6];
1752 pack.h = inbuf[i + 7];
1753 bcopy((char *) &pack, inbuf + j, 7);
1754 j += 7;
1755 }
1756 return j;
1757 }
1758
1759 void
1760 unpackA(char *inbuf, uint32_t length)
1761 {
1762 pasc_t packs;
1763 unsigned i = 0;
1764 length = (length * 8) / 7;
1765
1766 while (i < length) {
1767 packs = *(pasc_t *)&inbuf[i];
1768 bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
1769 inbuf[i++] = packs.a;
1770 inbuf[i++] = packs.b;
1771 inbuf[i++] = packs.c;
1772 inbuf[i++] = packs.d;
1773 inbuf[i++] = packs.e;
1774 inbuf[i++] = packs.f;
1775 inbuf[i++] = packs.g;
1776 inbuf[i++] = packs.h;
1777 }
1778 }
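
/*
 * Worked example (illustrative): eight 7-bit ASCII bytes pack into seven
 * bytes, a 12.5% saving. For a 10-byte log in a 16-byte buffer, packA()
 * rounds the length up to a multiple of 8 (10 -> 16) and emits two
 * 7-byte groups:
 *
 *	char buf[16] = "panic log!";
 *	int packed = packA(buf, 10, sizeof(buf));	// returns 14
 *	unpackA(buf, packed);				// restores all 16 bytes
 */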
1779 #endif /* defined (__x86_64__) */
1780
1781 extern char *proc_name_address(void *);
1782 extern char *proc_longname_address(void *);
1783
1784 __private_extern__ void
1785 panic_display_process_name(void)
1786 {
1787 proc_name_t proc_name = {};
1788 struct proc *cbsd_info = NULL;
1789 task_t ctask = NULL;
1790 vm_size_t size;
1791
1792 if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
1793 goto out;
1794 }
1795
1796 if (cbsd_info == NULL) {
1797 goto out;
1798 }
1799
1800 size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
1801 (vm_offset_t)&proc_name, sizeof(proc_name));
1802
1803 if (size == 0 || proc_name[0] == '\0') {
1804 size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
1805 (vm_offset_t)&proc_name,
1806 MIN(sizeof(command_t), sizeof(proc_name)));
1807 if (size > 0) {
1808 proc_name[size - 1] = '\0';
1809 }
1810 }
1811
1812 out:
1813 proc_name[sizeof(proc_name) - 1] = '\0';
1814 paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
1815 current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
1816 }
1817
1818 unsigned
1819 panic_active(void)
1820 {
1821 return debugger_current_op == DBOP_PANIC ||
1822 (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1823 }
1824
1825 void
1826 populate_model_name(char *model_string)
1827 {
1828 strlcpy(model_name, model_string, sizeof(model_name));
1829 }
1830
1831 void
1832 panic_display_model_name(void)
1833 {
1834 char tmp_model_name[sizeof(model_name)];
1835
1836 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1837 return;
1838 }
1839
1840 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1841
1842 if (tmp_model_name[0] != 0) {
1843 paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1844 }
1845 }
1846
1847 void
1848 panic_display_kernel_uuid(void)
1849 {
1850 char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1851
1852 if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1853 return;
1854 }
1855
1856 if (tmp_kernel_uuid[0] != '\0') {
1857 paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
1858 }
1859 }
1860
1861
1862 void
1863 panic_display_kernel_aslr(void)
1864 {
1865
1866 kc_format_t kc_format;
1867
1868 PE_get_primary_kc_format(&kc_format);
1869
1870 if (kc_format == KCFormatFileset) {
1871 void *kch = PE_get_kc_header(KCKindPrimary);
1872 paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
1873 paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
1874 paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
1875 paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
1876 #if defined(__arm64__)
1877 extern vm_offset_t segTEXTEXECB;
1878 paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
1879 paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
1880 #endif /* defined(__arm64__) */
1881 } else if (vm_kernel_slide) {
1882 paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
1883 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
1884 } else {
1885 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
1886 }
1887 }
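
/*
 * Worked example (illustrative values): the slides printed above let a
 * panicked address be unslid for symbolication by simple subtraction.
 * If the log reports "Kernel slide: 0x0000000016d94000", a backtrace
 * frame at 0xfffffe0024e0c4f0 corresponds to the symbol-file address
 *
 *	0xfffffe0024e0c4f0 - 0x0000000016d94000 = 0xfffffe000e0784f0
 */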
1888
1889 void
1890 panic_display_hibb(void)
1891 {
1892 #if defined(__i386__) || defined (__x86_64__)
1893 paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
1894 #endif
1895 }
1896
1897 #if CONFIG_ECC_LOGGING
1898 __private_extern__ void
1899 panic_display_ecc_errors(void)
1900 {
1901 uint32_t count = ecc_log_get_correction_count();
1902
1903 if (count > 0) {
1904 paniclog_append_noflush("ECC Corrections:%u\n", count);
1905 }
1906 }
1907 #endif /* CONFIG_ECC_LOGGING */
1908
1909 #if CONFIG_FREEZE
1910 extern bool freezer_incore_cseg_acct;
1911 extern int32_t c_segment_pages_compressed_incore;
1912 #endif
1913
1914 extern uint32_t c_segment_pages_compressed;
1915 extern uint32_t c_segment_count;
1916 extern uint32_t c_segments_limit;
1917 extern uint32_t c_segment_pages_compressed_limit;
1918 extern uint32_t c_segment_pages_compressed_nearing_limit;
1919 extern uint32_t c_segments_nearing_limit;
1920 extern int vm_num_swap_files;
1921
1922 void
1923 panic_display_compressor_stats(void)
1924 {
1925 int isswaplow = vm_swap_low_on_space();
1926 #if CONFIG_FREEZE
1927 uint32_t incore_seg_count;
1928 uint32_t incore_compressed_pages;
1929 if (freezer_incore_cseg_acct) {
1930 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
1931 incore_compressed_pages = c_segment_pages_compressed_incore;
1932 } else {
1933 incore_seg_count = c_segment_count;
1934 incore_compressed_pages = c_segment_pages_compressed;
1935 }
1936
1937 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
1938 (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
1939 (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
1940 (incore_seg_count * 100) / c_segments_limit,
1941 (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
1942 vm_num_swap_files,
1943 isswaplow ? "LOW":"OK");
1944 #else /* CONFIG_FREEZE */
1945 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
1946 (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
1947 (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
1948 (c_segment_count * 100) / c_segments_limit,
1949 (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
1950 vm_num_swap_files,
1951 isswaplow ? "LOW":"OK");
1952 #endif /* CONFIG_FREEZE */
1953 }
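
/*
 * Example output (illustrative values):
 *
 *	Compressor Info: 23% of compressed pages limit (OK) and 11% of
 *	segments limit (OK) with 2 swapfiles and OK swap space
 */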
1954
1955 #if !CONFIG_TELEMETRY
1956 int
1957 telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
1958 {
1959 return KERN_NOT_SUPPORTED;
1960 }
1961 #endif
1962
1963 #include <machine/machine_cpu.h>
1964
1965 TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
1966
1967 boolean_t
1968 kern_feature_override(uint32_t fmask)
1969 {
1970 return (kern_feature_overrides & fmask) == fmask;
1971 }
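
/*
 * Example (illustrative): the override matches only if every bit of the
 * queried mask was set via the "validation_disables" boot-arg. Booting
 * with validation_disables=0x5 (hypothetical mask values):
 *
 *	kern_feature_override(0x4);	// TRUE:  (0x5 & 0x4) == 0x4
 *	kern_feature_override(0x6);	// FALSE: (0x5 & 0x6) == 0x4, != 0x6
 */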
1972
1973 boolean_t
1974 on_device_corefile_enabled(void)
1975 {
1976 assert(startup_phase >= STARTUP_SUB_TUNABLES);
1977 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
1978 if (debug_boot_arg == 0) {
1979 return FALSE;
1980 }
1981 if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
1982 return FALSE;
1983 }
1984 #if !XNU_TARGET_OS_OSX
1985 /*
1986 * outside of macOS, if there's a debug boot-arg set and local
1987 * cores aren't explicitly disabled, we always write a corefile.
1988 */
1989 return TRUE;
1990 #else /* !XNU_TARGET_OS_OSX */
1991 /*
1992 * on macOS, if corefiles on panic are requested and local cores
1993 * aren't disabled we write a local core.
1994 */
1995 if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
1996 return TRUE;
1997 }
1998 #endif /* !XNU_TARGET_OS_OSX */
1999 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
2000 return FALSE;
2001 }
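
/*
 * Example (illustrative): on macOS, booting with a debug boot-arg that
 * includes DB_KERN_DUMP_ON_PANIC and does not include DB_DISABLE_LOCAL_CORE
 * makes this return TRUE; callers simply gate on the result:
 *
 *	if (on_device_corefile_enabled()) {
 *		// a local corefile will be attempted at panic time
 *	}
 */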
2002
2003 boolean_t
2004 panic_stackshot_to_disk_enabled(void)
2005 {
2006 assert(startup_phase >= STARTUP_SUB_TUNABLES);
2007 #if defined(__x86_64__)
2008 if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
2009 /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
2010 if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
2011 return FALSE;
2012 }
2013
2014 return TRUE;
2015 }
2016 #endif
2017 return FALSE;
2018 }
2019
2020 const char *
2021 sysctl_debug_get_preoslog(size_t *size)
2022 {
2023 int result = 0;
2024 void *preoslog_pa = NULL;
2025 int preoslog_size = 0;
2026
2027 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2028 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2029 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2030 *size = 0;
2031 return NULL;
2032 }
2033
2034 /*
2035 * Beware:
2036 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
2037 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
2038 */
2039 *size = preoslog_size;
2040 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
2041 }
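
/*
 * Example usage (illustrative): a consumer of this interface, e.g. a
 * sysctl handler, might copy the region out as follows:
 *
 *	size_t preoslog_len = 0;
 *	const char *preoslog = sysctl_debug_get_preoslog(&preoslog_len);
 *	if (preoslog != NULL && preoslog_len != 0) {
 *		// copy preoslog_len bytes out to the requesting process
 *	}
 */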
2042
2043 void
2044 sysctl_debug_free_preoslog(void)
2045 {
2046 #if RELEASE
2047 int result = 0;
2048 void *preoslog_pa = NULL;
2049 int preoslog_size = 0;
2050
2051 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2052 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2053 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2054 return;
2055 }
2056
2057 IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
2058 #else
2059 /* On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
2060 #endif // RELEASE
2061 }
2062
2063
2064 #if (DEVELOPMENT || DEBUG)
2065
2066 void
2067 platform_stall_panic_or_spin(uint32_t req)
2068 {
2069 if (xnu_platform_stall_value & req) {
2070 if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
2071 panic("Platform stall: User requested panic");
2072 } else {
2073 paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
2074 panic_spin_forever();
2075 }
2076 }
2077 }
2078 #endif
2079
2080
2081 #define AWL_HV_ENTRY_FLAG (0x1)
2082
2083 static inline void
2084 awl_set_scratch_reg_hv_bit(void)
2085 {
2086 #if defined(__arm64__)
2087 #define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
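	/*
	 * "S3_5_c15_c2_6" is the generic op0_op1_CRn_CRm_op2 spelling
	 * (op0=3, op1=5, CRn=15, CRm=2, op2=6) of this implementation-defined
	 * watchdog diagnostic register; the read-modify-write below sets only
	 * the HV-entry flag and preserves all other bits.
	 */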
2088 uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
2089 awl_diag0 |= AWL_HV_ENTRY_FLAG;
2090 __builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
2091 #endif // defined(__arm64__)
2092 }
2093
2094 void
2095 awl_mark_hv_entry(void)
2096 {
2097 if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
2098 return;
2099 }
2100 *PERCPU_GET(hv_entry_detected) = true;
2101
2102 awl_set_scratch_reg_hv_bit();
2103 }
2104
2105 /*
2106 * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2107 * so restore it manually.
2108 */
2109 static bool
2110 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2111 {
2112 if (event == CPU_BOOTED) {
2113 if (*PERCPU_GET(hv_entry_detected)) {
2114 awl_set_scratch_reg_hv_bit();
2115 }
2116 }
2117
2118 return true;
2119 }
2120
2121 /*
 * Identifies and sets a flag if AWL Scratch0/1 exists in the system, and
 * subscribes for a callback to restore the register after hibernation.
2124 */
2125 __startup_func
2126 static void
2127 set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
2128 {
2129 DTEntry base = NULL;
2130
2131 if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
2132 return;
2133 }
2134 const uint8_t *data = NULL;
2135 unsigned int data_size = sizeof(uint8_t);
2136
2137 if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
2138 for (unsigned int i = 0; i < data_size; i++) {
2139 if (data[i] != 0) {
2140 awl_scratch_reg_supported = true;
2141 cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
2142 break;
2143 }
2144 }
2145 }
2146 }
2147 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2148