xref: /xnu-12377.41.6/osfmk/kern/debug.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59 #include <kdp/kdp.h>
60 #include <kdp/kdp_core.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_callout.h>
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/percpu.h>
66 #include <kern/spl.h>
67 #include <kern/thread.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/socd_client.h>
71 #include <kern/misc_protos.h>
72 #include <kern/clock.h>
73 #include <kern/telemetry.h>
74 #include <kern/trap_telemetry.h>
75 #include <kern/ecc.h>
76 #include <kern/kern_stackshot.h>
77 #include <kern/kern_cdata.h>
78 #include <kern/zalloc_internal.h>
79 #include <kern/iotrace.h>
80 #include <pexpert/device_tree.h>
81 #include <vm/vm_kern_xnu.h>
82 #include <vm/vm_map.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_compressor_xnu.h>
85 #include <stdarg.h>
86 #include <stdatomic.h>
87 #include <sys/pgo.h>
88 #include <console/serial_protos.h>
89 #include <IOKit/IOBSD.h>
90 #include <libkern/crc.h>
91 
92 #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
93 #include <kdp/kdp_udp.h>
94 #endif
95 #include <kern/processor.h>
96 
97 #if defined(__i386__) || defined(__x86_64__)
98 #include <IOKit/IOBSD.h>
99 
100 #include <i386/cpu_threads.h>
101 #include <i386/pmCPU.h>
102 #include <i386/lbr.h>
103 #endif
104 
105 #include <IOKit/IOPlatformExpert.h>
106 #include <machine/machine_cpu.h>
107 #include <machine/pal_routines.h>
108 
109 #include <sys/kdebug.h>
110 #include <libkern/OSKextLibPrivate.h>
111 #include <libkern/OSAtomic.h>
112 #include <libkern/kernel_mach_header.h>
113 #include <libkern/section_keywords.h>
114 #include <uuid/uuid.h>
115 #include <mach_debug/zone_info.h>
116 #include <mach/resource_monitors.h>
117 #include <machine/machine_routines.h>
118 #include <sys/proc_require.h>
119 #include <vm/vm_compressor_internal.h>
120 
121 #include <os/log_private.h>
122 
123 #include <kern/ext_paniclog.h>
124 
125 #if defined(__arm64__)
126 #include <pexpert/pexpert.h> /* For gPanicBase */
127 #include <arm/caches_internal.h>
128 #include <arm/misc_protos.h>
129 extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
130 #endif
131 
132 #include <san/kcov.h>
133 
134 #if CONFIG_XNUPOST
135 #include <tests/xnupost.h>
136 #endif
137 
138 extern int vsnprintf(char *, size_t, const char *, va_list);
139 
140 #if CONFIG_CSR
141 #include <sys/csr.h>
142 #endif
143 
144 #if CONFIG_EXCLAVES
145 #include <xnuproxy/panic.h>
146 #include "exclaves_panic.h"
147 #endif
148 
149 #if CONFIG_SPTM
150 #include <arm64/sptm/sptm.h>
151 #include <arm64/sptm/pmap/pmap_data.h>
152 #endif /* CONFIG_SPTM */
153 
154 extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
155 extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );
156 extern unsigned int debug_boot_arg;
157 
158 unsigned int    halt_in_debugger = 0;
159 unsigned int    current_debugger = 0;
160 unsigned int    active_debugger = 0;
161 SECURITY_READ_ONLY_LATE(unsigned int)    panicDebugging = FALSE;
162 unsigned int    kernel_debugger_entry_count = 0;
163 
164 #if DEVELOPMENT || DEBUG
165 unsigned int    panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
166 unsigned int    panic_test_action_count = 1;
167 unsigned int    panic_test_case = PANIC_TEST_CASE_DISABLED;
168 #endif
169 
170 #if defined(__arm64__)
171 struct additional_panic_data_buffer *panic_data_buffers = NULL;
172 #endif
173 
174 #if defined(__arm64__)
175 /*
176  * Magic number; this should be identical to the armv7 encoding for trap.
177  */
178 #define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
179 #elif defined (__x86_64__)
180 #define TRAP_DEBUGGER __asm__("int3")
181 #else
182 #error No TRAP_DEBUGGER for this architecture
183 #endif
184 
185 #if defined(__i386__) || defined(__x86_64__)
186 #define panic_stop()    pmCPUHalt(PM_HALT_PANIC)
187 #else
188 #define panic_stop()    panic_spin_forever()
189 #endif
190 
191 /*
192  * More than enough for any typical format string passed to panic();
193  * anything longer will be truncated but that's better than nothing.
194  */
195 #define EARLY_PANIC_BUFLEN 256
196 
197 struct debugger_state {
198 	uint64_t        db_panic_options;
199 	debugger_op     db_current_op;
200 	boolean_t       db_proceed_on_sync_failure;
201 	const char     *db_message;
202 	const char     *db_panic_str;
203 	va_list        *db_panic_args;
204 	void           *db_panic_data_ptr;
205 	unsigned long   db_panic_caller;
206 	const char     *db_panic_initiator;
207 	/* incremented whenever we panic or call Debugger (current CPU panic level) */
208 	uint32_t        db_entry_count;
209 	kern_return_t   db_op_return;
210 };
211 static struct debugger_state PERCPU_DATA(debugger_state);
212 struct kernel_panic_reason PERCPU_DATA(panic_reason);
213 
214 /* __pure2 is correct if this function is called with preemption disabled */
215 static inline __pure2 struct debugger_state *
current_debugger_state(void)216 current_debugger_state(void)
217 {
218 	return PERCPU_GET(debugger_state);
219 }
220 
221 #define CPUDEBUGGEROP    current_debugger_state()->db_current_op
222 #define CPUDEBUGGERMSG   current_debugger_state()->db_message
223 #define CPUPANICSTR      current_debugger_state()->db_panic_str
224 #define CPUPANICARGS     current_debugger_state()->db_panic_args
225 #define CPUPANICOPTS     current_debugger_state()->db_panic_options
226 #define CPUPANICDATAPTR  current_debugger_state()->db_panic_data_ptr
227 #define CPUDEBUGGERSYNC  current_debugger_state()->db_proceed_on_sync_failure
228 #define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
229 #define CPUDEBUGGERRET   current_debugger_state()->db_op_return
230 #define CPUPANICCALLER   current_debugger_state()->db_panic_caller
231 #define CPUPANICINITIATOR current_debugger_state()->db_panic_initiator
232 
233 
234 /*
235  *  Usage:
236  *  panic_test_action_count is in the context of other flags, e.g. for IO errors it is "succeed this many times then fail" and for nesting it is "panic this many times then succeed"
237  *  panic_test_failure_mode is a bit map of things to do
238  *  panic_test_case is what sort of test we are injecting
239  *
240  *  For more details see definitions in debugger.h
241  *
242  *  Note that not all combinations are sensible, but some actions can be combined, e.g.
243  *  - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
244  *  - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
245  *  - IO_ERR with action 15 will cause 14 successful IOs, then fail on the next one
246  */
247 #if DEVELOPMENT || DEBUG
248 #define INJECT_NESTED_PANIC_IF_REQUESTED(requested)                                                                                                                                                                                                         \
249 MACRO_BEGIN                                                                                                                                                                                                                                                                                                                     \
250 	if ((panic_test_case & requested) && panic_test_action_count) {                                                                                                                                                                                                                                                                                                \
251 	    panic_test_action_count--; \
252 	        volatile int *panic_test_badpointer = (int *)4;                                                                                                                                                                                                                         \
253 	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while(panic_test_badpointer); }                                                                       \
254 	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count+1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; }                                                                       \
255 	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count+1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); }                      \
256 	}                                                                                                                                                                                                                                                                                                                               \
257 MACRO_END
258 
259 #endif /* DEVELOPMENT || DEBUG */
260 
261 debugger_op debugger_current_op = DBOP_NONE;
262 const char *debugger_panic_str = NULL;
263 va_list *debugger_panic_args = NULL;
264 void *debugger_panic_data = NULL;
265 uint64_t debugger_panic_options = 0;
266 const char *debugger_message = NULL;
267 unsigned long debugger_panic_caller = 0;
268 const char *debugger_panic_initiator = "";
269 
270 void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
271     unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
272     unsigned long panic_caller, const char *panic_initiator) __dead2 __printflike(1, 0);
273 static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
274 void panic_spin_forever(void) __dead2;
275 void panic_stackshot_release_lock(void);
276 extern void PE_panic_hook(const char*);
277 extern int sync_internal(void);
278 
279 #define NESTEDDEBUGGERENTRYMAX 5
280 static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
281     NESTEDDEBUGGERENTRYMAX);
282 
283 SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
284 static bool PERCPU_DATA(hv_entry_detected); // = false
285 static void awl_set_scratch_reg_hv_bit(void);
286 void awl_mark_hv_entry(void);
287 static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
288 
289 #if !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING
290 static boolean_t device_corefile_valid_on_ephemeral(void);
291 #endif /* !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING */
292 
293 #if defined(__arm64__)
294 #define DEBUG_BUF_SIZE (4096)
295 
296 /* debug_buf is directly linked with iBoot panic region for arm targets */
297 char *debug_buf_base = NULL;
298 char *debug_buf_ptr = NULL;
299 unsigned int debug_buf_size = 0;
300 
301 SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
302 #else /* defined(__arm64__) */
303 #define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
304 /* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
305 static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");
306 
307 char debug_buf[DEBUG_BUF_SIZE];
308 struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
309 char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
310 char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));
311 
312 /*
313  * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end
 * of the log because we only support writing (3*PAGESIZE) bytes.
316  */
317 unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));
318 
319 boolean_t extended_debug_log_enabled = FALSE;
320 #endif /* defined(__arm64__) */
321 
322 #if defined(XNU_TARGET_OS_OSX)
323 #define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
324 #else
325 #define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
326 #endif
327 
328 static inline boolean_t debug_fatal_panic_begin(void);
329 
330 /* Debugger state */
331 atomic_int     debugger_cpu = DEBUGGER_NO_CPU;
332 boolean_t      debugger_allcpus_halted = FALSE;
333 boolean_t      debugger_safe_to_return = TRUE;
334 unsigned int   debugger_context = 0;
335 
336 static char model_name[64];
337 unsigned char *kernel_uuid;
338 
339 boolean_t kernelcache_uuid_valid = FALSE;
340 uuid_t kernelcache_uuid;
341 uuid_string_t kernelcache_uuid_string;
342 
343 boolean_t pageablekc_uuid_valid = FALSE;
344 uuid_t pageablekc_uuid;
345 uuid_string_t pageablekc_uuid_string;
346 
347 boolean_t auxkc_uuid_valid = FALSE;
348 uuid_t auxkc_uuid;
349 uuid_string_t auxkc_uuid_string;
350 
351 
352 /*
353  * By default we treat Debugger() the same as calls to panic(), unless
354  * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
355  * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
356  *
357  * Return from Debugger() is currently only implemented on x86
358  */
359 static boolean_t debugger_is_panic = TRUE;
360 
361 TUNABLE(unsigned int, debug_boot_arg, "debug", 0);
362 
363 TUNABLE_DEV_WRITEABLE(unsigned int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);
364 
365 char kernel_uuid_string[37]; /* uuid_string_t */
366 char kernelcache_uuid_string[37]; /* uuid_string_t */
367 char   panic_disk_error_description[512];
368 size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);
369 
370 extern unsigned int write_trace_on_panic;
371 int kext_assertions_enable =
372 #if DEBUG || DEVELOPMENT
373     TRUE;
374 #else
375     FALSE;
376 #endif
377 
378 /*
379  * Maintain the physically-contiguous carveouts for the carveout bootargs.
380  */
381 TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);
382 
383 TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
384 SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
385 SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
386 SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
387 
388 
389 #if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
390 /**
391  * Extra debug state which is set when panic lockdown is initiated.
392  * This information is intended to help when debugging issues with the panic
393  * path.
394  */
395 struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
396 #endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
397 
398 /*
399  * Returns whether kernel debugging is expected to be restricted
400  * on the device currently based on CSR or other platform restrictions.
401  */
402 boolean_t
kernel_debugging_restricted(void)403 kernel_debugging_restricted(void)
404 {
405 #if XNU_TARGET_OS_OSX
406 #if CONFIG_CSR
407 	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
408 		return TRUE;
409 	}
410 #endif /* CONFIG_CSR */
411 	return FALSE;
412 #else /* XNU_TARGET_OS_OSX */
413 	return FALSE;
414 #endif /* XNU_TARGET_OS_OSX */
415 }
416 
/*
 * Early startup initialization for the panic/debugger subsystem:
 * records the running kernel's UUID (for panic reports) and applies
 * the policy encoded in the "debug" boot-arg.
 */
__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	/* Stash the kernel's UUID so panic logs can identify this build. */
	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 * (skipped entirely when kernel debugging is restricted, e.g. by CSR).
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		/* On arm64, panic debugging must be requested explicitly via DB_NMI. */
		if (debug_boot_arg & DB_NMI) {
			panicDebugging  = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

#if defined(__arm64__)
	char kdpname[80];

	/* Record whether a kdp_match_name boot-arg was supplied. */
	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
470 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
471 
472 #if defined (__x86_64__)
/*
 * Switch the panic log over to a larger, heap-allocated buffer that has
 * room for a panic-time stackshot at the end (x86 co-processor systems).
 * Rewrites the panic_info/debug_buf_* globals to point at the new buffer,
 * then frees the provisional stackshot buffer allocated in debug_log_init().
 */
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	/* Data area starts just past the header within the new buffer. */
	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
503 #endif /* defined (__x86_64__) */
504 
/*
 * Wire up the panic/debug log buffer.
 * arm64: the buffer lives inside the iBoot-provided panic region
 *        (gPanicBase), offset past the embedded panic header.
 * other: the static debug_buf is used, and a provisional buffer for a
 *        panic-time stackshot is allocated up front.
 */
void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);

#if CONFIG_EXT_PANICLOG
	ext_paniclog_init();
#endif
#else
	kern_return_t kr = KERN_SUCCESS;
	/* Clear the header + data area before first use. */
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA_SHARED | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}
543 
544 void
phys_carveout_init(void)545 phys_carveout_init(void)
546 {
547 	if (!PE_i_can_has_debugger(NULL)) {
548 		return;
549 	}
550 
551 #if __arm__ || __arm64__
552 #if DEVELOPMENT || DEBUG
553 #endif /* DEVELOPMENT || DEBUG  */
554 #endif /* __arm__ || __arm64__ */
555 
556 	struct carveout {
557 		const char *name;
558 		vm_offset_t *va;
559 		uint32_t requested_size;
560 		uintptr_t *pa;
561 		size_t *allocated_size;
562 		uint64_t present;
563 	} carveouts[] = {
564 		{
565 			"phys_carveout",
566 			&phys_carveout,
567 			phys_carveout_mb,
568 			&phys_carveout_pa,
569 			&phys_carveout_size,
570 			phys_carveout_mb != 0,
571 
572 			/* Before Donan, XNU allocates the panic-trace carveout. */
573 		}
574 	};
575 
576 	for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
577 		if (carveouts[i].present) {
578 			size_t temp_carveout_size = 0;
579 			if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
580 				panic("%s_mb size overflowed (%uMB)",
581 				    carveouts[i].name, carveouts[i].requested_size);
582 				return;
583 			}
584 
585 			kmem_alloc_contig(kernel_map, carveouts[i].va,
586 			    temp_carveout_size, PAGE_MASK, 0, 0,
587 			    KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA_SHARED |
588 			    KMA_NOSOFTLIMIT,
589 			    VM_KERN_MEMORY_DIAG);
590 
591 			*carveouts[i].pa = kvtophys(*carveouts[i].va);
592 			*carveouts[i].allocated_size = temp_carveout_size;
593 		}
594 	}
595 }
596 
597 boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)598 debug_is_in_phys_carveout(vm_map_offset_t va)
599 {
600 	return phys_carveout_size && va >= phys_carveout &&
601 	       va < (phys_carveout + phys_carveout_size);
602 }
603 
/*
 * Whether the phys_carveout region may be included in on-device
 * coredumps, as controlled by the phys_carveout_core boot-arg
 * (default 1, see the TUNABLE above).
 */
boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}
609 
610 static boolean_t
DebuggerLock(void)611 DebuggerLock(void)
612 {
613 	int my_cpu = cpu_number();
614 	int debugger_exp_cpu = DEBUGGER_NO_CPU;
615 	assert(ml_get_interrupts_enabled() == FALSE);
616 
617 	if (atomic_load(&debugger_cpu) == my_cpu) {
618 		return true;
619 	}
620 
621 	if (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
622 		return false;
623 	}
624 
625 	return true;
626 }
627 
/*
 * Release debugger_cpu ownership taken by DebuggerLock().  May only be
 * called on the CPU that currently owns the lock (asserted below).
 */
static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}
645 
/*
 * Bring all other CPUs to a halt so the debugger/panic flow can run
 * single-threaded.  arm64 uses a debugger cross-call; other platforms
 * use mp_kdp_enter().  proceed_on_failure controls whether the caller
 * tolerates cores that cannot be stopped; is_stackshot marks entry on
 * behalf of a stackshot.
 */
static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
	mp_kdp_enter(proceed_on_failure, is_stackshot);
	return KERN_SUCCESS;
#endif
}
657 
/*
 * Resume the CPUs halted by DebuggerHaltOtherCores(); the arch-specific
 * counterpart of that function.
 */
static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}
667 
/*
 * Record a requested debugger operation and its panic context into the
 * current CPU's per-cpu debugger_state (read later via the CPU* macros
 * by the debugger trap path).  Panic string/args are only recorded for
 * the outermost panic on this CPU; a nested panic keeps the original data.
 */
__printflike(3, 0)
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char *db_panic_initiator)
{
	CPUDEBUGGEROP = db_op;

	/*
	 * Note:
	 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
	 *
	 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
	 */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
		CPUPANICINITIATOR = db_panic_initiator;

#if CONFIG_EXCLAVES
		/* An exclaves-provided panic string, when present, overrides ours. */
		char *panic_str;
		if (exclaves_panic_get_string(&panic_str) == KERN_SUCCESS) {
			CPUPANICSTR = panic_str;
		}
#endif
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	// follow up in rdar://88497308 (nested panics should not clobber panic flags)
	CPUPANICOPTS = db_panic_options;

	return;
}
709 
710 /*
711  * Save the requested debugger state/action into the current processor's
712  * percu state and trap to the debugger.
713  */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char* db_panic_initiator)
{
	kern_return_t ret;

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
	if (!PE_arm_debug_and_trace_initialized()) {
		/*
		 * In practice this can only happen if we panicked very early,
		 * when only the boot CPU is online and before it has finished
		 * initializing the debug and trace infrastructure. We're going
		 * to hang soon, so let's at least make sure the message passed
		 * to panic() is actually logged.
		 */
		char buf[EARLY_PANIC_BUFLEN];
		vsnprintf(buf, EARLY_PANIC_BUFLEN, db_panic_str, *db_panic_args);
		paniclog_append_noflush("%s\n", buf);
	}
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	/* Stash the request in this CPU's percpu state for the trap handler. */
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller, db_panic_initiator);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 *   DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	/* The trap handler left its result in percpu state; pick it up. */
	ret = CPUDEBUGGERRET;

	/* Clear the percpu debugger state now that the operation completed. */
	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0, NULL);

	return ret;
}
756 
/*
 * Out-of-line assertion failure handler: funnels failed assertions into
 * panic_plain() with file, line and expression text.  Marked noinline,
 * presumably so the call site remains a distinct frame in backtraces.
 */
void __attribute__((noinline))
Assert(const char*file, int line, const char *expression)
{
	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
762 
763 void
panic_assert_format(char * buf,size_t len,struct mach_assert_hdr * hdr,long a,long b)764 panic_assert_format(char *buf, size_t len, struct mach_assert_hdr *hdr, long a, long b)
765 {
766 	struct mach_assert_default *adef;
767 	struct mach_assert_3x      *a3x;
768 
769 	static_assert(MACH_ASSERT_TRAP_CODE == XNU_HARD_TRAP_ASSERT_FAILURE);
770 
771 	switch (hdr->type) {
772 	case MACH_ASSERT_DEFAULT:
773 		adef = __container_of(hdr, struct mach_assert_default, hdr);
774 		snprintf(buf, len, "%s:%d Assertion failed: %s",
775 		    hdr->filename, hdr->lineno, adef->expr);
776 		break;
777 
778 	case MACH_ASSERT_3P:
779 		a3x = __container_of(hdr, struct mach_assert_3x, hdr);
780 		snprintf(buf, len, "%s:%d Assertion failed: "
781 		    "%s %s %s (%p %s %p)",
782 		    hdr->filename, hdr->lineno, a3x->a, a3x->op, a3x->b,
783 		    (void *)a, a3x->op, (void *)b);
784 		break;
785 
786 	case MACH_ASSERT_3S:
787 		a3x = __container_of(hdr, struct mach_assert_3x, hdr);
788 		snprintf(buf, len, "%s:%d Assertion failed: "
789 		    "%s %s %s (0x%lx %s 0x%lx, %ld %s %ld)",
790 		    hdr->filename, hdr->lineno, a3x->a, a3x->op, a3x->b,
791 		    a, a3x->op, b, a, a3x->op, b);
792 		break;
793 
794 	case MACH_ASSERT_3U:
795 		a3x = __container_of(hdr, struct mach_assert_3x, hdr);
796 		snprintf(buf, len, "%s:%d Assertion failed: "
797 		    "%s %s %s (0x%lx %s 0x%lx, %lu %s %lu)",
798 		    hdr->filename, hdr->lineno, a3x->a, a3x->op, a3x->b,
799 		    a, a3x->op, b, a, a3x->op, b);
800 		break;
801 	}
802 }
803 
804 boolean_t
debug_is_current_cpu_in_panic_state(void)805 debug_is_current_cpu_in_panic_state(void)
806 {
807 	return current_debugger_state()->db_entry_count > 0;
808 }
809 
810 /*
811  * check if we are in a nested panic, report findings, take evasive action where necessary
812  *
813  * see also PE_update_panicheader_nestedpanic
814  */
815 static void
check_and_handle_nested_panic(uint64_t panic_options_mask,unsigned long panic_caller,const char * db_panic_str,va_list * db_panic_args)816 check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
817 {
818 	if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
819 		// Note: this is the first indication in the panic log or serial that we are off the rails...
820 		//
821 		// if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
822 		// if we panic *after* the log is finalized then we will only see it in the serial log
823 		//
824 		paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
825 		print_curr_backtrace();
826 		paniclog_flush();
827 
828 		// print the *new* panic string to the console, we might not get it by other means...
829 		// TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
830 		// rdar://87846117 (NestedPanic: output panic string to paniclog)
831 		if (db_panic_str) {
832 			printf("Nested panic string:\n");
833 #pragma clang diagnostic push
834 #pragma clang diagnostic ignored "-Wformat-nonliteral"
835 #pragma clang diagnostic ignored "-Wformat"
836 			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
837 #pragma clang diagnostic pop
838 			printf("\n<end nested panic string>\n");
839 		}
840 	}
841 
842 	// Stage 1 bailout
843 	//
844 	// Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog.  If this fails with another nested
845 	// panic then we will land in Stage 2 below...
846 	//
847 	if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
848 		uint32_t panic_details = 0;
849 
850 		// if this is a force-reset panic then capture a log and reboot immediately.
851 		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
852 			panic_details |= kPanicDetailsForcePowerOff;
853 		}
854 
855 		// normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
856 		// there.  To be safe send another notification, the function called below will only send kPEPanicBegin if it has not yet been sent.
857 		//
858 		PEHaltRestartInternal(kPEPanicBegin, panic_details);
859 
860 		paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
861 		PE_update_panicheader_nestedpanic();
862 		paniclog_flush();
863 
864 		if (!panicDebugging) {
865 			// note that this will also send kPEPanicEnd
866 			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
867 		}
868 
869 		// prints to console
870 		paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
871 		panic_spin_forever();
872 	}
873 
874 	// Stage 2 bailout
875 	//
876 	// Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path.  Try to issue
877 	// a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
878 	//
879 	if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
880 		if (!panicDebugging) {
881 			// note that:
882 			// - this code path should be audited for prints, as that is a common cause of nested panics
883 			// - this code path should take the fastest route to the actual reset, and not call any un-necessary code
884 			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
885 		}
886 
887 		// prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
888 		paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
889 		panic_spin_forever();
890 	}
891 
892 	// Stage 3 bailout
893 	//
894 	// We are done here, we were unable to reset the platform without another nested panic.  Spin until the watchdog kicks in.
895 	//
896 	if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
897 		kdp_machine_reboot_type(kPEHangCPU, 0);
898 	}
899 }
900 
901 void
Debugger(const char * message)902 Debugger(const char *message)
903 {
904 	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
905 }
906 
907 /*
908  *  Enter the Debugger
909  *
910  *  This is similar to, but not the same as a panic
911  *
912  *  Key differences:
913  *  - we get here from a debugger entry action (e.g. NMI)
914  *  - the system is resumable on x86 (in theory, however it is not clear if this is tested)
915  *  - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
916  *
917  */
918 void
DebuggerWithContext(unsigned int reason,void * ctx,const char * message,uint64_t debugger_options_mask,unsigned long debugger_caller)919 DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
920     uint64_t debugger_options_mask, unsigned long debugger_caller)
921 {
922 	spl_t previous_interrupts_state;
923 	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
924 
925 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
926 	read_lbr();
927 #endif
928 	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
929 	disable_preemption();
930 
931 	/* track depth of debugger/panic entry */
932 	CPUDEBUGGERCOUNT++;
933 
934 	/* emit a tracepoint as early as possible in case of hang */
935 	SOCD_TRACE_XNU(PANIC,
936 	    ((CPUDEBUGGERCOUNT <= 2) ? SOCD_TRACE_MODE_STICKY_TRACEPOINT : SOCD_TRACE_MODE_NONE),
937 	    PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)),
938 	    VALUE(debugger_options_mask),
939 	    ADDR(message),
940 	    ADDR(debugger_caller));
941 
942 	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
943 	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);
944 
945 	/* Handle any necessary platform specific actions before we proceed */
946 	PEInitiatePanic();
947 
948 #if DEVELOPMENT || DEBUG
949 	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
950 #endif
951 
952 	PE_panic_hook(message);
953 
954 	doprnt_hide_pointers = FALSE;
955 
956 	if (ctx != NULL) {
957 		DebuggerSaveState(DBOP_DEBUGGER, message,
958 		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0, "");
959 		handle_debugger_trap(reason, 0, 0, ctx);
960 		DebuggerSaveState(DBOP_NONE, NULL, NULL,
961 		    NULL, 0, NULL, FALSE, 0, "");
962 	} else {
963 		DebuggerTrapWithState(DBOP_DEBUGGER, message,
964 		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0, NULL);
965 	}
966 
967 	/* resume from the debugger */
968 
969 	CPUDEBUGGERCOUNT--;
970 	doprnt_hide_pointers = old_doprnt_hide_pointers;
971 	enable_preemption();
972 	ml_set_interrupts_enabled(previous_interrupts_state);
973 }
974 
/*
 * Singly-linked list of registered KDP event callouts, pushed onto
 * lock-free by kdp_register_callout() and walked by kdp_callouts().
 */
static struct kdp_callout {
	struct kdp_callout * callout_next;      /* next entry in the list */
	kdp_callout_fn_t callout_fn;            /* function to invoke on a KDP event */
	boolean_t callout_in_progress;          /* guards against re-entering the same callout */
	void * callout_arg;                     /* opaque argument passed back to callout_fn */
} * kdp_callout_list = NULL;
981 
982 /*
983  * Called from kernel context to register a kdp event callout.
984  */
985 void
kdp_register_callout(kdp_callout_fn_t fn,void * arg)986 kdp_register_callout(kdp_callout_fn_t fn, void * arg)
987 {
988 	struct kdp_callout * kcp;
989 	struct kdp_callout * list_head;
990 
991 	kcp = zalloc_permanent_type(struct kdp_callout);
992 
993 	kcp->callout_fn = fn;
994 	kcp->callout_arg = arg;
995 	kcp->callout_in_progress = FALSE;
996 
997 	/* Lock-less list insertion using compare and exchange. */
998 	do {
999 		list_head = kdp_callout_list;
1000 		kcp->callout_next = list_head;
1001 	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
1002 }
1003 
1004 static void
kdp_callouts(kdp_event_t event)1005 kdp_callouts(kdp_event_t event)
1006 {
1007 	struct kdp_callout      *kcp = kdp_callout_list;
1008 
1009 	while (kcp) {
1010 		if (!kcp->callout_in_progress) {
1011 			kcp->callout_in_progress = TRUE;
1012 			kcp->callout_fn(kcp->callout_arg, event);
1013 			kcp->callout_in_progress = FALSE;
1014 		}
1015 		kcp = kcp->callout_next;
1016 	}
1017 }
1018 
#if defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 *
 * producer_name: non-empty identifier for the data's producer
 * buf:           buffer to capture at panic time
 * len:           buffer length; must be in (0, ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN]
 *
 * Panics on invalid arguments or if a buffer is already registered
 * (only a single buffer is supported today).
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	/* permanent allocation: intentionally never freed (referenced at panic time) */
	struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	/* CAS from NULL catches a racing registration that slipped past the check above */
	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
#endif /* defined(__arm64__) */
1058 
1059 /*
1060  * An overview of the xnu panic path:
1061  *
1062  * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
1063  * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
1064  * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
1065  * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
1066  * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
1067  * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
1068  * according to the device's boot-args.
1069  */
1070 #undef panic
1071 void
panic(const char * str,...)1072 panic(const char *str, ...)
1073 {
1074 	va_list panic_str_args;
1075 
1076 	va_start(panic_str_args, str);
1077 	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
1078 	va_end(panic_str_args);
1079 }
1080 
1081 void
panic_with_data(uuid_t uuid,void * addr,uint32_t len,uint64_t debugger_options_mask,const char * str,...)1082 panic_with_data(uuid_t uuid, void *addr, uint32_t len, uint64_t debugger_options_mask, const char *str, ...)
1083 {
1084 	va_list panic_str_args;
1085 
1086 	ext_paniclog_panic_with_data(uuid, addr, len);
1087 
1088 #if CONFIG_EXCLAVES
1089 	/*
1090 	 * Before trapping, inform the exclaves scheduler that we're going down
1091 	 * so it can grab an exclaves stackshot.
1092 	 */
1093 	if ((debugger_options_mask & DEBUGGER_OPTION_USER_WATCHDOG) != 0 &&
1094 	    exclaves_get_boot_stage() != EXCLAVES_BOOT_STAGE_NONE) {
1095 		(void) exclaves_scheduler_request_watchdog_panic();
1096 	}
1097 #endif /* CONFIG_EXCLAVES */
1098 
1099 	va_start(panic_str_args, str);
1100 	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1101 	    NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
1102 	va_end(panic_str_args);
1103 }
1104 
1105 void
panic_with_options(unsigned int reason,void * ctx,uint64_t debugger_options_mask,const char * str,...)1106 panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
1107 {
1108 	va_list panic_str_args;
1109 
1110 #if CONFIG_EXCLAVES
1111 	/*
1112 	 * Before trapping, inform the exclaves scheduler that we're going down
1113 	 * so it can grab an exclaves stackshot.
1114 	 */
1115 	if ((debugger_options_mask & DEBUGGER_OPTION_USER_WATCHDOG) != 0 &&
1116 	    exclaves_get_boot_stage() != EXCLAVES_BOOT_STAGE_NONE) {
1117 		(void) exclaves_scheduler_request_watchdog_panic();
1118 	}
1119 #endif /* CONFIG_EXCLAVES */
1120 
1121 	va_start(panic_str_args, str);
1122 	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1123 	    NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
1124 	va_end(panic_str_args);
1125 }
1126 
1127 void
panic_with_options_and_initiator(const char * initiator,unsigned int reason,void * ctx,uint64_t debugger_options_mask,const char * str,...)1128 panic_with_options_and_initiator(const char* initiator, unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
1129 {
1130 	va_list panic_str_args;
1131 
1132 	va_start(panic_str_args, str);
1133 	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1134 	    NULL, (unsigned long)(char *)__builtin_return_address(0), initiator);
1135 	va_end(panic_str_args);
1136 }
1137 
1138 boolean_t
panic_validate_ptr(void * ptr,vm_size_t size,const char * what)1139 panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
1140 {
1141 	if (ptr == NULL) {
1142 		paniclog_append_noflush("NULL %s pointer\n", what);
1143 		return false;
1144 	}
1145 
1146 	if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
1147 		paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
1148 		    what, ptr, (uint32_t)size);
1149 		return false;
1150 	}
1151 
1152 	return true;
1153 }
1154 
1155 boolean_t
panic_get_thread_proc_task(struct thread * thread,struct task ** task,struct proc ** proc)1156 panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
1157 {
1158 	if (!PANIC_VALIDATE_PTR(thread)) {
1159 		return false;
1160 	}
1161 
1162 	if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
1163 		return false;
1164 	}
1165 
1166 	if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
1167 		return false;
1168 	}
1169 
1170 	if (task) {
1171 		*task = thread->t_tro->tro_task;
1172 	}
1173 
1174 	if (!panic_validate_ptr(thread->t_tro->tro_proc,
1175 	    sizeof(struct proc *), "bsd_info")) {
1176 		*proc = NULL;
1177 	} else {
1178 		*proc = thread->t_tro->tro_proc;
1179 	}
1180 
1181 	return true;
1182 }
1183 
#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	/* only used by the assertf below; unused in release builds */
	__assert_only os_ref_count_t th_ref_count;

	/* sanity-check the thread pointer and its existing reference count before we rely on it */
	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	/* DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE tells the trap handler to backtrace `thread` instead of the current one */
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0), "");

	va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */
1213 
/*
 * Common funnel for all panic entry points (panic, panic_with_options, ...).
 *
 * Records the panic state on the current CPU and traps into the debugger
 * machinery (handle_debugger_trap / DebuggerTrapWithState), which writes the
 * paniclog/corefile and reboots or spins per boot-args. Does not return.
 *
 * panic_format_str/panic_args: printf-style panic string and its varargs
 * reason/ctx:                  exception reason and saved state when called from a trap;
 *                              ctx == NULL means we must trap ourselves
 * panic_options_mask:          DEBUGGER_OPTION_* flags
 * panic_data_ptr:              optional payload plumbed into the debugger state
 * panic_caller:                return address of the original panic call site
 * panic_initiator:             optional initiator tag (may be NULL)
 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
__mockable void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller, const char *panic_initiator)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif

	/* For very early panics before XNU serial initialization. */
	if (PE_kputc == NULL) {
		char buf[EARLY_PANIC_BUFLEN];
		vsnprintf(buf, EARLY_PANIC_BUFLEN, panic_format_str, *panic_args);
		paniclog_append_noflush("panic: %s\n", buf);
		paniclog_append_noflush("Kernel panicked very early before serial init, spinning forever...\n");
		panic_spin_forever();
	}

	/* optionally call sync, to reduce lost logs on restart, avoid on recursive panic. Unsafe due to unbounded sync() duration */
	if ((panic_options_mask & DEBUGGER_OPTION_SYNC_ON_PANIC_UNSAFE) && (CPUDEBUGGERCOUNT == 0)) {
		sync_internal();
	}

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller, panic_initiator);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* CRC of the initiator string so it fits in a 32-bit tracepoint field */
	__unused uint32_t panic_initiator_crc = panic_initiator ? crc32(0, panic_initiator, strnlen(panic_initiator, MAX_PANIC_INITIATOR_SIZE)) : 0;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC,
	    ((CPUDEBUGGERCOUNT <= 2) ? SOCD_TRACE_MODE_STICKY_TRACEPOINT : SOCD_TRACE_MODE_NONE),
	    PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)),
	    PACK_2X32(VALUE(panic_initiator_crc), VALUE(panic_options_mask & 0xFFFFFFFF)),
	    ADDR(panic_format_str),
	    ADDR(panic_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* If we're in a stackshot, signal that we've started panicking and wait for other CPUs to coalesce and spin before proceeding */
	stackshot_cpu_signal_panic();

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	/* best-effort: dump the kdebug trace buffer to disk before we go down */
	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	if (!debug_fatal_panic_begin()) {
		/*
		 * This CPU lost the race to be the first to panic. Re-enable
		 * interrupts and dead loop here awaiting the debugger xcall from
		 * the CPU that first panicked.
		 */
		ml_set_interrupts_enabled(TRUE);
		panic_stop();
	}

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__) && !APPLEVIRTUALPLATFORM
	/*
	 *  Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__         volatile ("hint #0x45");
#endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}
1333 
/* We rely on this symbol being visible in the debugger for triage automation */
void __attribute__((noinline, optnone))
panic_spin_forever(void)
{
	/* Last-resort parking loop: never returns; a watchdog (if any) ends it. */
	while (1) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return.  But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}
1347 
/*
 * Release the debugger lock on behalf of the panic stackshot path.
 * Must only be called while in the KDP/panic context (asserted below).
 */
void
panic_stackshot_release_lock(void)
{
	/* not_in_kdp is false only while the debugger owns the machine */
	assert(!not_in_kdp);
	DebuggerUnlock();
}
1354 
1355 static void
kdp_machine_reboot_type(unsigned int type,uint64_t debugger_flags)1356 kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
1357 {
1358 	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
1359 		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1360 	} else {
1361 		PEHaltRestart(type);
1362 	}
1363 	halt_all_cpus(TRUE);
1364 }
1365 
/*
 * Convenience wrapper: restart the CPU at panic time with default flags
 * (panic-end callouts are not skipped).
 */
void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}
1371 
/*
 * printf-style logging helper for the panic debugger path: formats via
 * _doprnt to consdebug_putc (radix 16 for %r-style conversions) and, on
 * arm64, flushes the paniclog afterwards.
 *
 * Marked unused because not every configuration compiles a caller in.
 */
static __attribute__((unused)) void
panic_debugger_log(const char *string, ...)
{
	va_list panic_debugger_log_args;

	va_start(panic_debugger_log_args, string);
	/* format string comes from trusted in-kernel callers; silence non-literal warnings */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#pragma clang diagnostic ignored "-Wformat"
	_doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
#pragma clang diagnostic pop
	va_end(panic_debugger_log_args);

#if defined(__arm64__)
	paniclog_flush();
#endif
}
1389 
1390 /*
1391  * Gather and save diagnostic information about a panic (or Debugger call).
1392  *
1393  * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
1394  * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e no
1395  * paniclog is written and no core is written unless we request a core on NMI.
1396  *
1397  * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
1398  * and calling out to any other functions we have for collecting diagnostic info.
1399  */
1400 static void
debugger_collect_diagnostics(unsigned int exception,unsigned int code,unsigned int subcode,void * state)1401 debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
1402 {
1403 #if DEVELOPMENT || DEBUG
1404 	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
1405 #endif
1406 
1407 #if defined(__x86_64__)
1408 	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
1409 #endif
1410 	/*
1411 	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
1412 	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
1413 	 * we'll just spin in kdp_raise_exception.
1414 	 */
1415 	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
1416 		kdp_raise_exception(exception, code, subcode, state);
1417 		if (debugger_safe_to_return && !debugger_is_panic) {
1418 			return;
1419 		}
1420 	}
1421 
1422 #ifdef CONFIG_KCOV
1423 	/* Try not to break core dump path by sanitizer. */
1424 	kcov_panic_disable();
1425 #endif
1426 
1427 	if ((debugger_current_op == DBOP_PANIC) ||
1428 	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
1429 		/*
1430 		 * Attempt to notify listeners once and only once that we've started
1431 		 * panicking. Only do this for Debugger() calls if we're treating
1432 		 * Debugger() calls like panic().
1433 		 */
1434 		uint32_t panic_details = 0;
1435 		/* if this is a force-reset panic then capture a log and reboot immediately. */
1436 		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
1437 			panic_details |= kPanicDetailsForcePowerOff;
1438 		}
1439 		PEHaltRestartInternal(kPEPanicBegin, panic_details);
1440 
1441 		/*
1442 		 * Set the begin pointer in the panic log structure. We key off of this
1443 		 * static variable rather than contents from the panic header itself in case someone
1444 		 * has stomped over the panic_info structure. Also initializes the header magic.
1445 		 */
1446 		static boolean_t began_writing_paniclog = FALSE;
1447 		if (!began_writing_paniclog) {
1448 			PE_init_panicheader();
1449 			began_writing_paniclog = TRUE;
1450 		}
1451 
1452 		if (CPUDEBUGGERCOUNT > 1) {
1453 			/*
1454 			 * we are in a nested panic.  Record the nested bit in panic flags and do some housekeeping
1455 			 */
1456 			PE_update_panicheader_nestedpanic();
1457 			paniclog_flush();
1458 		}
1459 	}
1460 
1461 	/*
1462 	 * Write panic string if this was a panic.
1463 	 *
1464 	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
1465 	 */
1466 	if (debugger_current_op == DBOP_PANIC) {
1467 		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
1468 		if (debugger_panic_str) {
1469 #pragma clang diagnostic push
1470 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1471 #pragma clang diagnostic ignored "-Wformat"
1472 			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
1473 #pragma clang diagnostic pop
1474 		}
1475 		paniclog_append_noflush("\n");
1476 	}
1477 #if defined(__x86_64__)
1478 	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
1479 		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
1480 	}
1481 
1482 	/*
1483 	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
1484 	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
1485 	 * conventional sense.
1486 	 */
1487 	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
1488 #endif /* __x86_64__ */
1489 	{
1490 		kdp_callouts(KDP_EVENT_PANICLOG);
1491 
1492 		/*
1493 		 * Write paniclog and panic stackshot (if supported)
1494 		 * TODO: Need to clear panic log when return from debugger
1495 		 * hooked up for embedded
1496 		 */
1497 		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options, debugger_panic_initiator);
1498 
1499 #if DEVELOPMENT || DEBUG
1500 		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
1501 #endif
1502 
1503 		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
1504 		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
1505 			PEHaltRestart(kPEPanicDiagnosticsDone);
1506 			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1507 		}
1508 	}
1509 
1510 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
1511 	/*
1512 	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
1513 	 * local coredumps, don't try to write these and instead go straight to reboot. This
1514 	 * allows us to persist any data that's stored in the panic log.
1515 	 */
1516 	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
1517 	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
1518 		PEHaltRestart(kPEPanicDiagnosticsDone);
1519 		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1520 	}
1521 
1522 	/*
1523 	 * Consider generating a local corefile if the infrastructure is configured
1524 	 * and we haven't disabled on-device coredumps.
1525 	 */
1526 	if (on_device_corefile_enabled()) {
1527 #if CONFIG_SPTM
1528 		/* We want to skip taking a local core dump if this is a panic from SPTM/TXM/cL4. */
1529 		extern uint8_t sptm_supports_local_coredump;
1530 		bool sptm_interrupted = false;
1531 		pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
1532 		(void)sptm_get_cpu_state(sptm_pcpu->sptm_cpu_id, CPUSTATE_SPTM_INTERRUPTED, &sptm_interrupted);
1533 #endif
1534 		if (!kdp_has_polled_corefile()) {
1535 			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
1536 				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
1537 				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
1538 #if defined(__arm64__)
1539 				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
1540 					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
1541 				}
1542 				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1543 				paniclog_flush();
1544 #else /* defined(__arm64__) */
1545 				if (panic_info->mph_panic_log_offset != 0) {
1546 					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
1547 						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
1548 					}
1549 					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1550 					paniclog_flush();
1551 				}
1552 #endif /* defined(__arm64__) */
1553 			}
1554 		}
1555 #if XNU_MONITOR
1556 		else if (pmap_get_cpu_data()->ppl_state != PPL_STATE_KERNEL) {
1557 			paniclog_append_noflush("skipping local kernel core because the PPL is not in KERNEL state\n");
1558 			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1559 			paniclog_flush();
1560 		}
1561 #elif CONFIG_SPTM
1562 		else if (!sptm_supports_local_coredump) {
1563 			paniclog_append_noflush("skipping local kernel core because the SPTM is in PANIC state and can't support core dump generation\n");
1564 			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1565 			paniclog_flush();
1566 		} else if (sptm_interrupted) {
1567 			paniclog_append_noflush("skipping local kernel core because the SPTM is in INTERRUPTED state and can't support core dump generation\n");
1568 			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1569 			paniclog_flush();
1570 		}
1571 #endif /* XNU_MONITOR */
1572 		else {
1573 			int ret = -1;
1574 
1575 #if defined (__x86_64__)
1576 			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
1577 			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
1578 #endif
1579 			{
1580 				/*
1581 				 * Doing an on-device coredump leaves the disk driver in a state
1582 				 * that can not be resumed.
1583 				 */
1584 				debugger_safe_to_return = FALSE;
1585 				begin_panic_transfer();
1586 				ret = kern_dump(KERN_DUMP_DISK);
1587 				abort_panic_transfer();
1588 
1589 #if DEVELOPMENT || DEBUG
1590 				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
1591 #endif
1592 			}
1593 
1594 			/*
1595 			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is sucessfully saved
1596 			 * or if option to ignore failures is set.
1597 			 */
1598 			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
1599 			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
1600 				PEHaltRestart(kPEPanicDiagnosticsDone);
1601 				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1602 			}
1603 		}
1604 	}
1605 
1606 	if (debugger_current_op == DBOP_PANIC ||
1607 	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
1608 		PEHaltRestart(kPEPanicDiagnosticsDone);
1609 	}
1610 
1611 	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
1612 		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1613 	}
1614 
1615 	/* If KDP is configured, try to trap to the debugger */
1616 #if defined(__arm64__)
1617 	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
1618 #else
1619 	if (current_debugger != NO_CUR_DB) {
1620 #endif
1621 		kdp_raise_exception(exception, code, subcode, state);
1622 		/*
1623 		 * Only return if we entered via Debugger and it's safe to return
1624 		 * (we halted the other cores successfully, this isn't a nested panic, etc)
1625 		 */
1626 		if (debugger_current_op == DBOP_DEBUGGER &&
1627 		    debugger_safe_to_return &&
1628 		    kernel_debugger_entry_count == 1 &&
1629 		    !debugger_is_panic) {
1630 			return;
1631 		}
1632 	}
1633 
1634 #if defined(__arm64__)
1635 	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
1636 		/*
1637 		 * Print panic string at the end of serial output
1638 		 * to make panic more obvious when someone connects a debugger
1639 		 */
1640 		if (debugger_panic_str) {
1641 			panic_debugger_log("Original panic string:\n");
1642 			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
1643 #pragma clang diagnostic push
1644 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1645 #pragma clang diagnostic ignored "-Wformat"
1646 			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
1647 #pragma clang diagnostic pop
1648 			panic_debugger_log("\n");
1649 		}
1650 
1651 		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
1652 		panic_spin_shmcon();
1653 	}
1654 #endif /* defined(__arm64__) */
1655 
1656 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1657 
1658 	PEHaltRestart(kPEPanicDiagnosticsDone);
1659 
1660 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1661 
1662 	if (!panicDebugging) {
1663 		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1664 	}
1665 
1666 	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
1667 	panic_spin_forever();
1668 }
1669 
#if SCHED_HYGIENE_DEBUG
/*
 * Instrumentation: captures mach_absolute_time() at up to 9 checkpoints
 * while a debugger trap is handled, for post-hoc latency triage.
 * Compiles away entirely when SCHED_HYGIENE_DEBUG is off.
 */
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif /* SCHED_HYGIENE_DEBUG */
1676 
/*
 * Common handler for all debugger/panic traps taken on this CPU.
 *
 * Runs with the debugger lock held for its whole duration: halts the
 * other cores, consumes the per-CPU debugger context (the CPUDEBUGGER* /
 * CPUPANIC* accessors), dispatches on the requested operation
 * (breakpoint, stackshot, PGO counter reset, or panic diagnostics), then
 * restores the previous debugger state and resumes the other cores.
 *
 * exception/code/subcode/state describe the trap that brought us here
 * and are forwarded to kdp / the diagnostics path.
 */
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret = KERN_SUCCESS;
	debugger_op db_prev_op = debugger_current_op;

	if (!DebuggerLock()) {
		/*
		 * We lost the race to be the first to panic.
		 * Return here so that we will enter the panic stop
		 * infinite loop and take the debugger IPI from the
		 * first CPU that got the debugger lock.
		 */
		return;
	}

	DEBUGGER_TRAP_TIMESTAMP(0);

	/* Quiesce the other CPUs before touching global debugger state. */
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

	DEBUGGER_TRAP_TIMESTAMP(1);

#if SCHED_HYGIENE_DEBUG
	/* Serial output is slow; reset spin diagnostics so they don't trip. */
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif /* SCHED_HYGIENE_DEBUG */
	if (ret != KERN_SUCCESS) {
		/* Could not halt the other cores: report back via the per-CPU
		 * context and abandon the debugger entry. */
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		/* Keep regular console output quiesced while in the debugger. */
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
			debugger_panic_initiator = CPUPANICINITIATOR;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	/* Mark in-debugger: read by oslog_is_safe()/debug_mode_active(). */
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_mark_as_busy();
#endif

	/* Dispatch on the requested debugger operation. */
	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot(NULL);
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		/* note: this is the panic path...  */
		if (!debug_fatal_panic_begin()) {
			/*
			 * This CPU lost the race to be the first to panic. Re-enable
			 * interrupts and dead loop here awaiting the debugger xcall from
			 * the CPU that first panicked.
			 */
			ml_set_interrupts_enabled(TRUE);
			panic_stop();
		}
#if defined(__arm64__) && (DEBUG || DEVELOPMENT)
		if (!PE_arm_debug_and_trace_initialized()) {
			paniclog_append_noflush("kernel panicked before debug and trace infrastructure initialized!\n"
			    "spinning forever...\n");
			panic_spin_forever();
		}
#endif
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_unmark_as_busy();
#endif

	DEBUGGER_TRAP_TIMESTAMP(4);

	/* Unwind: leave debugger mode and notify interested parties. */
	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	/* Breakpoints keep the saved panic message; everything else drops it. */
	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}
1827 
/*
 * BSD-style kernel log() entry point.
 *
 * Emits the formatted message to the console (under disabled preemption,
 * through the locked console putc) and forwards a second copy of the
 * varargs to os_log with the caller's return address for attribution.
 * The `level` argument is ignored.
 */
__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef  MACH_BSD
	va_start(listp, fmt);
	/* A va_list may only be traversed once; os_log gets its own copy. */
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
#pragma clang diagnostic ignored "-Wformat"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
#pragma clang diagnostic pop
	va_end(listp2);
#endif
}
1858 
1859 /*
1860  * Per <rdar://problem/24974766>, skip appending log messages to
1861  * the new logging infrastructure in contexts where safety is
1862  * uncertain. These contexts include:
1863  *   - When we're in the debugger
1864  *   - We're in a panic
1865  *   - Interrupts are disabled
1866  *   - Or Pre-emption is disabled
1867  * In all the above cases, it is potentially unsafe to log messages.
1868  */
1869 
1870 boolean_t
1871 oslog_is_safe(void)
1872 {
1873 	return kernel_debugger_entry_count == 0 &&
1874 	       not_in_kdp == 1 &&
1875 	       get_preemption_level() == 0 &&
1876 	       ml_get_interrupts_enabled() == TRUE;
1877 }
1878 
1879 boolean_t
1880 debug_mode_active(void)
1881 {
1882 	return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
1883 }
1884 
1885 void
1886 debug_putc(char c)
1887 {
1888 	if ((debug_buf_size != 0) &&
1889 	    ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size) &&
1890 	    (!is_debug_ptr_in_ext_paniclog())) {
1891 		*debug_buf_ptr = c;
1892 		debug_buf_ptr++;
1893 	}
1894 }
1895 
1896 #if defined (__x86_64__)
/*
 * Eight 7-bit fields packed into 7 bytes: one group of 8 ASCII-range
 * characters (top bit discarded) per 7 output bytes.
 */
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
}  __attribute__((packed));

typedef struct pasc pasc_t;

_Static_assert(sizeof(pasc_t) == 7, "pasc_t must pack 8x7 bits into 7 bytes");

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 *
 * packA: compresses `length` bytes of `inbuf` in place (7 output bytes per
 * 8 input bytes, high bit of each char dropped) and returns the packed size.
 * Uses memmove/memcpy instead of the nonstandard, removed-from-POSIX bcopy.
 */
int
packA(char *inbuf, uint32_t length, uint32_t buflen)
{
	unsigned int i, j = 0;
	pasc_t pack;

	/* Round up to a whole number of 8-byte groups, clamped to the buffer. */
	length = (length + 7) & ~7u;
	if (length > buflen) {
		length = buflen;
	}

	for (i = 0; i < length; i += 8) {
		pack.a = inbuf[i];
		pack.b = inbuf[i + 1];
		pack.c = inbuf[i + 2];
		pack.d = inbuf[i + 3];
		pack.e = inbuf[i + 4];
		pack.f = inbuf[i + 5];
		pack.g = inbuf[i + 6];
		pack.h = inbuf[i + 7];
		/* Destination trails the source, so an overlap-safe copy is
		 * required for the in-place compaction. */
		memmove(inbuf + j, &pack, sizeof(pack));
		j += 7;
	}
	return (int)j;
}

/*
 * unpackA: expands `length` packed bytes of `inbuf` in place back into
 * (length * 8 / 7) characters.  The caller's buffer must be large enough
 * to hold the expanded data (true for packA output in a buflen buffer).
 */
void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;

	/* Convert the packed byte count back to the unpacked char count. */
	length = (length * 8) / 7;

	while (i < length) {
		/* Snapshot the next 7-byte group without aliasing the buffer. */
		memcpy(&packs, &inbuf[i], sizeof(packs));
		/*
		 * Shift the remaining packed bytes right by one to make room
		 * for the 8 expanded characters; the regions overlap, so use
		 * memmove (replaces the nonstandard bcopy).
		 */
		int tail = (int)(length - i) - 8;
		if (tail > 0) {
			memmove(&inbuf[i + 8], &inbuf[i + 7], (size_t)tail);
		}
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
1957 #endif /* defined (__x86_64__) */
1958 
1959 extern char *proc_name_address(void *);
1960 extern char *proc_longname_address(void *);
1961 
1962 __private_extern__ void
1963 panic_display_process_name(void)
1964 {
1965 	proc_name_t proc_name = {};
1966 	struct proc *cbsd_info = NULL;
1967 	task_t ctask = NULL;
1968 	vm_size_t size;
1969 
1970 	if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
1971 		goto out;
1972 	}
1973 
1974 	if (cbsd_info == NULL) {
1975 		goto out;
1976 	}
1977 
1978 	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
1979 	    (vm_offset_t)&proc_name, sizeof(proc_name));
1980 
1981 	if (size == 0 || proc_name[0] == '\0') {
1982 		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
1983 		    (vm_offset_t)&proc_name,
1984 		    MIN(sizeof(command_t), sizeof(proc_name)));
1985 		if (size > 0) {
1986 			proc_name[size - 1] = '\0';
1987 		}
1988 	}
1989 
1990 out:
1991 	proc_name[sizeof(proc_name) - 1] = '\0';
1992 	paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
1993 	    current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
1994 }
1995 
1996 unsigned
1997 panic_active(void)
1998 {
1999 	return debugger_current_op == DBOP_PANIC ||
2000 	       (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
2001 }
2002 
/*
 * Records the platform model string into the model_name global so it can
 * later be emitted into panic logs by panic_display_model_name().
 */
void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}
2008 
2009 void
2010 panic_display_model_name(void)
2011 {
2012 	char tmp_model_name[sizeof(model_name)];
2013 
2014 	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
2015 		return;
2016 	}
2017 
2018 	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
2019 
2020 	if (tmp_model_name[0] != 0) {
2021 		paniclog_append_noflush("System model name: %s\n", tmp_model_name);
2022 	}
2023 }
2024 
2025 void
2026 panic_display_kernel_uuid(void)
2027 {
2028 	char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
2029 
2030 	if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
2031 		return;
2032 	}
2033 
2034 	if (tmp_kernel_uuid[0] != '\0') {
2035 		paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
2036 	}
2037 }
2038 
2039 #if CONFIG_SPTM
2040 static void
2041 panic_display_component_uuid(char const *component_name, void *component_address)
2042 {
2043 	uuid_t *component_uuid;
2044 	unsigned long component_uuid_len = 0;
2045 	uuid_string_t component_uuid_string;
2046 
2047 	component_uuid = getuuidfromheader((kernel_mach_header_t *)component_address, &component_uuid_len);
2048 
2049 	if (component_uuid != NULL && component_uuid_len == sizeof(uuid_t)) {
2050 		uuid_unparse_upper(*component_uuid, component_uuid_string);
2051 		paniclog_append_noflush("%s UUID: %s\n", component_name, component_uuid_string);
2052 	}
2053 }
2054 #endif /* CONFIG_SPTM */
2055 
/*
 * Emits the load addresses and slides needed to symbolicate the panic
 * log (and, under CONFIG_SPTM, the debug-header component UUIDs).
 */
void
panic_display_kernel_aslr(void)
{
#if CONFIG_SPTM
	{
		struct debug_header const *dh = SPTMArgs->debug_header;

		paniclog_append_noflush("Debug Header address: %p\n", dh);

		if (dh != NULL) {
			void *component_address;

			paniclog_append_noflush("Debug Header entry count: %d\n", dh->count);

			/*
			 * Entries are cumulative: count N implies entries
			 * 0..N-1 exist, so each case falls through and also
			 * prints the lower-numbered components.
			 */
			switch (dh->count) {
			default: // 3 or more
				component_address = dh->image[DEBUG_HEADER_ENTRY_TXM];
				paniclog_append_noflush("TXM load address: %p\n", component_address);

				panic_display_component_uuid("TXM", component_address);
				OS_FALLTHROUGH;
			case 2:
				component_address = dh->image[DEBUG_HEADER_ENTRY_XNU];
				paniclog_append_noflush("Debug Header kernelcache load address: %p\n", component_address);

				panic_display_component_uuid("Debug Header kernelcache", component_address);
				OS_FALLTHROUGH;
			case 1:
				component_address = dh->image[DEBUG_HEADER_ENTRY_SPTM];
				paniclog_append_noflush("SPTM load address: %p\n", component_address);

				panic_display_component_uuid("SPTM", component_address);
				OS_FALLTHROUGH;
			case 0:
				; // nothing to print
			}
		}
	}
#endif /* CONFIG_SPTM */

	kc_format_t kc_format;

	PE_get_primary_kc_format(&kc_format);

	/* Fileset kernelcaches report both kernelcache and kernel slides. */
	if (kc_format == KCFormatFileset) {
		void *kch = PE_get_kc_header(KCKindPrimary);
		paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("KernelCache base:  %p\n", (void*) kch);
		paniclog_append_noflush("Kernel slide:      0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text base:  %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
		extern vm_offset_t segTEXTEXECB;
		paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text exec base:  0x%016lx\n", (unsigned long)segTEXTEXECB);
#endif /* defined(__arm64__) */
	} else if (vm_kernel_slide) {
		paniclog_append_noflush("Kernel slide:      0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("Kernel text base:  %p\n", (void *)vm_kernel_stext);
	} else {
		/* No slide (e.g. ASLR disabled): just print the text base. */
		paniclog_append_noflush("Kernel text base:  %p\n", (void *)vm_kernel_stext);
	}
}
2118 
/*
 * Logs the __HIB segment base address; the segment only exists on Intel,
 * so this is a no-op on other architectures.
 */
void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
	paniclog_append_noflush("__HIB  text base: %p\n", (void *) vm_hib_base);
#endif
}
2126 
2127 #if CONFIG_ECC_LOGGING
2128 __private_extern__ void
2129 panic_display_ecc_errors(void)
2130 {
2131 	uint32_t count = ecc_log_get_correction_count();
2132 
2133 	if (count > 0) {
2134 		paniclog_append_noflush("ECC Corrections:%u\n", count);
2135 	}
2136 }
2137 #endif /* CONFIG_ECC_LOGGING */
2138 
2139 extern int vm_num_swap_files;
2140 
/*
 * Logs a one-line summary of VM compressor health: compressed pages and
 * segments as a percentage of their limits, plus swapfile count and
 * swap-space state.
 */
void
panic_display_compressor_stats(void)
{
	int isswaplow = vm_swap_low_on_space();
#if CONFIG_FREEZE
	uint32_t incore_seg_count;
	uint32_t incore_compressed_pages;
	/*
	 * With freezer in-core accounting enabled, exclude swapped-out
	 * segments/pages so the figures reflect what is actually resident.
	 */
	if (freezer_incore_cseg_acct) {
		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
		incore_compressed_pages = c_segment_pages_compressed_incore;
	} else {
		incore_seg_count = c_segment_count;
		incore_compressed_pages = c_segment_pages_compressed;
	}

	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
	    (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (incore_seg_count * 100) / c_segments_limit,
	    (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#else /* CONFIG_FREEZE */
	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
	    (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (c_segment_count * 100) / c_segments_limit,
	    (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#endif /* CONFIG_FREEZE */
}
2173 
2174 #if !CONFIG_TELEMETRY
/*
 * Stub so kernels built without CONFIG_TELEMETRY still export the
 * symbol; always reports that telemetry is unsupported.
 */
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
2180 #endif
2181 
2182 #include <machine/machine_cpu.h>
2183 
2184 TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
2185 
2186 __startup_func
2187 static void
2188 kern_feature_override_init(void)
2189 {
2190 	/*
2191 	 * update kern_feature_override based on the serverperfmode=1 boot-arg
2192 	 * being present, but do not look at the device-tree setting on purpose.
2193 	 *
2194 	 * scale_setup() will update serverperfmode=1 based on the DT later.
2195 	 */
2196 
2197 	if (serverperfmode) {
2198 		kern_feature_overrides |= KF_SERVER_PERF_MODE_OVRD;
2199 	}
2200 }
2201 STARTUP(TUNABLES, STARTUP_RANK_LAST, kern_feature_override_init);
2202 
2203 #if MACH_ASSERT
2204 STATIC_IF_KEY_DEFINE_TRUE(mach_assert);
2205 #endif
2206 
2207 #if SCHED_HYGIENE_DEBUG
2208 STATIC_IF_KEY_DEFINE_TRUE(sched_debug_pmc);
2209 STATIC_IF_KEY_DEFINE_TRUE(sched_debug_preemption_disable);
2210 STATIC_IF_KEY_DEFINE_TRUE(sched_debug_interrupt_disable);
2211 #endif /* SCHED_HYGIENE_DEBUG */
2212 
/*
 * Static-if initializer: runs early enough that boot-args must be parsed
 * directly (static_if_boot_arg_uint64) rather than via TUNABLE state.
 * Disables debug static-if keys according to the override mask that
 * kern_feature_override_init() will later compute.
 */
__static_if_init_func
static void
kern_feature_override_apply(const char *args)
{
	uint64_t kf_ovrd;

	/*
	 * Compute the value of kern_feature_override like it will look like
	 * after kern_feature_override_init().
	 */
	kf_ovrd = static_if_boot_arg_uint64(args, "validation_disables", 0);
	if (static_if_boot_arg_uint64(args, "serverperfmode", 0)) {
		kf_ovrd |= KF_SERVER_PERF_MODE_OVRD;
	}

#if DEBUG_RW
	lck_rw_assert_init(args, kf_ovrd);
#endif /* DEBUG_RW */
#if MACH_ASSERT
	if (kf_ovrd & KF_MACH_ASSERT_OVRD) {
		static_if_key_disable(mach_assert);
	}
#endif /* MACH_ASSERT */
#if SCHED_HYGIENE_DEBUG
	/* Skip the sched-hygiene overrides when wdt=-1 is set (presumably
	 * meaning the watchdog machinery is disabled entirely -- confirm). */
	if ((int64_t)static_if_boot_arg_uint64(args, "wdt", 0) != -1) {
		if (kf_ovrd & KF_SCHED_HYGIENE_DEBUG_PMC_OVRD) {
			static_if_key_disable(sched_debug_pmc);
		}
		if (kf_ovrd & KF_PREEMPTION_DISABLED_DEBUG_OVRD) {
			static_if_key_disable(sched_debug_preemption_disable);
			/*
			 * NOTE(review): the interrupt-masked override is only
			 * honored when the preemption-disable override is also
			 * set. Looks like an intentional nesting dependency,
			 * but worth confirming.
			 */
			if (kf_ovrd & KF_INTERRUPT_MASKED_DEBUG_OVRD) {
				static_if_key_disable(sched_debug_interrupt_disable);
			}
		}
	}
#endif /* SCHED_HYGIENE_DEBUG */
}
2250 STATIC_IF_INIT(kern_feature_override_apply);
2251 
2252 boolean_t
2253 kern_feature_override(uint32_t fmask)
2254 {
2255 	return (kern_feature_overrides & fmask) == fmask;
2256 }
2257 
2258 #if !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING
/*
 * Returns FALSE when writing a local corefile would be invalid on this
 * boot: in ephemeral-data-mode with coredump encryption enforced, an
 * on-device core must not be written.  Defaults to TRUE whenever the
 * device-tree properties are missing or malformed.
 */
static boolean_t
device_corefile_valid_on_ephemeral(void)
{
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	DTEntry node;
	const uint32_t *value = NULL;
	unsigned int size = 0;
	/* No /product node: nothing to check, allow the corefile. */
	if (kSuccess != SecureDTLookupEntry(NULL, "/product", &node)) {
		return TRUE;
	}
	if (kSuccess != SecureDTGetProperty(node, "ephemeral-data-mode", (void const **) &value, &size)) {
		return TRUE;
	}

	/* Malformed property: treat as not ephemeral. */
	if (size != sizeof(uint32_t)) {
		return TRUE;
	}

	/* Ephemeral data mode + enforced encryption => no local corefile. */
	if ((*value) && (kern_dump_should_enforce_encryption() == true)) {
		return FALSE;
	}
#endif /* ifdef CONFIG_KDP_COREDUMP_ENCRYPTION */

	return TRUE;
}
2284 #endif /* !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING */
2285 
/*
 * Whether a local (on-device) kernel corefile should be written on
 * panic.  Gated on the debug= boot-arg; the policy differs between
 * macOS and other targets (see inline comments below).
 */
boolean_t
on_device_corefile_enabled(void)
{
	/* debug_boot_arg must already have been parsed from boot-args. */
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	if (debug_boot_arg == 0) {
		return FALSE;
	}
	if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
		return FALSE;
	}
#if !XNU_TARGET_OS_OSX
	if (device_corefile_valid_on_ephemeral() == FALSE) {
		return FALSE;
	}
	/*
	 * outside of macOS, if there's a debug boot-arg set and local
	 * cores aren't explicitly disabled, we always write a corefile.
	 */
	return TRUE;
#else /* !XNU_TARGET_OS_OSX */
	/*
	 * on macOS, if corefiles on panic are requested and local cores
	 * aren't disabled we write a local core.
	 */
	if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
		return TRUE;
	}
#endif /* !XNU_TARGET_OS_OSX */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
	return FALSE;
}
2318 
2319 boolean_t
2320 panic_stackshot_to_disk_enabled(void)
2321 {
2322 	assert(startup_phase >= STARTUP_SUB_TUNABLES);
2323 #if defined(__x86_64__)
2324 	if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
2325 		/* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
2326 		if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
2327 			return FALSE;
2328 		}
2329 
2330 		return TRUE;
2331 	}
2332 #endif
2333 	return FALSE;
2334 }
2335 
2336 const char *
2337 sysctl_debug_get_preoslog(size_t *size)
2338 {
2339 	int result = 0;
2340 	void *preoslog_pa = NULL;
2341 	int preoslog_size = 0;
2342 
2343 	result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2344 	if (result || preoslog_pa == NULL || preoslog_size == 0) {
2345 		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2346 		*size = 0;
2347 		return NULL;
2348 	}
2349 
2350 	/*
2351 	 *  Beware:
2352 	 *  On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
2353 	 *  On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
2354 	 */
2355 	*size = preoslog_size;
2356 	return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
2357 }
2358 
/*
 * Releases the bootloader-provided preoslog region.  Only actually frees
 * on RELEASE builds; Development/Debug kernels keep the buffer so it can
 * be extracted from coredumps.
 */
void
sysctl_debug_free_preoslog(void)
{
#if RELEASE
	int result = 0;
	void *preoslog_pa = NULL;
	int preoslog_size = 0;

	result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
	if (result || preoslog_pa == NULL || preoslog_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
		return;
	}

	IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
#else
	/*  On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
#endif // RELEASE
}
2378 
2379 #if HAS_UPSI_FAILURE_INJECTION
2380 uint64_t xnu_upsi_injection_stage  = 0;
2381 uint64_t xnu_upsi_injection_action = 0;
2382 
/*
 * Dead-loops the calling CPU; used as the injected "hang"/watchdog
 * action below and as a convenient LLDB attach point.
 */
__attribute__((optnone)) static void
SPINNING_FOREVER(void)
{
	// Decided to disable optimizations on this function instead of using a
	// volatile bool for the deadloop.
	// This simplifies the process of using the deadloop as an LLDB attach point
	bool loop = true;

	while (loop) {
	}
	return;
}
2395 
/*
 * UPSI failure injection: when the current boot stage matches the
 * configured injection stage, performs the configured action (test
 * panic or deadloop).  No-op when the stages differ.
 */
void
check_for_failure_injection(failure_injection_stage_t current_stage)
{
	// Can't call this function with the default initialization for xnu_upsi_injection_stage
	assert(current_stage != 0);

	// Check condition to inject a panic/stall/hang
	if (current_stage != xnu_upsi_injection_stage) {
		return;
	}

	// Do the requested action
	switch (xnu_upsi_injection_action) {
	case INJECTION_ACTION_PANIC:
		/* panic() does not return, so no break is needed here. */
		panic("Test panic at stage 0x%llx", current_stage);
	case INJECTION_ACTION_WATCHDOG_TIMEOUT:
	case INJECTION_ACTION_DEADLOOP:
		SPINNING_FOREVER();
		break;
	default:
		break;
	}
}
2419 #endif // HAS_UPSI_FAILURE_INJECTION
2420 
2421 #define AWL_HV_ENTRY_FLAG (0x1)
2422 
/*
 * Sets the AWL_HV_ENTRY_FLAG bit in the AWL watchdog diagnostic scratch
 * register (arm64 only; presumably consumed by watchdog triage tooling
 * -- confirm).  No-op on other architectures.
 */
static inline void
awl_set_scratch_reg_hv_bit(void)
{
#if defined(__arm64__)
#define WATCHDOG_DIAG0     "S3_5_c15_c2_6"
	uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
	awl_diag0 |= AWL_HV_ENTRY_FLAG;
	__builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
#endif // defined(__arm64__)
}
2433 
/*
 * Records (once per CPU) that the hypervisor has been entered and
 * mirrors that fact into the AWL scratch register.  The per-CPU flag
 * check keeps the common path cheap: the register is written only on
 * the first detected entry, and only when the hardware supports it.
 */
void
awl_mark_hv_entry(void)
{
	if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
		return;
	}
	*PERCPU_GET(hv_entry_detected) = true;

	awl_set_scratch_reg_hv_bit();
}
2444 
2445 /*
2446  * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2447  * so restore it manually.
2448  */
2449 static bool
2450 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2451 {
2452 	if (event == CPU_BOOTED) {
2453 		if (*PERCPU_GET(hv_entry_detected)) {
2454 			awl_set_scratch_reg_hv_bit();
2455 		}
2456 	}
2457 
2458 	return true;
2459 }
2460 
2461 /*
2462  * Identifies and sets a flag if AWL Scratch0/1 exists in the system, subscribes
2463  * for a callback to restore register after hibernation
2464  */
__startup_func
static void
set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
{
	DTEntry base = NULL;

	/* The watchdog DT node advertises AWL scratch register support. */
	if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
		return;
	}
	const uint8_t *data = NULL;
	unsigned int data_size = sizeof(uint8_t);

	if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
		/* Any non-zero byte in the property marks the feature present. */
		for (unsigned int i = 0; i < data_size; i++) {
			if (data[i] != 0) {
				awl_scratch_reg_supported = true;
				cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
				break;
			}
		}
	}
}
2487 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2488 
2489 /**
2490  * Signal that the system is going down for a panic. Returns true if it is safe to
2491  * proceed with the panic flow, false if we should re-enable interrupts and spin
2492  * to allow another CPU to proceed with its panic flow.
2493  *
2494  * This function is idempotent when called from the same CPU; in the normal
2495  * panic case it is invoked twice, since it needs to be invoked in the case
2496  * where we enter the panic flow outside of panic() from DebuggerWithContext().
2497  */
static inline boolean_t
debug_fatal_panic_begin(void)
{
#if CONFIG_SPTM
	/*
	 * Since we're going down, initiate panic lockdown.
	 *
	 * Whether or not this call to panic lockdown can be subverted is murky.
	 * This doesn't really matter, however, because any security critical panics
	 * events will have already initiated lockdown from the exception vector
	 * before calling panic. Thus, lockdown from panic itself is fine as merely
	 * a "best effort".
	 */
#if DEVELOPMENT || DEBUG
	panic_lockdown_record_debug_data();
#endif /* DEVELOPMENT || DEBUG */
	sptm_xnu_panic_begin();

	pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
	uint16_t sptm_cpu_id = sptm_pcpu->sptm_cpu_id;
	uint64_t sptm_panicking_cpu_id;

	/*
	 * If the SPTM reports that a different CPU already owns the panic,
	 * tell the caller to back off; it will re-enable interrupts and
	 * spin awaiting the first panicking CPU's debugger xcall.
	 */
	if (sptm_get_panicking_cpu_id(&sptm_panicking_cpu_id) == LIBSPTM_SUCCESS &&
	    sptm_panicking_cpu_id != sptm_cpu_id) {
		return false;
	}
#endif /* CONFIG_SPTM */
	return true;
}
2527