xref: /xnu-11417.140.69/osfmk/kern/debug.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59 #include <kdp/kdp.h>
60 #include <kdp/kdp_core.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_callout.h>
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/percpu.h>
66 #include <kern/spl.h>
67 #include <kern/thread.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/socd_client.h>
71 #include <kern/misc_protos.h>
72 #include <kern/clock.h>
73 #include <kern/telemetry.h>
74 #include <kern/trap_telemetry.h>
75 #include <kern/ecc.h>
76 #include <kern/kern_stackshot.h>
77 #include <kern/kern_cdata.h>
78 #include <kern/zalloc_internal.h>
79 #include <kern/iotrace.h>
80 #include <pexpert/device_tree.h>
81 #include <vm/vm_kern_xnu.h>
82 #include <vm/vm_map.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_compressor_xnu.h>
85 #include <stdarg.h>
86 #include <stdatomic.h>
87 #include <sys/pgo.h>
88 #include <console/serial_protos.h>
89 #include <IOKit/IOBSD.h>
90 
91 #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
92 #include <kdp/kdp_udp.h>
93 #endif
94 #include <kern/processor.h>
95 
96 #if defined(__i386__) || defined(__x86_64__)
97 #include <IOKit/IOBSD.h>
98 
99 #include <i386/cpu_threads.h>
100 #include <i386/pmCPU.h>
101 #include <i386/lbr.h>
102 #endif
103 
104 #include <IOKit/IOPlatformExpert.h>
105 #include <machine/machine_cpu.h>
106 #include <machine/pal_routines.h>
107 
108 #include <sys/kdebug.h>
109 #include <libkern/OSKextLibPrivate.h>
110 #include <libkern/OSAtomic.h>
111 #include <libkern/kernel_mach_header.h>
112 #include <libkern/section_keywords.h>
113 #include <uuid/uuid.h>
114 #include <mach_debug/zone_info.h>
115 #include <mach/resource_monitors.h>
116 #include <machine/machine_routines.h>
117 #include <sys/proc_require.h>
118 
119 #include <os/log_private.h>
120 
121 #include <kern/ext_paniclog.h>
122 
123 #if defined(__arm64__)
124 #include <pexpert/pexpert.h> /* For gPanicBase */
125 #include <arm/caches_internal.h>
126 #include <arm/misc_protos.h>
127 extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
128 #endif
129 
130 #include <san/kcov.h>
131 
132 #if CONFIG_XNUPOST
133 #include <tests/xnupost.h>
134 extern int vsnprintf(char *, size_t, const char *, va_list);
135 #endif
136 
137 #if CONFIG_CSR
138 #include <sys/csr.h>
139 #endif
140 
141 #if CONFIG_EXCLAVES
142 #include <xnuproxy/panic.h>
143 #include "exclaves_panic.h"
144 #endif
145 
146 #if CONFIG_SPTM
147 #include <arm64/sptm/sptm.h>
148 #include <arm64/sptm/pmap/pmap_data.h>
149 #endif /* CONFIG_SPTM */
150 
/* Loader/device-tree helpers used to locate boot-loader-provided regions. */
extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );
extern unsigned int debug_boot_arg;
extern int serial_init(void);

unsigned int    halt_in_debugger = 0;   /* non-zero: stop in the debugger during boot (DB_HALT) */
unsigned int    current_debugger = 0;
unsigned int    active_debugger = 0;
SECURITY_READ_ONLY_LATE(unsigned int)    panicDebugging = FALSE; /* allow interactive debugging after a panic */
unsigned int    kernel_debugger_entry_count = 0;

#if DEVELOPMENT || DEBUG
/*
 * Panic-path fault-injection knobs; see the usage notes above
 * INJECT_NESTED_PANIC_IF_REQUESTED below for how these combine.
 */
unsigned int    panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
unsigned int    panic_test_action_count = 1;
unsigned int    panic_test_case = PANIC_TEST_CASE_DISABLED;
#endif

#if defined(__arm64__)
/* Extra panic-time data registered by other subsystems (arm64 only). */
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm64__)
/*
 * Magic number; this should be identical to the armv7 encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

/* Final halt primitive once panic handling is complete. */
#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()    pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop()    panic_spin_forever()
#endif

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
/*
 * More than enough for any typical format string passed to panic();
 * anything longer will be truncated but that's better than nothing.
 */
#define EARLY_PANIC_BUFLEN 256
#endif
196 
/*
 * Per-CPU debugger/panic bookkeeping.  Each CPU records the operation it is
 * requesting, the panic string/args/data it was invoked with, and how many
 * times it has (re)entered the debugger path (nesting level).
 */
struct debugger_state {
	uint64_t        db_panic_options;
	debugger_op     db_current_op;
	boolean_t       db_proceed_on_sync_failure;
	const char     *db_message;
	const char     *db_panic_str;
	va_list        *db_panic_args;
	void           *db_panic_data_ptr;
	unsigned long   db_panic_caller;
	const char     *db_panic_initiator;
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t        db_entry_count;
	kern_return_t   db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);
struct kernel_panic_reason PERCPU_DATA(panic_reason);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

/* Convenience accessors for the current CPU's debugger_state fields. */
#define CPUDEBUGGEROP    current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG   current_debugger_state()->db_message
#define CPUPANICSTR      current_debugger_state()->db_panic_str
#define CPUPANICARGS     current_debugger_state()->db_panic_args
#define CPUPANICOPTS     current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR  current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC  current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET   current_debugger_state()->db_op_return
#define CPUPANICCALLER   current_debugger_state()->db_panic_caller
#define CPUPANICINITIATOR current_debugger_state()->db_panic_initiator
232 
233 
/*
 *  Usage:
 *  panic_test_action_count is in the context of other flags, e.g. for IO errors it is "succeed this many times then fail" and for nesting it is "panic this many times then succeed"
 *  panic_test_failure_mode is a bit map of things to do
 *  panic_test_case is what sort of test we are injecting
 *
 *  For more details see definitions in debugger.h
 *
 *  Note that not all combinations are sensible, but some actions can be combined, e.g.
 *  - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
 *  - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
 *  - IO_ERR with action 15 will cause 14 successful IOs, then fail on the next one
 */
#if DEVELOPMENT || DEBUG
/* Fault injection for exercising the nested-panic paths (DEV/DEBUG only). */
#define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
MACRO_BEGIN \
	if ((panic_test_case & requested) && panic_test_action_count) { \
	    panic_test_action_count--; \
	        /* (int *)4 is an intentionally-bad pointer used for the BADPTR/SPIN modes. */ \
	        volatile int *panic_test_badpointer = (int *)4; \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while(panic_test_badpointer); } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count+1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
	        if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count+1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
	} \
MACRO_END

#endif /* DEVELOPMENT || DEBUG */
260 
/*
 * Global (cross-CPU) debugger state, written by the CPU that wins the
 * debugger lock; mirrors that CPU's per-CPU debugger_state for consumers
 * that only look at globals (e.g. the panic log writer and kdp).
 */
debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;
const char *debugger_panic_initiator = "";

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller, const char *panic_initiator) __dead2 __printflike(1, 0);
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
void panic_stackshot_release_lock(void);
extern void PE_panic_hook(const char*);
extern int sync_internal(void);

/* Maximum nested debugger entries before the bailout paths engage. */
#define NESTEDDEBUGGERENTRYMAX 5
static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
    NESTEDDEBUGGERENTRYMAX);

SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
static bool PERCPU_DATA(hv_entry_detected); // = false
static void awl_set_scratch_reg_hv_bit(void);
void awl_mark_hv_entry(void);
static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);

/* NOTE(review): '&' below is bitwise, not logical; it works because both
 * operands evaluate to 0/1 in the preprocessor, but '&&' is conventional. */
#if !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING
static boolean_t device_corefile_valid_on_ephemeral(void);
#endif /* !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING */
292 
#if defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;   /* current write position within debug_buf */
unsigned int debug_buf_size = 0;

/* Set when the kdp_match_name boot-arg explicitly requests KDP. */
SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the end of the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm64__) */

/* Location where a kdebug trace snapshot is written at panic time. */
#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif
327 
static inline void debug_fatal_panic_begin(void);

/* Debugger state */
atomic_int     debugger_cpu = DEBUGGER_NO_CPU;   /* CPU currently holding the debugger lock */
boolean_t      debugger_allcpus_halted = FALSE;
boolean_t      debugger_safe_to_return = TRUE;
unsigned int   debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

/* UUIDs for the kernel collection(s), used to annotate panic logs/corefiles. */
boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;


/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE_DEV_WRITEABLE(unsigned int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
/* NOTE(review): kernelcache_uuid_string is also declared above as
 * uuid_string_t; these are compatible tentative definitions (char[37]),
 * so they refer to the same object — confirm this duplication is intended. */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char   panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

#if (DEVELOPMENT || DEBUG)
uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
#endif

/*
 * Maintain the physically-contiguous carveouts for the carveout bootargs.
 */
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);

TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;


#if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
/**
 * Extra debug state which is set when panic lockdown is initiated.
 * This information is intended to help when debugging issues with the panic
 * path.
 */
struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
#endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
401 
402 /*
403  * Returns whether kernel debugging is expected to be restricted
404  * on the device currently based on CSR or other platform restrictions.
405  */
406 boolean_t
kernel_debugging_restricted(void)407 kernel_debugging_restricted(void)
408 {
409 #if XNU_TARGET_OS_OSX
410 #if CONFIG_CSR
411 	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
412 		return TRUE;
413 	}
414 #endif /* CONFIG_CSR */
415 	return FALSE;
416 #else /* XNU_TARGET_OS_OSX */
417 	return FALSE;
418 #endif /* XNU_TARGET_OS_OSX */
419 }
420 
/*
 * Early-boot initialization of panic/debugger policy: records the kernel
 * UUID and applies the "debug" boot-arg (subject to any CSR restriction).
 * Runs at STARTUP(TUNABLES, STARTUP_RANK_MIDDLE).
 */
__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	/* Cache the running kernel's UUID for panic log annotation. */
	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm64__)
		/* On arm64, post-panic debugging requires DB_NMI explicitly. */
		if (debug_boot_arg & DB_NMI) {
			panicDebugging  = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm64__) */
	}

#if defined(__arm64__)
	char kdpname[80];

	/* Only the presence of kdp_match_name matters here; the value is unused. */
	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
475 
#if defined (__x86_64__)
/*
 * Switch the panic log over to a larger, heap-allocated buffer that also has
 * room for a panic-time stackshot, then release the early boot stackshot
 * buffer which is no longer needed.  x86 (co-processor systems) only.
 */
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */
508 
/*
 * Initialize the panic/debug log buffer.  On arm64 the buffer lives inside
 * the iBoot-provided panic region (gPanicBase); elsewhere it is the static
 * debug_buf, plus a preallocated panic-stackshot buffer that may later be
 * replaced by extended_debug_log_init().
 */
void
debug_log_init(void)
{
#if defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);

#if CONFIG_EXT_PANICLOG
	ext_paniclog_init();
#endif
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}
547 
548 void
phys_carveout_init(void)549 phys_carveout_init(void)
550 {
551 	if (!PE_i_can_has_debugger(NULL)) {
552 		return;
553 	}
554 
555 #if __arm__ || __arm64__
556 #if DEVELOPMENT || DEBUG
557 #endif /* DEVELOPMENT || DEBUG  */
558 #endif /* __arm__ || __arm64__ */
559 
560 	struct carveout {
561 		const char *name;
562 		vm_offset_t *va;
563 		uint32_t requested_size;
564 		uintptr_t *pa;
565 		size_t *allocated_size;
566 		uint64_t present;
567 	} carveouts[] = {
568 		{
569 			"phys_carveout",
570 			&phys_carveout,
571 			phys_carveout_mb,
572 			&phys_carveout_pa,
573 			&phys_carveout_size,
574 			phys_carveout_mb != 0,
575 		}
576 	};
577 
578 	for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
579 		if (carveouts[i].present) {
580 			size_t temp_carveout_size = 0;
581 			if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
582 				panic("%s_mb size overflowed (%uMB)",
583 				    carveouts[i].name, carveouts[i].requested_size);
584 				return;
585 			}
586 
587 			kmem_alloc_contig(kernel_map, carveouts[i].va,
588 			    temp_carveout_size, PAGE_MASK, 0, 0,
589 			    KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA |
590 			    KMA_NOSOFTLIMIT,
591 			    VM_KERN_MEMORY_DIAG);
592 
593 			*carveouts[i].pa = kvtophys(*carveouts[i].va);
594 			*carveouts[i].allocated_size = temp_carveout_size;
595 		}
596 	}
597 }
598 
599 boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)600 debug_is_in_phys_carveout(vm_map_offset_t va)
601 {
602 	return phys_carveout_size && va >= phys_carveout &&
603 	       va < (phys_carveout + phys_carveout_size);
604 }
605 
/*
 * Whether the phys_carveout contents may be included in an on-device
 * corefile (controlled by the phys_carveout_core boot-arg, default 1).
 */
boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}
611 
/*
 * Acquire the global debugger lock by claiming debugger_cpu for this CPU.
 * Spins on a strong CAS until the lock is free; re-acquisition by the
 * owning CPU is a no-op.  Must be called with interrupts disabled.
 */
static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		/* A failed CAS overwrites the expected value; reset before retrying. */
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}
629 
/*
 * Release the global debugger lock.  Caller must be the current owner
 * (asserted against cpu_number()).
 */
static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}
647 
/*
 * Quiesce all other CPUs before entering the debugger.  On arm64 this is a
 * cross-call that can fail (caller decides via proceed_on_failure); the x86
 * path (mp_kdp_enter) does not report failure.
 */
static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm64__) */
#pragma unused(proceed_on_failure)
	mp_kdp_enter(proceed_on_failure, is_stackshot);
	return KERN_SUCCESS;
#endif
}
659 
/*
 * Resume the CPUs halted by DebuggerHaltOtherCores().
 */
static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm64__) */
	mp_kdp_exit();
#endif
}
669 
/*
 * Record the requested debugger operation and panic context in this CPU's
 * per-CPU debugger_state, without trapping.  On nested panics, the original
 * panic string/args are preserved (see note below); options are always
 * overwritten.
 */
__printflike(3, 0)
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char *db_panic_initiator)
{
	CPUDEBUGGEROP = db_op;

	/*
	 * Note:
	 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
	 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
	 *
	 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
	 */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
		CPUPANICINITIATOR = db_panic_initiator;

#if CONFIG_EXCLAVES
		/* An exclaves-supplied panic string takes precedence when present. */
		char *panic_str;
		if (exclaves_panic_get_string(&panic_str) == KERN_SUCCESS) {
			CPUPANICSTR = panic_str;
		}
#endif
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	// follow up in rdar://88497308 (nested panics should not clobber panic flags)
	CPUPANICOPTS = db_panic_options;

	return;
}
711 
/*
 * Save the requested debugger state/action into the current processor's
 * percu state and trap to the debugger.
 * Returns the operation's result code, recorded by the trap handler in
 * the per-CPU state; the state is cleared again before returning.
 * Must be called with interrupts disabled.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char* db_panic_initiator)
{
	kern_return_t ret;

#if defined(__arm64__) && (DEVELOPMENT || DEBUG)
	if (!PE_arm_debug_and_trace_initialized()) {
		/*
		 * In practice this can only happen if we panicked very early,
		 * when only the boot CPU is online and before it has finished
		 * initializing the debug and trace infrastructure. We're going
		 * to hang soon, so let's at least make sure the message passed
		 * to panic() is actually logged.
		 */
		char buf[EARLY_PANIC_BUFLEN];
		vsnprintf(buf, EARLY_PANIC_BUFLEN, db_panic_str, *db_panic_args);
		paniclog_append_noflush("%s\n", buf);
	}
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller, db_panic_initiator);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 *   DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	ret = CPUDEBUGGERRET;

	/* Clear the per-CPU state now that the operation has completed. */
	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0, NULL);

	return ret;
}
758 
/*
 * Out-of-line assertion failure handler: panics with file, line, and the
 * failing expression.  noinline keeps backtraces pointing at the caller.
 */
void __attribute__((noinline))
Assert(const char *file, int line, const char *expression)
{
	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
764 
765 void
panic_assert_format(char * buf,size_t len,struct mach_assert_hdr * hdr,long a,long b)766 panic_assert_format(char *buf, size_t len, struct mach_assert_hdr *hdr, long a, long b)
767 {
768 	struct mach_assert_default *adef;
769 	struct mach_assert_3x      *a3x;
770 
771 	static_assert(MACH_ASSERT_TRAP_CODE == XNU_HARD_TRAP_ASSERT_FAILURE);
772 
773 	switch (hdr->type) {
774 	case MACH_ASSERT_DEFAULT:
775 		adef = __container_of(hdr, struct mach_assert_default, hdr);
776 		snprintf(buf, len, "%s:%d Assertion failed: %s",
777 		    hdr->filename, hdr->lineno, adef->expr);
778 		break;
779 
780 	case MACH_ASSERT_3P:
781 		a3x = __container_of(hdr, struct mach_assert_3x, hdr);
782 		snprintf(buf, len, "%s:%d Assertion failed: "
783 		    "%s %s %s (%p %s %p)",
784 		    hdr->filename, hdr->lineno, a3x->a, a3x->op, a3x->b,
785 		    (void *)a, a3x->op, (void *)b);
786 		break;
787 
788 	case MACH_ASSERT_3S:
789 		a3x = __container_of(hdr, struct mach_assert_3x, hdr);
790 		snprintf(buf, len, "%s:%d Assertion failed: "
791 		    "%s %s %s (0x%lx %s 0x%lx, %ld %s %ld)",
792 		    hdr->filename, hdr->lineno, a3x->a, a3x->op, a3x->b,
793 		    a, a3x->op, b, a, a3x->op, b);
794 		break;
795 
796 	case MACH_ASSERT_3U:
797 		a3x = __container_of(hdr, struct mach_assert_3x, hdr);
798 		snprintf(buf, len, "%s:%d Assertion failed: "
799 		    "%s %s %s (0x%lx %s 0x%lx, %lu %s %lu)",
800 		    hdr->filename, hdr->lineno, a3x->a, a3x->op, a3x->b,
801 		    a, a3x->op, b, a, a3x->op, b);
802 		break;
803 	}
804 }
805 
806 boolean_t
debug_is_current_cpu_in_panic_state(void)807 debug_is_current_cpu_in_panic_state(void)
808 {
809 	return current_debugger_state()->db_entry_count > 0;
810 }
811 
/*
 * check if we are in a nested panic, report findings, take evasive action where necessary
 *
 * Escalation is driven by this CPU's debugger entry count (CPUDEBUGGERCOUNT):
 *   1 < count < max   -> report the nesting, then continue the normal panic flow
 *   count == max      -> Stage 1 bailout: flush the paniclog, then reset (or spin)
 *   count == max + 1  -> Stage 2 bailout: fastest possible reset
 *   count  > max + 1  -> Stage 3 bailout: hang and let the watchdog fire
 *
 * see also PE_update_panicheader_nestedpanic
 */
static void
check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
{
	if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
		// Note: this is the first indication in the panic log or serial that we are off the rails...
		//
		// if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
		// if we panic *after* the log is finalized then we will only see it in the serial log
		//
		paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
		paniclog_flush();

		// print the *new* panic string to the console, we might not get it by other means...
		// TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
		// rdar://87846117 (NestedPanic: output panic string to paniclog)
		if (db_panic_str) {
			printf("Nested panic string:\n");
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
#pragma clang diagnostic pop
			printf("\n<end nested panic string>\n");
		}
	}

	// Stage 1 bailout
	//
	// Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog.  If this fails with another nested
	// panic then we will land in Stage 2 below...
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
		uint32_t panic_details = 0;

		// if this is a force-reset panic then capture a log and reboot immediately.
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}

		// normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
		// there.  To be safe send another notification, the function called below will only send kPEPanicBegin if it has not yet been sent.
		//
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
		PE_update_panicheader_nestedpanic();
		paniclog_flush();

		if (!panicDebugging) {
			// note that this will also send kPEPanicEnd
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		// prints to console
		paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 2 bailout
	//
	// Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path.  Try to issue
	// a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
	//
	if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
		if (!panicDebugging) {
			// note that:
			// - this code path should be audited for prints, as that is a common cause of nested panics
			// - this code path should take the fastest route to the actual reset, and not call any unnecessary code
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
		}

		// prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
		paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
		panic_spin_forever();
	}

	// Stage 3 bailout
	//
	// We are done here, we were unable to reset the platform without another nested panic.  Spin until the watchdog kicks in.
	//
	if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
		kdp_machine_reboot_type(kPEHangCPU, 0);
	}
}
900 
901 void
Debugger(const char * message)902 Debugger(const char *message)
903 {
904 	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
905 }
906 
/*
 *  Enter the Debugger
 *
 *  This is similar to, but not the same as a panic
 *
 *  Key differences:
 *  - we get here from a debugger entry action (e.g. NMI)
 *  - the system is resumable on x86 (in theory, however it is not clear if this is tested)
 *  - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
 *
 *  reason/ctx: optional machine exception reason and saved state (ctx != NULL
 *  means we already trapped and can handle the debugger op in place).
 *  debugger_caller: return address of the original caller, for the log.
 */
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	/* remember the pointer-hiding setting so we can restore it on resume */
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif
	/* debugger entry runs with interrupts masked and preemption disabled */
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC,
	    ((CPUDEBUGGERCOUNT <= 2) ? SOCD_TRACE_MODE_STICKY_TRACEPOINT : SOCD_TRACE_MODE_NONE),
	    PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)),
	    VALUE(debugger_options_mask),
	    ADDR(message),
	    ADDR(debugger_caller));

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(message);

	/* never hide pointers in debugger output */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/* we already have saved machine state: record the debugger op on this
		 * CPU, handle the trap directly, then clear the saved state */
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0, "");
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0, "");
	} else {
		/* no machine context yet: trap into the debugger to capture one */
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0, NULL);
	}

	/* resume from the debugger */

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}
974 
/*
 * Singly-linked, lock-free list of registered kdp event callouts.
 * Nodes are permanent allocations and are never removed.
 */
static struct kdp_callout {
	struct kdp_callout * callout_next;      /* next registered callout, or NULL */
	kdp_callout_fn_t callout_fn;            /* function invoked on kdp events */
	boolean_t callout_in_progress;          /* set while callout_fn is running (recursion guard) */
	void * callout_arg;                     /* opaque argument passed to callout_fn */
} * kdp_callout_list = NULL;
981 
982 /*
983  * Called from kernel context to register a kdp event callout.
984  */
985 void
kdp_register_callout(kdp_callout_fn_t fn,void * arg)986 kdp_register_callout(kdp_callout_fn_t fn, void * arg)
987 {
988 	struct kdp_callout * kcp;
989 	struct kdp_callout * list_head;
990 
991 	kcp = zalloc_permanent_type(struct kdp_callout);
992 
993 	kcp->callout_fn = fn;
994 	kcp->callout_arg = arg;
995 	kcp->callout_in_progress = FALSE;
996 
997 	/* Lock-less list insertion using compare and exchange. */
998 	do {
999 		list_head = kdp_callout_list;
1000 		kcp->callout_next = list_head;
1001 	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
1002 }
1003 
1004 static void
kdp_callouts(kdp_event_t event)1005 kdp_callouts(kdp_event_t event)
1006 {
1007 	struct kdp_callout      *kcp = kdp_callout_list;
1008 
1009 	while (kcp) {
1010 		if (!kcp->callout_in_progress) {
1011 			kcp->callout_in_progress = TRUE;
1012 			kcp->callout_fn(kcp->callout_arg, event);
1013 			kcp->callout_in_progress = FALSE;
1014 		}
1015 		kcp = kcp->callout_next;
1016 	}
1017 }
1018 
#if defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	struct additional_panic_data_buffer *pdb;

	/* Only a single additional buffer is supported today. */
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || producer_name[0] == '\0') {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	pdb = zalloc_permanent_type(struct additional_panic_data_buffer);
	pdb->producer_name = producer_name;
	pdb->buf = buf;
	pdb->len = len;

	/* Publish atomically; losing the race means someone registered concurrently. */
	if (!OSCompareAndSwapPtr(NULL, pdb, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}
}
#endif /* defined(__arm64__) */
1058 
/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
/*
 * panic: classic variadic entry point.  Forwards the format string and
 * arguments to panic_trap_to_debugger() with no reason, context, options,
 * or initiator, recording this frame's return address as the caller.
 */
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
	va_end(panic_str_args);
}
1080 
/*
 * panic_with_data: like panic(), but first stashes (uuid, addr, len) into the
 * extended paniclog, and strips internal-only option bits from the caller's
 * mask before trapping.
 */
void
panic_with_data(uuid_t uuid, void *addr, uint32_t len, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	ext_paniclog_panic_with_data(uuid, addr, len);

#if CONFIG_EXCLAVES
	/*
	 * Before trapping, inform the exclaves scheduler that we're going down
	 * so it can grab an exclaves stackshot.
	 */
	if ((debugger_options_mask & DEBUGGER_OPTION_USER_WATCHDOG) != 0 &&
	    exclaves_get_boot_stage() != EXCLAVES_BOOT_STAGE_NONE) {
		(void) exclaves_scheduler_request_watchdog_panic();
	}
#endif /* CONFIG_EXCLAVES */

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
	va_end(panic_str_args);
}
1104 
/*
 * panic_with_options: panic with an explicit exception reason, machine
 * context, and debugger option mask.  Internal-only option bits are
 * stripped from the caller-provided mask before trapping.
 */
void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

#if CONFIG_EXCLAVES
	/*
	 * Before trapping, inform the exclaves scheduler that we're going down
	 * so it can grab an exclaves stackshot.
	 */
	if ((debugger_options_mask & DEBUGGER_OPTION_USER_WATCHDOG) != 0 &&
	    exclaves_get_boot_stage() != EXCLAVES_BOOT_STAGE_NONE) {
		(void) exclaves_scheduler_request_watchdog_panic();
	}
#endif /* CONFIG_EXCLAVES */

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
	va_end(panic_str_args);
}
1126 
/*
 * panic_with_options_and_initiator: like panic_with_options(), but also
 * records an initiator string that is passed through to the paniclog.
 *
 * NOTE(review): unlike panic_with_data()/panic_with_options(), this variant
 * does not notify the exclaves scheduler for user-watchdog panics — confirm
 * this is intentional for initiator-tagged panics.
 */
void
panic_with_options_and_initiator(const char* initiator, unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0), initiator);
	va_end(panic_str_args);
}
1137 
1138 boolean_t
panic_validate_ptr(void * ptr,vm_size_t size,const char * what)1139 panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
1140 {
1141 	if (ptr == NULL) {
1142 		paniclog_append_noflush("NULL %s pointer\n", what);
1143 		return false;
1144 	}
1145 
1146 	if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
1147 		paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
1148 		    what, ptr, (uint32_t)size);
1149 		return false;
1150 	}
1151 
1152 	return true;
1153 }
1154 
1155 boolean_t
panic_get_thread_proc_task(struct thread * thread,struct task ** task,struct proc ** proc)1156 panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
1157 {
1158 	if (!PANIC_VALIDATE_PTR(thread)) {
1159 		return false;
1160 	}
1161 
1162 	if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
1163 		return false;
1164 	}
1165 
1166 	if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
1167 		return false;
1168 	}
1169 
1170 	if (task) {
1171 		*task = thread->t_tro->tro_task;
1172 	}
1173 
1174 	if (!panic_validate_ptr(thread->t_tro->tro_proc,
1175 	    sizeof(struct proc *), "bsd_info")) {
1176 		*proc = NULL;
1177 	} else {
1178 		*proc = thread->t_tro->tro_proc;
1179 	}
1180 
1181 	return true;
1182 }
1183 
#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	/* sanity-check the thread before we pin it for backtracing */
	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	/* DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE tells the trap handler to
	 * backtrace `thread` instead of the current thread */
	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0), "");

	va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */
1213 
/*
 * Common funnel for all panic variants.  Records the panic state on the
 * current CPU and traps into the debugger machinery; does not return.
 * (The -Wmissing-noreturn suppression exists because the function is not
 * annotated noreturn despite ending in panic_stop().)
 */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller, const char *panic_initiator)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif

	/* optionally call sync, to reduce lost logs on restart, avoid on recursive panic. Unsafe due to unbounded sync() duration */
	if ((panic_options_mask & DEBUGGER_OPTION_SYNC_ON_PANIC_UNSAFE) && (CPUDEBUGGERCOUNT == 0)) {
		sync_internal();
	}

	/* Turn off I/O tracing once we've panicked */
	iotrace_disable();

	/* call machine-layer panic handler */
	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller, panic_initiator);

	/* track depth of debugger/panic entry */
	CPUDEBUGGERCOUNT++;

	/* emit a tracepoint as early as possible in case of hang */
	SOCD_TRACE_XNU(PANIC,
	    ((CPUDEBUGGERCOUNT <= 2) ? SOCD_TRACE_MODE_STICKY_TRACEPOINT : SOCD_TRACE_MODE_NONE),
	    PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)),
	    VALUE(panic_options_mask),
	    ADDR(panic_format_str),
	    ADDR(panic_caller));

	/* enable serial on the first panic if the always-on panic print flag is set */
	if ((debug_boot_arg & DB_PRT) && (CPUDEBUGGERCOUNT == 1)) {
		serial_init();
	}

	/* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
	check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);

	/* If we're in a stackshot, signal that we've started panicking and wait for other CPUs to coalesce and spin before proceeding */
	stackshot_cpu_signal_panic();

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	/* optionally dump the kdebug trace to disk before the system goes down;
	 * requires interrupts, so only safe outside interrupt context */
	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	debug_fatal_panic_begin();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__) && !APPLEVIRTUALPLATFORM
	/*
	 *  Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__         volatile ("hint #0x45");
#endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}
1319 
/*
 * Terminal spin loop for unrecoverable panic paths: never returns.
 * Uses WFE on arm to keep the core quiescent, cpu_pause() elsewhere.
 */
void
panic_spin_forever(void)
{
	while (1) {
#if defined(__arm__) || defined(__arm64__)
		/* On arm32, which doesn't have a WFE timeout, this may not return.  But that should be OK on this path. */
		__builtin_arm_wfe();
#else
		cpu_pause();
#endif
	}
}
1332 
/*
 * Drop the debugger lock after a panic-time stackshot.
 * Must only be called while in the KDP/panic context (!not_in_kdp).
 */
void
panic_stackshot_release_lock(void)
{
	assert(!not_in_kdp);
	DebuggerUnlock();
}
1339 
1340 static void
kdp_machine_reboot_type(unsigned int type,uint64_t debugger_flags)1341 kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
1342 {
1343 	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
1344 		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1345 	} else {
1346 		PEHaltRestart(type);
1347 	}
1348 	halt_all_cpus(TRUE);
1349 }
1350 
/*
 * Default reboot path: panic-restart the CPU with no debugger flags.
 */
void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}
1356 
/*
 * printf-style logging helper for the panic-debugger path: formats via
 * _doprnt to consdebug_putc (console + debug buffer), flushing the
 * paniclog on arm64.  Marked unused because it is only referenced under
 * some configurations.
 * (The 16 is _doprnt's radix argument — presumably the default for
 * non-decimal conversions; confirm against _doprnt's contract.)
 */
static __attribute__((unused)) void
panic_debugger_log(const char *string, ...)
{
	va_list panic_debugger_log_args;

	va_start(panic_debugger_log_args, string);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	_doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
#pragma clang diagnostic pop
	va_end(panic_debugger_log_args);

#if defined(__arm64__)
	paniclog_flush();
#endif
}
1373 
/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 *
 * Rough sequence: notify listeners (kPEPanicBegin) -> init/flush panic header ->
 * write panic string -> paniclog + stackshot (SavePanicInfo) -> optional local
 * coredump -> kPEPanicDiagnosticsDone -> trap to KDP or reboot/spin per boot-args.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try not to break core dump path by sanitizer. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		/* if this is a force-reset panic then capture a log and reboot immediately. */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		}

		if (CPUDEBUGGERCOUNT > 1) {
			/*
			 * we are in a nested panic.  Record the nested bit in panic flags and do some housekeeping
			 */
			PE_update_panicheader_nestedpanic();
			paniclog_flush();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options, debugger_panic_initiator);

#if DEVELOPMENT || DEBUG
		INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
#if CONFIG_SPTM
		/* We want to skip taking a local core dump if this is a panic from SPTM/TXM/cL4. */
		extern uint8_t sptm_supports_local_coredump;
		bool sptm_interrupted = false;
		pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
		(void)sptm_get_cpu_state(sptm_pcpu->sptm_cpu_id, CPUSTATE_SPTM_INTERRUPTED, &sptm_interrupted);
#endif
		if (!kdp_has_polled_corefile()) {
			/* no polled corefile available: note why in the paniclog */
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
				    kdp_polled_corefile_mode(), kdp_polled_corefile_error());
#if defined(__arm64__)
				if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
				}
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
						panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
					}
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if (pmap_get_cpu_data()->ppl_state != PPL_STATE_KERNEL) {
			paniclog_append_noflush("skipping local kernel core because the PPL is not in KERNEL state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#elif CONFIG_SPTM
		else if (!sptm_supports_local_coredump) {
			paniclog_append_noflush("skipping local kernel core because the SPTM is in PANIC state and can't support core dump generation\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		} else if (sptm_interrupted) {
			paniclog_append_noflush("skipping local kernel core because the SPTM is in INTERRUPTED state and can't support core dump generation\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is successfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	/* diagnostics complete: tell listeners before any debugger interaction */
	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/*
		 * Print panic string at the end of serial output
		 * to make panic more obvious when someone connects a debugger
		 */
		if (debugger_panic_str) {
			panic_debugger_log("Original panic string:\n");
			panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
			panic_debugger_log("\n");
		}

		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
	panic_spin_forever();
}
1651 
#if SCHED_HYGIENE_DEBUG
/*
 * Progress markers for handle_debugger_trap(): slots 0-8 record the absolute
 * time at each stage of debugger entry/exit, for post-mortem latency analysis.
 */
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif /* SCHED_HYGIENE_DEBUG */
1658 
/*
 * Common entry point for all debugger traps (panic, Debugger(), stackshot,
 * software breakpoint).  Takes the debugger lock, halts the other CPUs,
 * dispatches on the operation recorded in the per-CPU debugger context, and
 * restores the previous debugger state on the way out.
 */
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;	/* saved so it can be restored on exit */
	kern_return_t ret = KERN_SUCCESS;
	debugger_op db_prev_op = debugger_current_op;	/* restored on exit to support nested entries */

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	/* Halt the other cores; stackshots get the "proceed on sync failure" variant. */
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

	DEBUGGER_TRAP_TIMESTAMP(1);

#if SCHED_HYGIENE_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		/*
		 * Serial console output can take arbitrarily long; reset the spin
		 * debugging state so that time isn't flagged as a violation.
		 */
		ml_spin_debug_reset(current_thread());
	}
#endif /* SCHED_HYGIENE_DEBUG */
	if (ret != KERN_SUCCESS) {
		/* Couldn't quiesce the other cores: report the error and bail out. */
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
			debugger_panic_initiator = CPUPANICINITIATOR;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;	/* we are now "in KDP" for the duration of the operation */

	DEBUGGER_TRAP_TIMESTAMP(3);

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_mark_as_busy();
#endif

	/* Dispatch on the requested debugger operation. */
	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot(NULL);
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		/* note: this is the panic path...  */
		debug_fatal_panic_begin();
#if defined(__arm64__) && (DEBUG || DEVELOPMENT)
		if (!PE_arm_debug_and_trace_initialized()) {
			/* Too early in boot to collect diagnostics; spin to preserve state. */
			paniclog_append_noflush("kernel panicked before debug and trace infrastructure initialized!\n"
			    "spinning forever...\n");
			panic_spin_forever();
		}
#endif
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

#if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
	shmem_unmark_as_busy();
#endif

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	if (debugger_current_op != DBOP_BREAKPOINT) {
		/* Clear the saved panic context so a later entry starts fresh. */
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}
1792 
/*
 * Kernel log(): prints the formatted message to the console (with preemption
 * disabled around the console write) and forwards the same message to os_log,
 * attributed to the caller's return address.  The level argument is unused.
 */
__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);	/* attribute the os_log record to our caller */
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef  MACH_BSD
	va_start(listp, fmt);
	va_copy(listp2, listp);	/* second copy: _doprnt consumes the first */

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
#pragma clang diagnostic pop
	va_end(listp2);
#endif
}
1822 
/*
 * Per <rdar://problem/24974766>, skip appending log messages to
 * the new logging infrastructure in contexts where safety is
 * uncertain. These contexts include:
 *   - When we're in the debugger
 *   - When we're in a panic
 *   - When interrupts are disabled
 *   - When preemption is disabled
 * In all of the above cases, it is potentially unsafe to log messages.
 */
1833 
1834 boolean_t
1835 oslog_is_safe(void)
1836 {
1837 	return kernel_debugger_entry_count == 0 &&
1838 	       not_in_kdp == 1 &&
1839 	       get_preemption_level() == 0 &&
1840 	       ml_get_interrupts_enabled() == TRUE;
1841 }
1842 
1843 boolean_t
1844 debug_mode_active(void)
1845 {
1846 	return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
1847 }
1848 
1849 void
1850 debug_putc(char c)
1851 {
1852 	if ((debug_buf_size != 0) &&
1853 	    ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size) &&
1854 	    (!is_debug_ptr_in_ext_paniclog())) {
1855 		*debug_buf_ptr = c;
1856 		debug_buf_ptr++;
1857 	}
1858 }
1859 
1860 #if defined (__x86_64__)
/*
 * Eight 7-bit fields packed into 7 bytes.  Used by packA()/unpackA() to strip
 * the high bit from each byte of paniclog text (always zero for ASCII),
 * compressing 8 input bytes into 7 output bytes.
 */
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
}  __attribute__((packed));

typedef struct pasc pasc_t;
1873 
1874 /*
1875  * In-place packing routines -- inefficient, but they're called at most once.
1876  * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
1877  */
1878 int
1879 packA(char *inbuf, uint32_t length, uint32_t buflen)
1880 {
1881 	unsigned int i, j = 0;
1882 	pasc_t pack;
1883 
1884 	length = MIN(((length + 7) & ~7), buflen);
1885 
1886 	for (i = 0; i < length; i += 8) {
1887 		pack.a = inbuf[i];
1888 		pack.b = inbuf[i + 1];
1889 		pack.c = inbuf[i + 2];
1890 		pack.d = inbuf[i + 3];
1891 		pack.e = inbuf[i + 4];
1892 		pack.f = inbuf[i + 5];
1893 		pack.g = inbuf[i + 6];
1894 		pack.h = inbuf[i + 7];
1895 		bcopy((char *) &pack, inbuf + j, 7);
1896 		j += 7;
1897 	}
1898 	return j;
1899 }
1900 
/*
 * Reverse of packA(): expand packed 7-byte groups back into 8 bytes, in place.
 * "length" on entry is the packed size; it is scaled by 8/7 to the unpacked
 * size.  Works front-to-back: each iteration snapshots the next group, shifts
 * the remaining packed data right one byte to make room, then writes out the
 * 8 unpacked bytes.
 */
void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8) / 7;	/* unpacked size */

	while (i < length) {
		/* Snapshot the group before the shift below overwrites part of it. */
		packs = *(pasc_t *)&inbuf[i];
		/* Shift the tail right by one byte; MAX guards the final short group. */
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
1921 #endif /* defined (__x86_64__) */
1922 
1923 extern char *proc_name_address(void *);
1924 extern char *proc_longname_address(void *);
1925 
/*
 * Append the name of the process on the current thread to the panic log.
 * Uses ml_nofault_copy() so a corrupted proc structure cannot fault during
 * panic.  Tries the long name first, falls back to the short name, and prints
 * "Unknown" if neither could be read.
 */
__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	struct proc *cbsd_info = NULL;
	task_t ctask = NULL;
	vm_size_t size;

	/* Resolve the current thread's task and proc; give up quietly on failure. */
	if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	/* Prefer the long process name. */
	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	if (size == 0 || proc_name[0] == '\0') {
		/* Long name unavailable or empty: fall back to the short name. */
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			proc_name[size - 1] = '\0';
		}
	}

out:
	/* Guarantee NUL termination regardless of which path was taken. */
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
	    current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
}
1959 
1960 unsigned
1961 panic_active(void)
1962 {
1963 	return debugger_current_op == DBOP_PANIC ||
1964 	       (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1965 }
1966 
/*
 * Record the platform model string into the global model_name buffer for
 * later display in panic logs (see panic_display_model_name()).
 */
void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}
1972 
1973 void
1974 panic_display_model_name(void)
1975 {
1976 	char tmp_model_name[sizeof(model_name)];
1977 
1978 	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1979 		return;
1980 	}
1981 
1982 	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1983 
1984 	if (tmp_model_name[0] != 0) {
1985 		paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1986 	}
1987 }
1988 
1989 void
1990 panic_display_kernel_uuid(void)
1991 {
1992 	char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1993 
1994 	if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1995 		return;
1996 	}
1997 
1998 	if (tmp_kernel_uuid[0] != '\0') {
1999 		paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
2000 	}
2001 }
2002 
2003 #if CONFIG_SPTM
2004 static void
2005 panic_display_component_uuid(char const *component_name, void *component_address)
2006 {
2007 	uuid_t *component_uuid;
2008 	unsigned long component_uuid_len = 0;
2009 	uuid_string_t component_uuid_string;
2010 
2011 	component_uuid = getuuidfromheader((kernel_mach_header_t *)component_address, &component_uuid_len);
2012 
2013 	if (component_uuid != NULL && component_uuid_len == sizeof(uuid_t)) {
2014 		uuid_unparse_upper(*component_uuid, component_uuid_string);
2015 		paniclog_append_noflush("%s UUID: %s\n", component_name, component_uuid_string);
2016 	}
2017 }
2018 #endif /* CONFIG_SPTM */
2019 
/*
 * Append kernel/kernelcache slide and base addresses to the panic log (plus,
 * on SPTM systems, the debug header component addresses and UUIDs) so symbols
 * can be re-based when the log is analyzed.
 */
void
panic_display_kernel_aslr(void)
{
#if CONFIG_SPTM
	{
		struct debug_header const *dh = SPTMArgs->debug_header;

		paniclog_append_noflush("Debug Header address: %p\n", dh);

		if (dh != NULL) {
			void *component_address;

			paniclog_append_noflush("Debug Header entry count: %d\n", dh->count);

			/* Entries are cumulative: each count includes all lower entries. */
			switch (dh->count) {
			default: // 3 or more
				component_address = dh->image[DEBUG_HEADER_ENTRY_TXM];
				paniclog_append_noflush("TXM load address: %p\n", component_address);

				panic_display_component_uuid("TXM", component_address);
				OS_FALLTHROUGH;
			case 2:
				component_address = dh->image[DEBUG_HEADER_ENTRY_XNU];
				paniclog_append_noflush("Debug Header kernelcache load address: %p\n", component_address);

				panic_display_component_uuid("Debug Header kernelcache", component_address);
				OS_FALLTHROUGH;
			case 1:
				component_address = dh->image[DEBUG_HEADER_ENTRY_SPTM];
				paniclog_append_noflush("SPTM load address: %p\n", component_address);

				panic_display_component_uuid("SPTM", component_address);
				OS_FALLTHROUGH;
			case 0:
				; // nothing to print
			}
		}
	}
#endif /* CONFIG_SPTM */

	kc_format_t kc_format;

	PE_get_primary_kc_format(&kc_format);

	if (kc_format == KCFormatFileset) {
		/* Fileset kernelcache: report both the cache-wide and kernel-specific slides. */
		void *kch = PE_get_kc_header(KCKindPrimary);
		paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("KernelCache base:  %p\n", (void*) kch);
		paniclog_append_noflush("Kernel slide:      0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text base:  %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
		extern vm_offset_t segTEXTEXECB;
		paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text exec base:  0x%016lx\n", (unsigned long)segTEXTEXECB);
#endif /* defined(__arm64__) */
	} else if (vm_kernel_slide) {
		paniclog_append_noflush("Kernel slide:      0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("Kernel text base:  %p\n", (void *)vm_kernel_stext);
	} else {
		/* No slide (or unknown format): print only the text base. */
		paniclog_append_noflush("Kernel text base:  %p\n", (void *)vm_kernel_stext);
	}
}
2082 
/*
 * Append the __HIB (hibernation) segment text base to the panic log.
 * Only meaningful on x86; a no-op elsewhere.
 */
void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
	paniclog_append_noflush("__HIB  text base: %p\n", (void *) vm_hib_base);
#endif
}
2090 
2091 #if CONFIG_ECC_LOGGING
2092 __private_extern__ void
2093 panic_display_ecc_errors(void)
2094 {
2095 	uint32_t count = ecc_log_get_correction_count();
2096 
2097 	if (count > 0) {
2098 		paniclog_append_noflush("ECC Corrections:%u\n", count);
2099 	}
2100 }
2101 #endif /* CONFIG_ECC_LOGGING */
2102 
2103 #if CONFIG_FREEZE
2104 extern bool freezer_incore_cseg_acct;
2105 extern int32_t c_segment_pages_compressed_incore;
2106 #endif
2107 
2108 extern uint32_t c_segment_pages_compressed;
2109 extern uint32_t c_segment_count;
2110 extern uint32_t c_segments_limit;
2111 extern uint32_t c_segment_pages_compressed_limit;
2112 extern uint32_t c_segment_pages_compressed_nearing_limit;
2113 extern uint32_t c_segments_nearing_limit;
2114 extern int vm_num_swap_files;
2115 
/*
 * Summarize VM compressor health in the panic log: percent of the
 * compressed-pages and segment limits in use (each flagged BAD when past its
 * "nearing limit" threshold), swapfile count, and swap-space state.  With
 * CONFIG_FREEZE, accounting can optionally exclude swapped-out segments when
 * freezer_incore_cseg_acct is enabled.
 */
void
panic_display_compressor_stats(void)
{
	int isswaplow = vm_swap_low_on_space();
#if CONFIG_FREEZE
	uint32_t incore_seg_count;
	uint32_t incore_compressed_pages;
	if (freezer_incore_cseg_acct) {
		/* Count only in-core segments/pages (exclude swapped-out ones). */
		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
		incore_compressed_pages = c_segment_pages_compressed_incore;
	} else {
		incore_seg_count = c_segment_count;
		incore_compressed_pages = c_segment_pages_compressed;
	}

	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
	    (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (incore_seg_count * 100) / c_segments_limit,
	    (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#else /* CONFIG_FREEZE */
	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
	    (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (c_segment_count * 100) / c_segments_limit,
	    (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#endif /* CONFIG_FREEZE */
}
2148 
2149 #if !CONFIG_TELEMETRY
/*
 * Stub for configurations built without telemetry support; callers always
 * receive KERN_NOT_SUPPORTED.
 */
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
2155 #endif
2156 
2157 #include <machine/machine_cpu.h>
2158 
2159 TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
2160 
/*
 * Startup hook: fold the serverperfmode boot-arg into kern_feature_overrides
 * (runs at STARTUP_RANK_LAST of the TUNABLES phase, after the TUNABLE above
 * has parsed "validation_disables").
 */
__startup_func
static void
kern_feature_override_init(void)
{
	/*
	 * update kern_feature_override based on the serverperfmode=1 boot-arg
	 * being present, but do not look at the device-tree setting on purpose.
	 *
	 * scale_setup() will update serverperfmode=1 based on the DT later.
	 */

	if (serverperfmode) {
		kern_feature_overrides |= KF_SERVER_PERF_MODE_OVRD;
	}
}
2176 STARTUP(TUNABLES, STARTUP_RANK_LAST, kern_feature_override_init);
2177 
2178 #if MACH_ASSERT
2179 STATIC_IF_KEY_DEFINE_TRUE(mach_assert);
2180 #endif
2181 
2182 #if SCHED_HYGIENE_DEBUG
2183 STATIC_IF_KEY_DEFINE_TRUE(sched_debug_pmc);
2184 STATIC_IF_KEY_DEFINE_TRUE(sched_debug_preemption_disable);
2185 STATIC_IF_KEY_DEFINE_TRUE(sched_debug_interrupt_disable);
2186 #endif /* SCHED_HYGIENE_DEBUG */
2187 
/*
 * static_if initializer: pre-computes the eventual kern_feature_overrides
 * value from raw boot-args and disables the matching static_if keys
 * (assertions, scheduler hygiene debugging) before they are patched in.
 */
__static_if_init_func
static void
kern_feature_override_apply(const char *args)
{
	uint64_t kf_ovrd;

	/*
	 * Compute the value of kern_feature_override like it will look like
	 * after kern_feature_override_init().
	 */
	kf_ovrd = static_if_boot_arg_uint64(args, "validation_disables", 0);
	if (static_if_boot_arg_uint64(args, "serverperfmode", 0)) {
		kf_ovrd |= KF_SERVER_PERF_MODE_OVRD;
	}

#if DEBUG_RW
	lck_rw_assert_init(args, kf_ovrd);
#endif /* DEBUG_RW */
#if MACH_ASSERT
	if (kf_ovrd & KF_MACH_ASSERT_OVRD) {
		static_if_key_disable(mach_assert);
	}
#endif /* MACH_ASSERT */
#if SCHED_HYGIENE_DEBUG
	/* NOTE(review): keys are only disabled when wdt != -1 — presumably wdt=-1
	 * already disables this machinery elsewhere; confirm against wdt handling. */
	if ((int64_t)static_if_boot_arg_uint64(args, "wdt", 0) != -1) {
		if (kf_ovrd & KF_SCHED_HYGIENE_DEBUG_PMC_OVRD) {
			static_if_key_disable(sched_debug_pmc);
		}
		if (kf_ovrd & KF_PREEMPTION_DISABLED_DEBUG_OVRD) {
			static_if_key_disable(sched_debug_preemption_disable);
			/* Interrupt-masked debug is only disabled together with
			 * preemption-disable debug (note the nesting). */
			if (kf_ovrd & KF_INTERRUPT_MASKED_DEBUG_OVRD) {
				static_if_key_disable(sched_debug_interrupt_disable);
			}
		}
	}
#endif /* SCHED_HYGIENE_DEBUG */
}
2225 STATIC_IF_INIT(kern_feature_override_apply);
2226 
2227 boolean_t
2228 kern_feature_override(uint32_t fmask)
2229 {
2230 	return (kern_feature_overrides & fmask) == fmask;
2231 }
2232 
2233 #if !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING
/*
 * Returns FALSE only when an on-device corefile must not be written: the
 * device-tree /product node reports ephemeral-data-mode AND coredump
 * encryption enforcement is required.  Any lookup/size failure is treated as
 * "valid" (TRUE), as is the case when encryption isn't compiled in.
 */
static boolean_t
device_corefile_valid_on_ephemeral(void)
{
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	DTEntry node;
	const uint32_t *value = NULL;
	unsigned int size = 0;
	if (kSuccess != SecureDTLookupEntry(NULL, "/product", &node)) {
		return TRUE;
	}
	if (kSuccess != SecureDTGetProperty(node, "ephemeral-data-mode", (void const **) &value, &size)) {
		return TRUE;
	}

	/* Malformed property: treat as not-ephemeral. */
	if (size != sizeof(uint32_t)) {
		return TRUE;
	}

	if ((*value) && (kern_dump_should_enforce_encryption() == true)) {
		return FALSE;
	}
#endif /* ifdef CONFIG_KDP_COREDUMP_ENCRYPTION */

	return TRUE;
}
2259 #endif /* !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING */
2260 
/*
 * Whether a local (on-device) kernel corefile should be written on panic.
 * Requires a non-zero debug boot-arg without DB_DISABLE_LOCAL_CORE; beyond
 * that the policy differs between macOS (opt-in via dump-on-NMI/panic flags)
 * and embedded platforms (default on, subject to the ephemeral-mode check).
 */
boolean_t
on_device_corefile_enabled(void)
{
	/* debug_boot_arg isn't valid before the tunables phase has run. */
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	if (debug_boot_arg == 0) {
		return FALSE;
	}
	if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
		return FALSE;
	}
#if !XNU_TARGET_OS_OSX
	if (device_corefile_valid_on_ephemeral() == FALSE) {
		return FALSE;
	}
	/*
	 * outside of macOS, if there's a debug boot-arg set and local
	 * cores aren't explicitly disabled, we always write a corefile.
	 */
	return TRUE;
#else /* !XNU_TARGET_OS_OSX */
	/*
	 * on macOS, if corefiles on panic are requested and local cores
	 * aren't disabled we write a local core.
	 */
	if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
		return TRUE;
	}
#endif /* !XNU_TARGET_OS_OSX */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
	return FALSE;
}
2293 
/*
 * Whether panic stackshots should be written to disk.  Only possible on x86
 * machines with a coprocessor version below kCoprocessorVersion2, and only
 * when the debug boot-arg hasn't explicitly disabled it; FALSE everywhere
 * else.
 */
boolean_t
panic_stackshot_to_disk_enabled(void)
{
	/* debug_boot_arg isn't valid before the tunables phase has run. */
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if defined(__x86_64__)
	if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
		/* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
		if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
			return FALSE;
		}

		return TRUE;
	}
#endif
	return FALSE;
}
2310 
2311 const char *
2312 sysctl_debug_get_preoslog(size_t *size)
2313 {
2314 	int result = 0;
2315 	void *preoslog_pa = NULL;
2316 	int preoslog_size = 0;
2317 
2318 	result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2319 	if (result || preoslog_pa == NULL || preoslog_size == 0) {
2320 		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2321 		*size = 0;
2322 		return NULL;
2323 	}
2324 
2325 	/*
2326 	 *  Beware:
2327 	 *  On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
2328 	 *  On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
2329 	 */
2330 	*size = preoslog_size;
2331 	return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
2332 }
2333 
/*
 * Release the bootloader-provided preoslog region.  Only actually frees it on
 * RELEASE builds; Development & Debug builds keep the buffer resident so it
 * can be extracted from coredumps.
 */
void
sysctl_debug_free_preoslog(void)
{
#if RELEASE
	int result = 0;
	void *preoslog_pa = NULL;
	int preoslog_size = 0;

	/* Re-look up the region; nothing to free if it was never handed over. */
	result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
	if (result || preoslog_pa == NULL || preoslog_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
		return;
	}

	IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
#else
	/*  On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
#endif // RELEASE
}
2353 
2354 
2355 #if (DEVELOPMENT || DEBUG)
2356 
2357 void
2358 platform_stall_panic_or_spin(uint32_t req)
2359 {
2360 	if (xnu_platform_stall_value & req) {
2361 		if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
2362 			panic("Platform stall: User requested panic");
2363 		} else {
2364 			paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
2365 			panic_spin_forever();
2366 		}
2367 	}
2368 }
2369 #endif
2370 
2371 
2372 #define AWL_HV_ENTRY_FLAG (0x1)
2373 
/*
 * Set AWL_HV_ENTRY_FLAG in the arm64 WatchdogDiag0 scratch register
 * (S3_5_c15_c2_6) via read-modify-write; a no-op on other architectures.
 */
static inline void
awl_set_scratch_reg_hv_bit(void)
{
#if defined(__arm64__)
#define WATCHDOG_DIAG0     "S3_5_c15_c2_6"
	uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
	awl_diag0 |= AWL_HV_ENTRY_FLAG;
	__builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
#endif // defined(__arm64__)
}
2384 
/*
 * Record a hypervisor entry on this CPU: sets the per-CPU flag and the AWL
 * scratch-register bit.  The common (likely) path returns immediately when
 * the entry was already recorded or the scratch register isn't supported.
 */
void
awl_mark_hv_entry(void)
{
	if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
		return;
	}
	*PERCPU_GET(hv_entry_detected) = true;

	awl_set_scratch_reg_hv_bit();
}
2395 
2396 /*
2397  * Awl WatchdogDiag0 is not restored by hardware when coming out of reset,
2398  * so restore it manually.
2399  */
2400 static bool
2401 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2402 {
2403 	if (event == CPU_BOOTED) {
2404 		if (*PERCPU_GET(hv_entry_detected)) {
2405 			awl_set_scratch_reg_hv_bit();
2406 		}
2407 	}
2408 
2409 	return true;
2410 }
2411 
2412 /*
2413  * Identifies and sets a flag if AWL Scratch0/1 exists in the system, subscribes
2414  * for a callback to restore register after hibernation
2415  */
__startup_func
static void
set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
{
	DTEntry base = NULL;

	/* The watchdog DT node advertises support via "awl-scratch-supported". */
	if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
		return;
	}
	const uint8_t *data = NULL;
	unsigned int data_size = sizeof(uint8_t);

	if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
		/* Any non-zero byte in the property means the register exists. */
		for (unsigned int i = 0; i < data_size; i++) {
			if (data[i] != 0) {
				awl_scratch_reg_supported = true;
				/* Restore the register after CPU power transitions. */
				cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
				break;
			}
		}
	}
}
2438 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2439 
2440 /**
2441  * Signal that the system is going down for a panic
2442  */
static inline void
debug_fatal_panic_begin(void)
{
	/* No-op on non-SPTM configurations. */
#if CONFIG_SPTM
	/*
	 * Since we're going down, initiate panic lockdown.
	 *
	 * Whether or not this call to panic lockdown can be subverted is murky.
	 * This doesn't really matter, however, because any security critical panics
	 * events will have already initiated lockdown from the exception vector
	 * before calling panic. Thus, lockdown from panic itself is fine as merely
	 * a "best effort".
	 */
#if DEVELOPMENT || DEBUG
	panic_lockdown_record_debug_data();
#endif /* DEVELOPMENT || DEBUG */
	sptm_xnu_panic_begin();
#endif /* CONFIG_SPTM */
}
2462