/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/percpu.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/socd_client.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <IOKit/IOBSD.h>

#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/lbr.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>
#include <mach/resource_monitors.h>

#include <os/log_private.h>

#if defined(__arm__) || defined(__arm64__)
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#include <san/kcov.h>

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );

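/*
 * Global debugger state, configured from the "debug" boot-arg in panic_init():
 * halt_in_debugger requests an early stop in the debugger (DB_HALT), and
 * panicDebugging allows the system to drop into the debugger (rather than
 * reboot) when a panic occurs.
 */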
unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
unsigned int panicDebugging = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if defined(__arm__) || defined(__arm64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm__)
#define TRAP_DEBUGGER __asm__ volatile("trap")
#elif defined(__arm64__)
/*
 * Magic number; this should be identical to the __arm__ encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop() pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop() panic_spin_forever()
#endif

struct debugger_state {
    uint64_t      db_panic_options;
    debugger_op   db_current_op;
    boolean_t     db_proceed_on_sync_failure;
    const char   *db_message;
    const char   *db_panic_str;
    va_list      *db_panic_args;
    void         *db_panic_data_ptr;
    unsigned long db_panic_caller;
    /* incremented whenever we panic or call Debugger (current CPU panic level) */
    uint32_t      db_entry_count;
    kern_return_t db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
    return PERCPU_GET(debugger_state);
}

#define CPUDEBUGGEROP    current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG   current_debugger_state()->db_message
#define CPUPANICSTR      current_debugger_state()->db_panic_str
#define CPUPANICARGS     current_debugger_state()->db_panic_args
#define CPUPANICOPTS     current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR  current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC  current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET   current_debugger_state()->db_op_return
#define CPUPANICCALLER   current_debugger_state()->db_panic_caller

#if DEVELOPMENT || DEBUG
#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \
    MACRO_BEGIN                                                 \
    if (requested) {                                            \
        volatile int *badpointer = (int *)4;                    \
        *badpointer = 0;                                        \
    }                                                           \
    MACRO_END
#endif /* DEVELOPMENT || DEBUG */

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2;
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);

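/*
 * Maximum number of nested debugger/panic entries a single CPU may make
 * before we give up and reboot (or spin); tunable via the "nested_panic_max"
 * boot-arg parsed in panic_init().
 */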
#define NESTEDDEBUGGERENTRYMAX 5
static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;

#if defined(__arm__) || defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm__) || defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end
 * of the log because we only support writing (3 * PAGE_SIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif

/* Debugger state */
atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;

/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86.
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

/*
 * Maintain the physically-contiguous carveout for the `phys_carveout_mb`
 * boot-arg.
 */

TUNABLE(size_t, phys_carveout_mb, "phys_carveout_mb", 0);
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout_metadata = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_metadata_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_metadata_size = 0;

/*
 * Returns whether kernel debugging is expected to be restricted
 * on the device currently based on CSR or other platform restrictions.
 */
boolean_t
kernel_debugging_restricted(void)
{
#if XNU_TARGET_OS_OSX
#if CONFIG_CSR
    if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
        return TRUE;
    }
#endif /* CONFIG_CSR */
    return FALSE;
#else /* XNU_TARGET_OS_OSX */
    return FALSE;
#endif /* XNU_TARGET_OS_OSX */
}

__startup_func
static void
panic_init(void)
{
    unsigned long uuidlen = 0;
    void *uuid;

    uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
    if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
        kernel_uuid = uuid;
        uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
    }

    /*
     * Take the value of the debug boot-arg into account
     */
#if MACH_KDP
    if (!kernel_debugging_restricted() && debug_boot_arg) {
        if (debug_boot_arg & DB_HALT) {
            halt_in_debugger = 1;
        }

#if defined(__arm__) || defined(__arm64__)
        if (debug_boot_arg & DB_NMI) {
            panicDebugging = TRUE;
        }
#else
        panicDebugging = TRUE;
#endif /* defined(__arm__) || defined(__arm64__) */
    }

    if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) {
        max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;
    }

#if defined(__arm__) || defined(__arm64__)
    char kdpname[80];

    kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm__) || defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
    /*
     * By default we treat Debugger() the same as calls to panic(), unless
     * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
     * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
     * This is because writing an on-device corefile is a destructive operation.
     *
     * Return from Debugger() is currently only implemented on x86.
     */
    if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
        debugger_is_panic = FALSE;
    }
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);

#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
    assert(coprocessor_paniclog_flush);
    /*
     * Allocate an extended panic log buffer that has space for the panic
     * stackshot at the end. Update the debug buf pointers appropriately
     * to point at this new buffer.
     *
     * iBoot pre-initializes the panic region with the NULL character. We set this here
     * so we can accurately calculate the CRC for the region without needing to flush the
     * full region over SMC.
     */
    char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

    panic_info = (struct macos_panic_header *)new_debug_buf;
    debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
    debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

    extended_debug_log_enabled = TRUE;

    /*
     * Insert a compiler barrier so we don't free the other panic stackshot buffer
     * until after we've marked the new one as available
     */
    __compiler_barrier();
    kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
    panic_stackshot_buf = 0;
    panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if defined(__arm__) || defined(__arm64__)
    if (!gPanicBase) {
        printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
        return;
    }
    /* Shift debug buf start location and size by the length of the panic header */
    debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
    debug_buf_ptr = debug_buf_base;
    debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
    kern_return_t kr = KERN_SUCCESS;
    bzero(panic_info, DEBUG_BUF_SIZE);

    assert(debug_buf_base != NULL);
    assert(debug_buf_ptr != NULL);
    assert(debug_buf_size != 0);

    /*
     * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
     * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
     * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
     * up.
     */
    kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
    assert(kr == KERN_SUCCESS);
    if (kr == KERN_SUCCESS) {
        panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
    }
#endif
}

void
phys_carveout_init(void)
{
    if (!PE_i_can_has_debugger(NULL)) {
        return;
    }

    if (phys_carveout_mb == 0) {
        return;
    }

    size_t temp_phys_carveout_size = 0;
    if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &temp_phys_carveout_size)) {
        panic("phys_carveout_mb size overflowed (%luMB)",
            phys_carveout_mb);
        return;
    }

    kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, temp_phys_carveout_size,
        VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT,
        VM_KERN_MEMORY_DIAG);
    if (kr != KERN_SUCCESS) {
        panic("failed to allocate %luMB for phys_carveout_mb: %u",
            phys_carveout_mb, (unsigned int)kr);
        return;
    }

    phys_carveout_pa = kvtophys(phys_carveout);
    phys_carveout_size = temp_phys_carveout_size;

    /*
     * Record and dump carveout metadata region into corefile. Smallest unit (a
     * page) is allocated as storage for panic_trace_header_t content, which is
     * unfortunately wasteful but simplifies usage logic rather than stealing
     * bytes from prior phys_carveout.
     */
    if (debug_can_coredump_phys_carveout()) {
        size_t temp_phys_carveout_metadata_size = PAGE_SIZE;
        kr = kmem_alloc_contig(kernel_map, &phys_carveout_metadata, temp_phys_carveout_metadata_size,
            VM_MAP_PAGE_MASK(kernel_map), 0, 0,
            KMA_NOPAGEWAIT, VM_KERN_MEMORY_DIAG);
        if (kr != KERN_SUCCESS) {
            panic("failed to allocate %u for phys_carveout_metadata: %u",
                (unsigned int)temp_phys_carveout_metadata_size, (unsigned int)kr);
            return;
        }
        phys_carveout_metadata_size = temp_phys_carveout_metadata_size;
        phys_carveout_metadata_pa = kvtophys(phys_carveout_metadata);
    }

#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
    /* likely panic_trace boot-arg is also set so check and enable tracing if necessary into new carveout */
    PE_arm_debug_enable_trace();
#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */
}

boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)
{
    return phys_carveout_size && va >= phys_carveout &&
           va < (phys_carveout + phys_carveout_size);
}

boolean_t
debug_is_in_phys_carveout_metadata(vm_map_offset_t va)
{
    return phys_carveout_metadata_size && va >= phys_carveout_metadata &&
           va < (phys_carveout_metadata + phys_carveout_metadata_size);
}

boolean_t
debug_can_coredump_phys_carveout(void)
{
    return phys_carveout_core;
}

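/*
 * debugger_cpu is the panic lock: the first CPU to compare-and-swap its CPU
 * number into debugger_cpu wins, and every other CPU that traps spins in
 * DebuggerLock() until the winner releases it in DebuggerUnlock().
 */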
static void
DebuggerLock(void)
{
    int my_cpu = cpu_number();
    int debugger_exp_cpu = DEBUGGER_NO_CPU;
    assert(ml_get_interrupts_enabled() == FALSE);

    if (atomic_load(&debugger_cpu) == my_cpu) {
        return;
    }

    while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
        debugger_exp_cpu = DEBUGGER_NO_CPU;
    }

    return;
}

static void
DebuggerUnlock(void)
{
    assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

    /*
     * We don't do an atomic exchange here in case
     * there's another CPU spinning to acquire the debugger_lock
     * and we never get a chance to update it. We already have the
     * lock so we can simply store DEBUGGER_NO_CPU and follow with
     * a barrier.
     */
    atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
    OSMemoryBarrier();

    return;
}

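/*
 * Quiesce/resume the other CPUs around a debugger entry. On ARM this is a
 * cross-call (DebuggerXCallEnter/DebuggerXCallReturn); on x86 it goes through
 * mp_kdp_enter()/mp_kdp_exit().
 */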
static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm__) || defined(__arm64__)
    return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm__) || defined(__arm64__) */
#pragma unused(proceed_on_failure)
#pragma unused(is_stackshot)
    mp_kdp_enter(proceed_on_failure);
    return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm__) || defined(__arm64__)
    DebuggerXCallReturn();
#else /* defined(__arm__) || defined(__arm64__) */
    mp_kdp_exit();
#endif
}

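/*
 * Stash the requested debugger operation and panic metadata in the current
 * CPU's per-CPU debugger_state. On a nested panic the original panic string
 * and arguments are preserved; the nested message is only kprintf'd.
 */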
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
    CPUDEBUGGEROP = db_op;

    /* Preserve the original panic message */
    if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
        CPUDEBUGGERMSG = db_message;
        CPUPANICSTR = db_panic_str;
        CPUPANICARGS = db_panic_args;
        CPUPANICDATAPTR = db_panic_data_ptr;
        CPUPANICCALLER = db_panic_caller;
    } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
        kprintf("Nested panic detected:");
        if (db_panic_str != NULL) {
            _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
        }
    }

    CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
    CPUDEBUGGERRET = KERN_SUCCESS;

    /* Reset these on any nested panics */
    CPUPANICOPTS = db_panic_options;

    return;
}

/*
 * Save the requested debugger state/action into the current processor's
 * percpu state and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
    kern_return_t ret;

    assert(ml_get_interrupts_enabled() == FALSE);
    DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
        db_panic_options, db_panic_data_ptr,
        db_proceed_on_sync_failure, db_panic_caller);

    /*
     * On ARM this generates an uncategorized exception -> sleh code ->
     * DebuggerCall -> kdp_trap -> handle_debugger_trap.
     * That is how XNU ensures that only one core can panic.
     * The rest of the cores are halted by IPI if possible; if that
     * fails it will fall back to dbgwrap.
     */
    TRAP_DEBUGGER;

    ret = CPUDEBUGGERRET;

    DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

    return ret;
}

void __attribute__((noinline))
Assert(
    const char *file,
    int line,
    const char *expression
    )
{
#if CONFIG_NONFATAL_ASSERTS
    static TUNABLE(bool, mach_assert, "assertions", true);

    if (!mach_assert) {
        kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
        return;
    }
#endif

    panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}

boolean_t
debug_is_current_cpu_in_panic_state(void)
{
    return current_debugger_state()->db_entry_count > 0;
}

void
Debugger(const char *message)
{
    DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
}

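/*
 * Enter the debugger with an optional machine context. If a context is
 * supplied we were called from a trap handler and can jump straight to
 * handle_debugger_trap(); otherwise we trap into the debugger via
 * DebuggerTrapWithState().
 */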
void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
    spl_t previous_interrupts_state;
    boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
    read_lbr();
#endif
    previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
    disable_preemption();

    CPUDEBUGGERCOUNT++;

    if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
        static boolean_t in_panic_kprintf = FALSE;

        /* Notify any listeners that we've started a panic */
        uint32_t panic_details = 0;
        if (debugger_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
            panic_details |= kPanicDetailsForcePowerOff;
        }
        PEHaltRestartInternal(kPEPanicBegin, panic_details);

        if (!in_panic_kprintf) {
            in_panic_kprintf = TRUE;
            kprintf("Detected nested debugger entry count exceeding %d\n",
                max_debugger_entry_count);
            in_panic_kprintf = FALSE;
        }

        if (!panicDebugging) {
            kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
        }

        panic_spin_forever();
    }

#pragma unused(debugger_caller) // lies!
    SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(CPUDEBUGGERCOUNT), VALUE(cpu_number())), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

    /* Handle any necessary platform specific actions before we proceed */
    PEInitiatePanic();

#if DEVELOPMENT || DEBUG
    DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

    PE_panic_hook(message);

    doprnt_hide_pointers = FALSE;

    if (ctx != NULL) {
        DebuggerSaveState(DBOP_DEBUGGER, message,
            NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
        handle_debugger_trap(reason, 0, 0, ctx);
        DebuggerSaveState(DBOP_NONE, NULL, NULL,
            NULL, 0, NULL, FALSE, 0);
    } else {
        DebuggerTrapWithState(DBOP_DEBUGGER, message,
            NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
    }

    CPUDEBUGGERCOUNT--;
    doprnt_hide_pointers = old_doprnt_hide_pointers;
    enable_preemption();
    ml_set_interrupts_enabled(previous_interrupts_state);
}

static struct kdp_callout {
    struct kdp_callout *callout_next;
    kdp_callout_fn_t    callout_fn;
    boolean_t           callout_in_progress;
    void               *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
    struct kdp_callout *kcp;
    struct kdp_callout *list_head;

    kcp = zalloc_permanent_type(struct kdp_callout);

    kcp->callout_fn = fn;
    kcp->callout_arg = arg;
    kcp->callout_in_progress = FALSE;

    /* Lock-less list insertion using compare and exchange. */
    do {
        list_head = kdp_callout_list;
        kcp->callout_next = list_head;
    } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}

static void
kdp_callouts(kdp_event_t event)
{
    struct kdp_callout *kcp = kdp_callout_list;

    while (kcp) {
        if (!kcp->callout_in_progress) {
            kcp->callout_in_progress = TRUE;
            kcp->callout_fn(kcp->callout_arg, event);
            kcp->callout_in_progress = FALSE;
        }
        kcp = kcp->callout_next;
    }
}

#if defined(__arm__) || defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
    if (panic_data_buffers != NULL) {
        panic("register_additional_panic_data_buffer called with buffer already registered");
    }

    if (producer_name == NULL || (strlen(producer_name) == 0)) {
        panic("register_additional_panic_data_buffer called with invalid producer_name");
    }

    if (buf == NULL) {
        panic("register_additional_panic_data_buffer called with invalid buffer pointer");
    }

    if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
        panic("register_additional_panic_data_buffer called with invalid length");
    }

    struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
    new_panic_data_buffer->producer_name = producer_name;
    new_panic_data_buffer->buf = buf;
    new_panic_data_buffer->len = len;

    if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
        panic("register_additional_panic_data_buffer called with buffer already registered");
    }

    return;
}
#endif /* defined(__arm__) || defined(__arm64__) */

/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
void
panic(const char *str, ...)
{
    va_list panic_str_args;

    va_start(panic_str_args, str);
    panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
    va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
    va_list panic_str_args;

    va_start(panic_str_args, str);
    panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
        NULL, (unsigned long)(char *)__builtin_return_address(0));
    va_end(panic_str_args);
}

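/*
 * Validate a pointer before dereferencing it at panic time, appending a
 * diagnostic to the paniclog if it is NULL or not safely readable.
 */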
boolean_t
panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
{
    if (ptr == NULL) {
        paniclog_append_noflush("NULL %s pointer\n", what);
        return false;
    }

    if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
        paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
            what, ptr, (uint32_t)size);
        return false;
    }

    return true;
}

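/*
 * Carefully resolve a thread's task and proc at panic time, validating each
 * link of the thread -> t_tro -> task/proc chain before it is dereferenced.
 */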
boolean_t
panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
{
    if (!PANIC_VALIDATE_PTR(thread)) {
        return false;
    }

    if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
        return false;
    }

    if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
        return false;
    }

    if (task) {
        *task = thread->t_tro->tro_task;
    }

    if (!panic_validate_ptr(thread->t_tro->tro_proc,
        sizeof(struct proc *), "bsd_info")) {
        *proc = NULL;
    } else {
        *proc = thread->t_tro->tro_proc;
    }

    return true;
}

#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
    va_list panic_str_args;
    __assert_only os_ref_count_t th_ref_count;

    assert_thread_magic(thread);
    th_ref_count = os_ref_get_count_raw(&thread->ref_count);
    assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

    /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
    thread_reference(thread);

    va_start(panic_str_args, str);
    panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
        thread, (unsigned long)(char *)__builtin_return_address(0));

    va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
    read_lbr();
    /* Turn off I/O tracing once we've panicked */
    mmiotrace_enabled = 0;
#endif

    ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

    CPUDEBUGGERCOUNT++;

    if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
        static boolean_t in_panic_kprintf = FALSE;

        /* Notify any listeners that we've started a panic */
        uint32_t panic_details = 0;
        if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
            panic_details |= kPanicDetailsForcePowerOff;
        }
        PEHaltRestartInternal(kPEPanicBegin, panic_details);

        if (!in_panic_kprintf) {
            in_panic_kprintf = TRUE;
            kprintf("Detected nested debugger entry count exceeding %d\n",
                max_debugger_entry_count);
            in_panic_kprintf = FALSE;
        }

        if (!panicDebugging) {
            kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
        }

        panic_spin_forever();
    }

    SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(CPUDEBUGGERCOUNT), VALUE(cpu_number())), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

    /* Handle any necessary platform specific actions before we proceed */
    PEInitiatePanic();

#if DEVELOPMENT || DEBUG
    DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

    PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
    plctrace_disable();
#endif

    if (write_trace_on_panic && kdebug_enable) {
        if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
            ml_set_interrupts_enabled(TRUE);
            KDBG_RELEASE(TRACE_PANIC);
            kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
        }
    }

    ml_set_interrupts_enabled(FALSE);
    disable_preemption();

#if defined (__x86_64__)
    pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

    /* Never hide pointers from panic logs. */
    doprnt_hide_pointers = FALSE;

    if (ctx != NULL) {
        /*
         * We called into panic from a trap, no need to trap again. Set the
         * state on the current CPU and then jump to handle_debugger_trap.
         */
        DebuggerSaveState(DBOP_PANIC, "panic",
            panic_format_str, panic_args,
            panic_options_mask, panic_data_ptr, TRUE, panic_caller);
        handle_debugger_trap(reason, 0, 0, ctx);
    }

#if defined(__arm64__)
    /*
     * Signal to fastsim that it should open debug ports (nop on hardware)
     */
    __asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

    DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
        panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

    /*
     * Not reached.
     */
    panic_stop();
    __builtin_unreachable();
}

void
panic_spin_forever(void)
{
    paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");

    for (;;) {
    }
}

static void
kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
{
    printf("Attempting system restart...\n");
    if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
        PEHaltRestart(kPEPanicRestartCPUNoCallouts);
    } else {
        PEHaltRestart(type);
    }
    halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
    kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
    DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_PRELOG));
#endif

#if defined(__x86_64__)
    kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
    /*
     * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
     * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
     * we'll just spin in kdp_raise_exception.
     */
    if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
        kdp_raise_exception(exception, code, subcode, state);
        if (debugger_safe_to_return && !debugger_is_panic) {
            return;
        }
    }

#ifdef CONFIG_KCOV
    /* Try not to break core dump path by sanitizer. */
    kcov_panic_disable();
#endif

    if ((debugger_current_op == DBOP_PANIC) ||
        ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
        /*
         * Attempt to notify listeners once and only once that we've started
         * panicking. Only do this for Debugger() calls if we're treating
         * Debugger() calls like panic().
         */
        uint32_t panic_details = 0;
        if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
            panic_details |= kPanicDetailsForcePowerOff;
        }
        PEHaltRestartInternal(kPEPanicBegin, panic_details);

        /*
         * Set the begin pointer in the panic log structure. We key off of this
         * static variable rather than contents from the panic header itself in case someone
         * has stomped over the panic_info structure. Also initializes the header magic.
         */
        static boolean_t began_writing_paniclog = FALSE;
        if (!began_writing_paniclog) {
            PE_init_panicheader();
            began_writing_paniclog = TRUE;
        } else {
            /*
             * If we reached here, update the panic header to keep it as consistent
             * as possible during a nested panic
             */
            PE_update_panicheader_nestedpanic();
        }
    }

    /*
     * Write panic string if this was a panic.
     *
     * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
     */
    if (debugger_current_op == DBOP_PANIC) {
        paniclog_append_noflush("panic(cpu %d caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
        if (debugger_panic_str) {
            _doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
        }
        paniclog_append_noflush("\n");
    }
#if defined(__x86_64__)
    else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
        paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
    }

    /*
     * Debugger() is treated like panic() on embedded -- for example we use it for WDT
     * panics (so we need to write a paniclog). On desktop Debugger() is used in the
     * conventional sense.
     */
    if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
    {
        kdp_callouts(KDP_EVENT_PANICLOG);

        /*
         * Write paniclog and panic stackshot (if supported)
         * TODO: Need to clear panic log when return from debugger
         * hooked up for embedded
         */
        SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
        DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTLOG));
#endif

        /* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
        if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
            PEHaltRestart(kPEPanicDiagnosticsDone);
            PEHaltRestart(kPEPanicRestartCPUNoCallouts);
        }
    }

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
    /*
     * If reboot on panic is enabled and the caller of panic indicated that we should skip
     * local coredumps, don't try to write these and instead go straight to reboot. This
     * allows us to persist any data that's stored in the panic log.
     */
    if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
        (debug_boot_arg & DB_REBOOT_POST_CORE)) {
        PEHaltRestart(kPEPanicDiagnosticsDone);
        kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
    }

    /*
     * Consider generating a local corefile if the infrastructure is configured
     * and we haven't disabled on-device coredumps.
     */
    if (on_device_corefile_enabled()) {
        if (!kdp_has_polled_corefile()) {
            if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
                paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)\n",
                    kdp_polled_corefile_error());
#if defined(__arm__) || defined(__arm64__)
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
                paniclog_flush();
#else /* defined(__arm__) || defined(__arm64__) */
                if (panic_info->mph_panic_log_offset != 0) {
                    panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
                    paniclog_flush();
                }
#endif /* defined(__arm__) || defined(__arm64__) */
            }
        }
#if XNU_MONITOR
        else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
            paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state\n");
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
            paniclog_flush();
        }
#endif /* XNU_MONITOR */
        else {
            int ret = -1;

#if defined (__x86_64__)
            /* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
            if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
            {
                /*
                 * Doing an on-device coredump leaves the disk driver in a state
                 * that can not be resumed.
                 */
                debugger_safe_to_return = FALSE;
                begin_panic_transfer();
                ret = kern_dump(KERN_DUMP_DISK);
                abort_panic_transfer();

#if DEVELOPMENT || DEBUG
                DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTCORE));
#endif
            }

            /*
             * If DB_REBOOT_POST_CORE is set, then reboot if the coredump was successfully saved
             * or if the option to ignore failures is set.
             */
            if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
                ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
                PEHaltRestart(kPEPanicDiagnosticsDone);
                kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
            }
        }
    }

    if (debugger_current_op == DBOP_PANIC ||
        ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
        PEHaltRestart(kPEPanicDiagnosticsDone);
    }

    if (debug_boot_arg & DB_REBOOT_ALWAYS) {
        kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
    }

    /* If KDP is configured, try to trap to the debugger */
#if defined(__arm__) || defined(__arm64__)
    if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
    if (current_debugger != NO_CUR_DB) {
#endif
        kdp_raise_exception(exception, code, subcode, state);
        /*
         * Only return if we entered via Debugger and it's safe to return
         * (we halted the other cores successfully, this isn't a nested panic, etc)
         */
        if (debugger_current_op == DBOP_DEBUGGER &&
            debugger_safe_to_return &&
            kernel_debugger_entry_count == 1 &&
            !debugger_is_panic) {
            return;
        }
    }

#if defined(__arm__) || defined(__arm64__)
    if (PE_i_can_has_debugger(NULL) && panicDebugging) {
        /* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
        panic_spin_shmcon();
    }
#endif /* defined(__arm__) || defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

    PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

    if (!panicDebugging) {
        kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
    }

    panic_spin_forever();
}

#if INTERRUPT_MASKED_DEBUG
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif

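/*
 * Common landing point for all debugger traps (panic, Debugger(), stackshot,
 * software breakpoints). Acquires the debugger lock, halts the other cores,
 * publishes the per-CPU debugger state globally, dispatches on the requested
 * operation, and then unwinds in reverse order.
 */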
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
    unsigned int initial_not_in_kdp = not_in_kdp;
    kern_return_t ret;
    debugger_op db_prev_op = debugger_current_op;

    DEBUGGER_TRAP_TIMESTAMP(0);

    DebuggerLock();
    ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

    DEBUGGER_TRAP_TIMESTAMP(1);

#if INTERRUPT_MASKED_DEBUG
    if (serialmode & SERIALMODE_OUTPUT) {
        ml_spin_debug_reset(current_thread());
    }
#endif
    if (ret != KERN_SUCCESS) {
        CPUDEBUGGERRET = ret;
        DebuggerUnlock();
        return;
    }

    /* Update the global panic/debugger nested entry level */
    kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
    if (kernel_debugger_entry_count > 0) {
        console_suspend();
    }

    /*
     * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
     * should we call into the debugger if it's configured and then reboot if the panic log has been written?
     */

    if (CPUDEBUGGEROP == DBOP_NONE) {
        /* If there was no debugger context setup, we trapped due to a software breakpoint */
        debugger_current_op = DBOP_BREAKPOINT;
    } else {
        /* Not safe to return from a nested panic/debugger call */
        if (debugger_current_op == DBOP_PANIC ||
            debugger_current_op == DBOP_DEBUGGER) {
            debugger_safe_to_return = FALSE;
        }

        debugger_current_op = CPUDEBUGGEROP;

        /* Only overwrite the panic message if there is none already - save the data from the first call */
        if (debugger_panic_str == NULL) {
            debugger_panic_str = CPUPANICSTR;
            debugger_panic_args = CPUPANICARGS;
            debugger_panic_data = CPUPANICDATAPTR;
            debugger_message = CPUDEBUGGERMSG;
            debugger_panic_caller = CPUPANICCALLER;
        }

        debugger_panic_options = CPUPANICOPTS;
    }

    /*
     * Clear the op from the processor debugger context so we can handle
     * breakpoints in the debugger
     */
    CPUDEBUGGEROP = DBOP_NONE;

    DEBUGGER_TRAP_TIMESTAMP(2);

    kdp_callouts(KDP_EVENT_ENTER);
    not_in_kdp = 0;

    DEBUGGER_TRAP_TIMESTAMP(3);

    if (debugger_current_op == DBOP_BREAKPOINT) {
        kdp_raise_exception(exception, code, subcode, state);
    } else if (debugger_current_op == DBOP_STACKSHOT) {
        CPUDEBUGGERRET = do_stackshot();
#if PGO
    } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
        CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
    } else {
        debugger_collect_diagnostics(exception, code, subcode, state);
    }

    DEBUGGER_TRAP_TIMESTAMP(4);

    not_in_kdp = initial_not_in_kdp;
    kdp_callouts(KDP_EVENT_EXIT);

    DEBUGGER_TRAP_TIMESTAMP(5);

    if (debugger_current_op != DBOP_BREAKPOINT) {
        debugger_panic_str = NULL;
        debugger_panic_args = NULL;
        debugger_panic_data = NULL;
        debugger_panic_options = 0;
        debugger_message = NULL;
    }

    /* Restore the previous debugger state */
    debugger_current_op = db_prev_op;

    DEBUGGER_TRAP_TIMESTAMP(6);

    DebuggerResumeOtherCores();

    DEBUGGER_TRAP_TIMESTAMP(7);

    DebuggerUnlock();

    DEBUGGER_TRAP_TIMESTAMP(8);

    return;
}

__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
    void *caller = __builtin_return_address(0);
    va_list listp;
    va_list listp2;

#ifdef lint
    level++;
#endif /* lint */
#ifdef MACH_BSD
    va_start(listp, fmt);
    va_copy(listp2, listp);

    disable_preemption();
    _doprnt(fmt, &listp, cons_putc_locked, 0);
    enable_preemption();

    va_end(listp);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
    va_end(listp2);
#endif
}

/*
 * Per <rdar://problem/24974766>, skip appending log messages to
 * the new logging infrastructure in contexts where safety is
 * uncertain. These contexts include:
 *   - We're in the debugger
 *   - We're in a panic
 *   - Interrupts are disabled
 *   - Preemption is disabled
 * In all the above cases, it is potentially unsafe to log messages.
 */

boolean_t
oslog_is_safe(void)
{
    return kernel_debugger_entry_count == 0 &&
           not_in_kdp == 1 &&
           get_preemption_level() == 0 &&
           ml_get_interrupts_enabled() == TRUE;
}

boolean_t
debug_mode_active(void)
{
    return (kernel_debugger_entry_count != 0) || (not_in_kdp == 0);
}

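/*
 * Append a single character to the panic debug buffer, if there is room.
 */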
void
debug_putc(char c)
{
    if ((debug_buf_size != 0) &&
        ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
        *debug_buf_ptr = c;
        debug_buf_ptr++;
    }
}

#if defined (__x86_64__)
struct pasc {
    unsigned a: 7;
    unsigned b: 7;
    unsigned c: 7;
    unsigned d: 7;
    unsigned e: 7;
    unsigned f: 7;
    unsigned g: 7;
    unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 */
int
packA(char *inbuf, uint32_t length, uint32_t buflen)
{
    unsigned int i, j = 0;
    pasc_t pack;

    length = MIN(((length + 7) & ~7), buflen);

    for (i = 0; i < length; i += 8) {
        pack.a = inbuf[i];
        pack.b = inbuf[i + 1];
        pack.c = inbuf[i + 2];
        pack.d = inbuf[i + 3];
        pack.e = inbuf[i + 4];
        pack.f = inbuf[i + 5];
        pack.g = inbuf[i + 6];
        pack.h = inbuf[i + 7];
        bcopy((char *) &pack, inbuf + j, 7);
        j += 7;
    }
    return j;
}

void
unpackA(char *inbuf, uint32_t length)
{
    pasc_t packs;
    unsigned i = 0;
    length = (length * 8) / 7;

    while (i < length) {
        packs = *(pasc_t *)&inbuf[i];
        bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
        inbuf[i++] = packs.a;
        inbuf[i++] = packs.b;
        inbuf[i++] = packs.c;
        inbuf[i++] = packs.d;
        inbuf[i++] = packs.e;
        inbuf[i++] = packs.f;
        inbuf[i++] = packs.g;
        inbuf[i++] = packs.h;
    }
}
#endif /* defined (__x86_64__) */

extern char *proc_name_address(void *);
extern char *proc_longname_address(void *);

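/*
 * Append the name of the process backing the current thread to the paniclog,
 * using nofault copies so a corrupt proc cannot fault during the panic.
 */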
__private_extern__ void
panic_display_process_name(void)
{
    proc_name_t proc_name = {};
    struct proc *cbsd_info = NULL;
    task_t ctask = NULL;
    vm_size_t size;

    if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
        goto out;
    }

    if (cbsd_info == NULL) {
        goto out;
    }

    size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
        (vm_offset_t)&proc_name, sizeof(proc_name));

    if (size == 0 || proc_name[0] == '\0') {
        size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
            (vm_offset_t)&proc_name,
            MIN(sizeof(command_t), sizeof(proc_name)));
        if (size > 0) {
            proc_name[size - 1] = '\0';
        }
    }

out:
    proc_name[sizeof(proc_name) - 1] = '\0';
    paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
        current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
}

unsigned
panic_active(void)
{
    return debugger_panic_str != (char *) 0;
}

void
populate_model_name(char *model_string)
{
    strlcpy(model_name, model_string, sizeof(model_name));
}

void
panic_display_model_name(void)
{
    char tmp_model_name[sizeof(model_name)];

    if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
        return;
    }

    tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

    if (tmp_model_name[0] != 0) {
        paniclog_append_noflush("System model name: %s\n", tmp_model_name);
    }
}

void
panic_display_kernel_uuid(void)
{
    char tmp_kernel_uuid[sizeof(kernel_uuid_string)];

    if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
        return;
    }

    if (tmp_kernel_uuid[0] != '\0') {
        paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
    }
}

void
panic_display_kernel_aslr(void)
{
    kc_format_t kc_format;

    PE_get_primary_kc_format(&kc_format);

    if (kc_format == KCFormatFileset) {
        void *kch = PE_get_kc_header(KCKindPrimary);
        paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
        paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
        paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
        paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
        extern vm_offset_t segTEXTEXECB;
        paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
        paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
#endif /* defined(__arm64__) */
    } else if (vm_kernel_slide) {
        paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
        paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
    } else {
        paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
    }
}

void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
    paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
#endif
}

#if CONFIG_ECC_LOGGING
__private_extern__ void
panic_display_ecc_errors(void)
{
    uint32_t count = ecc_log_get_correction_count();

    if (count > 0) {
        paniclog_append_noflush("ECC Corrections:%u\n", count);
    }
}
#endif /* CONFIG_ECC_LOGGING */

#if !CONFIG_TELEMETRY
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
    return KERN_NOT_SUPPORTED;
}
#endif

#include <machine/machine_cpu.h>

SECURITY_READ_ONLY_LATE(uint32_t) kern_feature_overrides = 0;

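/*
 * Check a feature-disable mask against the "validation_disables" boot-arg.
 * The boot-arg is parsed lazily on first use, and KF_INITIALIZED marks that
 * parsing has happened.
 */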
boolean_t
kern_feature_override(uint32_t fmask)
{
    if (kern_feature_overrides == 0) {
        uint32_t fdisables = 0;
        /*
         * Expected to be first invoked early, in a single-threaded
         * environment
         */
        if (PE_parse_boot_argn("validation_disables", &fdisables, sizeof(fdisables))) {
            fdisables |= KF_INITIALIZED;
            kern_feature_overrides = fdisables;
        } else {
            kern_feature_overrides |= KF_INITIALIZED;
        }
    }
    return (kern_feature_overrides & fmask) == fmask;
}

boolean_t
on_device_corefile_enabled(void)
{
    assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
    if (debug_boot_arg == 0) {
        return FALSE;
    }
    if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
        return FALSE;
    }
#if !XNU_TARGET_OS_OSX
    /*
     * outside of macOS, if there's a debug boot-arg set and local
     * cores aren't explicitly disabled, we always write a corefile.
     */
    return TRUE;
#else /* !XNU_TARGET_OS_OSX */
    /*
     * on macOS, if corefiles on panic are requested and local cores
     * aren't disabled we write a local core.
     */
    if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
        return TRUE;
    }
#endif /* !XNU_TARGET_OS_OSX */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
    return FALSE;
}

boolean_t
panic_stackshot_to_disk_enabled(void)
{
    assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if defined(__x86_64__)
    if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
        /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
        if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
            return FALSE;
        }

        return TRUE;
    }
#endif
    return FALSE;
}

#if DEBUG || DEVELOPMENT
const char *
sysctl_debug_get_preoslog(size_t *size)
{
    int result = 0;
    void *preoslog_pa = NULL;
    int preoslog_size = 0;

    result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
    if (result || preoslog_pa == NULL || preoslog_size == 0) {
        kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
        *size = 0;
        return NULL;
    }

    /*
     * Beware:
     * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
     * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
     */
    *size = preoslog_size;
    return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
}
#endif /* DEBUG || DEVELOPMENT */