/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/percpu.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/socd_client.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc_internal.h>
#include <pexpert/device_tree.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_compressor.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <IOKit/IOBSD.h>

#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/lbr.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>
#include <mach/resource_monitors.h>
#include <machine/machine_routines.h>

#include <os/log_private.h>

#if defined(__arm__) || defined(__arm64__)
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#include <san/kcov.h>

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );

unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
unsigned int panicDebugging = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if defined(__arm__) || defined(__arm64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm__)
#define TRAP_DEBUGGER __asm__ volatile("trap")
#elif defined(__arm64__)
/*
 * Magic number; this should be identical to the __arm__ encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()    pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop()    panic_spin_forever()
#endif

struct debugger_state {
	uint64_t        db_panic_options;
	debugger_op     db_current_op;
	boolean_t       db_proceed_on_sync_failure;
	const char     *db_message;
	const char     *db_panic_str;
	va_list        *db_panic_args;
	void           *db_panic_data_ptr;
	unsigned long   db_panic_caller;
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t        db_entry_count;
	kern_return_t   db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

#define CPUDEBUGGEROP    current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG   current_debugger_state()->db_message
#define CPUPANICSTR      current_debugger_state()->db_panic_str
#define CPUPANICARGS     current_debugger_state()->db_panic_args
#define CPUPANICOPTS     current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR  current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC  current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET   current_debugger_state()->db_op_return
#define CPUPANICCALLER   current_debugger_state()->db_panic_caller

#if DEVELOPMENT || DEBUG
#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested)         \
MACRO_BEGIN                                                             \
	if (requested) {                                                \
	        volatile int *badpointer = (int *)4;                    \
	        *badpointer = 0;                                        \
	}                                                               \
MACRO_END
#endif /* DEVELOPMENT || DEBUG */

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2 __printflike(1, 0);
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);

#define NESTEDDEBUGGERENTRYMAX 5
static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;

SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
static bool PERCPU_DATA(hv_entry_detected); // = false
static void awl_set_scratch_reg_hv_bit(void);
void awl_mark_hv_entry(void);
static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);

#if defined(__arm__) || defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm__) || defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif

/* Debugger state */
atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;

/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, returning from Debugger() is supported.
 *
 * Returning from Debugger() is currently only implemented on x86.
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

TUNABLE(int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

/*
 * Maintain the physically-contiguous carveout for the `phys_carveout_mb`
 * boot-arg.
 */

TUNABLE(size_t, phys_carveout_mb, "phys_carveout_mb", 0);
TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 0);
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout_metadata = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_metadata_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_metadata_size = 0;
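
/*
 * Illustrative boot-args line (the values here are hypothetical, not
 * defaults):
 *
 *   boot-args="phys_carveout_mb=64 phys_carveout_core=1"
 *
 * This would reserve a 64MB physically-contiguous region at boot and, via
 * debug_can_coredump_phys_carveout() below, mark it for inclusion in local
 * corefiles.
 */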

/*
 * Returns whether kernel debugging is expected to be restricted
 * on the device currently based on CSR or other platform restrictions.
 */
boolean_t
kernel_debugging_restricted(void)
{
#if XNU_TARGET_OS_OSX
#if CONFIG_CSR
	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
		return TRUE;
	}
#endif /* CONFIG_CSR */
	return FALSE;
#else /* XNU_TARGET_OS_OSX */
	return FALSE;
#endif /* XNU_TARGET_OS_OSX */
}

__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (!kernel_debugging_restricted() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm__) || defined(__arm64__)
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm__) || defined(__arm64__) */
	}

	if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) {
		max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;
	}

#if defined(__arm__) || defined(__arm64__)
	char kdpname[80];

	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm__) || defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, returning from Debugger() is
	 * supported. This is because writing an on-device corefile is a
	 * destructive operation.
	 *
	 * Returning from Debugger() is currently only implemented on x86.
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);

#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if defined(__arm__) || defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}

void
phys_carveout_init(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	if (phys_carveout_mb == 0) {
		return;
	}

	size_t temp_phys_carveout_size = 0;
	if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &temp_phys_carveout_size)) {
		panic("phys_carveout_mb size overflowed (%luMB)",
		    phys_carveout_mb);
		return;
	}

	kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, temp_phys_carveout_size,
	    VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		panic("failed to allocate %luMB for phys_carveout_mb: %u",
		    phys_carveout_mb, (unsigned int)kr);
		return;
	}

	phys_carveout_pa = kvtophys(phys_carveout);
	phys_carveout_size = temp_phys_carveout_size;

	/*
	 * Record and dump carveout metadata region into corefile. Smallest unit (a
	 * page) is allocated as storage for panic_trace_header_t content, which is
	 * unfortunately wasteful but simplifies usage logic rather than stealing
	 * bytes from prior phys_carveout.
	 */
	if (debug_can_coredump_phys_carveout()) {
		size_t temp_phys_carveout_metadata_size = PAGE_SIZE;
		kr = kmem_alloc_contig(kernel_map, &phys_carveout_metadata, temp_phys_carveout_metadata_size,
		    VM_MAP_PAGE_MASK(kernel_map), 0, 0,
		    KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA, VM_KERN_MEMORY_DIAG);
		if (kr != KERN_SUCCESS) {
			panic("failed to allocate %u for phys_carveout_metadata: %u",
			    (unsigned int)temp_phys_carveout_metadata_size, (unsigned int)kr);
			return;
		}
		phys_carveout_metadata_size = temp_phys_carveout_metadata_size;
		phys_carveout_metadata_pa = kvtophys(phys_carveout_metadata);
	}

#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
	/* likely panic_trace boot-arg is also set so check and enable tracing if necessary into new carveout */
	PE_arm_debug_enable_trace();
#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */
}

boolean_t
debug_is_in_phys_carveout(vm_map_offset_t va)
{
	return phys_carveout_size && va >= phys_carveout &&
	       va < (phys_carveout + phys_carveout_size);
}

boolean_t
debug_is_in_phys_carveout_metadata(vm_map_offset_t va)
{
	return phys_carveout_metadata_size && va >= phys_carveout_metadata &&
	       va < (phys_carveout_metadata + phys_carveout_metadata_size);
}

boolean_t
debug_can_coredump_phys_carveout(void)
{
	return phys_carveout_core;
}

static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}

static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}

static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
{
#if defined(__arm__) || defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
#else /* defined(__arm__) || defined(__arm64__) */
#pragma unused(proceed_on_failure)
#pragma unused(is_stackshot)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm__) || defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm__) || defined(__arm64__) */
	mp_kdp_exit();
#endif
}

__printflike(3, 0)
static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	CPUDEBUGGEROP = db_op;

	/* Preserve the original panic message */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
	} else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
		/* db_panic_str is known non-NULL here, so print it directly */
		kprintf("Nested panic detected:");
		_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	CPUPANICOPTS = db_panic_options;

	return;
}

/*
 * Save the requested debugger state/action into the current processor's
 * per-CPU state and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap.
	 * That is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	ret = CPUDEBUGGERRET;

	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}

void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
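
/*
 * For reference, the assert() macro in <kern/assert.h> funnels into Assert();
 * its shape is roughly the following (see that header for the exact,
 * configuration-dependent definition):
 *
 *   #define assert(ex)  ((ex) ? (void)0 : Assert(__FILE__, __LINE__, #ex))
 */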

boolean_t
debug_is_current_cpu_in_panic_state(void)
{
	return current_debugger_state()->db_entry_count > 0;
}

void
Debugger(const char *message)
{
	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
}

void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask, unsigned long debugger_caller)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
#endif
	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (debugger_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
		}

		panic_spin_forever();
	}

#pragma unused(debugger_caller) // lies!
	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	PE_panic_hook(message);

	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}

static struct kdp_callout {
	struct kdp_callout *callout_next;
	kdp_callout_fn_t    callout_fn;
	boolean_t           callout_in_progress;
	void               *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
	struct kdp_callout *kcp;
	struct kdp_callout *list_head;

	kcp = zalloc_permanent_type(struct kdp_callout);

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
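
/*
 * Illustrative usage (hypothetical caller): a registered callout is invoked
 * with KDP_EVENT_ENTER, KDP_EVENT_PANICLOG and KDP_EVENT_EXIT as the debugger
 * path runs (see kdp_callouts() below), so a driver could quiesce its
 * hardware before the paniclog is written:
 *
 *   static void
 *   my_kdp_callout(void *arg, kdp_event_t event)
 *   {
 *           if (event == KDP_EVENT_ENTER) {
 *                   // quiesce the device described by `arg`
 *           }
 *   }
 *
 *   kdp_register_callout(my_kdp_callout, my_device_state);
 */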

static void
kdp_callouts(kdp_event_t event)
{
	struct kdp_callout *kcp = kdp_callout_list;

	while (kcp) {
		if (!kcp->callout_in_progress) {
			kcp->callout_in_progress = TRUE;
			kcp->callout_fn(kcp->callout_arg, event);
			kcp->callout_in_progress = FALSE;
		}
		kcp = kcp->callout_next;
	}
}

#if defined(__arm__) || defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
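
/*
 * Illustrative usage (hypothetical caller). Both the producer name and the
 * buffer must remain allocated forever, since they are dereferenced at panic
 * time:
 *
 *   static char my_panic_data[ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN];
 *
 *   register_additional_panic_data_buffer("my_driver", my_panic_data,
 *       sizeof(my_panic_data));
 */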
#endif /* defined(__arm__) || defined(__arm64__) */

/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
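
/*
 * Illustrative call (hypothetical caller and flag choice): a subsystem that
 * wants a paniclog but no local corefile could pass one of the public option
 * bits, e.g.:
 *
 *   panic_with_options(0, NULL, DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP,
 *       "mysubsystem: fatal state %d", state);
 *
 * Internal option bits are masked off above, so callers can only pass the
 * public DEBUGGER_OPTION_* flags.
 */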

boolean_t
panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
{
	if (ptr == NULL) {
		paniclog_append_noflush("NULL %s pointer\n", what);
		return false;
	}

	if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
		paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
		    what, ptr, (uint32_t)size);
		return false;
	}

	return true;
}

boolean_t
panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
{
	if (!PANIC_VALIDATE_PTR(thread)) {
		return false;
	}

	if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
		return false;
	}

	if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
		return false;
	}

	if (task) {
		*task = thread->t_tro->tro_task;
	}

	if (!panic_validate_ptr(thread->t_tro->tro_proc,
	    sizeof(struct proc *), "bsd_info")) {
		*proc = NULL;
	} else {
		*proc = thread->t_tro->tro_proc;
	}

	return true;
}

#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count_raw(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
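
/*
 * Illustrative call (hypothetical watchdog caller). Per the NOTE above, the
 * caller already holds a reference on `target_thread`; this function takes
 * its own additional reference before trapping:
 *
 *   panic_with_thread_context(0, NULL, 0, target_thread,
 *       "watchdog: thread %p unresponsive", target_thread);
 */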
#endif /* defined (__x86_64__) */

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	read_lbr();
	/* Turn off I/O tracing once we've panicked */
	mmiotrace_enabled = 0;
#endif

	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		panic_spin_forever();
	}

	SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__)
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}

void
panic_spin_forever(void)
{
	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");

	for (;;) {
	}
}

static void
kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
{
	printf("Attempting system restart...\n");
	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
	} else {
		PEHaltRestart(type);
	}
	halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_PRELOG));
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

#ifdef CONFIG_KCOV
	/* Try not to break core dump path by sanitizer. */
	kcov_panic_disable();
#endif

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		} else {
			/*
			 * If we reached here, update the panic header to keep it as consistent
			 * as possible during a nested panic
			 */
			PE_update_panicheader_nestedpanic();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %d caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
#pragma clang diagnostic pop
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTLOG));
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)\n",
				    kdp_polled_corefile_error());
#if defined(__arm__) || defined(__arm64__)
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm__) || defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm__) || defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
			paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTCORE));
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if the coredump is successfully saved
			 * or if the option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm__) || defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm__) || defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm__) || defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	panic_spin_forever();
}

#if INTERRUPT_MASKED_DEBUG
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif

void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));

	DEBUGGER_TRAP_TIMESTAMP(1);

#if INTERRUPT_MASKED_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif
	if (ret != KERN_SUCCESS) {
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}

__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;

#ifdef lint
	level++;
#endif /* lint */
#ifdef MACH_BSD
	va_start(listp, fmt);
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
#pragma clang diagnostic pop
	va_end(listp2);
#endif
}

/*
 * Per <rdar://problem/24974766>, skip appending log messages to
 * the new logging infrastructure in contexts where safety is
 * uncertain. These contexts include:
 *   - When we're in the debugger
 *   - We're in a panic
 *   - Interrupts are disabled
 *   - Or Pre-emption is disabled
 * In all the above cases, it is potentially unsafe to log messages.
 */

boolean_t
oslog_is_safe(void)
{
	return kernel_debugger_entry_count == 0 &&
	       not_in_kdp == 1 &&
	       get_preemption_level() == 0 &&
	       ml_get_interrupts_enabled() == TRUE;
}

boolean_t
debug_mode_active(void)
{
	return (0 != kernel_debugger_entry_count) || (0 == not_in_kdp);
}

void
debug_putc(char c)
{
	if ((debug_buf_size != 0) &&
	    ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
		*debug_buf_ptr = c;
		debug_buf_ptr++;
	}
}

#if defined (__x86_64__)
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 */
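/*
 * Arithmetic sketch of the scheme: each field of struct pasc is 7 bits, so
 * eight input bytes (with their high bits dropped -- safe for 7-bit ASCII
 * paniclog text) pack into 8 * 7 = 56 bits = 7 output bytes, a fixed 12.5%
 * reduction. packA() therefore returns 7 bytes for every 8 consumed (roughly
 * length * 7 / 8), and unpackA() recovers the original extent with
 * length * 8 / 7.
 */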
1540 int
1541 packA(char *inbuf, uint32_t length, uint32_t buflen)
1542 {
1543 unsigned int i, j = 0;
1544 pasc_t pack;
1545
1546 length = MIN(((length + 7) & ~7), buflen);
1547
1548 for (i = 0; i < length; i += 8) {
1549 pack.a = inbuf[i];
1550 pack.b = inbuf[i + 1];
1551 pack.c = inbuf[i + 2];
1552 pack.d = inbuf[i + 3];
1553 pack.e = inbuf[i + 4];
1554 pack.f = inbuf[i + 5];
1555 pack.g = inbuf[i + 6];
1556 pack.h = inbuf[i + 7];
1557 bcopy((char *) &pack, inbuf + j, 7);
1558 j += 7;
1559 }
1560 return j;
1561 }
1562
1563 void
1564 unpackA(char *inbuf, uint32_t length)
1565 {
1566 pasc_t packs;
1567 unsigned i = 0;
1568 length = (length * 8) / 7;
1569
1570 while (i < length) {
1571 packs = *(pasc_t *)&inbuf[i];
1572 bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
1573 inbuf[i++] = packs.a;
1574 inbuf[i++] = packs.b;
1575 inbuf[i++] = packs.c;
1576 inbuf[i++] = packs.d;
1577 inbuf[i++] = packs.e;
1578 inbuf[i++] = packs.f;
1579 inbuf[i++] = packs.g;
1580 inbuf[i++] = packs.h;
1581 }
1582 }
1583 #endif /* defined (__x86_64__) */
1584
1585 extern char *proc_name_address(void *);
1586 extern char *proc_longname_address(void *);
1587
__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	struct proc *cbsd_info = NULL;
	task_t ctask = NULL;
	vm_size_t size;

	if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	if (size == 0 || proc_name[0] == '\0') {
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			proc_name[size - 1] = '\0';
		}
	}

out:
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
	    current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
}

unsigned
panic_active(void)
{
	return debugger_current_op == DBOP_PANIC ||
	       (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
}

void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}

void
panic_display_model_name(void)
{
	char tmp_model_name[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
		return;
	}

	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

	if (tmp_model_name[0] != 0) {
		paniclog_append_noflush("System model name: %s\n", tmp_model_name);
	}
}

void
panic_display_kernel_uuid(void)
{
	char tmp_kernel_uuid[sizeof(kernel_uuid_string)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
		return;
	}

	if (tmp_kernel_uuid[0] != '\0') {
		paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
	}
}

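/*
 * With a fileset kernelcache the cache slides as a whole and the kernel's
 * text sits at some offset within it, so both the kernelcache slide/base and
 * the derived kernel text slide/base are reported; otherwise only the kernel
 * slide (when nonzero) and text base are interesting.
 */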
void
panic_display_kernel_aslr(void)
{
	kc_format_t kc_format;

	PE_get_primary_kc_format(&kc_format);

	if (kc_format == KCFormatFileset) {
		void *kch = PE_get_kc_header(KCKindPrimary);
		paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
		paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
		extern vm_offset_t segTEXTEXECB;
		paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
		paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
#endif /* defined(__arm64__) */
	} else if (vm_kernel_slide) {
		paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
	} else {
		paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
	}
}

void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
	paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
#endif
}

#if CONFIG_ECC_LOGGING
__private_extern__ void
panic_display_ecc_errors(void)
{
	uint32_t count = ecc_log_get_correction_count();

	if (count > 0) {
		paniclog_append_noflush("ECC Corrections:%u\n", count);
	}
}
#endif /* CONFIG_ECC_LOGGING */

#if CONFIG_FREEZE
extern bool freezer_incore_cseg_acct;
extern uint32_t c_segment_pages_compressed_incore;
#endif

extern uint32_t c_segment_pages_compressed;
extern uint32_t c_segment_count;
extern uint32_t c_segments_limit;
extern uint32_t c_segment_pages_compressed_limit;
extern uint32_t c_segment_pages_compressed_nearing_limit;
extern uint32_t c_segments_nearing_limit;
extern int vm_num_swap_files;

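/*
 * One-line compressor health summary for the paniclog. Under CONFIG_FREEZE
 * with in-core accounting enabled, swapped-out segments are excluded so the
 * percentages reflect only what is resident in memory.
 */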
void
panic_display_compressor_stats(void)
{
	int isswaplow = vm_swap_low_on_space();
#if CONFIG_FREEZE
	uint32_t incore_seg_count;
	uint32_t incore_compressed_pages;
	if (freezer_incore_cseg_acct) {
		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
		incore_compressed_pages = c_segment_pages_compressed_incore;
	} else {
		incore_seg_count = c_segment_count;
		incore_compressed_pages = c_segment_pages_compressed;
	}

	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
	    (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (incore_seg_count * 100) / c_segments_limit,
	    (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#else /* CONFIG_FREEZE */
	paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
	    (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
	    (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
	    (c_segment_count * 100) / c_segments_limit,
	    (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
	    vm_num_swap_files,
	    isswaplow ? "LOW":"OK");
#endif /* CONFIG_FREEZE */
}

#if !CONFIG_TELEMETRY
int
telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
#endif

#include <machine/machine_cpu.h>

SECURITY_READ_ONLY_LATE(uint32_t) kern_feature_overrides = 0;

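/*
 * Returns TRUE iff every bit of fmask was disabled via the
 * "validation_disables" boot-arg. The first caller parses the boot-arg and
 * latches the result (tagged with KF_INITIALIZED), so later calls are plain
 * reads of kern_feature_overrides.
 */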
boolean_t
kern_feature_override(uint32_t fmask)
{
	if (kern_feature_overrides == 0) {
		uint32_t fdisables = 0;
		/*
		 * Expected to be first invoked early, in a single-threaded
		 * environment
		 */
		if (PE_parse_boot_argn("validation_disables", &fdisables, sizeof(fdisables))) {
			fdisables |= KF_INITIALIZED;
			kern_feature_overrides = fdisables;
		} else {
			kern_feature_overrides |= KF_INITIALIZED;
		}
	}
	return (kern_feature_overrides & fmask) == fmask;
}

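/*
 * Local corefile policy is driven entirely by the debug= boot-arg: with no
 * boot-arg there is no core, and DB_DISABLE_LOCAL_CORE always wins.
 */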
boolean_t
on_device_corefile_enabled(void)
{
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	if (debug_boot_arg == 0) {
		return FALSE;
	}
	if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
		return FALSE;
	}
#if !XNU_TARGET_OS_OSX
	/*
	 * outside of macOS, if there's a debug boot-arg set and local
	 * cores aren't explicitly disabled, we always write a corefile.
	 */
	return TRUE;
#else /* !XNU_TARGET_OS_OSX */
	/*
	 * on macOS, if corefiles on panic are requested and local cores
	 * aren't disabled we write a local core.
	 */
	if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
		return TRUE;
	}
#endif /* !XNU_TARGET_OS_OSX */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
	return FALSE;
}

boolean_t
panic_stackshot_to_disk_enabled(void)
{
	assert(startup_phase >= STARTUP_SUB_TUNABLES);
#if defined(__x86_64__)
	if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
		/* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
		if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
			return FALSE;
		}

		return TRUE;
	}
#endif
	return FALSE;
}

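/*
 * Returns the preoslog region handed over by the bootloader. IODTGetLoaderInfo
 * reports a physical address, so it is translated through the kernel's static
 * mapping (ml_static_ptovirt) before being returned to the sysctl layer.
 */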
const char *
sysctl_debug_get_preoslog(size_t *size)
{
	int result = 0;
	void *preoslog_pa = NULL;
	int preoslog_size = 0;

	result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
	if (result || preoslog_pa == NULL || preoslog_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
		*size = 0;
		return NULL;
	}

	/*
	 * Beware:
	 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
	 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
	 */
	*size = preoslog_size;
	return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
}

void
sysctl_debug_free_preoslog(void)
{
#if RELEASE
	int result = 0;
	void *preoslog_pa = NULL;
	int preoslog_size = 0;

	result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
	if (result || preoslog_pa == NULL || preoslog_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
		return;
	}

	IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
#else
	/* On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
#endif // RELEASE
}

#define AWL_HV_ENTRY_FLAG (0x1)

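/*
 * WATCHDOG_DIAG0 is an implementation-defined AArch64 system register
 * (op0=3, op1=5, CRn=15, CRm=2, op2=6), so it is accessed through its
 * encoded S3_5_c15_c2_6 name rather than an architectural alias.
 */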
static inline void
awl_set_scratch_reg_hv_bit(void)
{
#if defined(__arm64__)
#define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
	uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
	awl_diag0 |= AWL_HV_ENTRY_FLAG;
	__builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
#endif // defined(__arm64__)
}

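/*
 * Records, at most once per CPU, that this CPU has entered a hypervisor
 * guest, and mirrors that fact into the AWL watchdog scratch register.
 */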
void
awl_mark_hv_entry(void)
{
	if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
		return;
	}
	*PERCPU_GET(hv_entry_detected) = true;

	awl_set_scratch_reg_hv_bit();
}

/*
 * AWL WatchdogDiag0 is not restored by hardware when a CPU comes out of
 * reset, so restore it manually whenever a CPU boots.
 */
static bool
awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
{
	if (event == CPU_BOOTED) {
		if (*PERCPU_GET(hv_entry_detected)) {
			awl_set_scratch_reg_hv_bit();
		}
	}

	return true;
}

/*
 * Identifies whether AWL Scratch0/1 exists in the system; if so, sets a flag
 * and subscribes to a CPU-event callback that restores the register after
 * hibernation.
 */
__startup_func
static void
set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
{
	DTEntry base = NULL;

	if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
		return;
	}
	const uint8_t *data = NULL;
	unsigned int data_size = sizeof(uint8_t);

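	/*
	 * "awl-scratch-supported" is a byte array in the device tree; any
	 * nonzero element means the platform provides the scratch registers.
	 */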
	if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
		for (unsigned int i = 0; i < data_size; i++) {
			if (data[i] != 0) {
				awl_scratch_reg_supported = true;
				cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
				break;
			}
		}
	}
}
STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
