/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <ptrauth.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOKitServer.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <console/serial_protos.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>
#include <kern/ledger.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void * tracebuf,
    uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p,
    uint64_t since_timestamp, uint32_t pagetable_mask);
extern int kdp_stack_snapshot_bytes_traced(void);
extern int kdp_stack_snapshot_bytes_uncompressed(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 13
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
struct proc;
extern int proc_pid(struct proc *p);
extern void proc_name_kdp(struct proc *, char *, int);

/*
 * Make sure there's enough space to include the relevant bits in the format required
 * within the space allocated for the panic version string in the panic header.
 * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'.
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"

extern const char version[];
extern char osversion[];
extern char osproductversion[];
extern char osreleasetype[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/*
 * Choosing the size for gTargetTypeBuffer as 16 and for gModelTypeBuffer as 32,
 * since the target name and the model name typically don't exceed these sizes.
 */
extern char gTargetTypeBuffer[16];
extern char gModelTypeBuffer[32];

extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;

extern void stackshot_memcpy(void *dst, const void *src, size_t len);

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)

/* Timeout for all processors responding to debug crosscall */
MACHINE_TIMEOUT32(debug_ack_timeout, "debug-ack", 240000, MACHINE_TIMEOUT_UNIT_TIMEBASE, NULL);

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
unsigned int DebugContextCount = 0;

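/*
 * State stashed from the x86 side on bridgeOS targets. The 0xFF and
 * UINT*_MAX initializers are sentinels meaning "not available"; the panic
 * log code below checks for them before printing these values.
 */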
#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif


/*
 * Backtrace a single frame.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
{
	unsigned int i = 0;
	addr64_t lr = 0;
	addr64_t fp = topfp;
	addr64_t fp_for_ppn = 0;
	ppnum_t ppn = (ppnum_t)NULL;
	vm_offset_t raddrs[FP_MAX_NUM_TO_EVALUATE] = { 0 };
	bool dump_kernel_stack = (fp >= VM_MIN_KERNEL_ADDRESS);

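	/*
	 * The top frame determines whether we are walking a kernel stack or a
	 * user stack; a frame that crosses that boundary terminates the walk below.
	 */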
	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/*
		 * Check to see if the current address will result in a different
		 * ppn than previously computed (to avoid recomputation) via
		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT).
		 */
		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
				/* return addresses on stack will be signed by arm64e ABI */
				lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}
		/*
		 * Counter 'i' may == FP_MAX_NUM_TO_EVALUATE when running one
		 * extra round to check whether we have all frames in order to
		 * indicate (in)complete backtrace below. This happens in a case
		 * where total frame count and FP_MAX_NUM_TO_EVALUATE are equal.
		 * Do not capture anything.
		 */
		if (i < FP_MAX_NUM_TO_EVALUATE && lr) {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
			} else {
				paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
			}
			raddrs[i] = lr;
		}
	} while ((++i <= FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));

	if (i > FP_MAX_NUM_TO_EVALUATE && fp != 0) {
		paniclog_append_noflush("Backtrace continues...\n");
	}

	if (print_kexts_in_backtrace && i > 0) {
		kmod_panic_dump(&raddrs[0], i);
	}
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
panic_display_hung_cpus_help(void)
{
#if defined(__arm64__)
	const uint32_t pcsr_offset = 0x90;
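	/*
	 * Note: pcsr_offset is used below as the offset of the PC sample
	 * register within each CPU's UTTDBG register block; a 32-bit read of
	 * the same address serves as the sample trigger.
	 */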

	/*
	 * Print some info that might help in cases where nothing
	 * else does.
	 */
	const ml_topology_info_t *info = ml_get_topology_info();
	if (info) {
		unsigned i, retry;

		for (i = 0; i < info->num_cpus; i++) {
			if (info->cpus[i].cpu_UTTDBG_regs) {
				volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset);
				volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
				uint64_t pc = 0;

				// A number of retries may be needed until this works.
				for (retry = 1024; retry && !pc; retry--) {
					// A 32-bit read is required to make a PC sample be produced; otherwise we only get zero.
					(void)*pcsrTrigger;
					pc = *pcsr;
				}

				// Post-processing (same as Astris does): sign-extend the sampled PC.
				if (pc >> 48) {
					pc |= 0xffff000000000000ull;
				}
				paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc);
			}
		}
	}
#endif //defined(__arm64__)
}


static void
panic_display_pvhs_locked(void)
{
}

static void
panic_display_pvh_to_lock(void)
{
}

static void
panic_display_last_pc_lr(void)
{
#if defined(__arm64__)
	const int max_cpu = ml_get_max_cpu_number();

	for (int cpu = 0; cpu <= max_cpu; cpu++) {
		cpu_data_t *current_cpu_datap = cpu_datap(cpu);

		if (current_cpu_datap == NULL) {
			continue;
		}

		if (current_cpu_datap == getCpuDatap()) {
			/**
			 * Skip printing the PC/LR if this is the CPU
			 * that initiated the panic.
			 */
			paniclog_append_noflush("CORE %u is the one that panicked. Check the full backtrace for details.\n", cpu);
			continue;
		}

		paniclog_append_noflush("CORE %u: PC=0x%016llx, LR=0x%016llx, FP=0x%016llx\n", cpu,
		    current_cpu_datap->ipi_pc, (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_lr),
		    (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_fp));
	}
#endif
}

static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
	int logversion = PANICLOG_VERSION;
	thread_t cur_thread = current_thread();
	uintptr_t cur_fp;
	task_t task;
	struct proc *proc;
	int print_vnodes = 0;
	const char *nohilite_thread_marker = "\t";

	/* end_marker_bytes is set to 200 so that the END marker and the stackshot summary info can always be printed */
	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
	int bytes_uncompressed = 0;
	uint64_t bytes_used = 0ULL;
	int err = 0;
	char *stackshot_begin_loc = NULL;
	kc_format_t kc_format;
	bool filesetKC = false;

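	/* Capture the current frame pointer so that this thread's own stack can be walked below. */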
#if defined(__arm__)
	__asm__ volatile ("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
	__asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
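	/* Guard against recursion if we panic again while printing backtraces. */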
	if (panic_bt_depth != 0) {
		return;
	}
	panic_bt_depth++;

	__unused bool result = PE_get_primary_kc_format(&kc_format);
	assert(result == true);
	filesetKC = kc_format == KCFormatFileset;

	/* Truncate panic string to 1200 bytes */
	paniclog_append_noflush("Debugger message: %.1200s\n", message);
	if (debug_enabled) {
		paniclog_append_noflush("Device: %s\n",
		    ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
		paniclog_append_noflush("Hardware Model: %s\n",
		    ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
		paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
		    gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
		    gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
		if (last_hwaccess_thread) {
			paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
		}
		paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
	}
	paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
	paniclog_append_noflush("OS release type: %.256s\n",
	    ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet");
	paniclog_append_noflush("OS version: %.256s\n",
	    ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("macOS version: %.256s\n",
	    ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
	paniclog_append_noflush("Kernel version: %.512s\n", version);

	if (kernelcache_uuid_valid) {
		if (filesetKC) {
			paniclog_append_noflush("Fileset Kernelcache UUID: ");
		} else {
			paniclog_append_noflush("KernelCache UUID: ");
		}
		for (size_t index = 0; index < sizeof(uuid_t); index++) {
			paniclog_append_noflush("%02X", kernelcache_uuid[index]);
		}
		paniclog_append_noflush("\n");
	}
	panic_display_kernel_uuid();

	paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
	paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("x86 EFI Boot State: ");
	if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 System State: ");
	if (PE_smc_stashed_x86_system_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Power State: ");
	if (PE_smc_stashed_x86_power_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Shutdown Cause: ");
	if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Previous Power Transitions: ");
	if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
		paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("PCIeUp link state: ");
	if (PE_pcie_stashed_link_state != UINT32_MAX) {
		paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
#endif
	if (panic_data_buffers != NULL) {
		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
		for (int i = 0; i < panic_data_buffers->len; i++) {
			paniclog_append_noflush("%02X", panic_buffer_data[i]);
		}
		paniclog_append_noflush("\n");
	}
	paniclog_append_noflush("Paniclog version: %d\n", logversion);

	panic_display_kernel_aslr();
	panic_display_times();
	panic_display_zalloc();
	panic_display_hung_cpus_help();
	panic_display_pvhs_locked();
	panic_display_pvh_to_lock();
	panic_display_last_pc_lr();
#if CONFIG_ECC_LOGGING
	panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

#if DEVELOPMENT || DEBUG
	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
	}
#endif

	// Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
	if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
		thread_t top_runnable[5] = {0};
		thread_t thread;
		int total_cpu_usage = 0;

		print_vnodes = 1;


		for (thread = (thread_t)queue_first(&threads);
		    PANIC_VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			total_cpu_usage += thread->cpu_usage;

			// Look for the 5 runnable threads with highest priority
			if (thread->state & TH_RUN) {
				int k;
				thread_t comparison_thread = thread;

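				// Insertion pass: bubble this thread into top_runnable, displacing lower-priority entries.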
				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
					if (top_runnable[k] == 0) {
						top_runnable[k] = comparison_thread;
						break;
					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
						thread_t temp = top_runnable[k];
						top_runnable[k] = comparison_thread;
						comparison_thread = temp;
					} // if comparison thread has higher priority than previously saved thread
				} // loop through highest priority runnable threads
			} // Check if thread is runnable
		} // Loop through all threads

		// Print the relevant info for each thread identified
		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
		paniclog_append_noflush("Thread task pri cpu_usage\n");

		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
			if (top_runnable[i] &&
			    panic_get_thread_proc_task(top_runnable[i], &task, &proc) && proc) {
				char name[MAXCOMLEN + 1];
				proc_name_kdp(proc, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
	}

	// print current task info
	if (panic_get_thread_proc_task(cur_thread, &task, &proc)) {
		if (PANIC_VALIDATE_PTR(task->map) &&
		    PANIC_VALIDATE_PTR(task->map->pmap)) {
			ledger_amount_t resident = 0;
			if (task != kernel_task) {
				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &resident);
				resident >>= VM_MAP_PAGE_SHIFT(task->map);
			}
			paniclog_append_noflush("Panicked task %p: %lld pages, %d threads: ",
			    task, resident, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
			    task, task->thread_count);
		}

		if (proc) {
			char name[MAXCOMLEN + 1];
			proc_name_kdp(proc, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", proc_pid(proc), name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
	}

	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
		    cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
#else
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
#endif
	} else {
		paniclog_append_noflush("Could not print panicked thread backtrace: "
		    "frame pointer outside kernel vm.\n");
	}

	paniclog_append_noflush("\n");
	if (filesetKC) {
		kext_dump_panic_lists(&paniclog_append_noflush);
		paniclog_append_noflush("\n");
	}
	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
		    osproductversion, osversion);
	}
#if defined(XNU_TARGET_OS_BRIDGE)
	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
		    macosproductversion, macosversion);
	}
#endif

	if (debug_ack_timeout_count) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
	} else if (stackshot_active()) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
	} else {
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
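		/* Note: if debug_buf_ptr is already 8-byte aligned, this still advances it by a full 8 bytes. */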
		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
		stackshot_begin_loc = debug_buf_ptr;

		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
		    KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
		    KCFLAG_USE_MEMCOPY);
		if (err == KERN_SUCCESS) {
			uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
			    STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
			    STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);

			err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshot_memcpy, KCDCT_ZLIB);
			if (err != KERN_SUCCESS) {
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
				stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
			}
			if (filesetKC) {
				stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
			}

			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
			    stackshot_flags, &kc_panic_data, 0, 0);
			err = do_stackshot(NULL);
			bytes_traced = kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->eph_stackshot_len = bytes_traced;

				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
					bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
				} else {
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
				}
			} else {
				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		} else {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
		}
	}

	assert(panic_info->eph_other_log_offset != 0);

	if (print_vnodes != 0) {
		panic_print_vnodes();
	}

	panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
	unsigned int initial_not_in_kdp = not_in_kdp;

	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(cpu_data_ptr->PAB_active == FALSE);
	cpu_data_ptr->PAB_active = TRUE;

	/*
	 * Because print_all_backtraces uses the pmap routines, it needs to
	 * avoid taking pmap locks. Right now, this is conditionalized on
	 * not_in_kdp.
	 */
	not_in_kdp = 0;
	do_print_all_backtraces(message, panic_options);

	not_in_kdp = initial_not_in_kdp;

	cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
	if (kdp_clock_is_locked()) {
		paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
		return;
	}

	extern lck_ticket_t clock_lock;

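	/*
	 * Only read the calendar time if the clock is configured and the clock
	 * lock is not currently held: try-acquire it and immediately drop it.
	 */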
	if ((is_clock_configured) && (lck_ticket_lock_try(&clock_lock, LCK_GRP_NULL))) {
		clock_sec_t secs, boot_secs;
		clock_usec_t usecs, boot_usecs;

		lck_ticket_unlock(&clock_lock);

		clock_get_calendar_microtime(&secs, &usecs);
		clock_get_boottime_microtime(&boot_secs, &boot_usecs);

		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
		paniclog_append_noflush("Epoch Time: sec usec\n");
		paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
		paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
		paniclog_append_noflush(" Wake : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
		paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
	}
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
	// empty stub. Really only used on x86_64.
	return;
}

void
SavePanicInfo(
	const char *message, __unused void *panic_data, uint64_t panic_options)
{
	/*
	 * This should be initialized by the time we get here, but
	 * if it is not, asserting about it will be of no use (it will
	 * come right back to here), so just loop right here and now.
	 * This prevents early-boot panics from becoming recursive and
	 * thus makes them easier to debug. If you attached to a device
	 * and see your PC here, look down a few frames to see your
	 * early-boot panic there.
	 */
	while (!panic_info || panic_info->eph_panic_log_offset == 0) {
		;
	}

	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

#if defined(XNU_TARGET_OS_BRIDGE)
	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

	/*
	 * On newer targets, panic data is stored directly into the iBoot panic region.
	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
	 */
	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
		PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
	}

	if (PanicInfoSaved || (debug_buf_size == 0)) {
		return;
	}

	PanicInfoSaved = TRUE;

	print_all_backtraces(message, panic_options);

	assert(panic_info->eph_panic_log_len != 0);
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	PEHaltRestart(kPEPanicSync);

	/*
	 * Notifies registered IOPlatformPanicAction callbacks
	 * (which includes one to disable the memcache) and flushes
	 * the buffer contents from the cache
	 */
	paniclog_flush();
}

void
paniclog_flush()
{
	unsigned int panicbuf_length = 0;

	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
	if (!panicbuf_length) {
		return;
	}

	/*
	 * Updates the log length of the last part of the panic log.
	 */
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	/*
	 * Updates the metadata at the beginning of the panic buffer,
	 * updates the CRC.
	 */
	PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

	/*
	 * This is currently unused by platform KEXTs on embedded but is
	 * kept for compatibility with the published IOKit interfaces.
	 */
	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

	PE_sync_panic_buffers();
}

/*
 * @function _was_in_userspace
 *
 * @abstract Unused function used to indicate that a CPU was in userspace
 * before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called.
 */
static void __attribute__((__noreturn__))
_was_in_userspace(void)
{
	panic("%s: should not have been invoked.", __FUNCTION__);
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @param is_stackshot If true, this is a stackshot request.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
	boolean_t proceed_on_sync_failure, bool is_stackshot)
{
	uint64_t max_mabs_time, current_mabs_time;
	int cpu;
	int max_cpu;
	cpu_data_t *target_cpu_datap;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	/* Check for nested debugger entry. */
	cpu_data_ptr->debugger_active++;
	if (cpu_data_ptr->debugger_active != 1) {
		return KERN_SUCCESS;
	}

	/*
	 * If debugger_sync is not 0, someone responded excessively late to the last
	 * debug request (we zero the sync variable in the return function). Zero it
	 * again here. This should prevent us from getting out of sync (heh) and
	 * timing out on every entry to the debugger if we timeout once.
	 */

	debugger_sync = 0;
	mp_kdp_trap = 1;
	debug_cpus_spinning = 0;

#pragma unused(is_stackshot)

	/*
	 * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
	 * to the signal.
	 */
	__builtin_arm_dmb(DMB_ISH);

	/*
	 * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
	 * is not synchronous).
	 */
	bool cpu_signal_failed = false;
	max_cpu = ml_get_max_cpu_number();

	boolean_t immediate_halt = FALSE;
	if (proceed_on_sync_failure && force_immediate_debug_halt) {
		immediate_halt = TRUE;
	}

	if (!immediate_halt) {
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}

			if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
				os_atomic_inc(&debugger_sync, relaxed);
				os_atomic_inc(&debug_cpus_spinning, relaxed);
			} else {
				cpu_signal_failed = true;
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}

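		/* Convert the relative debug-ACK timeout into an absolute deadline; a value of 0 means wait forever. */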
		max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);

		if (max_mabs_time > 0) {
			current_mabs_time = mach_absolute_time();
			max_mabs_time += current_mabs_time;
			assert(max_mabs_time > current_mabs_time);
		}

		/*
		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
		 * uninterruptibly spinning on someone else. The best we can hope for is that
		 * all other CPUs have either responded or are spinning in a context that is
		 * debugger safe.
		 */
		while ((debugger_sync != 0) && (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
			current_mabs_time = mach_absolute_time();
		}
	}

	if (cpu_signal_failed && !proceed_on_sync_failure) {
		DebuggerXCallReturn();
		return KERN_FAILURE;
	} else if (immediate_halt || (max_mabs_time > 0 && current_mabs_time >= max_mabs_time)) {
		/*
		 * For the moment, we're aiming for a timeout that the user shouldn't notice,
		 * but will be sufficient to let the other core respond.
		 */
		__builtin_arm_dmb(DMB_ISH);
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}
			if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
				continue;
			}
			if (proceed_on_sync_failure) {
				paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
				if (halt_status < 0) {
					paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					if (halt_status > 0) {
						paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
					}
					target_cpu_datap->halt_status = CPU_HALTED;
				}
			} else {
				kprintf("Debugger synch pending on cpu %d\n", cpu);
			}
		}
		if (proceed_on_sync_failure) {
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

				if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
				    (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
					continue;
				}
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
				    NSEC_PER_SEC, &target_cpu_datap->halt_state);
				if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
					paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					paniclog_append_noflush("cpu %d successfully halted\n", cpu);
					target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
				}
			}
			if (immediate_halt) {
				paniclog_append_noflush("Immediate halt requested on all cores\n");
			} else {
				paniclog_append_noflush("Debugger synchronization timed out; waited %u nanoseconds\n",
				    os_atomic_load(&debug_ack_timeout, relaxed));
			}
			debug_ack_timeout_count++;
			return KERN_SUCCESS;
		} else {
			DebuggerXCallReturn();
			return KERN_OPERATION_TIMED_OUT;
		}
	} else {
		return KERN_SUCCESS;
	}
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t max_mabs_time, current_mabs_time;

	cpu_data_ptr->debugger_active--;
	if (cpu_data_ptr->debugger_active != 0) {
		return;
	}

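	/* Release the other CPUs still spinning on mp_kdp_trap in DebuggerXCall(). */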
	mp_kdp_trap = 0;
	debugger_sync = 0;

	max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);

	if (max_mabs_time > 0) {
		current_mabs_time = mach_absolute_time();
		max_mabs_time += current_mabs_time;
		assert(max_mabs_time > current_mabs_time);
	}

	/*
	 * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
	 * It's possible for one or more CPUs to not decrement debug_cpus_spinning,
	 * since they may be stuck somewhere else with interrupts disabled.
	 * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
	 *
	 * Note that the same is done in DebuggerXCallEnter, when we wait for other
	 * CPUs to update debugger_sync. If we time out, let's hope for all CPUs to be
	 * spinning in a debugger-safe context.
	 */
	while ((debug_cpus_spinning != 0) && (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
		current_mabs_time = mach_absolute_time();
	}

	/* Do we need a barrier here? */
	__builtin_arm_dmb(DMB_ISH);
}

void
DebuggerXCall(
	void *ctx)
{
	boolean_t save_context = FALSE;
	vm_offset_t kstackptr = 0;
	arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

	if (regs != NULL) {
#if defined(__arm64__)
		current_cpu_datap()->ipi_pc = (uint64_t)get_saved_state_pc(regs);
		current_cpu_datap()->ipi_lr = (uint64_t)get_saved_state_lr(regs);
		current_cpu_datap()->ipi_fp = (uint64_t)get_saved_state_fp(regs);
		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
		save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
	}

	kstackptr = current_thread()->machine.kstackptr;

#if defined(__arm64__)
	arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		current_thread()->machine.kpcb = regs;
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		state->fp = 0;
		state->pc = pc;
		state->lr = 0;
		state->sp = 0;
	}
#else
	arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		copy_signed_thread_state(state, regs);
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		set_saved_state_fp(state, 0);
		set_saved_state_pc(state, pc);
		set_saved_state_lr(state, 0);
		set_saved_state_sp(state, 0);
	}
#endif

	/*
	 * When running in serial mode, the core capturing the dump may hold interrupts disabled
	 * for a time longer than the timeout. That path includes logic to reset the timestamp
	 * so that we do not eventually trigger the interrupt timeout assert().
	 *
	 * Here we check whether other cores have already gone over the timeout at this point
	 * before spinning, so we at least cover the IPI reception path. After spinning, however,
	 * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
	 */
	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_END();
	}

	os_atomic_dec(&debugger_sync, relaxed);
	__builtin_arm_dmb(DMB_ISH);


	while (mp_kdp_trap) {
		;
	}

	/**
	 * Alert the triggering CPU that this CPU is done spinning. The CPU that
	 * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
	 * all of the CPUs to exit the above loop before continuing.
	 */
	os_atomic_dec(&debug_cpus_spinning, relaxed);

	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
	}

#if defined(__arm64__)
	current_thread()->machine.kpcb = NULL;
#endif /* defined(__arm64__) */

	/* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
	unsigned int reason,
	void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
	alternate_debugger_enter();
#endif

#if MACH_KDP
	kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
	/* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
	return pmap_bootloader_page(ppn);
}
