/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <ptrauth.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_common.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOKitServer.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h>  /* for btop */

#include <console/video_console.h>
#include <console/serial_protos.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>
#include <kern/ledger.h>


#if DEVELOPMENT || DEBUG
#include <kern/ext_paniclog.h>
#endif

#if     MACH_KDP
void    kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t    do_stackshot(void *);
extern void                    kdp_snapshot_preflight(int pid, void * tracebuf,
    uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p,
    uint64_t since_timestamp, uint32_t pagetable_mask);
extern int              kdp_stack_snapshot_bytes_traced(void);
extern int              kdp_stack_snapshot_bytes_uncompressed(void);

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 14
static struct kcdata_descriptor kc_panic_data;

extern char iBoot_version[];
#if defined(TARGET_OS_OSX) && defined(__arm64__)
extern char iBoot_Stage_2_version[];
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

extern volatile uint32_t        debug_enabled;
extern unsigned int         not_in_kdp;

extern int                              copyinframe(vm_address_t fp, uint32_t * frame);
extern void                             kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
struct proc;
extern int        proc_pid(struct proc *p);
extern void       proc_name_kdp(struct proc *, char *, int);

/*
 * Make sure the relevant version strings, in the format required, fit within
 * the space allocated for the panic version string in the panic header.
 * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'.
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"
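/*
 * For example, hypothetical values osproductversion = "17.1" and
 * osversion = "21B74" would render as "17.1 (21B74)"; each component
 * is truncated to 14 characters by the format string above.
 */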

extern const char version[];
extern char       osversion[];
extern char       osproductversion[];
extern char       osreleasetype[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char     macosproductversion[];
extern char     macosversion[];
#endif

extern uint8_t          gPlatformECID[8];
extern uint32_t         gPlatformMemoryID;

extern uint64_t         last_hwaccess_thread;

/* Sizing gTargetTypeBuffer at 16 bytes and gModelTypeBuffer at 32 bytes,
 * since the target name and model name typically do not exceed these sizes. */
extern char  gTargetTypeBuffer[16];
extern char  gModelTypeBuffer[32];

extern struct timeval    gIOLastSleepTime;
extern struct timeval    gIOLastWakeTime;
extern boolean_t                 is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;
extern uuid_string_t bootsessionuuid_string;

extern uint64_t roots_installed;

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK      ((uint32_t)(0x3))
#define FP_LR_OFFSET           ((uint32_t)4)
#define FP_LR_OFFSET64         ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)
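/*
 * On arm64, the AAPCS64 frame record is a pair of 8-byte words: the saved
 * frame pointer at [fp] and the saved link register at [fp + 8], hence
 * FP_LR_OFFSET64; the 32-bit frame layout uses 4-byte words, hence
 * FP_LR_OFFSET.
 */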

/* Timeout for all processors responding to debug crosscall */
MACHINE_TIMEOUT(debug_ack_timeout, "debug-ack", 240000, MACHINE_TIMEOUT_UNIT_TIMEBASE, NULL);
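/* The timeout is expressed in timebase ticks (e.g. 240000 ticks is roughly 10 ms at a 24 MHz timebase). */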

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);


/* Global variables */
static uint32_t       panic_bt_depth;
boolean_t             PanicInfoSaved = FALSE;
boolean_t             force_immediate_debug_halt = FALSE;
unsigned int          debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
unsigned int          DebugContextCount = 0;

#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
uint64_t PE_nvram_stashed_x86_macos_slide = UINT64_MAX;
#endif


/*
 * Backtrace a single frame.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
{
	unsigned int    i = 0;
	addr64_t        lr = 0;
	addr64_t        fp = topfp;
	addr64_t        fp_for_ppn = 0;
	ppnum_t         ppn = (ppnum_t)NULL;
	vm_offset_t     raddrs[FP_MAX_NUM_TO_EVALUATE] = { 0 };
	bool            dump_kernel_stack = (fp >= VM_MIN_KERNEL_ADDRESS);

#if defined(HAS_APPLE_PAC)
	fp = (addr64_t)ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
#endif
	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/*
		 * Check whether the current address falls on a different ppn
		 * than previously computed (to avoid recomputation), via
		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT).
		 */
		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
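			/*
			 * Compose the physical address of the saved LR from the
			 * page frame number plus the in-page offset, and read it
			 * directly.
			 */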
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
				/* return addresses on stack will be signed by arm64e ABI */
				lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t  Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t  Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
				/* frame pointers on stack will be signed by arm64e ABI */
				fp = (addr64_t) ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
#endif
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t  Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t  Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}
		/*
		 * Counter 'i' may equal FP_MAX_NUM_TO_EVALUATE when we run one
		 * extra round to check whether we have all frames, so that we can
		 * indicate an (in)complete backtrace below. This happens when the
		 * total frame count equals FP_MAX_NUM_TO_EVALUATE. Do not capture
		 * anything in that round.
		 */
		if (i < FP_MAX_NUM_TO_EVALUATE && lr) {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t  lr: 0x%016llx  fp: 0x%016llx\n", cur_marker, lr, fp);
			} else {
				paniclog_append_noflush("%s\t  lr: 0x%08x  fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
			}
			raddrs[i] = lr;
		}
	} while ((++i <= FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
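	/*
	 * The walk above ends after FP_MAX_NUM_TO_EVALUATE frames (plus the one
	 * extra round noted above), or earlier if the frame chain loops back to
	 * the starting frame.
	 */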

	if (i > FP_MAX_NUM_TO_EVALUATE && fp != 0) {
		paniclog_append_noflush("Backtrace continues...\n");
	}

	if (print_kexts_in_backtrace && i > 0) {
		kmod_panic_dump(&raddrs[0], i);
	}
}
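
/*
 * Sample print_one_backtrace() paniclog output (addresses are hypothetical):
 *	  lr: 0xfffffe0008123456  fp: 0xffffffe834567890
 *	  lr: 0xfffffe0008abcdef  fp: 0xffffffe8345678d0
 */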

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
panic_display_tpidrs(void)
{
#if defined(__arm64__)
	paniclog_append_noflush("TPIDRx_ELy = {1: 0x%016llx  0: 0x%016llx  0ro: 0x%016llx }\n",
	    __builtin_arm_rsr64("TPIDR_EL1"), __builtin_arm_rsr64("TPIDR_EL0"),
	    __builtin_arm_rsr64("TPIDRRO_EL0"));
#endif //defined(__arm64__)
}

static void
panic_display_hung_cpus_help(void)
{
#if defined(__arm64__)
	const uint32_t pcsr_offset = 0x90;

	/*
	 * Print some info that might help in cases where nothing
	 * else does
	 */
	const ml_topology_info_t *info = ml_get_topology_info();
	if (info) {
		unsigned i, retry;

		for (i = 0; i < info->num_cpus; i++) {
			if (!PE_cpu_power_check_kdp(i)) {
				paniclog_append_noflush("CORE %u is offline, skipping\n", i);
				continue;
			}
			if (info->cpus[i].cpu_UTTDBG_regs) {
				volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset);
				volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
				uint64_t pc = 0;

				// a number of retries may be needed until this works
				for (retry = 1024; retry && !pc; retry--) {
					// a 32-bit read is required to make the hardware produce a PC sample; otherwise we only read zero
					(void)*pcsrTrigger;
					pc = *pcsr;
				}

				// postprocessing (same as astris does): sign-extend into a canonical kernel address
				if (pc >> 48) {
					pc |= 0xffff000000000000ull;
				}
				paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc);
			}
		}
	}
#endif //defined(__arm64__)
}


static void
panic_display_pvhs_locked(void)
{
}

static void
panic_display_pvh_to_lock(void)
{
}

static void
panic_display_last_pc_lr(void)
{
#if defined(__arm64__)
	const int max_cpu = ml_get_max_cpu_number();

	for (int cpu = 0; cpu <= max_cpu; cpu++) {
		cpu_data_t *current_cpu_datap = cpu_datap(cpu);

		if (current_cpu_datap == NULL) {
			continue;
		}

		if (current_cpu_datap == getCpuDatap()) {
			/**
			 * Skip printing the PC/LR if this is the CPU
			 * that initiated the panic.
			 */
			paniclog_append_noflush("CORE %u is the one that panicked. Check the full backtrace for details.\n", cpu);
			continue;
		}

		paniclog_append_noflush("CORE %u: PC=0x%016llx, LR=0x%016llx, FP=0x%016llx\n", cpu,
		    current_cpu_datap->ipi_pc, (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_lr),
		    (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_fp));
	}
#endif
}

static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
	int             logversion = PANICLOG_VERSION;
	thread_t        cur_thread = current_thread();
	uintptr_t       cur_fp;
	task_t          task;
	struct proc    *proc;
	int             print_vnodes = 0;
	const char *nohilite_thread_marker = "\t";

	/* end_marker_bytes is set to 200 so there is always room to print the END marker plus stackshot summary info */
	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
	int bytes_uncompressed = 0;
	uint64_t bytes_used = 0ULL;
	int err = 0;
	char *stackshot_begin_loc = NULL;
	kc_format_t kc_format;
	bool filesetKC = false;
#if CONFIG_EXT_PANICLOG
	uint32_t ext_paniclog_bytes = 0;
#endif

#if defined(__arm64__)
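	/*
	 * "add %0, xzr, fp" is a register move: adding the zero register to fp
	 * (x29) captures this function's frame pointer in cur_fp.
	 */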
	__asm__         volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
	if (panic_bt_depth != 0) {
		return;
	}
	panic_bt_depth++;

	__unused bool result = PE_get_primary_kc_format(&kc_format);
	assert(result == true);
	filesetKC = kc_format == KCFormatFileset;

	/* Truncate panic string to 1200 bytes */
	paniclog_append_noflush("Debugger message: %.1200s\n", message);
	if (debug_enabled) {
		paniclog_append_noflush("Device: %s\n",
		    ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
		paniclog_append_noflush("Hardware Model: %s\n",
		    ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
		paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
		    gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
		    gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
		if (last_hwaccess_thread) {
			paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
		}
		paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
	}
	paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
	paniclog_append_noflush("OS release type: %.256s\n",
	    ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet");
	paniclog_append_noflush("OS version: %.256s\n",
	    ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("macOS version: %.256s\n",
	    ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
	paniclog_append_noflush("Kernel version: %.512s\n", version);

	if (kernelcache_uuid_valid) {
		if (filesetKC) {
			paniclog_append_noflush("Fileset Kernelcache UUID: ");
		} else {
			paniclog_append_noflush("KernelCache UUID: ");
		}
		for (size_t index = 0; index < sizeof(uuid_t); index++) {
			paniclog_append_noflush("%02X", kernelcache_uuid[index]);
		}
		paniclog_append_noflush("\n");
	}
	panic_display_kernel_uuid();

	if (bootsessionuuid_string[0] != '\0') {
		paniclog_append_noflush("Boot session UUID: %s\n", bootsessionuuid_string);
	} else {
		paniclog_append_noflush("Boot session UUID not yet initialized\n");
	}

	paniclog_append_noflush("iBoot version: %.128s\n", iBoot_version);
#if defined(TARGET_OS_OSX) && defined(__arm64__)
	paniclog_append_noflush("iBoot Stage 2 version: %.128s\n", iBoot_Stage_2_version);
#endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */

	paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
	paniclog_append_noflush("roots installed: %lld\n", roots_installed);
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("x86 EFI Boot State: ");
	if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 System State: ");
	if (PE_smc_stashed_x86_system_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Power State: ");
	if (PE_smc_stashed_x86_power_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Shutdown Cause: ");
	if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Previous Power Transitions: ");
	if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
		paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("PCIeUp link state: ");
	if (PE_pcie_stashed_link_state != UINT32_MAX) {
		paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("macOS kernel slide: ");
	if (PE_nvram_stashed_x86_macos_slide != UINT64_MAX) {
		paniclog_append_noflush("%#llx\n", PE_nvram_stashed_x86_macos_slide);
	} else {
		paniclog_append_noflush("not available\n");
	}
#endif
	if (panic_data_buffers != NULL) {
		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
		for (int i = 0; i < panic_data_buffers->len; i++) {
			paniclog_append_noflush("%02X", panic_buffer_data[i]);
		}
		paniclog_append_noflush("\n");
	}
	paniclog_append_noflush("Paniclog version: %d\n", logversion);

	panic_display_kernel_aslr();
	panic_display_times();
	panic_display_zalloc();
	panic_display_hung_cpus_help();
	panic_display_tpidrs();
	panic_display_pvhs_locked();
	panic_display_pvh_to_lock();
	panic_display_last_pc_lr();
#if CONFIG_ECC_LOGGING
	panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */
	panic_display_compressor_stats();

#if DEVELOPMENT || DEBUG
	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
	}
#endif

	// Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
	if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
		thread_t        top_runnable[5] = {0};
		thread_t        thread;
		int                     total_cpu_usage = 0;

		print_vnodes = 1;


		for (thread = (thread_t)queue_first(&threads);
		    PANIC_VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			total_cpu_usage += thread->cpu_usage;

			// Look for the 5 runnable threads with highest priority
			if (thread->state & TH_RUN) {
				int                     k;
				thread_t        comparison_thread = thread;

				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
					if (top_runnable[k] == 0) {
						top_runnable[k] = comparison_thread;
						break;
					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
						thread_t temp = top_runnable[k];
						top_runnable[k] = comparison_thread;
						comparison_thread = temp;
					} // if comparison thread has higher priority than previously saved thread
				} // loop through highest priority runnable threads
			} // Check if thread is runnable
		} // Loop through all threads

		// Print the relevant info for each thread identified
		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
		paniclog_append_noflush("Thread task pri cpu_usage\n");

		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
			if (top_runnable[i] &&
			    panic_get_thread_proc_task(top_runnable[i], &task, &proc) && proc) {
				char name[MAXCOMLEN + 1];
				proc_name_kdp(proc, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
	}

	// print current task info
	if (panic_get_thread_proc_task(cur_thread, &task, &proc)) {
		if (PANIC_VALIDATE_PTR(task->map) &&
		    PANIC_VALIDATE_PTR(task->map->pmap)) {
			ledger_amount_t resident = 0;
			if (task != kernel_task) {
				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &resident);
				resident >>= VM_MAP_PAGE_SHIFT(task->map);
			}
			paniclog_append_noflush("Panicked task %p: %lld pages, %d threads: ",
			    task, resident, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
			    task, task->thread_count);
		}

		if (proc) {
			char            name[MAXCOMLEN + 1];
			proc_name_kdp(proc, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", proc_pid(proc), name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
	}

	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
		    cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
#else
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
#endif
	} else {
		paniclog_append_noflush("Could not print panicked thread backtrace: "
		    "frame pointer outside kernel vm.\n");
	}

	paniclog_append_noflush("\n");
	if (filesetKC) {
		kext_dump_panic_lists(&paniclog_append_noflush);
		paniclog_append_noflush("\n");
	}
	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
		    osproductversion, osversion);
	}
#if defined(XNU_TARGET_OS_BRIDGE)
	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
		    macosproductversion, macosversion);
	}
#endif
	if (bootsessionuuid_string[0] != '\0') {
		memcpy(panic_info->eph_bootsessionuuid_string, bootsessionuuid_string,
		    sizeof(panic_info->eph_bootsessionuuid_string));
	}
	panic_info->eph_roots_installed = roots_installed;

	if (debug_ack_timeout_count) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
	} else if (stackshot_active()) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
	} else {
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
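		/* Note: when debug_buf_ptr is already aligned this still advances it by a full 8 bytes, which is harmless. */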
		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
		stackshot_begin_loc = debug_buf_ptr;

		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
		    KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
		    KCFLAG_USE_MEMCOPY);
		if (err == KERN_SUCCESS) {
			uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
			    STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
			    STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);

			err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, kdp_memcpy, KCDCT_ZLIB);
			if (err != KERN_SUCCESS) {
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
				stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
			}
			if (filesetKC) {
				stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
			}

			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
			    stackshot_flags, &kc_panic_data, 0, 0);
			err = do_stackshot(NULL);
			bytes_traced = kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->eph_stackshot_len = bytes_traced;

				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
					bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
				} else {
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
				}
			} else {
				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		} else {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
		}
	}

#if CONFIG_EXT_PANICLOG
	// Write ext paniclog at the end of the paniclog region.
	ext_paniclog_bytes = ext_paniclog_write_panicdata();
	panic_info->eph_ext_paniclog_offset = (ext_paniclog_bytes != 0) ?
	    PE_get_offset_into_panic_region((debug_buf_base + debug_buf_size) - ext_paniclog_bytes) :
	    0;
	panic_info->eph_ext_paniclog_len = ext_paniclog_bytes;
#endif

	assert(panic_info->eph_other_log_offset != 0);

	if (print_vnodes != 0) {
		panic_print_vnodes();
	}

	panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
	unsigned int initial_not_in_kdp = not_in_kdp;

	cpu_data_t * cpu_data_ptr = getCpuDatap();

	assert(cpu_data_ptr->PAB_active == FALSE);
	cpu_data_ptr->PAB_active = TRUE;

	/*
	 * Because print all backtraces uses the pmap routines, it needs to
	 * avoid taking pmap locks.  Right now, this is conditionalized on
	 * not_in_kdp.
	 */
	not_in_kdp = 0;
	do_print_all_backtraces(message, panic_options);

	not_in_kdp = initial_not_in_kdp;

	cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
	if (kdp_clock_is_locked()) {
		paniclog_append_noflush("Warning: clock is locked.  Can't get time\n");
		return;
	}

	extern lck_ticket_t clock_lock;
	extern lck_grp_t clock_lock_grp;

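	/*
	 * The try-lock below is only a probe: if the clock ticket lock can be
	 * taken, the clock state is not mid-update. It is dropped immediately,
	 * before the times are read.
	 */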
	if ((is_clock_configured) && (lck_ticket_lock_try(&clock_lock, &clock_lock_grp))) {
		clock_sec_t     secs, boot_secs;
		clock_usec_t    usecs, boot_usecs;

		lck_ticket_unlock(&clock_lock);

		clock_get_calendar_microtime(&secs, &usecs);
		clock_get_boottime_microtime(&boot_secs, &boot_usecs);

		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
		paniclog_append_noflush("Epoch Time:        sec       usec\n");
		paniclog_append_noflush("  Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
		paniclog_append_noflush("  Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
		paniclog_append_noflush("  Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
		paniclog_append_noflush("  Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
	}
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
	// empty stub. Really only used on x86_64.
	return;
}

void
SavePanicInfo(
	const char *message, __unused void *panic_data, uint64_t panic_options)
{
	/*
	 * This should be initialized by the time we get here, but
	 * if it is not, asserting about it will be of no use (it will
	 * come right back to here), so just loop right here and now.
	 * This prevents early-boot panics from becoming recursive and
	 * thus makes them easier to debug. If you attached to a device
	 * and see your PC here, look down a few frames to see your
	 * early-boot panic there.
	 */
	while (!panic_info || panic_info->eph_panic_log_offset == 0) {
		// rdar://87170225 (PanicHardening: audit panic code for naked spinloops)
		// rdar://88094367 (Add test hooks for panic at different stages in XNU)
		;
	}

	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

#if defined(XNU_TARGET_OS_BRIDGE)
	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

	/*
	 * On newer targets, panic data is stored directly into the iBoot panic region.
	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
	 */
	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
		PE_update_panic_crc((unsigned char*)gPanicBase, &pi_size);
		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
	}

	if (PanicInfoSaved || (debug_buf_size == 0)) {
		return;
	}

	PanicInfoSaved = TRUE;


	print_all_backtraces(message, panic_options);

	assert(panic_info->eph_panic_log_len != 0);
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	PEHaltRestart(kPEPanicSync);

	/*
	 * Notifies registered IOPlatformPanicAction callbacks
	 * (which includes one to disable the memcache) and flushes
	 * the buffer contents from the cache
	 */
	paniclog_flush();
}

void
paniclog_flush()
{
	unsigned int panicbuf_length = 0;

	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
	if (!debug_buf_ptr || !panicbuf_length) {
		return;
	}

	/*
	 * Updates the log length of the last part of the panic log.
	 */
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	/*
	 * Updates the metadata at the beginning of the panic buffer,
	 * updates the CRC.
	 */
	PE_update_panic_crc((unsigned char *)gPanicBase, &panicbuf_length);

	/*
	 * This is currently unused by platform KEXTs on embedded but is
	 * kept for compatibility with the published IOKit interfaces.
	 */
	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

	PE_sync_panic_buffers();
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held.  It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores.  This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @param is_stackshot If true, this is a stackshot request.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
	boolean_t proceed_on_sync_failure, bool is_stackshot)
{
	uint64_t max_mabs_time, current_mabs_time;
	int cpu;
	int max_cpu;
	cpu_data_t      *target_cpu_datap;
	cpu_data_t      *cpu_data_ptr = getCpuDatap();

	/* Check for nested debugger entry. */
	cpu_data_ptr->debugger_active++;
	if (cpu_data_ptr->debugger_active != 1) {
		return KERN_SUCCESS;
	}

	/*
	 * If debugger_sync is not 0, someone responded excessively late to the last
	 * debug request (we zero the sync variable in the return function).  Zero it
	 * again here.  This should prevent us from getting out of sync (heh) and
	 * timing out on every entry to the debugger if we timeout once.
	 */

	debugger_sync = 0;
	mp_kdp_trap = 1;
	debug_cpus_spinning = 0;

#pragma unused(is_stackshot)

	/*
	 * Try to signal all CPUs (except ourselves, of course).  Use debugger_sync to
	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
	 * is not synchronous).
	 */
	max_cpu = ml_get_max_cpu_number();

	boolean_t immediate_halt = FALSE;
	if (proceed_on_sync_failure && force_immediate_debug_halt) {
		immediate_halt = TRUE;
	}

	if (!immediate_halt) {
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}

			kern_return_t ret = cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL);
			if (ret == KERN_SUCCESS) {
				os_atomic_inc(&debugger_sync, relaxed);
				os_atomic_inc(&debug_cpus_spinning, relaxed);
			} else if (proceed_on_sync_failure) {
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}

		max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);

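		/*
		 * Convert the relative timeout into an absolute mach_absolute_time
		 * deadline; a stored timeout of 0 disables the deadline entirely.
		 */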
		if (max_mabs_time > 0) {
			current_mabs_time = mach_absolute_time();
			max_mabs_time += current_mabs_time;
			assert(max_mabs_time > current_mabs_time);
		}

		/*
		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd.  If we
		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
		 * uninterruptibly spinning on someone else.  The best we can hope for is that
		 * all other CPUs have either responded or are spinning in a context that is
		 * debugger safe.
		 */
		while ((debugger_sync != 0) && (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
			current_mabs_time = mach_absolute_time();
		}
	}

	if (!proceed_on_sync_failure && (max_mabs_time > 0 && current_mabs_time >= max_mabs_time)) {
		__builtin_arm_dmb(DMB_ISH);
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}
			if (!(target_cpu_datap->cpu_signal & SIGPdebug)) {
				continue;
			}
			if (processor_array[cpu]->state <= PROCESSOR_PENDING_OFFLINE) {
				/*
				 * This is a processor that was successfully sent a SIGPdebug signal
				 * but which hasn't acknowledged it because it went offline with
				 * interrupts disabled before the IPI was delivered, so count it
				 * here.
				 */
				os_atomic_dec(&debugger_sync, relaxed);
				kprintf("%s>found CPU %d offline, debugger_sync=%d\n", __FUNCTION__, cpu, debugger_sync);
				continue;
			}

			kprintf("%s>Debugger synch pending on cpu %d\n", __FUNCTION__, cpu);
		}

		if (debugger_sync == 0) {
			return KERN_SUCCESS;
		} else {
			DebuggerXCallReturn();
			kprintf("%s>returning KERN_OPERATION_TIMED_OUT\n", __FUNCTION__);
			return KERN_OPERATION_TIMED_OUT;
		}
	} else if (immediate_halt || (max_mabs_time > 0 && current_mabs_time >= max_mabs_time)) {
		/*
		 * For the moment, we're aiming for a timeout that the user shouldn't notice,
		 * but will be sufficient to let the other core respond.
		 */
		__builtin_arm_dmb(DMB_ISH);
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}
			if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
				continue;
			}
			if (proceed_on_sync_failure) {
				paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
				if (halt_status < 0) {
					paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					if (halt_status > 0) {
						paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
					}
					target_cpu_datap->halt_status = CPU_HALTED;
				}
			} else {
				kprintf("Debugger synch pending on cpu %d\n", cpu);
			}
		}
		if (proceed_on_sync_failure) {
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

				if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
				    (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
					continue;
				}
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
				    NSEC_PER_SEC, &target_cpu_datap->halt_state);
				if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
					paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					paniclog_append_noflush("cpu %d successfully halted\n", cpu);
					target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
				}
			}
			if (immediate_halt) {
				paniclog_append_noflush("Immediate halt requested on all cores\n");
			} else {
				paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n",
				    os_atomic_load(&debug_ack_timeout, relaxed));
			}
			debug_ack_timeout_count++;
			return KERN_SUCCESS;
		} else {
			DebuggerXCallReturn();
			return KERN_OPERATION_TIMED_OUT;
		}
	} else {
		return KERN_SUCCESS;
	}
}

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter().
 *
 * @discussion This function should be called with the debugger lock held.
 */
void
DebuggerXCallReturn(
	void)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();
	uint64_t max_mabs_time, current_mabs_time;

	cpu_data_ptr->debugger_active--;
	if (cpu_data_ptr->debugger_active != 0) {
		return;
	}

	mp_kdp_trap = 0;
	debugger_sync = 0;

	max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);

	if (max_mabs_time > 0) {
		current_mabs_time = mach_absolute_time();
		max_mabs_time += current_mabs_time;
		assert(max_mabs_time > current_mabs_time);
	}

	/*
	 * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
	 * It's possible for one or more CPUs to never decrement debug_cpus_spinning,
	 * since they may be stuck somewhere else with interrupts disabled.
	 * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
	 *
	 * Note that the same is done in DebuggerXCallEnter, when we wait for other
	 * CPUs to update debugger_sync. If we time out, let's hope all CPUs are
	 * spinning in a debugger-safe context.
	 */
	while ((os_atomic_load_exclusive(&debug_cpus_spinning, relaxed) != 0) &&
	    (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
		__builtin_arm_wfe();
		current_mabs_time = mach_absolute_time();
	}
	os_atomic_clear_exclusive();
}

extern void wait_while_mp_kdp_trap(bool check_SIGPdebug);
/*
 * Spin while mp_kdp_trap is set.
 *
 * processor_offline() calls this with check_SIGPdebug=true
 * to break out of the spin loop if the cpu has SIGPdebug
 * pending.
 */
void
wait_while_mp_kdp_trap(bool check_SIGPdebug)
{
	bool found_mp_kdp_trap = false;
	bool found_SIGPdebug = false;

	while (os_atomic_load_exclusive(&mp_kdp_trap, relaxed) != 0) {
		found_mp_kdp_trap = true;
		if (check_SIGPdebug && cpu_has_SIGPdebug_pending()) {
			found_SIGPdebug = true;
			break;
		}
		__builtin_arm_wfe();
	}
	os_atomic_clear_exclusive();

	if (check_SIGPdebug && found_mp_kdp_trap) {
		kprintf("%s>found_mp_kdp_trap=true found_SIGPdebug=%s\n", __FUNCTION__, found_SIGPdebug ? "true" : "false");
	}
}

void
DebuggerXCall(
	void            *ctx)
{
	boolean_t               save_context = FALSE;
	vm_offset_t             kstackptr = 0;
	arm_saved_state_t       *regs = (arm_saved_state_t *) ctx;

	if (regs != NULL) {
#if defined(__arm64__)
		current_cpu_datap()->ipi_pc = (uint64_t)get_saved_state_pc(regs);
		current_cpu_datap()->ipi_lr = (uint64_t)get_saved_state_lr(regs);
		current_cpu_datap()->ipi_fp = (uint64_t)get_saved_state_fp(regs);
		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#endif
	}

	kstackptr = (vm_offset_t)current_thread()->machine.kstackptr;

#if defined(__arm64__)
	arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		current_thread()->machine.kpcb = regs;
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		state->fp = 0;
		state->pc_was_in_userspace = true;
		state->lr = 0;
		state->sp = 0;
		state->ssbs = 0;
		state->uao = 0;
		state->dit = 0;
	}
#endif

	/*
	 * When running in serial mode, the core capturing the dump may hold interrupts disabled
	 * for a time longer than the timeout. That path includes logic to reset the timestamp
	 * so that we do not eventually trigger the interrupt timeout assert().
	 *
	 * Here we check whether other cores have already gone over the timeout at this point
	 * before spinning, so we at least cover the IPI reception path. After spinning, however,
	 * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
	 */
	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_END();
	}

	os_atomic_dec(&debugger_sync, relaxed);


	wait_while_mp_kdp_trap(false);

	/**
	 * Alert the triggering CPU that this CPU is done spinning. The CPU that
	 * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
	 * all of the CPUs to exit the above loop before continuing.
	 */
	os_atomic_dec(&debug_cpus_spinning, relaxed);

#if SCHED_HYGIENE_DEBUG
	/*
	 * We also abandon the measurement for preemption disable
	 * timeouts, if any. Normally, time in interrupt handlers would be
	 * subtracted from preemption disable time, and that happens up to
	 * this point, but since we "end" the interrupt handler prematurely
	 * here (from the point of view of interrupt masked debugging), the
	 * time spent spinning would otherwise still be attributed to
	 * preemption disable time and could trigger an event, which could
	 * be a panic.
	 */
	abandon_preemption_disable_measurement();
#endif /* SCHED_HYGIENE_DEBUG */

	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
	}

#if defined(__arm64__)
	current_thread()->machine.kpcb = NULL;
#endif /* defined(__arm64__) */

	/* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
	unsigned int    reason,
	void            *ctx)
{
#if     !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
	alternate_debugger_enter();
#endif

#if     MACH_KDP
	kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
	/* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
	return pmap_bootloader_page(ppn);
}