xref: /xnu-12377.1.9/osfmk/arm/model_dep.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <debug.h>
30 #include <mach_kdp.h>
31 #include <kern/kern_stackshot.h>
32 
33 #include <kern/thread.h>
34 #include <machine/pmap.h>
35 #include <device/device_types.h>
36 
37 #include <mach/vm_param.h>
38 #include <mach/clock_types.h>
39 #include <mach/machine.h>
40 #include <mach/kmod.h>
41 #include <pexpert/boot.h>
42 #include <pexpert/pexpert.h>
43 
44 #include <ptrauth.h>
45 
46 #include <kern/misc_protos.h>
47 #include <kern/startup.h>
48 #include <kern/clock.h>
49 #include <kern/debug.h>
50 #include <kern/processor.h>
51 #include <kdp/kdp_core.h>
52 #if ALTERNATE_DEBUGGER
53 #include <arm64/alternate_debugger.h>
54 #endif
55 #include <machine/atomic.h>
56 #include <machine/trap.h>
57 #include <kern/spl.h>
58 #include <pexpert/pexpert.h>
59 #include <kdp/kdp_callout.h>
60 #include <kdp/kdp_dyld.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_common.h>
63 #include <uuid/uuid.h>
64 #include <sys/codesign.h>
65 #include <sys/time.h>
66 
67 #if CONFIG_SPTM
68 #include <kern/percpu.h>
69 #include <arm64/sptm/pmap/pmap_data.h>
70 #endif
71 
72 #include <IOKit/IOPlatformExpert.h>
73 #include <IOKit/IOKitServer.h>
74 
75 #include <mach/vm_prot.h>
76 #include <vm/vm_map_xnu.h>
77 #include <vm/pmap.h>
78 #include <vm/vm_shared_region.h>
79 #include <mach/time_value.h>
80 #include <machine/machparam.h>  /* for btop */
81 
82 #include <console/video_console.h>
83 #include <console/serial_protos.h>
84 #include <arm/cpu_data.h>
85 #include <arm/cpu_data_internal.h>
86 #include <arm/cpu_internal.h>
87 #include <arm/misc_protos.h>
88 #include <libkern/OSKextLibPrivate.h>
89 #include <vm/vm_kern.h>
90 #include <kern/kern_cdata.h>
91 #include <kern/ledger.h>
92 
93 
94 #if DEVELOPMENT || DEBUG
95 #include <kern/ext_paniclog.h>
96 #endif
97 
98 #if CONFIG_EXCLAVES
99 #include <kern/exclaves_panic.h>
100 #include <kern/exclaves_inspection.h>
101 #endif
102 
103 
104 #if     MACH_KDP
105 void    kdp_trap(unsigned int, struct arm_saved_state *);
106 #endif
107 
108 /*
109  * Increment the PANICLOG_VERSION if you change the format of the panic
110  * log in any way.
111  */
112 #define PANICLOG_VERSION 15
113 static struct kcdata_descriptor kc_panic_data;
114 
115 extern char iBoot_version[];
116 #if defined(TARGET_OS_OSX) && defined(__arm64__)
117 extern char iBoot_Stage_2_version[];
118 #endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
119 
120 extern volatile uint32_t        debug_enabled;
121 extern unsigned int         not_in_kdp;
122 
123 extern int                              copyinframe(vm_address_t fp, uint32_t * frame);
124 extern void                             kdp_callouts(kdp_event_t event);
125 
126 #define MAX_PROCNAME_LEN 32
127 /* #include <sys/proc.h> */
128 struct proc;
129 extern int        proc_pid(struct proc *p);
130 extern void       proc_name_kdp(struct proc *, char *, int);
131 
132 /*
133  * Make sure there's enough space to include the relevant bits in the format required
134  * within the space allocated for the panic version string in the panic header.
135  * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'.
136  */
137 #define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"
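/* For illustration only: with hypothetical values this renders as "14.1 (23B74)". */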
138 
139 extern const char version[];
140 extern char       osversion[];
141 extern char       osproductversion[];
142 extern char       osreleasetype[];
143 
144 #if defined(XNU_TARGET_OS_BRIDGE)
145 extern char     macosproductversion[];
146 extern char     macosversion[];
147 #endif
148 
149 extern uint8_t          gPlatformECID[8];
150 extern uint32_t         gPlatformMemoryID;
151 
152 extern uint64_t         last_hwaccess_thread;
153 extern uint8_t          last_hwaccess_type; /* 0 : read, 1 : write. */
154 extern uint8_t          last_hwaccess_size;
155 extern uint64_t         last_hwaccess_paddr;
156 
157 /* Choosing the size for gTargetTypeBuffer as 16 and the size for gModelTypeBuffer as 32,
158  * since the target name and the model name typically do not exceed these sizes. */
159 extern char  gTargetTypeBuffer[16];
160 extern char  gModelTypeBuffer[32];
161 
162 extern struct timeval    gIOLastSleepTime;
163 extern struct timeval    gIOLastWakeTime;
164 extern boolean_t                 is_clock_configured;
165 extern boolean_t kernelcache_uuid_valid;
166 extern uuid_t kernelcache_uuid;
167 extern uuid_string_t bootsessionuuid_string;
168 
169 extern uint64_t roots_installed;
170 
171 /* Definitions for frame pointers */
172 #define FP_ALIGNMENT_MASK      ((uint32_t)(0x3))
173 #define FP_LR_OFFSET           ((uint32_t)4)
174 #define FP_LR_OFFSET64         ((uint32_t)8)
175 #define FP_MAX_NUM_TO_EVALUATE (50)
176 
177 /* Timeout for all processors responding to debug crosscall */
178 MACHINE_TIMEOUT_ALWAYS_ENABLED(debug_ack_timeout, "debug-ack", 240000, MACHINE_TIMEOUT_UNIT_TIMEBASE);
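/*
 * The 240000 above is in timebase ticks (MACHINE_TIMEOUT_UNIT_TIMEBASE); at the
 * 24 MHz timebase common on Apple SoCs that is roughly 10 ms, though the exact
 * wall-clock value depends on the platform's timebase frequency.
 */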
179 
180 /* Forward functions definitions */
181 void panic_display_times(void);
182 void panic_print_symbol_name(vm_address_t search);
183 
184 
185 /* Global variables */
186 static uint32_t       panic_bt_depth;
187 boolean_t             PanicInfoSaved = FALSE;
188 boolean_t             force_immediate_debug_halt = FALSE;
189 unsigned int          debug_ack_timeout_count = 0;
190 _Atomic unsigned int  debugger_sync = 0;
191 _Atomic unsigned int  mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
192 _Atomic unsigned int  debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
193 unsigned int          DebugContextCount = 0;
194 bool                  trap_is_stackshot = false; /* Whether the trap is for a stackshot */
195 
196 #if defined(__arm64__)
197 uint8_t PE_smc_stashed_x86_system_state = 0xFF;
198 uint8_t PE_smc_stashed_x86_power_state = 0xFF;
199 uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
200 uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
201 uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
202 uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
203 uint64_t PE_nvram_stashed_x86_macos_slide = UINT64_MAX;
204 #endif
205 
206 
207 static void
208 do_print_backtrace_internal(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
209     boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
210 {
211 	unsigned int    i = 0;
212 	addr64_t        lr = 0;
213 	addr64_t        fp = topfp;
214 	addr64_t        fp_for_ppn = 0;
215 	ppnum_t         ppn = (ppnum_t)NULL;
216 	vm_offset_t     raddrs[FP_MAX_NUM_TO_EVALUATE] = { 0 };
217 	bool            dump_kernel_stack = (fp >= VM_MIN_KERNEL_ADDRESS);
218 
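	/*
	 * Frame-record layout assumed by the walker below: [fp] holds the caller's
	 * frame pointer and [fp + FP_LR_OFFSET64] the saved LR for 64-bit frames
	 * (FP_LR_OFFSET for 32-bit frames). The chain is followed with physical
	 * reads so the walk can proceed even when virtual mappings are suspect.
	 */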
219 	do {
220 		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
221 			break;
222 		}
223 
224 		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
225 			break;
226 		}
227 
228 		/*
229 		 * Check to see if current address will result in a different
230 		 * ppn than previously computed (to avoid recomputation) via
231 		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT)
232 		 */
233 		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
234 			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
235 			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
236 		}
237 		if (ppn != (ppnum_t)NULL) {
238 			if (is_64_bit) {
239 				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
240 #if defined(HAS_APPLE_PAC)
241 				/* return addresses on stack will be signed by arm64e ABI */
242 				lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
243 #endif
244 			} else {
245 				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
246 			}
247 		} else {
248 			if (is_64_bit) {
249 				paniclog_append_noflush("%s\t  Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
250 			} else {
251 				paniclog_append_noflush("%s\t  Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
252 			}
253 			break;
254 		}
255 		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
256 			ppn = pmap_find_phys(pmap, fp);
257 			fp_for_ppn = fp;
258 		}
259 		if (ppn != (ppnum_t)NULL) {
260 			if (is_64_bit) {
261 				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
262 #if defined(HAS_APPLE_PAC)
263 				/* frame pointers on stack will be signed by arm64e ABI */
264 				fp = (addr64_t) ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
265 #endif
266 			} else {
267 				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
268 			}
269 		} else {
270 			if (is_64_bit) {
271 				paniclog_append_noflush("%s\t  Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
272 			} else {
273 				paniclog_append_noflush("%s\t  Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
274 			}
275 			break;
276 		}
277 		/*
278 		 * Counter 'i' may == FP_MAX_NUM_TO_EVALUATE when running one
279 		 * extra round to check whether we have all frames in order to
280 		 * indicate (in)complete backtrace below. This happens in a case
281 		 * where total frame count and FP_MAX_NUM_TO_EVALUATE are equal.
282 		 * Do not capture anything.
283 		 */
284 		if (i < FP_MAX_NUM_TO_EVALUATE && lr) {
285 			if (is_64_bit) {
286 				paniclog_append_noflush("%s\t  lr: 0x%016llx  fp: 0x%016llx\n", cur_marker, lr, fp);
287 			} else {
288 				paniclog_append_noflush("%s\t  lr: 0x%08x  fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
289 			}
290 			raddrs[i] = lr;
291 		}
292 	} while ((++i <= FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
293 
294 	if (i > FP_MAX_NUM_TO_EVALUATE && fp != 0) {
295 		paniclog_append_noflush("Backtrace continues...\n");
296 	}
297 
298 	if (print_kexts_in_backtrace && i > 0) {
299 		kmod_panic_dump(&raddrs[0], i);
300 	}
301 }
302 
303 #define SANE_TASK_LIMIT 256
304 #define TOP_RUNNABLE_LIMIT 5
305 #define PANICLOG_UUID_BUF_SIZE 256
306 
307 extern void panic_print_vnodes(void);
308 
309 static void
310 panic_display_tpidrs(void)
311 {
312 #if defined(__arm64__)
313 	paniclog_append_noflush("TPIDRx_ELy = {1: 0x%016llx  0: 0x%016llx  0ro: 0x%016llx }\n",
314 	    __builtin_arm_rsr64("TPIDR_EL1"), __builtin_arm_rsr64("TPIDR_EL0"),
315 	    __builtin_arm_rsr64("TPIDRRO_EL0"));
316 #endif //defined(__arm64__)
317 }
318 
319 
320 
321 static void
322 panic_display_hung_cpus_help(void)
323 {
324 #if defined(__arm64__)
325 	const uint32_t pcsr_offset = 0x90;
326 
327 	/*
328 	 * Print some info that might help in cases where nothing
329 	 * else does
330 	 */
331 	const ml_topology_info_t *info = ml_get_topology_info();
332 	if (info) {
333 		unsigned i, retry;
334 
335 		for (i = 0; i < info->num_cpus; i++) {
336 			ml_topology_cpu_t *cpu = &info->cpus[i];
337 			char cluster_name[16], cluster_letter;
338 
339 			switch (cpu->cluster_type) {
340 			case CLUSTER_TYPE_E:
341 				cluster_letter = 'E';
342 				break;
343 			case CLUSTER_TYPE_P:
344 				cluster_letter = 'P';
345 				break;
346 			default:
347 				cluster_letter = '?';
348 			}
349 			snprintf(cluster_name, sizeof(cluster_name), "%cACC%d", cluster_letter, cpu->cluster_id);
350 
351 			if (!PE_cpu_power_check_kdp(i)) {
352 				paniclog_append_noflush("CORE %u [%s] is offline, skipping\n", i, cluster_name);
353 				continue;
354 			}
355 			if (cpu->cpu_UTTDBG_regs) {
356 				volatile uint64_t *pcsr = (volatile uint64_t*)(cpu->cpu_UTTDBG_regs + pcsr_offset);
357 				volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
358 				uint64_t pc = 0;
359 
360 				// a number of retries is needed until this works
361 				for (retry = 1024; retry && !pc; retry--) {
362 					// a 32-bit read is required to make a PC sample be produced; otherwise we'll only get a zero
363 					(void)*pcsrTrigger;
364 					pc = *pcsr;
365 				}
366 
367 				// post-processing (same as astris does): sign-extend the sample into the kernel VA range
368 				if (pc >> 48) {
369 					pc |= 0xffff000000000000ull;
370 				}
371 				paniclog_append_noflush("CORE %u [%s] recently retired instr at 0x%016llx\n", i, cluster_name, pc);
372 			}
373 		}
374 	}
375 #endif //defined(__arm64__)
376 }
377 
378 
379 
380 static void
381 panic_display_pvhs_locked(void)
382 {
383 }
384 
385 static void
386 panic_display_pvh_to_lock(void)
387 {
388 }
389 
390 static void
391 panic_display_last_pc_lr(void)
392 {
393 #if defined(__arm64__)
394 	const int max_cpu = ml_get_max_cpu_number();
395 
396 	for (int cpu = 0; cpu <= max_cpu; cpu++) {
397 		cpu_data_t *current_cpu_datap = cpu_datap(cpu);
398 
399 		if (current_cpu_datap == NULL) {
400 			continue;
401 		}
402 
403 		if (current_cpu_datap == getCpuDatap()) {
404 			/**
405 			 * Skip printing the PC/LR if this is the CPU
406 			 * that initiated the panic.
407 			 */
408 			paniclog_append_noflush("CORE %u is the one that panicked. Check the full backtrace for details.\n", cpu);
409 			continue;
410 		}
411 
412 		paniclog_append_noflush("CORE %u: PC=0x%016llx, LR=0x%016llx, FP=0x%016llx\n", cpu,
413 		    current_cpu_datap->ipi_pc, (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_lr),
414 		    (uint64_t)VM_KERNEL_STRIP_PTR(current_cpu_datap->ipi_fp));
415 	}
416 #endif
417 }
418 
419 #if CONFIG_EXCLAVES
420 static void
421 panic_report_exclaves_stackshot(void)
422 {
423 	if (exclaves_panic_ss_status == EXCLAVES_PANIC_STACKSHOT_FOUND) {
424 		paniclog_append_noflush("** Exclaves panic stackshot found\n");
425 	} else if (exclaves_panic_ss_status == EXCLAVES_PANIC_STACKSHOT_NOT_FOUND) {
426 		paniclog_append_noflush("** Exclaves panic stackshot not found\n");
427 	} else if (exclaves_panic_ss_status == EXCLAVES_PANIC_STACKSHOT_DECODE_FAILED) {
428 		paniclog_append_noflush("!! Exclaves panic stackshot decode failed !!\n");
429 	}
430 }
431 #endif /* CONFIG_EXCLAVES */
432 
433 __attribute__((always_inline))
434 static inline void
435 print_backtrace_internal(thread_t thread, bool filesetKC)
436 {
437 	uintptr_t cur_fp = (uintptr_t)__builtin_frame_address(0);
438 	const char              *nohilite_thread_marker = "\t";
439 
440 #if defined(HAS_APPLE_PAC)
441 	cur_fp = (addr64_t)ptrauth_strip((void *)cur_fp, ptrauth_key_frame_pointer);
442 #endif
443 
444 	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
445 		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
446 		    thread, (addr64_t)cur_fp, thread_tid(thread));
447 #if __LP64__
448 		do_print_backtrace_internal(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
449 #else
450 		do_print_backtrace_internal(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
451 #endif
452 	} else {
453 		paniclog_append_noflush("Could not print panicked thread backtrace: "
454 		    "frame pointer outside kernel vm.\n");
455 	}
456 }
457 
458 static bool
459 is_filesetKC(void)
460 {
461 	kc_format_t     kc_format;
462 	bool            filesetKC = false;
463 
464 	__unused bool result = PE_get_primary_kc_format(&kc_format);
465 	assert(result == true);
466 	filesetKC = kc_format == KCFormatFileset;
467 	return filesetKC;
468 }
469 
470 
471 static void
472 do_print_all_panic_info(const char *message, uint64_t panic_options, const char *panic_initiator)
473 {
474 	int             logversion = PANICLOG_VERSION;
475 	thread_t        cur_thread = current_thread();
476 	task_t          task;
477 	struct proc    *proc;
478 	int             print_vnodes = 0;
479 
480 	/* end_marker_bytes is set to 200 so that there is always room to print the END marker + stackshot summary info */
481 	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
482 	int bytes_uncompressed = 0;
483 	uint64_t bytes_used = 0ULL;
484 	int err = 0;
485 	char *stackshot_begin_loc = NULL;
486 	bool filesetKC = is_filesetKC();
487 	uint32_t panic_initiator_len = 0;
488 #if CONFIG_EXT_PANICLOG
489 	uint32_t ext_paniclog_bytes = 0;
490 #endif
491 
492 	if (panic_bt_depth != 0) {
493 		return;
494 	}
495 	panic_bt_depth++;
496 
497 	/* Truncate panic string to 1200 bytes */
498 	paniclog_append_noflush("Debugger message: %.1200s\n", message);
499 	if (debug_enabled) {
500 		paniclog_append_noflush("Device: %s\n",
501 		    ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
502 		paniclog_append_noflush("Hardware Model: %s\n",
503 		    ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
504 		paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
505 		    gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
506 		    gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
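		/* The ECID bytes are printed from index 7 down to 0, i.e. most-significant byte first assuming little-endian storage. */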
507 		if (last_hwaccess_thread) {
508 			paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
509 			if (!last_hwaccess_size) {
510 				paniclog_append_noflush("AppleHWAccess last access: no access data, this is unexpected.\n");
511 			} else {
512 				const char *typ = last_hwaccess_type ? "write" : "read";
513 				paniclog_append_noflush("AppleHWAccess last access: %s of size %u at address 0x%llx\n", typ, last_hwaccess_size, last_hwaccess_paddr);
514 			}
515 		}
516 		paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
517 	}
518 	paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
519 	paniclog_append_noflush("OS release type: %.256s\n",
520 	    ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet");
521 	paniclog_append_noflush("OS version: %.256s\n",
522 	    ('\0' != osversion[0]) ? osversion : "Not set yet");
523 #if defined(XNU_TARGET_OS_BRIDGE)
524 	paniclog_append_noflush("macOS version: %.256s\n",
525 	    ('\0' != macosversion[0]) ? macosversion : "Not set");
526 #endif
527 	paniclog_append_noflush("Kernel version: %.512s\n", version);
528 
529 #if CONFIG_EXCLAVES
530 	exclaves_panic_append_info();
531 #endif
532 
533 	if (kernelcache_uuid_valid) {
534 		if (filesetKC) {
535 			paniclog_append_noflush("Fileset Kernelcache UUID: ");
536 		} else {
537 			paniclog_append_noflush("KernelCache UUID: ");
538 		}
539 		for (size_t index = 0; index < sizeof(uuid_t); index++) {
540 			paniclog_append_noflush("%02X", kernelcache_uuid[index]);
541 		}
542 		paniclog_append_noflush("\n");
543 	}
544 	panic_display_kernel_uuid();
545 
546 	if (bootsessionuuid_string[0] != '\0') {
547 		paniclog_append_noflush("Boot session UUID: %s\n", bootsessionuuid_string);
548 	} else {
549 		paniclog_append_noflush("Boot session UUID not yet initialized\n");
550 	}
551 
552 	paniclog_append_noflush("iBoot version: %.128s\n", iBoot_version);
553 #if defined(TARGET_OS_OSX) && defined(__arm64__)
554 	paniclog_append_noflush("iBoot Stage 2 version: %.128s\n", iBoot_Stage_2_version);
555 #endif /* defined(TARGET_OS_OSX) && defined(__arm64__) */
556 
557 	paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
558 	paniclog_append_noflush("roots installed: %lld\n", roots_installed);
559 #if defined(XNU_TARGET_OS_BRIDGE)
560 	paniclog_append_noflush("x86 EFI Boot State: ");
561 	if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
562 		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
563 	} else {
564 		paniclog_append_noflush("not available\n");
565 	}
566 	paniclog_append_noflush("x86 System State: ");
567 	if (PE_smc_stashed_x86_system_state != 0xFF) {
568 		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
569 	} else {
570 		paniclog_append_noflush("not available\n");
571 	}
572 	paniclog_append_noflush("x86 Power State: ");
573 	if (PE_smc_stashed_x86_power_state != 0xFF) {
574 		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
575 	} else {
576 		paniclog_append_noflush("not available\n");
577 	}
578 	paniclog_append_noflush("x86 Shutdown Cause: ");
579 	if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
580 		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
581 	} else {
582 		paniclog_append_noflush("not available\n");
583 	}
584 	paniclog_append_noflush("x86 Previous Power Transitions: ");
585 	if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
586 		paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
587 	} else {
588 		paniclog_append_noflush("not available\n");
589 	}
590 	paniclog_append_noflush("PCIeUp link state: ");
591 	if (PE_pcie_stashed_link_state != UINT32_MAX) {
592 		paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
593 	} else {
594 		paniclog_append_noflush("not available\n");
595 	}
596 	paniclog_append_noflush("macOS kernel slide: ");
597 	if (PE_nvram_stashed_x86_macos_slide != UINT64_MAX) {
598 		paniclog_append_noflush("%#llx\n", PE_nvram_stashed_x86_macos_slide);
599 	} else {
600 		paniclog_append_noflush("not available\n");
601 	}
602 #endif
603 	if (panic_data_buffers != NULL) {
604 		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
605 		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
606 		for (int i = 0; i < panic_data_buffers->len; i++) {
607 			paniclog_append_noflush("%02X", panic_buffer_data[i]);
608 		}
609 		paniclog_append_noflush("\n");
610 	}
611 	paniclog_append_noflush("Paniclog version: %d\n", logversion);
612 
613 	panic_display_kernel_aslr();
614 	panic_display_times();
615 	panic_display_zalloc();
616 	panic_display_hung_cpus_help();
617 	panic_display_tpidrs();
618 
619 
620 	panic_display_pvhs_locked();
621 	panic_display_pvh_to_lock();
622 	panic_display_last_pc_lr();
623 #if CONFIG_ECC_LOGGING
624 	panic_display_ecc_errors();
625 #endif /* CONFIG_ECC_LOGGING */
626 	panic_display_compressor_stats();
627 
628 #if DEVELOPMENT || DEBUG
629 	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
630 		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
631 		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
632 	}
633 #endif
634 
635 	// Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
636 	if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
637 		thread_t        top_runnable[5] = {0};
638 		thread_t        thread;
639 		int                     total_cpu_usage = 0;
640 
641 		print_vnodes = 1;
642 
643 
644 		for (thread = (thread_t)queue_first(&threads);
645 		    PANIC_VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
646 		    thread = (thread_t)queue_next(&thread->threads)) {
647 			total_cpu_usage += thread->cpu_usage;
648 
649 			// Look for the 5 runnable threads with highest priority
650 			if (thread->state & TH_RUN) {
651 				int                     k;
652 				thread_t        comparison_thread = thread;
653 
654 				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
655 					if (top_runnable[k] == 0) {
656 						top_runnable[k] = comparison_thread;
657 						break;
658 					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
659 						thread_t temp = top_runnable[k];
660 						top_runnable[k] = comparison_thread;
661 						comparison_thread = temp;
662 					} // if comparison thread has higher priority than previously saved thread
663 				} // loop through highest priority runnable threads
664 			} // Check if thread is runnable
665 		} // Loop through all threads
666 
667 		// Print the relevant info for each thread identified
668 		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
669 		paniclog_append_noflush("Thread task pri cpu_usage\n");
670 
671 		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
672 			if (top_runnable[i] &&
673 			    panic_get_thread_proc_task(top_runnable[i], &task, &proc) && proc) {
674 				char name[MAX_PROCNAME_LEN + 1];
675 				proc_name_kdp(proc, name, sizeof(name));
676 				paniclog_append_noflush("%p %s %d %d\n",
677 				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
678 			}
679 		} // Loop through highest priority runnable threads
680 		paniclog_append_noflush("\n");
681 	}
682 
683 	// print current task info
684 	if (panic_get_thread_proc_task(cur_thread, &task, &proc)) {
685 		if (PANIC_VALIDATE_PTR(task->map) &&
686 		    PANIC_VALIDATE_PTR(task->map->pmap)) {
687 			ledger_amount_t resident = 0;
688 			if (task != kernel_task) {
689 				ledger_get_balance(task->ledger, task_ledgers.phys_mem, &resident);
690 				resident >>= VM_MAP_PAGE_SHIFT(task->map);
691 			}
692 			paniclog_append_noflush("Panicked task %p: %lld pages, %d threads: ",
693 			    task, resident, task->thread_count);
694 		} else {
695 			paniclog_append_noflush("Panicked task %p: %d threads: ",
696 			    task, task->thread_count);
697 		}
698 
699 		if (proc) {
700 			char            name[MAX_PROCNAME_LEN + 1];
701 			proc_name_kdp(proc, name, sizeof(name));
702 			paniclog_append_noflush("pid %d: %s", proc_pid(proc), name);
703 		} else {
704 			paniclog_append_noflush("unknown task");
705 		}
706 
707 		paniclog_append_noflush("\n");
708 	}
709 
710 	print_backtrace_internal(cur_thread, filesetKC);
711 
712 	paniclog_append_noflush("\n");
713 	dump_cpu_event_log(&paniclog_append_noflush);
714 
715 	paniclog_append_noflush("\n");
716 	if (filesetKC) {
717 		kext_dump_panic_lists(&paniclog_append_noflush);
718 		paniclog_append_noflush("\n");
719 	}
720 	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
721 	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
722 	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
723 		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
724 		    osproductversion, osversion);
725 	}
726 #if defined(XNU_TARGET_OS_BRIDGE)
727 	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
728 		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
729 		    macosproductversion, macosversion);
730 	}
731 #endif
732 	if (bootsessionuuid_string[0] != '\0') {
733 		memcpy(panic_info->eph_bootsessionuuid_string, bootsessionuuid_string,
734 		    sizeof(panic_info->eph_bootsessionuuid_string));
735 	}
736 	panic_info->eph_roots_installed = roots_installed;
737 
738 	if (panic_initiator != NULL) {
739 		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)debug_buf_ptr - (uintptr_t)debug_buf_base);
740 		// If panic_initiator isn't null, safely copy up to MAX_PANIC_INITIATOR_SIZE
741 		panic_initiator_len = strnlen(panic_initiator, MAX_PANIC_INITIATOR_SIZE);
742 		// Calculate the bytes to write, accounting for remaining buffer space, and ensuring the lowest size we can have is 0
743 		panic_initiator_len = MAX(0, MIN(panic_initiator_len, bytes_remaining));
744 		panic_info->eph_panic_initiator_offset = (panic_initiator_len != 0) ? PE_get_offset_into_panic_region(debug_buf_ptr) : 0;
745 		panic_info->eph_panic_initiator_len = panic_initiator_len;
746 		memcpy(debug_buf_ptr, panic_initiator, panic_initiator_len);
747 		debug_buf_ptr += panic_initiator_len;
748 	}
749 
750 	if (debug_ack_timeout_count) {
751 		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
752 		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
753 		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
754 	} else if (panic_stackshot_active()) {
755 		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
756 		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
757 		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
758 	} else {
759 		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
760 		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
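		/* Note: this always advances debug_buf_ptr, by a full 8 bytes when it is already 8-byte aligned. */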
761 		stackshot_begin_loc = debug_buf_ptr;
762 
763 		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
764 		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
765 		    KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
766 		    KCFLAG_USE_MEMCOPY);
767 		if (err == KERN_SUCCESS) {
768 			uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
769 			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
770 			    STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
771 			    STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);
772 
773 			err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, kdp_memcpy, KCDCT_ZLIB);
774 			if (err != KERN_SUCCESS) {
775 				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
776 				stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
777 			}
778 			if (filesetKC) {
779 				stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
780 			}
781 
782 			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
783 			    stackshot_flags, &kc_panic_data, 0, 0);
784 			err = do_panic_stackshot(NULL);
785 			bytes_traced = kdp_stack_snapshot_bytes_traced();
786 			if (bytes_traced > 0 && !err) {
787 				debug_buf_ptr += bytes_traced;
788 				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
789 				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
790 				panic_info->eph_stackshot_len = bytes_traced;
791 
792 				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
793 #if CONFIG_EXCLAVES
794 				panic_report_exclaves_stackshot();
795 #endif /* CONFIG_EXCLAVES */
796 				if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
797 					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
798 					bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
799 					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
800 				} else {
801 					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
802 				}
803 			} else {
804 				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
805 #if CONFIG_EXCLAVES
806 				panic_report_exclaves_stackshot();
807 #endif /* CONFIG_EXCLAVES */
808 				if (bytes_used > 0) {
809 					/* Zero out the stackshot data */
810 					bzero(stackshot_begin_loc, bytes_used);
811 					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;
812 
813 					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
814 					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu, err %d **\n", bytes_used, err);
815 				} else {
816 					bzero(stackshot_begin_loc, bytes_used);
817 					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
818 
819 					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
820 					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
821 				}
822 			}
823 		} else {
824 			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
825 			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
826 			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
827 		}
828 	}
829 
830 #if CONFIG_EXT_PANICLOG
831 	// Write ext paniclog at the end of the paniclog region.
832 	ext_paniclog_bytes = ext_paniclog_write_panicdata();
833 	panic_info->eph_ext_paniclog_offset = (ext_paniclog_bytes != 0) ?
834 	    PE_get_offset_into_panic_region((debug_buf_base + debug_buf_size) - ext_paniclog_bytes) :
835 	    0;
836 	panic_info->eph_ext_paniclog_len = ext_paniclog_bytes;
837 #endif
838 
839 	assert(panic_info->eph_other_log_offset != 0);
840 
841 	if (print_vnodes != 0) {
842 		panic_print_vnodes();
843 	}
844 
845 	panic_bt_depth--;
846 }
847 
848 /*
849  * Entry to print_all_panic_info is serialized by the debugger lock
850  */
851 static void
852 print_all_panic_info(const char *message, uint64_t panic_options, const char *panic_initiator)
853 {
854 	unsigned int initial_not_in_kdp = not_in_kdp;
855 
856 	cpu_data_t * cpu_data_ptr = getCpuDatap();
857 
858 	assert(cpu_data_ptr->PAB_active == FALSE);
859 	cpu_data_ptr->PAB_active = TRUE;
860 
861 	/*
862 	 * Because printing all of the panic info uses the pmap routines, it needs to
863 	 * avoid taking pmap locks.  Right now, this is conditionalized on
864 	 * not_in_kdp.
865 	 */
866 	not_in_kdp = 0;
867 	do_print_all_panic_info(message, panic_options, panic_initiator);
868 
869 	not_in_kdp = initial_not_in_kdp;
870 
871 	cpu_data_ptr->PAB_active = FALSE;
872 }
873 
874 void
875 print_curr_backtrace(void)
876 {
877 	print_backtrace_internal(current_thread(), is_filesetKC());
878 }
879 
880 void
881 panic_display_times()
882 {
883 	if (kdp_clock_is_locked()) {
884 		paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
885 		return;
886 	}
887 
888 	extern lck_ticket_t clock_lock;
889 	extern lck_grp_t clock_lock_grp;
890 
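	/*
	 * The try-lock/unlock pair below is only a probe: if the clock lock can be
	 * taken, no one is mid-update, so the subsequent lock-free reads of the
	 * calendar and boot times are safe from panic context.
	 */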
891 	if ((is_clock_configured) && (lck_ticket_lock_try(&clock_lock, &clock_lock_grp))) {
892 		clock_sec_t     secs, boot_secs;
893 		clock_usec_t    usecs, boot_usecs;
894 
895 		lck_ticket_unlock(&clock_lock);
896 
897 		clock_get_calendar_microtime(&secs, &usecs);
898 		clock_get_boottime_microtime(&boot_secs, &boot_usecs);
899 
900 		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
901 		paniclog_append_noflush("Epoch Time:        sec       usec\n");
902 		paniclog_append_noflush("  Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
903 		paniclog_append_noflush("  Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
904 		paniclog_append_noflush("  Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
905 		paniclog_append_noflush("  Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
906 	}
907 }
908 
909 void
910 panic_print_symbol_name(vm_address_t search)
911 {
912 #pragma unused(search)
913 	// empty stub. Really only used on x86_64.
914 	return;
915 }
916 
917 void
918 SavePanicInfo(
919 	const char *message, __unused void *panic_data, uint64_t panic_options, const char* panic_initiator)
920 {
921 	/*
922 	 * This should be initialized by the time we get here, but
923 	 * if it is not, asserting about it will be of no use (it will
924 	 * come right back to here), so just loop right here and now.
925 	 * This prevents early-boot panics from becoming recursive and
926 	 * thus makes them easier to debug. If you attached to a device
927 	 * and see your PC here, look down a few frames to see your
928 	 * early-boot panic there.
929 	 */
930 	while (!panic_info || panic_info->eph_panic_log_offset == 0) {
931 		// rdar://87170225 (PanicHardening: audit panic code for naked spinloops)
932 		// rdar://88094367 (Add test hooks for panic at different stages in XNU)
933 		;
934 	}
935 
936 	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
937 		panic_info->eph_panic_flags  |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
938 	}
939 
940 	if (panic_options & DEBUGGER_OPTION_COMPANION_PROC_INITIATED_PANIC) {
941 		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPANION_PROC_INITIATED_PANIC;
942 	}
943 
944 	if (panic_options & DEBUGGER_OPTION_INTEGRATED_COPROC_INITIATED_PANIC) {
945 		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_INTEGRATED_COPROC_INITIATED_PANIC;
946 	}
947 
948 	if (panic_options & DEBUGGER_OPTION_USERSPACE_INITIATED_PANIC) {
949 		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_USERSPACE_INITIATED_PANIC;
950 	}
951 
952 #if defined(XNU_TARGET_OS_BRIDGE)
953 	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
954 	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
955 	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
956 #endif
957 
958 	/*
959 	 * On newer targets, panic data is stored directly into the iBoot panic region.
960 	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
961 	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
962 	 */
963 	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
964 		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
965 		PE_update_panic_crc((unsigned char*)gPanicBase, &pi_size);
966 		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
967 	}
968 
969 	if (PanicInfoSaved || (debug_buf_size == 0)) {
970 		return;
971 	}
972 
973 	PanicInfoSaved = TRUE;
974 
975 
976 	print_all_panic_info(message, panic_options, panic_initiator);
977 
978 	assert(panic_info->eph_panic_log_len != 0);
979 	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;
980 
981 	PEHaltRestart(kPEPanicSync);
982 
983 	/*
984 	 * Notifies registered IOPlatformPanicAction callbacks
985 	 * (which includes one to disable the memcache) and flushes
986 	 * the buffer contents from the cache
987 	 */
988 	paniclog_flush();
989 }
990 
991 void
992 paniclog_flush()
993 {
994 	unsigned int panicbuf_length = 0;
995 
996 	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
997 	if (!debug_buf_ptr || !panicbuf_length) {
998 		return;
999 	}
1000 
1001 	/*
1002 	 * Updates the log length of the last part of the panic log.
1003 	 */
1004 	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;
1005 
1006 	/*
1007 	 * Updates the metadata at the beginning of the panic buffer,
1008 	 * updates the CRC.
1009 	 */
1010 	PE_update_panic_crc((unsigned char *)gPanicBase, &panicbuf_length);
1011 
1012 	/*
1013 	 * This is currently unused by platform KEXTs on embedded but is
1014 	 * kept for compatibility with the published IOKit interfaces.
1015 	 */
1016 	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);
1017 
1018 	PE_sync_panic_buffers();
1019 }
1020 
1021 #if CONFIG_SPTM
1022 /*
1023  * Patch thread state to appear as if a debugger stop IPI occurred when a thread
1024  * is parked in the SPTM panic loop. This allows stackshot to proceed as usual.
1025  */
1026 static void
1027 DebuggerPatchupThreadState(
1028 	int cpu, xnu_saved_registers_t *regp)
1029 {
1030 	cpu_data_t         *target_cpu_datap;
1031 	arm_saved_state_t  *statep;
1032 	vm_offset_t        kstackptr;
1033 
1034 	target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1035 	statep = target_cpu_datap->cpu_active_thread->machine.kpcb;
1036 	kstackptr = (vm_offset_t)target_cpu_datap->cpu_active_thread->machine.kstackptr;
1037 
1038 	target_cpu_datap->ipi_pc = regp->pc;
1039 	target_cpu_datap->ipi_lr = regp->lr;
1040 	target_cpu_datap->ipi_fp = regp->fp;
1041 
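	/*
	 * Prefer the full interrupted-context PCB (kpcb) when one was stashed;
	 * otherwise fall back to patching the minimal arm_kernel_saved_state_t
	 * kept at the top of the thread's kernel stack.
	 */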
1042 	if (statep != NULL) {
1043 		statep->ss_64.fp = regp->fp;
1044 		statep->ss_64.lr = regp->lr;
1045 		statep->ss_64.sp = regp->sp;
1046 		statep->ss_64.pc = regp->pc;
1047 	} else if ((void *)kstackptr != NULL) {
1048 		arm_kernel_saved_state_t *kstatep = (arm_kernel_saved_state_t *)kstackptr;
1049 		kstatep->fp = regp->fp;
1050 		kstatep->lr = regp->lr;
1051 		kstatep->sp = regp->sp;
1052 	}
1053 }
1054 #endif
1055 
1056 /*
1057  * @function DebuggerXCallEnter
1058  *
1059  * @abstract IPI other cores so this core can run in a single-threaded context.
1060  *
1061  * @discussion This function should be called with the debugger lock held.  It
1062  * signals the other cores to go into a busy loop so this core can run in a
1063  * single-threaded context and inspect kernel memory.
1064  *
1065  * @param proceed_on_sync_failure If true, then go ahead and try to debug even
1066  * if we can't synch with the other cores.  This is inherently unsafe and should
1067  * only be used if the kernel is going down in flames anyway.
1068  *
1069  * @param is_stackshot If true, this is a stackshot request.
1070  *
1071  * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
1072  * proceed_on_sync_failure is false.
1073  */
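/*
 * Minimal usage sketch (hypothetical caller; the debugger lock must already
 * be held, as noted above):
 *
 *     if (DebuggerXCallEnter(FALSE, false) == KERN_SUCCESS) {
 *         // Other cores are now spinning: safe to inspect kernel memory.
 *         DebuggerXCallReturn();
 *     }
 */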
1074 kern_return_t
1075 DebuggerXCallEnter(
1076 	boolean_t proceed_on_sync_failure, bool is_stackshot)
1077 {
1078 	uint64_t max_mabs_time, current_mabs_time;
1079 	int cpu;
1080 	int timeout_cpu = -1;
1081 	int max_cpu;
1082 	unsigned int sync_pending;
1083 	cpu_data_t      *target_cpu_datap;
1084 	cpu_data_t      *cpu_data_ptr = getCpuDatap();
1085 
1086 	/* Check for nested debugger entry. */
1087 	cpu_data_ptr->debugger_active++;
1088 	if (cpu_data_ptr->debugger_active != 1) {
1089 		return KERN_SUCCESS;
1090 	}
1091 
1092 	/*
1093 	 * If debugger_sync is not 0, someone responded excessively late to the last
1094 	 * debug request (we zero the sync variable in the return function).  Zero it
1095 	 * again here.  This should prevent us from getting out of sync (heh) and
1096 	 * timing out on every entry to the debugger if we time out once.
1097 	 */
1098 
1099 	os_atomic_store(&debugger_sync, 0, relaxed);
1100 	os_atomic_store(&mp_kdp_trap, 1, relaxed);
1101 	os_atomic_store(&debug_cpus_spinning, 0, relaxed);
1102 	trap_is_stackshot = is_stackshot;
1103 
1104 
1105 	/*
1106 	 * Try to signal all CPUs (except ourselves, of course).  Use debugger_sync to
1107 	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
1108 	 * is not synchronous).
1109 	 */
1110 	max_cpu = ml_get_max_cpu_number();
1111 
1112 	boolean_t immediate_halt = FALSE;
1113 	if (proceed_on_sync_failure && force_immediate_debug_halt) {
1114 		immediate_halt = TRUE;
1115 	}
1116 
1117 	if (!immediate_halt) {
1118 		for (cpu = 0; cpu <= max_cpu; cpu++) {
1119 			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1120 
1121 			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1122 				continue;
1123 			}
1124 
1125 			kern_return_t ret = cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL);
1126 			if (ret == KERN_SUCCESS) {
1127 				os_atomic_inc(&debugger_sync, relaxed);
1128 				os_atomic_inc(&debug_cpus_spinning, relaxed);
1129 			} else {
1130 				kprintf("%s: cpu_signal failed. cpu=%d ret=%d proceed=%d\n", __func__, cpu, ret, proceed_on_sync_failure);
1131 			}
1132 		}
1133 
1134 		max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);
1135 
1136 		if (max_mabs_time > 0) {
1137 			current_mabs_time = mach_absolute_time();
1138 			max_mabs_time += current_mabs_time;
1139 			assert(max_mabs_time > current_mabs_time);
1140 		}
1141 
1142 		/*
1143 		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd.  If we
1144 		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
1145 		 * uninterruptibly spinning on someone else.  The best we can hope for is that
1146 		 * all other CPUs have either responded or are spinning in a context that is
1147 		 * debugger safe.
1148 		 */
1149 		do {
1150 			current_mabs_time = mach_absolute_time();
1151 			sync_pending = os_atomic_load(&debugger_sync, acquire);
1152 		} while ((sync_pending != 0) && (max_mabs_time == 0 || current_mabs_time < max_mabs_time));
1153 	}
1154 
1155 	if (!immediate_halt && max_mabs_time > 0 && current_mabs_time >= max_mabs_time) {
1156 		/*
1157 		 * We timed out trying to IPI the other CPUs. Skip counting any CPUs that
1158 		 * are offline; then we must account for the remainder, either counting
1159 		 * them as halted, or trying to dbgwrap them to get them to halt in the
1160 		 * case where the system is going down and we are running a dev fused
1161 		 * device.
1162 		 */
1163 		__builtin_arm_dmb(DMB_ISH);
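		/* Inner-shareable barrier so the per-CPU signal state examined below is ordered after the synchronization polling above. */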
1164 		for (cpu = 0; cpu <= max_cpu; cpu++) {
1165 			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1166 
1167 			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1168 				continue;
1169 			}
1170 			if (!(target_cpu_datap->cpu_signal & SIGPdebug)) {
1171 				continue;
1172 			}
1173 			if (processor_array[cpu]->state <= PROCESSOR_PENDING_OFFLINE) {
1174 				int dbg_sync_count;
1175 
1176 				/*
1177 				 * This is a processor that was successfully sent a SIGPdebug signal
1178 				 * but which hasn't acknowledged it because it went offline with
1179 				 * interrupts disabled before the IPI was delivered, so count it
1180 				 * as halted here.
1181 				 */
1182 				dbg_sync_count = os_atomic_dec(&debugger_sync, relaxed);
1183 				kprintf("%s>found CPU %d offline, debugger_sync=%d\n", __FUNCTION__, cpu, dbg_sync_count);
1184 				continue;
1185 			}
1186 			kprintf("%s>Debugger synch pending on cpu %d\n", __FUNCTION__, cpu);
1187 			timeout_cpu = cpu;
1188 #if CONFIG_SPTM
1189 			if (proceed_on_sync_failure) {
1190 				/*
1191 				 * If a core is spinning in the SPTM panic loop, consider it
1192 				 * as sync'd, and try to patch up the thread state from the
1193 				 * SPTM callee saved registers.
1194 				 */
1195 				bool sptm_panic_loop = false;
1196 				vm_offset_t base = other_percpu_base(cpu);
1197 				pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET_WITH_BASE(base, pmap_sptm_percpu);
1198 				uint64_t sptm_cpuid = sptm_pcpu->sptm_cpu_id;
1199 
1200 				if (sptm_get_cpu_state(sptm_cpuid, CPUSTATE_PANIC_SPIN, &sptm_panic_loop)
1201 				    == SPTM_SUCCESS && sptm_panic_loop) {
1202 					xnu_saved_registers_t regs;
1203 
1204 					if (sptm_copy_callee_saved_state(sptm_cpuid, &regs)
1205 					    == LIBSPTM_SUCCESS) {
1206 						DebuggerPatchupThreadState(cpu, &regs);
1207 					}
1208 
1209 					kprintf("%s>found CPU %d in SPTM\n", __FUNCTION__, cpu);
1210 					os_atomic_dec(&debugger_sync, relaxed);
1211 				}
1212 			}
1213 #endif
1214 		}
1215 
1216 		if (debugger_sync == 0) {
1217 			return KERN_SUCCESS;
1218 		} else if (!proceed_on_sync_failure) {
1219 			panic("%s>Debugger synch pending on cpu %d\n",
1220 			    __FUNCTION__, timeout_cpu);
1221 		}
1222 	}
1223 	if (immediate_halt || (max_mabs_time > 0 && current_mabs_time >= max_mabs_time)) {
1224 		if (immediate_halt) {
1225 			__builtin_arm_dmb(DMB_ISH);
1226 		}
1227 		for (cpu = 0; cpu <= max_cpu; cpu++) {
1228 			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1229 
1230 			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1231 				continue;
1232 			}
1233 			paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
1234 			dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
1235 			if (halt_status < 0) {
1236 				paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
1237 			} else {
1238 				if (halt_status > 0) {
1239 					paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
1240 				}
1241 				target_cpu_datap->halt_status = CPU_HALTED;
1242 			}
1243 		}
1244 		for (cpu = 0; cpu <= max_cpu; cpu++) {
1245 			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
1246 
1247 			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
1248 				continue;
1249 			}
1250 			dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
1251 			    NSEC_PER_SEC, &target_cpu_datap->halt_state);
1252 			if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
1253 				paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
1254 				debug_ack_timeout_count++;
1255 			} else {
1256 				paniclog_append_noflush("cpu %d successfully halted\n", cpu);
1257 				target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
1258 			}
1259 		}
1260 		if (immediate_halt) {
1261 			paniclog_append_noflush("Immediate halt requested on all cores\n");
1262 		} else {
1263 			paniclog_append_noflush("Debugger synchronization timed out; timeout %llu nanoseconds\n",
1264 			    os_atomic_load(&debug_ack_timeout, relaxed));
1265 		}
1266 	}
1267 	return KERN_SUCCESS;
1268 }
1269 
1270 /*
1271  * @function DebuggerXCallReturn
1272  *
1273  * @abstract Resume normal multicore operation after DebuggerXCallEnter()
1274  *
1275  * @discussion This function should be called with debugger lock held.
1276  */
1277 void
1278 DebuggerXCallReturn(
1279 	void)
1280 {
1281 	cpu_data_t      *cpu_data_ptr = getCpuDatap();
1282 	uint64_t max_mabs_time, current_mabs_time;
1283 
1284 	cpu_data_ptr->debugger_active--;
1285 	if (cpu_data_ptr->debugger_active != 0) {
1286 		return;
1287 	}
1288 
1289 	os_atomic_store(&mp_kdp_trap, 0, release);
1290 	os_atomic_store(&debugger_sync, 0, relaxed);
1291 
1292 	max_mabs_time = os_atomic_load(&debug_ack_timeout, relaxed);
1293 
1294 	if (max_mabs_time > 0) {
1295 		current_mabs_time = mach_absolute_time();
1296 		max_mabs_time += current_mabs_time;
1297 		assert(max_mabs_time > current_mabs_time);
1298 	}
1299 
1300 	/*
1301 	 * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
1302 	 * It's possible for one or more CPUs to not decrement debug_cpus_spinning,
1303 	 * since they may be stuck somewhere else with interrupts disabled.
1304 	 * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
1305 	 *
1306 	 * Note that the same is done in DebuggerXCallEnter, when we wait for other
1307 	 * CPUs to update debugger_sync. If we time out, we can only hope that all
1308 	 * CPUs are spinning in a debugger-safe context.
1309 	 */
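	/*
	 * os_atomic_load_exclusive arms the exclusive monitor, so the WFE below
	 * wakes when another CPU writes debug_cpus_spinning (or via the periodic
	 * event stream) instead of requiring a pure busy-wait.
	 */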
1310 	while ((os_atomic_load_exclusive(&debug_cpus_spinning, acquire) != 0) &&
1311 	    (max_mabs_time == 0 || current_mabs_time < max_mabs_time)) {
1312 		__builtin_arm_wfe();
1313 		current_mabs_time = mach_absolute_time();
1314 	}
1315 	os_atomic_clear_exclusive();
1316 
1317 	// checking debug_ack_timeout != 0 is a workaround for rdar://124242354
1318 	if (current_mabs_time >= max_mabs_time && os_atomic_load(&debug_ack_timeout, relaxed) != 0) {
1319 		panic("Resuming from debugger synchronization failed: waited %llu nanoseconds\n", os_atomic_load(&debug_ack_timeout, relaxed));
1320 	}
1321 }
1322 
1323 extern void wait_while_mp_kdp_trap(bool check_SIGPdebug);
1324 /*
1325  * Spin while mp_kdp_trap is set.
1326  *
1327  * processor_offline() calls this with check_SIGPdebug=true
1328  * to break out of the spin loop if the cpu has SIGPdebug
1329  * pending.
1330  */
1331 void
1332 wait_while_mp_kdp_trap(bool check_SIGPdebug)
1333 {
1334 	bool found_mp_kdp_trap = false;
1335 	bool found_SIGPdebug = false;
1336 
1337 	while (os_atomic_load_exclusive(&mp_kdp_trap, acquire) != 0) {
1338 		found_mp_kdp_trap = true;
1339 		if (check_SIGPdebug && cpu_has_SIGPdebug_pending()) {
1340 			found_SIGPdebug = true;
1341 			break;
1342 		}
1343 		__builtin_arm_wfe();
1344 	}
1345 	os_atomic_clear_exclusive();
1346 
1347 	if (check_SIGPdebug && found_mp_kdp_trap) {
1348 		kprintf("%s>found_mp_kdp_trap=true found_SIGPdebug=%s\n", __FUNCTION__, found_SIGPdebug ? "true" : "false");
1349 	}
1350 }
1351 
1352 void
1353 DebuggerXCall(
1354 	void            *ctx)
1355 {
1356 	boolean_t               save_context = FALSE;
1357 	vm_offset_t             kstackptr = 0;
1358 	arm_saved_state_t       *regs = (arm_saved_state_t *) ctx;
1359 
1360 	if (regs != NULL) {
1361 #if defined(__arm64__)
1362 		current_cpu_datap()->ipi_pc = (uint64_t)get_saved_state_pc(regs);
1363 		current_cpu_datap()->ipi_lr = (uint64_t)get_saved_state_lr(regs);
1364 		current_cpu_datap()->ipi_fp = (uint64_t)get_saved_state_fp(regs);
1365 		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
1366 #endif
1367 	}
1368 
1369 	kstackptr = (vm_offset_t)current_thread()->machine.kstackptr;
1370 
1371 #if defined(__arm64__)
1372 	arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;
1373 
1374 	if (save_context) {
1375 		/* Save the interrupted context before acknowledging the signal */
1376 		current_thread()->machine.kpcb = regs;
1377 	} else if (regs) {
1378 		/* zero old state so machine_trace_thread knows not to backtrace it */
1379 		state->fp = 0;
1380 		state->pc_was_in_userspace = true;
1381 		state->lr = 0;
1382 		state->sp = 0;
1383 		state->ssbs = 0;
1384 		state->uao = 0;
1385 		state->dit = 0;
1386 	}
1387 #endif
1388 
1389 	/*
1390 	 * When running in serial mode, the core capturing the dump may hold interrupts disabled
1391 	 * for a time longer than the timeout. That path includes logic to reset the timestamp
1392 	 * so that we do not eventually trigger the interrupt timeout assert().
1393 	 *
1394 	 * Here we check whether other cores have already gone over the timeout at this point
1395 	 * before spinning, so we at least cover the IPI reception path. After spinning, however,
1396 	 * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
1397 	 */
1398 	if ((serialmode & SERIALMODE_OUTPUT) || trap_is_stackshot) {
1399 		ml_interrupt_masked_debug_end();
1400 	}
1401 
1402 	/*
1403 	 * Before we decrement debugger sync, do stackshot preflight work (if applicable).
1404 	 * Namely, we want to signal that we're available to do stackshot work, and we need to
1405 	 * signal so before the stackshot-calling CPU starts work.
1406 	 */
1407 
1408 	if (trap_is_stackshot) {
1409 		stackshot_cpu_preflight();
1410 	}
1411 
1412 	os_atomic_dec(&debugger_sync, release);
1413 
1414 	/* If we trapped because we're doing a stackshot, do our work first. */
1415 	if (trap_is_stackshot) {
1416 		stackshot_aux_cpu_entry();
1417 	}
1418 
1419 
1420 	wait_while_mp_kdp_trap(false);
1421 
1422 	/**
1423 	 * Alert the triggering CPU that this CPU is done spinning. The CPU that
1424 	 * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
1425 	 * all of the CPUs to exit the above loop before continuing.
1426 	 */
1427 	os_atomic_dec(&debug_cpus_spinning, release);
1428 
1429 #if SCHED_HYGIENE_DEBUG
1430 	/*
1431 	 * We also abandon the measurement for preemption disable
1432 	 * timeouts, if any. Normally, time in interrupt handlers would be
1433 	 * subtracted from preemption disable time, and this will happen
1434 	 * up to this point here, but since we here "end" the interrupt
1435 	 * handler prematurely (from the point of view of interrupt masked
1436 	 * debugging), the time spinning would otherwise still be
1437 	 * attributed to preemption disable time, and potentially trigger
1438 	 * an event, which could be a panic.
1439 	 */
1440 	abandon_preemption_disable_measurement();
1441 
1442 	if ((serialmode & SERIALMODE_OUTPUT) || trap_is_stackshot) {
1443 		ml_interrupt_masked_debug_start((void *)current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
1444 	}
1445 #endif /* SCHED_HYGIENE_DEBUG */
1446 
1447 #if defined(__arm64__)
1448 	current_thread()->machine.kpcb = NULL;
1449 #endif /* defined(__arm64__) */
1450 
1451 	/* Any cleanup for our pushed context should go here */
1452 }
1453 
1454 void
1455 DebuggerCall(
1456 	unsigned int    reason,
1457 	void            *ctx)
1458 {
1459 #if     !MACH_KDP
1460 #pragma unused(reason,ctx)
1461 #endif /* !MACH_KDP */
1462 
1463 #if ALTERNATE_DEBUGGER
1464 	alternate_debugger_enter();
1465 #endif
1466 
1467 #if     MACH_KDP
1468 	kdp_trap(reason, (struct arm_saved_state *)ctx);
1469 #else
1470 	/* TODO: decide what to do if no debugger config */
1471 #endif
1472 }
1473 
1474 boolean_t
1475 bootloader_valid_page(ppnum_t ppn)
1476 {
1477 	return pmap_bootloader_page(ppn);
1478 }
1479