xref: /xnu-8020.101.4/osfmk/kdp/ml/arm/kdp_machdep.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/mach_types.h>
30 #include <mach/exception_types.h>
31 #include <arm/exception.h>
32 #include <arm/pmap.h>
33 #include <arm/proc_reg.h>
34 #include <arm/thread.h>
35 #include <arm/trap.h>
36 #include <arm/cpu_data_internal.h>
37 #include <kdp/kdp_internal.h>
38 #include <kern/debug.h>
39 #include <IOKit/IOPlatformExpert.h>
40 #include <libkern/OSAtomic.h>
41 #include <vm/vm_map.h>
42 
43 #if defined(HAS_APPLE_PAC)
44 #include <ptrauth.h>
45 #endif
46 
47 #define KDP_TEST_HARNESS 0
48 #if KDP_TEST_HARNESS
49 #define dprintf(x) kprintf x
50 #else
51 #define dprintf(x) do {} while (0)
52 #endif
53 
54 void            halt_all_cpus(boolean_t);
55 void kdp_call(void);
56 int kdp_getc(void);
57 int machine_trace_thread(thread_t thread,
58     char * tracepos,
59     char * tracebound,
60     int nframes,
61     uint32_t * thread_trace_flags);
62 int machine_trace_thread64(thread_t thread,
63     char * tracepos,
64     char * tracebound,
65     int nframes,
66     uint32_t * thread_trace_flags);
67 
68 void kdp_trap(unsigned int, struct arm_saved_state * saved_state);
69 
70 extern bool machine_trace_thread_validate_kva(vm_offset_t addr);
71 
72 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
/*
 * Build a KDP_EXCEPTION notification packet in place.
 *
 * Copies the caller's packet into a naturally aligned local, fills in the
 * exception header plus a single kdp_exc_info_t record (always reported
 * against CPU 0), then copies the result back over 'pkt'.  On return,
 * '*len' holds the packet length and '*remote_port' the debugger's
 * exception port, and an acknowledgement is marked outstanding.
 */
void
kdp_exception(
	unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	/* Aligned local: 'pkt' carries no alignment guarantee. */
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

	/* Exactly one exception record is reported. */
	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	/*
	 * NOTE(review): hdr.len was already seeded with one kdp_exc_info_t
	 * above, and this adds n_exc_info more -- looks like one record may be
	 * counted twice; confirm against the KDP wire protocol before changing.
	 */
	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	/* Remote side must ack this sequence number (see kdp_exception_ack). */
	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}
106 
107 boolean_t
kdp_exception_ack(unsigned char * pkt,int len)108 kdp_exception_ack(unsigned char * pkt, int len)
109 {
110 	kdp_exception_ack_t aligned_pkt;
111 	kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;
112 
113 	if ((unsigned)len < sizeof(*rq)) {
114 		return FALSE;
115 	}
116 
117 	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
118 
119 	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
120 		return FALSE;
121 	}
122 
123 	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
124 
125 	if (rq->hdr.seq == kdp.exception_seq) {
126 		kdp.exception_ack_needed = FALSE;
127 		kdp.exception_seq++;
128 	}
129 	return TRUE;
130 }
131 
/*
 * Copy the debugger-entry integer register state into 'out_state'.
 *
 * Reads the saved state captured at debugger entry (kdp.saved_state) and
 * converts it to the exported thread-state layout for the current
 * architecture.  'out_state' must be large enough for
 * struct arm_thread_state (arm32) / struct arm_thread_state64 (arm64).
 */
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	saved_state = kdp.saved_state;

	/* Zero first so any fields not filled by the conversion read as 0. */
	bzero((char *) &thread_state, sizeof(struct arm_thread_state));

	saved_state_to_thread_state32(saved_state, &thread_state);

	bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	arm_saved_state_t *saved_state;

	saved_state = kdp.saved_state;
	/* Only a 64-bit saved-state is expected at debugger entry on arm64. */
	assert(is_saved_state64(saved_state));

	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}
162 
163 kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu,unsigned int flavor,char * data,int * size)164 kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
165 {
166 	switch (flavor) {
167 #if defined(__arm__)
168 	case ARM_THREAD_STATE:
169 		dprintf(("kdp_readregs THREAD_STATE\n"));
170 		kdp_getintegerstate(data);
171 		*size = ARM_THREAD_STATE_COUNT * sizeof(int);
172 		return KDPERR_NO_ERROR;
173 #elif defined(__arm64__)
174 	case ARM_THREAD_STATE64:
175 		dprintf(("kdp_readregs THREAD_STATE64\n"));
176 		kdp_getintegerstate(data);
177 		*size = ARM_THREAD_STATE64_COUNT * sizeof(int);
178 		return KDPERR_NO_ERROR;
179 #endif
180 
181 	case ARM_VFP_STATE:
182 		dprintf(("kdp_readregs THREAD_FPSTATE\n"));
183 		bzero((char *) data, sizeof(struct arm_vfp_state));
184 		*size = ARM_VFP_STATE_COUNT * sizeof(int);
185 		return KDPERR_NO_ERROR;
186 
187 	default:
188 		dprintf(("kdp_readregs bad flavor %d\n"));
189 		return KDPERR_BADFLAVOR;
190 	}
191 }
192 
/*
 * Install an integer register state supplied by the debugger.
 *
 * Copies the architecture's exported thread-state layout out of 'state_in'
 * and writes it back into the saved state captured at debugger entry
 * (kdp.saved_state), so the values take effect when the target resumes.
 */
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	/* Copy into an aligned local; 'state_in' has no alignment guarantee. */
	bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
	saved_state = kdp.saved_state;

	thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	thread_state64_to_saved_state(&thread_state64, saved_state);
	/* The debugger is trusted with the raw CPSR value. */
	set_saved_state_cpsr(saved_state, thread_state64.cpsr); /* override CPSR sanitization */
#else
#error Unknown architecture.
#endif
}
218 
219 kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu,unsigned int flavor,char * data,__unused int * size)220 kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
221 {
222 	switch (flavor) {
223 #if defined(__arm__)
224 	case ARM_THREAD_STATE:
225 		dprintf(("kdp_writeregs THREAD_STATE\n"));
226 		kdp_setintegerstate(data);
227 		return KDPERR_NO_ERROR;
228 #elif defined(__arm64__)
229 	case ARM_THREAD_STATE64:
230 		dprintf(("kdp_writeregs THREAD_STATE64\n"));
231 		kdp_setintegerstate(data);
232 		return KDPERR_NO_ERROR;
233 #endif
234 
235 	case ARM_VFP_STATE:
236 		dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
237 		return KDPERR_NO_ERROR;
238 
239 	default:
240 		dprintf(("kdp_writeregs bad flavor %d\n"));
241 		return KDPERR_BADFLAVOR;
242 	}
243 }
244 
245 void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)246 kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
247 {
248 	hostinfo->cpus_mask = 1;
249 	hostinfo->cpu_type = slot_type(0);
250 	hostinfo->cpu_subtype = slot_subtype(0);
251 }
252 
/*
 * Report a fatal KDP error and halt this CPU.
 *
 * Prefixes the caller's format string with "kdp panic: ", prints the
 * message, then spins forever; this function never returns.
 */
__attribute__((noreturn))
void
kdp_panic(const char * fmt, ...)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	char kdp_fmt[256];
	va_list args;

	/* Build the tagged format first; it does not consume the varargs. */
	(void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt);

	va_start(args, fmt);
	vprintf(kdp_fmt, args);
	va_end(args);

	/* Halt here forever. */
	for (;;) {
	}
#pragma clang diagnostic pop
}
272 
/*
 * Raise the interrupt priority level for KDP work.
 * Returns the previous level, to be restored via kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
	int previous_level = splhigh();

	return previous_level;
}
278 
/* Restore the interrupt priority level previously returned by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
284 
/*
 * Busy-wait used by KDP polling loops.
 *
 * NOTE(review): the requested microsecond count is divided by 100 before
 * being handed to delay() -- presumably an intentional shortening of KDP
 * poll intervals; confirm delay()'s time unit before relying on the
 * exact duration.
 */
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}
290 
/* Enter the debugger explicitly (used from the machine startup path). */
void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}
296 
/*
 * Non-blocking console read for the debugger.
 * Returns the next character, or console_try_read_char()'s no-data value
 * when nothing is pending.
 */
int
kdp_getc(void)
{
	return console_try_read_char();
}
302 
303 void
kdp_machine_get_breakinsn(uint8_t * bytes,uint32_t * size)304 kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
305 {
306 	*(uint32_t *)bytes = GDB_TRAP_INSTR1;
307 	*size = sizeof(uint32_t);
308 }
309 
/* Cache synchronization after breakpoint patching: intentionally a no-op on ARM. */
void
kdp_sync_cache(void)
{
}
314 
/* I/O-port read request: x86-only concept, stubbed out on ARM (always "success", no data). */
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
321 
/* I/O-port write request: x86-only concept, stubbed out on ARM (always "success", no effect). */
int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
328 
/* MSR read request: x86-only concept, stubbed out on ARM (always "success", no data). */
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
335 
/* MSR write request: x86-only concept, stubbed out on ARM (always "success", no effect). */
int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
342 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
343 
/*
 * Machine-dependent KDP trap handler.
 *
 * Hands the exception to the debugger core, then, if the instruction at
 * the saved PC is one of the known debugger trap encodings, advances the
 * PC past it so execution can resume after the trap.
 */
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm__)
	if (saved_state->cpsr & PSR_TF) {
		/* Thumb mode: the trap is a 2-byte encoding. */
		unsigned short instr = *((unsigned short *)(saved_state->pc));
		if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) {
			saved_state->pc += 2;
		}
	} else {
		/* ARM mode: the trap is a 4-byte encoding. */
		unsigned int instr = *((unsigned int *)(saved_state->pc));
		if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
			saved_state->pc += 4;
		}
	}

#elif defined(__arm64__)
	assert(is_saved_state64(saved_state));

	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we are using the arm32 trap encoding to handling
	 * traps to the debugger, we should identify both variants and
	 * increment for both of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
		add_saved_state_pc(saved_state, 4);
	}
#else
#error Unknown architecture.
#endif
}
379 
380 #define ARM32_LR_OFFSET 4
381 #define ARM64_LR_OFFSET 8
382 
383 /*
384  * Since sizeof (struct thread_snapshot) % 4 == 2
385  * make sure the compiler does not try to use word-aligned
386  * access to this data, which can result in alignment faults
387  * that can't be emulated in KDP context.
388  */
389 typedef uint32_t uint32_align2_t __attribute__((aligned(2)));
390 
391 #if !defined(__arm64__)
392 
/*
 * Produce a kernel backtrace for a 32-bit (arm32) thread by walking its
 * saved frame-pointer chain.
 *
 * Each frame contributes one unslid 32-bit return address, written
 * sequentially into 'tracepos'.  Walking stops at the first invalid,
 * unaligned, or out-of-range frame pointer, after 'nframes' frames, or
 * when 'tracebound' would be exceeded.
 *
 * thread:              thread whose kernel stack is walked
 * tracepos/tracebound: output buffer bounds for the address list
 * nframes:             maximum number of frames to record
 * thread_trace_flags:  if non-NULL, kThreadTruncatedBT is OR'ed in when a
 *                      frame could not be safely read
 *
 * Returns the number of bytes written to 'tracepos'.
 */
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
	/* 2-byte-aligned stores: see the uint32_align2_t comment above. */
	uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

	vm_size_t framesize = sizeof(uint32_t);

	vm_offset_t stacklimit        = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount                = 0;
	uint32_t short_fp             = 0;
	vm_offset_t fp                = 0;
	vm_offset_t pc, sp;
	vm_offset_t prevfp            = 0;
	uint32_t prevlr               = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;

	/* Clamp the frame count to what fits in the output buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

#if defined(__arm__)
	/* kstackptr may not always be there, so recompute it */
	state = &thread_get_kernel_state(thread)->machine;

	stacklimit = VM_MAX_KERNEL_ADDRESS;
	stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
#else
#error Unknown architecture.
#endif

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = (uint32_t)get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	/* An all-zero register state means there is nothing to walk. */
	if (!prevlr && !fp && !sp && !pc) {
		return 0;
	}

	prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/* Unaligned frame */
		if (fp & 0x0000003) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t prev_in_interrupt_stack = FALSE;

			/*
			 * As a special case, sometimes we are backtracing out of an interrupt
			 * handler, and the stack jumps downward because of the memory allocation
			 * pattern during early boot due to KASLR.
			 */
			int cpu;
			int max_cpu = ml_get_max_cpu_number();

			/* Accept the jump if the previous frame sat on any CPU's IRQ/FIQ stack. */
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				cpu_data_t      *target_cpu_datap;

				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
				if (target_cpu_datap == (cpu_data_t *)NULL) {
					continue;
				}

				if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
					prev_in_interrupt_stack = TRUE;
					break;
				}

				if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
					prev_in_interrupt_stack = TRUE;
					break;
				}
			}

			if (!prev_in_interrupt_stack) {
				/* Corrupt frame pointer? */
				break;
			}
		}
		/* Assume there's a saved link register, and read it */
		kern_virt_addr = fp + ARM32_LR_OFFSET;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = (uint32_t)VM_KERNEL_UNSLIDE(*(uint32_t *)kern_virt_addr);
		prevfp = fp;

		/*
		 * Next frame; read the fp value into short_fp first
		 * as it is 32-bit.
		 */
		kern_virt_addr = fp;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			fp = 0;
			break;
		}

		short_fp = *(uint32_t *)kern_virt_addr;
		fp = (vm_offset_t) short_fp;
	}
	/* Bytes written: one uint32_t per recorded frame. */
	return (int)(((char *)tracebuf) - tracepos);
}
531 
532 #endif // !defined(__arm64__)
533 
/*
 * Produce a kernel backtrace for a 64-bit thread by walking its saved
 * frame-pointer chain.
 *
 * Each frame contributes one unslid (and, with pointer authentication,
 * PAC-stripped) 64-bit return address, written sequentially into
 * 'tracepos'.  Walking stops at the first invalid, unaligned, or
 * out-of-range frame pointer, after 'nframes' frames, or when
 * 'tracebound' would be exceeded.  On arm32 this is a stub returning 0.
 *
 * thread:              thread whose kernel stack is walked
 * tracepos/tracebound: output buffer bounds for the address list
 * nframes:             maximum number of frames to record
 * thread_trace_flags:  if non-NULL, kThreadTruncatedBT is OR'ed in when a
 *                      frame could not be safely read
 *
 * Returns the number of bytes written to 'tracepos'.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
#if defined(__arm__)
#pragma unused(thread, tracepos, tracebound, nframes, thread_trace_flags)
	return 0;
#elif defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = sizeof(uint64_t);

	vm_offset_t stacklimit        = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount                = 0;
	vm_offset_t pc                = 0;
	vm_offset_t fp                = 0;
	vm_offset_t sp                = 0;
	vm_offset_t prevfp            = 0;
	uint64_t prevlr               = 0;
	vm_offset_t kern_virt_addr    = 0;

	/* Clamp the frame count to what fits in the output buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	/* Prefer the live kernel PCB if present (thread interrupted in kernel). */
	struct arm_saved_state *state = thread->machine.kpcb;
	if (state != NULL) {
		fp = state->ss_64.fp;

		prevlr = state->ss_64.lr;
		pc = state->ss_64.pc;
		sp = state->ss_64.sp;
	} else {
		/* kstackptr may not always be there, so recompute it */
		arm_kernel_saved_state_t *kstate = &thread_get_kernel_state(thread)->machine.ss;

		fp = kstate->fp;
		prevlr = kstate->lr;
		pc = kstate->pc;
		sp = kstate->sp;
	}

	stacklimit = VM_MAX_KERNEL_ADDRESS;
	stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;

	/* An all-zero register state means there is nothing to walk. */
	if (!prevlr && !fp && !sp && !pc) {
		return 0;
	}

	prevlr = VM_KERNEL_UNSLIDE(prevlr);

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			bool switched_stacks = false;

			/*
			 * As a special case, sometimes we are backtracing out of an interrupt
			 * handler, and the stack jumps downward because of the memory allocation
			 * pattern during early boot due to KASLR.
			 */
			int cpu;
			int max_cpu = ml_get_max_cpu_number();

			/* Accept the jump if the previous frame sat on a per-CPU interrupt/exception stack. */
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				cpu_data_t      *target_cpu_datap;

				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
				if (target_cpu_datap == (cpu_data_t *)NULL) {
					continue;
				}

				if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
					switched_stacks = true;
					break;
				}
#if defined(__arm__)
				if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
					switched_stacks = true;
					break;
				}
#elif defined(__arm64__)
				if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
					switched_stacks = true;
					break;
				}
#endif
			}

#if XNU_MONITOR
			/* A transition into or out of the PPL stack range also explains the jump. */
			vm_offset_t cpu_base = (vm_offset_t)pmap_stacks_start;
			vm_offset_t cpu_top = (vm_offset_t)pmap_stacks_end;

			if (((prevfp >= cpu_base) && (prevfp < cpu_top)) !=
			    ((fp >= cpu_base) && (fp < cpu_top))) {
				switched_stacks = true;
				break;
			}
#endif

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = fp + ARM64_LR_OFFSET;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
		prevlr = VM_KERNEL_UNSLIDE(prevlr);

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = fp;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			fp = 0;
			break;
		}

		fp = *(uint64_t *)kern_virt_addr;
	}
	/* Bytes written: one uint64_t per recorded frame. */
	return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}
703 
/*
 * Enter the debugger by executing a trap instruction.
 * 0xe7ffdefe is the arm32 debugger trap encoding (presumably
 * GDB_TRAP_INSTR1 -- confirm against arm/trap.h), which the exception
 * path routes to kdp_trap().
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile (".long 0xe7ffdefe");
}
709