/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>
#include <arm/exception.h>
#include <arm/pmap.h>
#include <arm/proc_reg.h>
#include <arm/thread.h>
#include <arm/trap.h>
#include <arm/cpu_data_internal.h>
#include <kdp/kdp_internal.h>
#include <kern/debug.h>
#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_map.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif

void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);
int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

extern bool machine_trace_thread_validate_kva(vm_offset_t addr);

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
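/*
 * Format a KDP_EXCEPTION notification for the remote debugger. The packet
 * is staged in a naturally aligned local copy so the header and exception
 * info can be written without taking alignment faults, then copied back
 * into the caller's buffer along with a single kdp_exc_info_t describing
 * the exception.
 */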
void
kdp_exception(
	unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t  exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq);

	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}

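/*
 * Validate an exception acknowledgement from the remote debugger. Returns
 * TRUE for any well-formed KDP_EXCEPTION reply; the sequence number only
 * advances when the ack matches the outstanding exception.
 */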
boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
	kdp_exception_ack_t aligned_pkt;
	kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

	if ((unsigned)len < sizeof(*rq)) {
		return FALSE;
	}

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));

	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
		return FALSE;
	}

	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

	if (rq->hdr.seq == kdp.exception_seq) {
		kdp.exception_ack_needed = FALSE;
		kdp.exception_seq++;
	}
	return TRUE;
}

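/*
 * Copy the integer register state captured at debugger entry
 * (kdp.saved_state) into the caller's buffer, in the thread-state
 * layout for the current architecture.
 */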
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	saved_state = kdp.saved_state;

	bzero((char *) &thread_state, sizeof(struct arm_thread_state));

	saved_state_to_thread_state32(saved_state, &thread_state);

	bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	arm_saved_state_t *saved_state;

	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}

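/*
 * KDP register-read handler. Only the integer thread state is backed by
 * real data; VFP state is returned zero-filled. Sizes are reported in
 * bytes (the *_COUNT constants count 32-bit words).
 */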
kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
{
	switch (flavor) {
#if defined(__arm__)
	case ARM_THREAD_STATE:
		dprintf(("kdp_readregs THREAD_STATE\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#elif defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_readregs THREAD_STATE64\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE64_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_readregs THREAD_FPSTATE\n"));
		bzero((char *) data, sizeof(struct arm_vfp_state));
		*size = ARM_VFP_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;

	default:
		dprintf(("kdp_readregs bad flavor %d\n", flavor));
		return KDPERR_BADFLAVOR;
	}
}

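/*
 * Write integer register state supplied by the debugger back into the
 * saved state captured at debugger entry. On arm64 the CPSR is re-applied
 * directly so the debugger can set bits that the normal sanitization in
 * thread_state64_to_saved_state() would strip.
 */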
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
	saved_state = kdp.saved_state;

	thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	thread_state64_to_saved_state(&thread_state64, saved_state);
	set_saved_state_cpsr(saved_state, thread_state64.cpsr); /* override CPSR sanitization */
#else
#error Unknown architecture.
#endif
}

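/*
 * KDP register-write handler. Integer state is applied via
 * kdp_setintegerstate(); VFP writes are accepted but ignored.
 */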
kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
{
	switch (flavor) {
#if defined(__arm__)
	case ARM_THREAD_STATE:
		dprintf(("kdp_writeregs THREAD_STATE\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#elif defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_writeregs THREAD_STATE64\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
		return KDPERR_NO_ERROR;

	default:
		dprintf(("kdp_writeregs bad flavor %d\n", flavor));
		return KDPERR_BADFLAVOR;
	}
}

void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
{
	hostinfo->cpus_mask = 1;
	hostinfo->cpu_type = slot_type(0);
	hostinfo->cpu_subtype = slot_subtype(0);
}

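/*
 * Panic from within KDP itself: prefix and print the message, then spin
 * forever rather than attempting to re-enter the debugger.
 */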
__attribute__((noreturn))
void
kdp_panic(const char * fmt, ...)
{
	char kdp_fmt[256];
	va_list args;

	va_start(args, fmt);
	(void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt);
	vprintf(kdp_fmt, args);
	va_end(args);

	while (1) {
	}
}

int
kdp_intr_disbl(void)
{
	return splhigh();
}

void
kdp_intr_enbl(int s)
{
	splx(s);
}

void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}

void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}

int
kdp_getc(void)
{
	return console_try_read_char();
}

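/*
 * Report the breakpoint instruction for the debugger to plant: the 4-byte
 * arm32 GDB trap encoding (GDB_TRAP_INSTR1), which kdp_trap() recognizes
 * and steps over on resume.
 */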
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
	*(uint32_t *)bytes = GDB_TRAP_INSTR1;
	*size = sizeof(uint32_t);
}

void
kdp_sync_cache(void)
{
}

int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

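/*
 * Debugger trap entry. After handle_debugger_trap() returns, advance the
 * PC past the trap instruction if it is one of the recognized GDB trap
 * encodings, so execution resumes at the following instruction. On arm32
 * the Thumb (2-byte) and ARM (4-byte) variants are distinguished via
 * PSR_TF; arm64 reuses the 4-byte arm32 encodings.
 */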
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm__)
	if (saved_state->cpsr & PSR_TF) {
		unsigned short instr = *((unsigned short *)(saved_state->pc));
		if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) {
			saved_state->pc += 2;
		}
	} else {
		unsigned int instr = *((unsigned int *)(saved_state->pc));
		if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
			saved_state->pc += 4;
		}
	}

#elif defined(__arm64__)
	assert(is_saved_state64(saved_state));

	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we are using the arm32 trap encoding to handle
	 * traps to the debugger, we should identify both variants and
	 * increment for both of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
		add_saved_state_pc(saved_state, 4);
	}
#else
#error Unknown architecture.
#endif
}

#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));

#if !defined(__arm64__)

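/*
 * Walk the chain of 32-bit frame records to produce a backtrace. Each
 * frame record is laid out as:
 *
 *     fp + 0                 saved frame pointer (previous fp)
 *     fp + ARM32_LR_OFFSET   saved link register (return address)
 *
 * One unslid return address is emitted per frame. Every load is checked
 * with machine_trace_thread_validate_kva() first; a failed check
 * truncates the trace and sets kThreadTruncatedBT for the caller.
 */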
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
	uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

	vm_size_t framesize = sizeof(uint32_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	uint32_t short_fp = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc, sp;
	vm_offset_t prevfp = 0;
	uint32_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

#if defined(__arm__)
	/* kstackptr may not always be there, so recompute it */
	state = &thread_get_kernel_state(thread)->machine;

	stacklimit = VM_MAX_KERNEL_ADDRESS;
	stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
#else
#error Unknown architecture.
#endif

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = (uint32_t)get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	if (!prevlr && !fp && !sp && !pc) {
		return 0;
	}

	prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/* Unaligned frame */
		if (fp & 0x0000003) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t prev_in_interrupt_stack = FALSE;

			/*
			 * As a special case, sometimes we are backtracing out of an interrupt
			 * handler, and the stack jumps downward because of the memory allocation
			 * pattern during early boot due to KASLR.
			 */
			int cpu;
			int max_cpu = ml_get_max_cpu_number();

			for (cpu = 0; cpu <= max_cpu; cpu++) {
				cpu_data_t *target_cpu_datap;

				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
				if (target_cpu_datap == (cpu_data_t *)NULL) {
					continue;
				}

				if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
					prev_in_interrupt_stack = TRUE;
					break;
				}

				if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
					prev_in_interrupt_stack = TRUE;
					break;
				}
			}

			if (!prev_in_interrupt_stack) {
				/* Corrupt frame pointer? */
				break;
			}
		}
		/* Assume there's a saved link register, and read it */
		kern_virt_addr = fp + ARM32_LR_OFFSET;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = (uint32_t)VM_KERNEL_UNSLIDE(*(uint32_t *)kern_virt_addr);
		prevfp = fp;

		/*
		 * Next frame; read the fp value into short_fp first
		 * as it is 32-bit.
		 */
		kern_virt_addr = fp;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			fp = 0;
			break;
		}

		short_fp = *(uint32_t *)kern_virt_addr;
		fp = (vm_offset_t) short_fp;
	}
	return (int)(((char *)tracebuf) - tracepos);
}

#endif // !defined(__arm64__)

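/*
 * 64-bit counterpart of machine_trace_thread(). Initial register state
 * comes from the thread's kpcb if present, otherwise from the state
 * saved on the kernel stack. The frame record layout is:
 *
 *     fp + 0                 saved frame pointer (previous fp)
 *     fp + ARM64_LR_OFFSET   saved link register (return address)
 *
 * On arm64e the saved return addresses are PAC-signed per the ABI, so
 * they are stripped with ptrauth_strip() before being unslid.
 */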
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
#if defined(__arm__)
#pragma unused(thread, tracepos, tracebound, nframes, thread_trace_flags)
	return 0;
#elif defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = sizeof(uint64_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	vm_offset_t pc = 0;
	vm_offset_t fp = 0;
	vm_offset_t sp = 0;
	vm_offset_t prevfp = 0;
	uint64_t prevlr = 0;
	vm_offset_t kern_virt_addr = 0;

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	struct arm_saved_state *state = thread->machine.kpcb;
	if (state != NULL) {
		fp = state->ss_64.fp;

		prevlr = state->ss_64.lr;
		pc = state->ss_64.pc;
		sp = state->ss_64.sp;
	} else {
		/* kstackptr may not always be there, so recompute it */
		arm_kernel_saved_state_t *kstate = &thread_get_kernel_state(thread)->machine.ss;

		fp = kstate->fp;
		prevlr = kstate->lr;
		pc = kstate->pc;
		sp = kstate->sp;
	}

	stacklimit = VM_MAX_KERNEL_ADDRESS;
	stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;

	if (!prevlr && !fp && !sp && !pc) {
		return 0;
	}

	prevlr = VM_KERNEL_UNSLIDE(prevlr);

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			bool switched_stacks = false;

			/*
			 * As a special case, sometimes we are backtracing out of an interrupt
			 * handler, and the stack jumps downward because of the memory allocation
			 * pattern during early boot due to KASLR.
			 */
			int cpu;
			int max_cpu = ml_get_max_cpu_number();

			for (cpu = 0; cpu <= max_cpu; cpu++) {
				cpu_data_t *target_cpu_datap;

				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
				if (target_cpu_datap == (cpu_data_t *)NULL) {
					continue;
				}

				if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
					switched_stacks = true;
					break;
				}
#if defined(__arm__)
				if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
					switched_stacks = true;
					break;
				}
#elif defined(__arm64__)
				if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
					switched_stacks = true;
					break;
				}
#endif
			}

#if XNU_MONITOR
			vm_offset_t cpu_base = (vm_offset_t)pmap_stacks_start;
			vm_offset_t cpu_top = (vm_offset_t)pmap_stacks_end;

			if (((prevfp >= cpu_base) && (prevfp < cpu_top)) !=
			    ((fp >= cpu_base) && (fp < cpu_top))) {
				switched_stacks = true;
				break;
			}
#endif

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = fp + ARM64_LR_OFFSET;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
		prevlr = VM_KERNEL_UNSLIDE(prevlr);

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = fp;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			fp = 0;
			break;
		}

		fp = *(uint64_t *)kern_virt_addr;
	}
	return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}

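/*
 * Trap into the debugger by executing a permanently undefined arm32
 * instruction; the 0xe7ffdefe encoding matches the GDB trap that
 * kdp_trap() recognizes and steps over (GDB_TRAP_INSTR1), reached via
 * the undefined-instruction exception path.
 */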
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile (".long 0xe7ffdefe");
}