1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/exception_types.h>
31 #include <arm/exception.h>
32 #include <arm/pmap.h>
33 #include <arm64/proc_reg.h>
34 #include <arm/thread.h>
35 #include <arm/trap.h>
36 #include <arm/cpu_data_internal.h>
37 #include <kdp/kdp_internal.h>
38 #include <kern/debug.h>
39 #include <IOKit/IOPlatformExpert.h>
40 #include <libkern/OSAtomic.h>
41 #include <vm/vm_map.h>
42 #include <arm/misc_protos.h>
43
44 #if defined(HAS_APPLE_PAC)
45 #include <ptrauth.h>
46 #endif
47
/*
 * Debug tracing for the KDP test harness. When KDP_TEST_HARNESS is 0
 * (the normal case) dprintf() compiles to nothing, so its arguments are
 * never evaluated.  Callers use double parentheses: dprintf(("fmt", ...)).
 */
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif
54
void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);
/* Walk a thread's kernel stack, appending unslid return addresses to tracepos. */
int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags);

/* Entry from the low-level trap path into the KDP debugger. */
void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

/* Returns true if addr is a mapped, readable kernel virtual address. */
extern bool machine_trace_thread_validate_kva(vm_offset_t addr);
72
73 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
/*
 * Build a KDP_EXCEPTION packet in place over 'pkt', describing a single
 * exception (exception/code/subcode on CPU 0), and arm the ack handshake.
 *
 * On return, *len is the packet length and *remote_port is the port the
 * caller should send it to.  kdp.exception_ack_needed is set so the caller
 * retransmits until kdp_exception_ack() sees a matching sequence number.
 */
void
kdp_exception(
	unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	/* Local copy guarantees alignment; 'pkt' may be arbitrarily aligned. */
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	/*
	 * NOTE(review): hdr.len was already initialized to include one
	 * kdp_exc_info_t above, and this adds n_exc_info more — it looks
	 * like the exc_info record is counted twice.  This matches the
	 * historical wire behavior, so do not change it without checking
	 * the KDP client side.
	 */
	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	/* Require an ack before the debugger proceeds. */
	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}
107
108 boolean_t
kdp_exception_ack(unsigned char * pkt,int len)109 kdp_exception_ack(unsigned char * pkt, int len)
110 {
111 kdp_exception_ack_t aligned_pkt;
112 kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;
113
114 if ((unsigned)len < sizeof(*rq)) {
115 return FALSE;
116 }
117
118 bcopy((char *)pkt, (char *)rq, sizeof(*rq));
119
120 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
121 return FALSE;
122 }
123
124 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
125
126 if (rq->hdr.seq == kdp.exception_seq) {
127 kdp.exception_ack_needed = FALSE;
128 kdp.exception_seq++;
129 }
130 return TRUE;
131 }
132
/*
 * Copy the debugger-saved register state (kdp.saved_state) into 'out_state'
 * in ARM_THREAD_STATE64 layout, for a KDP read-registers request.
 * out_state must be at least sizeof(struct arm_thread_state64) bytes.
 */
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	arm_saved_state_t *saved_state;

	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	/* Zero first so any fields not populated by the conversion are 0. */
	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}
152
153 kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu,unsigned int flavor,char * data,int * size)154 kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
155 {
156 switch (flavor) {
157 #if defined(__arm64__)
158 case ARM_THREAD_STATE64:
159 dprintf(("kdp_readregs THREAD_STATE64\n"));
160 kdp_getintegerstate(data);
161 *size = ARM_THREAD_STATE64_COUNT * sizeof(int);
162 return KDPERR_NO_ERROR;
163 #endif
164
165 case ARM_VFP_STATE:
166 dprintf(("kdp_readregs THREAD_FPSTATE\n"));
167 bzero((char *) data, sizeof(struct arm_vfp_state));
168 *size = ARM_VFP_STATE_COUNT * sizeof(int);
169 return KDPERR_NO_ERROR;
170
171 default:
172 dprintf(("kdp_readregs bad flavor %d\n"));
173 return KDPERR_BADFLAVOR;
174 }
175 }
176
/*
 * Apply an ARM_THREAD_STATE64 image from 'state_in' (a KDP write-registers
 * request) onto the debugger-saved register state (kdp.saved_state).
 */
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	struct arm_saved_state *saved_state;

	/* Copy to an aligned local; state_in may be arbitrarily aligned. */
	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	thread_state64_to_saved_state(&thread_state64, saved_state);
	/* The debugger is trusted: write CPSR verbatim, bypassing the
	 * sanitization that thread_state64_to_saved_state applies. */
	set_saved_state_cpsr(saved_state, thread_state64.cpsr); /* override CPSR sanitization */
#else
#error Unknown architecture.
#endif
}
194
195 kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu,unsigned int flavor,char * data,__unused int * size)196 kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
197 {
198 switch (flavor) {
199 #if defined(__arm64__)
200 case ARM_THREAD_STATE64:
201 dprintf(("kdp_writeregs THREAD_STATE64\n"));
202 kdp_setintegerstate(data);
203 return KDPERR_NO_ERROR;
204 #endif
205
206 case ARM_VFP_STATE:
207 dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
208 return KDPERR_NO_ERROR;
209
210 default:
211 dprintf(("kdp_writeregs bad flavor %d\n"));
212 return KDPERR_BADFLAVOR;
213 }
214 }
215
216 void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)217 kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
218 {
219 hostinfo->cpus_mask = 1;
220 hostinfo->cpu_type = slot_type(0);
221 hostinfo->cpu_subtype = slot_subtype(0);
222 }
223
/*
 * Fatal error inside the KDP machinery: print a "kdp panic: "-prefixed
 * message to the console and spin forever.  Never returns — at this point
 * the debugger itself has failed, so there is nowhere safe to go.
 */
__attribute__((noreturn))
void
kdp_panic(const char * fmt, ...)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	char prefixed_fmt[256];
	va_list ap;

	va_start(ap, fmt);
	(void) snprintf(prefixed_fmt, sizeof(prefixed_fmt), "kdp panic: %s", fmt);
	vprintf(prefixed_fmt, ap);
	va_end(ap);

	/* Spin; this function must not return. */
	for (;;) {
	}
#pragma clang diagnostic pop
}
243
/*
 * Raise the interrupt priority level to block interrupts while the debugger
 * runs.  Returns the previous level for kdp_intr_enbl() to restore.
 */
int
kdp_intr_disbl(void)
{
	int previous_spl = splhigh();

	return previous_spl;
}
249
/*
 * Restore the interrupt priority level previously returned by
 * kdp_intr_disbl().
 */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
255
/*
 * Busy-wait used by KDP for short pauses.
 * NOTE(review): the argument is divided by 100 before being handed to
 * delay() — presumably delay() ticks are coarser than microseconds here;
 * confirm against delay()'s units before relying on the exact duration.
 */
void
kdp_us_spin(int usec)
{
	int delay_units = usec / 100;

	delay(delay_units);
}
261
/* Programmatic entry into the debugger (e.g. from machine_startup). */
void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}
267
/*
 * Non-blocking console read for the debugger; returns the character read,
 * or console_try_read_char()'s no-data indication.
 */
int
kdp_getc(void)
{
	int ch = console_try_read_char();

	return ch;
}
273
/*
 * Return the breakpoint instruction the KDP client should plant, and its
 * size in bytes.
 * NOTE(review): the store assumes 'bytes' is 4-byte aligned (or that the
 * platform tolerates the unaligned 32-bit store) — confirm against callers.
 */
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
	*(uint32_t *)bytes = GDB_TRAP_INSTR1;
	*size = sizeof(uint32_t);
}
280
/* Cache synchronization hook for KDP; intentionally a no-op on this platform. */
void
kdp_sync_cache(void)
{
}
285
286 int
kdp_machine_ioport_read(kdp_readioport_req_t * rq,caddr_t data,uint16_t lcpu)287 kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
288 {
289 #pragma unused(rq, data, lcpu)
290 return 0;
291 }
292
293 int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq,caddr_t data,uint16_t lcpu)294 kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
295 {
296 #pragma unused(rq, data, lcpu)
297 return 0;
298 }
299
300 int
kdp_machine_msr64_read(kdp_readmsr64_req_t * rq,caddr_t data,uint16_t lcpu)301 kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
302 {
303 #pragma unused(rq, data, lcpu)
304 return 0;
305 }
306
307 int
kdp_machine_msr64_write(kdp_writemsr64_req_t * rq,caddr_t data,uint16_t lcpu)308 kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
309 {
310 #pragma unused(rq, data, lcpu)
311 return 0;
312 }
313 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
314
/*
 * Trap handler entry into the debugger.  Runs the debugger via
 * handle_debugger_trap(), then — if the faulting instruction was one of
 * the GDB trap encodings — advances the saved PC past it so execution
 * resumes after the trap on continue.
 */
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm64__)
	assert(is_saved_state64(saved_state));

	/* Read the instruction at the saved PC (assumed mapped: we just
	 * faulted executing it). */
	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we are using the arm32 trap encoding to handle
	 * traps to the debugger, we should identify both variants and
	 * increment for both of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
		add_saved_state_pc(saved_state, 4);
	}
#else
#error Unknown architecture.
#endif
}
337
/* Byte offset of the saved link register within a stack frame record. */
#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));
348
/*
 * @function _was_in_userspace
 *
 * @abstract Unused function used to indicate that a CPU was in userspace
 * before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called; its address
 * is used as a sentinel PC value in backtraces (see machine_trace_thread64).
 */
void __attribute__((__noreturn__))
_was_in_userspace(void)
{
	panic("%s: should not have been invoked.", __FUNCTION__);
}
362
/*
 * Walk a thread's kernel stack via the arm64 frame-pointer chain, writing
 * up to 'nframes' unslid return addresses as uint64_t values starting at
 * 'tracepos' (bounded by 'tracebound').
 *
 * Returns the number of bytes written into the trace buffer.  On a failed
 * KVA validation, kThreadTruncatedBT is OR'd into *thread_trace_flags (if
 * non-NULL) and the walk stops.
 *
 * The arm64 frame record layout is: [fp+0] = caller's fp,
 * [fp + ARM64_LR_OFFSET] = saved lr.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
#if defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = sizeof(uint64_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	vm_offset_t pc = 0;
	vm_offset_t fp = 0;
	vm_offset_t sp = 0;
	vm_offset_t prevfp = 0;
	uint64_t prevlr = 0;
	vm_offset_t kern_virt_addr = 0;

	/* Clamp nframes so the output can never overrun tracebound. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	/* Prefer the live saved state (thread interrupted into the debugger);
	 * otherwise fall back to the state saved at context switch. */
	struct arm_saved_state *state = thread->machine.kpcb;
	if (state != NULL) {
		fp = state->ss_64.fp;

		prevlr = state->ss_64.lr;
		pc = state->ss_64.pc;
		sp = state->ss_64.sp;
	} else {
		/* kstackptr may not always be there, so recompute it */
		arm_kernel_saved_state_t *kstate = &thread_get_kernel_state(thread)->machine.ss;

		fp = kstate->fp;
		prevlr = kstate->lr;
		/* Sentinel: report _was_in_userspace as the "PC" for a CPU that
		 * was in userspace.  NOTE(review): ptrauth_strip is used here
		 * outside the HAS_APPLE_PAC guard — presumably a no-op fallback
		 * exists on non-PAC builds; confirm. */
		pc = kstate->pc_was_in_userspace ? (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer) : 0;
		sp = kstate->sp;
	}

	stacklimit = VM_MAX_KERNEL_ADDRESS;
	stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;

	/* No state at all: nothing to trace. */
	if (!prevlr && !fp && !sp && !pc) {
		return 0;
	}

	prevlr = VM_KERNEL_UNSLIDE(prevlr);

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			bool switched_stacks = false;

			/*
			 * As a special case, sometimes we are backtracing out of an interrupt
			 * handler, and the stack jumps downward because of the memory allocation
			 * pattern during early boot due to KASLR.
			 */
			int cpu;
			int max_cpu = ml_get_max_cpu_number();

			/* Allow the jump if the previous frame sat on any CPU's
			 * interrupt or exception stack. */
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				cpu_data_t *target_cpu_datap;

				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
				if (target_cpu_datap == (cpu_data_t *)NULL) {
					continue;
				}

				if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
					switched_stacks = true;
					break;
				}
#if defined(__arm64__)
				if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
					switched_stacks = true;
					break;
				}
#endif
			}

			/**
			 * The stack could be "growing upwards" because this frame is
			 * stitching two different stacks together. There can be more than
			 * one non-XNU stack so if both frames are in non-XNU stacks but it
			 * looks like the stack is growing upward, then assume that we've
			 * switched from one non-XNU stack to another.
			 */
			if ((ml_addr_in_non_xnu_stack(prevfp) != ml_addr_in_non_xnu_stack(fp)) ||
			    (ml_addr_in_non_xnu_stack(prevfp) && ml_addr_in_non_xnu_stack(fp))) {
				switched_stacks = true;
			}

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = fp + ARM64_LR_OFFSET;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}

			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
		prevlr = VM_KERNEL_UNSLIDE(prevlr);

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = fp;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			fp = 0;
			break;
		}

		fp = *(uint64_t *)kern_virt_addr;
	}
	/* Bytes written = frames emitted * sizeof(uint64_t). */
	return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}
526
/*
 * Enter the debugger by executing a trap instruction directly.
 * NOTE(review): 0xe7ffdefe is the arm32 GDB trap encoding — presumably the
 * same value as GDB_TRAP_INSTR1; confirm they stay in sync.
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile (".long 0xe7ffdefe");
}
532