xref: /xnu-8020.121.3/osfmk/kdp/ml/x86_64/kdp_machdep.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <mach-o/loader.h>
38 #include <mach-o/nlist.h>
39 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
40 #include <kern/machine.h> /* for halt_all_cpus */
41 #include <libkern/OSAtomic.h>
42 
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <vm/vm_map.h>
46 #include <i386/pmap.h>
47 
48 #define KDP_TEST_HARNESS 0
49 #if KDP_TEST_HARNESS
50 #define dprintf(x) printf x
51 #else
52 #define dprintf(x)
53 #endif
54 
55 extern cpu_type_t cpuid_cputype(void);
56 extern cpu_subtype_t cpuid_cpusubtype(void);
57 
58 void            print_saved_state(void *);
59 void            kdp_call(void);
60 int             kdp_getc(void);
61 void            kdp_getstate(x86_thread_state64_t *);
62 void            kdp_setstate(x86_thread_state64_t *);
63 unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
64 int machine_trace_thread64(thread_t thread, char * tracepos, char * tracebound,
65     int nframes, uint32_t * thread_trace_flags);
66 
67 void
kdp_exception(unsigned char * pkt,int * len,unsigned short * remote_port,unsigned int exception,unsigned int code,unsigned int subcode)68 kdp_exception(
69 	unsigned char       *pkt,
70 	int *len,
71 	unsigned short      *remote_port,
72 	unsigned int        exception,
73 	unsigned int        code,
74 	unsigned int        subcode
75 	)
76 {
77 	kdp_exception_t     *rq = (kdp_exception_t *)pkt;
78 
79 	rq->hdr.request = KDP_EXCEPTION;
80 	rq->hdr.is_reply = 0;
81 	rq->hdr.seq = kdp.exception_seq;
82 	rq->hdr.key = 0;
83 	rq->hdr.len = sizeof(*rq);
84 
85 	rq->n_exc_info = 1;
86 	rq->exc_info[0].cpu = 0;
87 	rq->exc_info[0].exception = exception;
88 	rq->exc_info[0].code = code;
89 	rq->exc_info[0].subcode = subcode;
90 
91 	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);
92 
93 	bcopy((char *)rq, (char *)pkt, rq->hdr.len);
94 
95 	kdp.exception_ack_needed = TRUE;
96 
97 	*remote_port = kdp.exception_port;
98 	*len = rq->hdr.len;
99 }
100 
101 boolean_t
kdp_exception_ack(unsigned char * pkt,int len)102 kdp_exception_ack(
103 	unsigned char       *pkt,
104 	int                 len
105 	)
106 {
107 	kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
108 
109 	if (((unsigned int) len) < sizeof(*rq)) {
110 		return FALSE;
111 	}
112 
113 	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
114 		return FALSE;
115 	}
116 
117 	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
118 
119 	if (rq->hdr.seq == kdp.exception_seq) {
120 		kdp.exception_ack_needed = FALSE;
121 		kdp.exception_seq++;
122 	}
123 	return TRUE;
124 }
125 
126 void
kdp_getstate(x86_thread_state64_t * state)127 kdp_getstate(
128 	x86_thread_state64_t        *state
129 	)
130 {
131 	x86_saved_state64_t *saved_state;
132 
133 	saved_state = (x86_saved_state64_t *)kdp.saved_state;
134 
135 	state->rax = saved_state->rax;
136 	state->rbx = saved_state->rbx;
137 	state->rcx = saved_state->rcx;
138 	state->rdx = saved_state->rdx;
139 	state->rdi = saved_state->rdi;
140 	state->rsi = saved_state->rsi;
141 	state->rbp = saved_state->rbp;
142 
143 	state->r8  = saved_state->r8;
144 	state->r9  = saved_state->r9;
145 	state->r10 = saved_state->r10;
146 	state->r11 = saved_state->r11;
147 	state->r12 = saved_state->r12;
148 	state->r13 = saved_state->r13;
149 	state->r14 = saved_state->r14;
150 	state->r15 = saved_state->r15;
151 
152 	state->rsp = saved_state->isf.rsp;
153 	state->rflags = saved_state->isf.rflags;
154 	state->rip = saved_state->isf.rip;
155 
156 	state->cs = saved_state->isf.cs;
157 	state->fs = saved_state->fs;
158 	state->gs = saved_state->gs;
159 }
160 
161 
162 void
kdp_setstate(x86_thread_state64_t * state)163 kdp_setstate(
164 	x86_thread_state64_t        *state
165 	)
166 {
167 	x86_saved_state64_t         *saved_state;
168 
169 	saved_state = (x86_saved_state64_t *)kdp.saved_state;
170 	saved_state->rax = state->rax;
171 	saved_state->rbx = state->rbx;
172 	saved_state->rcx = state->rcx;
173 	saved_state->rdx = state->rdx;
174 	saved_state->rdi = state->rdi;
175 	saved_state->rsi = state->rsi;
176 	saved_state->rbp = state->rbp;
177 	saved_state->r8  = state->r8;
178 	saved_state->r9  = state->r9;
179 	saved_state->r10 = state->r10;
180 	saved_state->r11 = state->r11;
181 	saved_state->r12 = state->r12;
182 	saved_state->r13 = state->r13;
183 	saved_state->r14 = state->r14;
184 	saved_state->r15 = state->r15;
185 
186 	saved_state->isf.rflags = state->rflags;
187 	saved_state->isf.rsp = state->rsp;
188 	saved_state->isf.rip = state->rip;
189 
190 	saved_state->fs = (uint32_t)state->fs;
191 	saved_state->gs = (uint32_t)state->gs;
192 }
193 
194 
195 kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu,unsigned int flavor,char * data,int * size)196 kdp_machine_read_regs(
197 	__unused unsigned int cpu,
198 	unsigned int flavor,
199 	char *data,
200 	int *size
201 	)
202 {
203 	static x86_float_state64_t  null_fpstate;
204 
205 	switch (flavor) {
206 	case x86_THREAD_STATE64:
207 		dprintf(("kdp_readregs THREAD_STATE64\n"));
208 		kdp_getstate((x86_thread_state64_t *)data);
209 		*size = sizeof(x86_thread_state64_t);
210 		return KDPERR_NO_ERROR;
211 
212 	case x86_FLOAT_STATE64:
213 		dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
214 		*(x86_float_state64_t *)data = null_fpstate;
215 		*size = sizeof(x86_float_state64_t);
216 		return KDPERR_NO_ERROR;
217 
218 	default:
219 		dprintf(("kdp_readregs bad flavor %d\n", flavor));
220 		*size = 0;
221 		return KDPERR_BADFLAVOR;
222 	}
223 }
224 
225 kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu,unsigned int flavor,char * data,__unused int * size)226 kdp_machine_write_regs(
227 	__unused unsigned int cpu,
228 	unsigned int flavor,
229 	char *data,
230 	__unused int *size
231 	)
232 {
233 	switch (flavor) {
234 	case x86_THREAD_STATE64:
235 		dprintf(("kdp_writeregs THREAD_STATE64\n"));
236 		kdp_setstate((x86_thread_state64_t *)data);
237 		return KDPERR_NO_ERROR;
238 
239 	case x86_FLOAT_STATE64:
240 		dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
241 		return KDPERR_NO_ERROR;
242 
243 	default:
244 		dprintf(("kdp_writeregs bad flavor %d\n", flavor));
245 		return KDPERR_BADFLAVOR;
246 	}
247 }
248 
249 
250 
251 void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)252 kdp_machine_hostinfo(
253 	kdp_hostinfo_t *hostinfo
254 	)
255 {
256 	int                 i;
257 
258 	hostinfo->cpus_mask = 0;
259 
260 	for (i = 0; i < machine_info.max_cpus; i++) {
261 		if (cpu_data_ptr[i] == NULL) {
262 			continue;
263 		}
264 
265 		hostinfo->cpus_mask |= (1 << i);
266 	}
267 
268 	hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
269 	hostinfo->cpu_subtype = cpuid_cpusubtype();
270 }
271 
272 void
kdp_panic(const char * fmt,...)273 kdp_panic(
274 	const char          *fmt,
275 	...
276 	)
277 {
278 #pragma clang diagnostic push
279 #pragma clang diagnostic ignored "-Wformat-nonliteral"
280 	char kdp_fmt[256];
281 	va_list args;
282 
283 	va_start(args, fmt);
284 	(void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt);
285 	vprintf(kdp_fmt, args);
286 	va_end(args);
287 
288 	__asm__ volatile ("hlt");
289 #pragma clang diagnostic pop
290 }
291 
/*
 * Raise the interrupt priority level to splhigh and return the previous
 * level, suitable for handing back to kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
	return splhigh();
}
297 
/*
 * Restore the interrupt priority level previously returned by
 * kdp_intr_disbl().
 */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
303 
/*
 * Poll the console for a character.  Non-blocking: presumably returns a
 * negative value when no character is pending — confirm against
 * console_try_read_char()'s definition.
 */
int
kdp_getc(void)
{
	return console_try_read_char();
}
309 
/*
 * Busy-wait on behalf of KDP.
 * NOTE(review): despite the parameter name, this passes usec/100 to
 * delay() — presumably a historical calibration of delay()'s units;
 * confirm before relying on microsecond accuracy.
 */
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}
315 
/*
 * Dump a few fields of a saved 64-bit trap state to the kernel log:
 * the faulting RIP, the fault address (CR2), and the address of the
 * saved-state structure itself.  The "rp" line is an unfinished
 * placeholder inherited from the original code.
 */
void
print_saved_state(void *state)
{
	x86_saved_state64_t         *saved_state;

	saved_state = state;

	kprintf("pc = 0x%llx\n", saved_state->isf.rip);
	kprintf("cr2= 0x%llx\n", saved_state->cr2);
	kprintf("rp = TODO FIXME\n");
	kprintf("sp = %p\n", saved_state);
}
328 
/*
 * Flush caches after the debugger patches instructions.  x86 keeps its
 * instruction cache coherent with memory writes, so nothing is needed.
 */
void
kdp_sync_cache(void)
{
	return; /* No op here. */
}
334 
/*
 * Enter the debugger by executing a breakpoint instruction; the
 * resulting T_INT3 trap is routed to kdp_i386_trap().
 */
void
kdp_call(void)
{
	__asm__ volatile ("int	$3");   /* Let the processor do the work */
}
340 
341 
/*
 * Classic x86 call-frame layout, as seen through a saved frame pointer:
 * the previous frame pointer, the return address, then any stack-passed
 * arguments.  (32-bit field widths — legacy layout.)
 */
typedef struct _cframe_t {
	struct _cframe_t    *prev;      /* caller's frame pointer */
	unsigned            caller;     /* return address */
	unsigned            args[0];    /* stack-passed arguments follow */
} cframe_t;
347 
/*
 * Kernel trap handler entry into KDP.  Translates an x86 trap number
 * (plus page-fault result/virtual address, when applicable) into a Mach
 * exception triple and hands control to the debugger core via
 * handle_debugger_trap().
 *
 * Returns TRUE if the trap was consumed by the debugger, FALSE if the
 * trap was unexpected and no debugger connection exists.
 *
 * The interrupt/preemption bookkeeping is order-sensitive: interrupts
 * are masked and preemption disabled around the debugger call, and an
 * extra preemption-disable is carried across single-step (EFL_TF)
 * traps — released at the top of the next step's trap, re-taken at the
 * bottom if stepping continues.
 */
boolean_t
kdp_i386_trap(
	unsigned int                trapno,
	x86_saved_state64_t *saved_state,
	kern_return_t       result,
	vm_offset_t         va
	)
{
	unsigned int exception, code, subcode = 0;
	boolean_t prev_interrupts_state;

	/* Only breakpoint and debug traps are expected here; anything else
	 * is logged, and rejected unless a debugger is already connected. */
	if (trapno != T_INT3 && trapno != T_DEBUG) {
		kprintf("Debugger: Unexpected kernel trap number: "
		    "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		    trapno, saved_state->isf.rip, saved_state->cr2);
		if (!kdp.is_conn) {
			return FALSE;
		}
	}

	prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* Single-stepping: drop the preemption-disable taken at the end of
	 * the previous step (see the matching disable_preemption() below). */
	if (saved_state->isf.rflags & EFL_TF) {
		enable_preemption_no_check();
	}

	/* Map the x86 trap number onto a Mach exception/code/subcode. */
	switch (trapno) {
	case T_DIVIDE_ERROR:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_DIVERR;
		break;

	case T_OVERFLOW:
		exception = EXC_SOFTWARE;
		code = EXC_I386_INTOFLT;
		break;

	case T_OUT_OF_BOUNDS:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_BOUNDFLT;
		break;

	case T_INVALID_OPCODE:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOPFLT;
		break;

	case T_SEGMENT_NOT_PRESENT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_STACK_FAULT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_GENERAL_PROTECTION:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_PAGE_FAULT:
		/* 'result' carries the VM fault result; 'va' the fault address. */
		exception = EXC_BAD_ACCESS;
		code = result;
		subcode = (unsigned int)va;
		break;

	case T_WATCHPOINT:
		exception = EXC_SOFTWARE;
		code = EXC_I386_ALIGNFLT;
		break;

	case T_DEBUG:
	case T_INT3:
		exception = EXC_BREAKPOINT;
		code = EXC_I386_BPTFLT;
		break;

	default:
		exception = EXC_BAD_INSTRUCTION;
		code = trapno;
		break;
	}

	/* If a fatal trap was recorded on this CPU, report that original
	 * state to the debugger and stash the current one for post-mortem. */
	if (current_cpu_datap()->cpu_fatal_trap_state) {
		current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
		saved_state = current_cpu_datap()->cpu_fatal_trap_state;
	}

	handle_debugger_trap(exception, code, subcode, saved_state);

	enable_preemption();
	ml_set_interrupts_enabled(prev_interrupts_state);

	/* If the instruction single step bit is set, disable kernel preemption
	 */
	if (saved_state->isf.rflags & EFL_TF) {
		disable_preemption();
	}

	return TRUE;
}
455 
/*
 * Report the machine's breakpoint instruction to the debugger core:
 * on x86 this is the single-byte INT3 opcode, 0xCC.
 */
void
kdp_machine_get_breakinsn(
	uint8_t *bytes,
	uint32_t *size
	)
{
	*size = 1;
	bytes[0] = 0xcc;
}
465 
466 #define RETURN_OFFSET64 8
467 /* Routine to encapsulate the 64-bit address read hack*/
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	/* Thin wrapper over kdp_machine_vm_read(): copy 'len' bytes from
	 * the 64-bit source address into 'dstaddr'.  Presumably returns the
	 * number of bytes actually read — confirm against
	 * kdp_machine_vm_read()'s definition. */
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
473 
/*
 * Walk a thread's 64-bit kernel stack by following saved frame
 * pointers, writing up to 'nframes' (unslid) return addresses into
 * [tracepos, tracebound).  Returns the number of bytes written to the
 * trace buffer.  Sets kThreadTruncatedBT in *thread_trace_flags (if
 * non-NULL) when the walk stops because a frame address failed KVA
 * validation.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
	extern bool machine_trace_thread_validate_kva(vm_offset_t addr);

	uint64_t * tracebuf = (uint64_t *)tracepos;
	unsigned framesize  = sizeof(addr64_t);

	uint32_t fence             = 0;
	int framecount             = 0;
	addr64_t prev_rip          = 0;
	addr64_t prevsp            = 0;
	vm_offset_t kern_virt_addr = 0;

	/* Clamp the frame count to what fits in the output buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	/* Seed the walk from the suspended thread's saved RBP/RIP. */
	addr64_t stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
	prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
	prev_rip = VM_KERNEL_UNSLIDE(prev_rip);

	for (framecount = 0; framecount < nframes; framecount++) {
		*tracebuf++ = prev_rip;

		/* End of chain (NULL or fence value). */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Frame pointers must be 8-byte aligned. */
		if (stackptr & 0x0000007) {
			break;
		}
		/* Frames must move strictly up the stack, or we could loop. */
		if (stackptr <= prevsp) {
			break;
		}

		/* Return address sits just above the saved frame pointer;
		 * validate before dereferencing. */
		kern_virt_addr = stackptr + RETURN_OFFSET64;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_rip = VM_KERNEL_UNSLIDE(*(uint64_t *)kern_virt_addr);
		prevsp = stackptr;

		/* Follow the saved frame pointer to the caller's frame,
		 * again validating before the read. */
		kern_virt_addr = stackptr;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}
		stackptr = *(uint64_t *)kern_virt_addr;
	}

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
536 
/*
 * Trap into the debugger via a breakpoint exception (INT3); the
 * resulting T_INT3 trap is handled by kdp_i386_trap().
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__ ("int3");
}
542