xref: /xnu-11417.121.6/osfmk/kdp/ml/x86_64/kdp_machdep.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <mach-o/loader.h>
38 #include <mach-o/nlist.h>
39 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
40 #include <kern/machine.h> /* for halt_all_cpus */
41 #include <libkern/OSAtomic.h>
42 
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <i386/trap_internal.h>
46 #include <vm/vm_map.h>
47 #include <i386/pmap.h>
48 
49 #define KDP_TEST_HARNESS 0
50 #if KDP_TEST_HARNESS
51 #define dprintf(x) printf x
52 #else
53 #define dprintf(x)
54 #endif
55 
56 extern cpu_type_t cpuid_cputype(void);
57 extern cpu_subtype_t cpuid_cpusubtype(void);
58 
59 void            print_saved_state(void *);
60 void            kdp_call(void);
61 int             kdp_getc(void);
62 void            kdp_getstate(x86_thread_state64_t *);
63 void            kdp_setstate(x86_thread_state64_t *);
64 unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
65 int machine_trace_thread64(thread_t thread, char * tracepos, char * tracebound,
66     int nframes, uint32_t * thread_trace_flags);
67 
68 void
kdp_exception(unsigned char * pkt,int * len,unsigned short * remote_port,unsigned int exception,unsigned int code,unsigned int subcode)69 kdp_exception(
70 	unsigned char       *pkt,
71 	int *len,
72 	unsigned short      *remote_port,
73 	unsigned int        exception,
74 	unsigned int        code,
75 	unsigned int        subcode
76 	)
77 {
78 	kdp_exception_t     *rq = (kdp_exception_t *)pkt;
79 
80 	rq->hdr.request = KDP_EXCEPTION;
81 	rq->hdr.is_reply = 0;
82 	rq->hdr.seq = kdp.exception_seq;
83 	rq->hdr.key = 0;
84 	rq->hdr.len = sizeof(*rq);
85 
86 	rq->n_exc_info = 1;
87 	rq->exc_info[0].cpu = 0;
88 	rq->exc_info[0].exception = exception;
89 	rq->exc_info[0].code = code;
90 	rq->exc_info[0].subcode = subcode;
91 
92 	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);
93 
94 	bcopy((char *)rq, (char *)pkt, rq->hdr.len);
95 
96 	kdp.exception_ack_needed = TRUE;
97 
98 	*remote_port = kdp.exception_port;
99 	*len = rq->hdr.len;
100 }
101 
102 boolean_t
kdp_exception_ack(unsigned char * pkt,int len)103 kdp_exception_ack(
104 	unsigned char       *pkt,
105 	int                 len
106 	)
107 {
108 	kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
109 
110 	if (((unsigned int) len) < sizeof(*rq)) {
111 		return FALSE;
112 	}
113 
114 	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
115 		return FALSE;
116 	}
117 
118 	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
119 
120 	if (rq->hdr.seq == kdp.exception_seq) {
121 		kdp.exception_ack_needed = FALSE;
122 		kdp.exception_seq++;
123 	}
124 	return TRUE;
125 }
126 
127 void
kdp_getstate(x86_thread_state64_t * state)128 kdp_getstate(
129 	x86_thread_state64_t        *state
130 	)
131 {
132 	x86_saved_state64_t *saved_state;
133 
134 	saved_state = (x86_saved_state64_t *)kdp.saved_state;
135 
136 	state->rax = saved_state->rax;
137 	state->rbx = saved_state->rbx;
138 	state->rcx = saved_state->rcx;
139 	state->rdx = saved_state->rdx;
140 	state->rdi = saved_state->rdi;
141 	state->rsi = saved_state->rsi;
142 	state->rbp = saved_state->rbp;
143 
144 	state->r8  = saved_state->r8;
145 	state->r9  = saved_state->r9;
146 	state->r10 = saved_state->r10;
147 	state->r11 = saved_state->r11;
148 	state->r12 = saved_state->r12;
149 	state->r13 = saved_state->r13;
150 	state->r14 = saved_state->r14;
151 	state->r15 = saved_state->r15;
152 
153 	state->rsp = saved_state->isf.rsp;
154 	state->rflags = saved_state->isf.rflags;
155 	state->rip = saved_state->isf.rip;
156 
157 	state->cs = saved_state->isf.cs;
158 	state->fs = saved_state->fs;
159 	state->gs = saved_state->gs;
160 }
161 
162 
163 void
kdp_setstate(x86_thread_state64_t * state)164 kdp_setstate(
165 	x86_thread_state64_t        *state
166 	)
167 {
168 	x86_saved_state64_t         *saved_state;
169 
170 	saved_state = (x86_saved_state64_t *)kdp.saved_state;
171 	saved_state->rax = state->rax;
172 	saved_state->rbx = state->rbx;
173 	saved_state->rcx = state->rcx;
174 	saved_state->rdx = state->rdx;
175 	saved_state->rdi = state->rdi;
176 	saved_state->rsi = state->rsi;
177 	saved_state->rbp = state->rbp;
178 	saved_state->r8  = state->r8;
179 	saved_state->r9  = state->r9;
180 	saved_state->r10 = state->r10;
181 	saved_state->r11 = state->r11;
182 	saved_state->r12 = state->r12;
183 	saved_state->r13 = state->r13;
184 	saved_state->r14 = state->r14;
185 	saved_state->r15 = state->r15;
186 
187 	saved_state->isf.rflags = state->rflags;
188 	saved_state->isf.rsp = state->rsp;
189 	saved_state->isf.rip = state->rip;
190 
191 	saved_state->fs = (uint32_t)state->fs;
192 	saved_state->gs = (uint32_t)state->gs;
193 }
194 
195 
196 kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu,unsigned int flavor,char * data,int * size)197 kdp_machine_read_regs(
198 	__unused unsigned int cpu,
199 	unsigned int flavor,
200 	char *data,
201 	int *size
202 	)
203 {
204 	static x86_float_state64_t  null_fpstate;
205 
206 	switch (flavor) {
207 	case x86_THREAD_STATE64:
208 		dprintf(("kdp_readregs THREAD_STATE64\n"));
209 		kdp_getstate((x86_thread_state64_t *)data);
210 		*size = sizeof(x86_thread_state64_t);
211 		return KDPERR_NO_ERROR;
212 
213 	case x86_FLOAT_STATE64:
214 		dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
215 		*(x86_float_state64_t *)data = null_fpstate;
216 		*size = sizeof(x86_float_state64_t);
217 		return KDPERR_NO_ERROR;
218 
219 	default:
220 		dprintf(("kdp_readregs bad flavor %d\n", flavor));
221 		*size = 0;
222 		return KDPERR_BADFLAVOR;
223 	}
224 }
225 
226 kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu,unsigned int flavor,char * data,__unused int * size)227 kdp_machine_write_regs(
228 	__unused unsigned int cpu,
229 	unsigned int flavor,
230 	char *data,
231 	__unused int *size
232 	)
233 {
234 	switch (flavor) {
235 	case x86_THREAD_STATE64:
236 		dprintf(("kdp_writeregs THREAD_STATE64\n"));
237 		kdp_setstate((x86_thread_state64_t *)data);
238 		return KDPERR_NO_ERROR;
239 
240 	case x86_FLOAT_STATE64:
241 		dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
242 		return KDPERR_NO_ERROR;
243 
244 	default:
245 		dprintf(("kdp_writeregs bad flavor %d\n", flavor));
246 		return KDPERR_BADFLAVOR;
247 	}
248 }
249 
250 
251 
252 void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)253 kdp_machine_hostinfo(
254 	kdp_hostinfo_t *hostinfo
255 	)
256 {
257 	int                 i;
258 
259 	hostinfo->cpus_mask = 0;
260 
261 	for (i = 0; i < machine_info.max_cpus; i++) {
262 		if (cpu_data_ptr[i] == NULL) {
263 			continue;
264 		}
265 
266 		hostinfo->cpus_mask |= (1 << i);
267 	}
268 
269 	hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
270 	hostinfo->cpu_subtype = cpuid_cpusubtype();
271 }
272 
void
kdp_panic(
	const char          *fmt,
	...
	)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	/*
	 * Prefix the caller's format string with "kdp panic: ", print it
	 * with the caller's arguments, then halt this CPU.
	 */
	va_list args;
	char prefixed_fmt[256];

	va_start(args, fmt);
	(void) snprintf(prefixed_fmt, sizeof(prefixed_fmt), "kdp panic: %s", fmt);
	vprintf(prefixed_fmt, args);
	va_end(args);

	/* Do not return: stop the processor. */
	__asm__ volatile ("hlt");
#pragma clang diagnostic pop
}
292 
int
kdp_intr_disbl(void)
{
	/* Raise to splhigh; the previous level is returned so the caller
	 * can hand it back to kdp_intr_enbl(). */
	int previous_level = splhigh();

	return previous_level;
}
298 
void
kdp_intr_enbl(int s)
{
	/* Restore the interrupt level previously saved by kdp_intr_disbl(). */
	splx(s);
}
304 
int
kdp_getc(void)
{
	/* Non-blocking console poll; see console_try_read_char() for the
	 * value returned when no character is pending. */
	return console_try_read_char();
}
310 
void
kdp_us_spin(int usec)
{
	/* NOTE(review): delays for usec/100, not usec — this scaling looks
	 * deliberate but is undocumented; confirm against callers before
	 * relying on the unit. */
	delay(usec / 100);
}
316 
317 void
print_saved_state(void * state)318 print_saved_state(void *state)
319 {
320 	x86_saved_state64_t         *saved_state;
321 
322 	saved_state = state;
323 
324 	kprintf("pc = 0x%llx\n", saved_state->isf.rip);
325 	kprintf("cr2= 0x%llx\n", saved_state->cr2);
326 	kprintf("rp = TODO FIXME\n");
327 	kprintf("sp = %p\n", saved_state);
328 }
329 
void
kdp_sync_cache(void)
{
	/* No op here. */
}
335 
/* Enter the debugger by raising a breakpoint trap on this CPU. */
void
kdp_call(void)
{
	__asm__ volatile ("int	$3");   /* Let the processor do the work */
}
341 
342 
/*
 * Legacy 32-bit call-frame layout.
 * NOTE(review): not referenced anywhere in the visible portion of this
 * file — possibly dead; confirm before removing.
 */
typedef struct _cframe_t {
	struct _cframe_t    *prev;      /* saved frame pointer of the caller */
	unsigned            caller;     /* presumably the return address — confirm */
	unsigned            args[0];    /* variable-length argument area */
} cframe_t;
348 
/*
 * Kernel trap handler entry into the debugger.  Maps the x86 trap number
 * to a Mach exception/code/subcode triple and hands control to
 * handle_debugger_trap() with interrupts disabled and preemption held.
 * Returns FALSE only for an unexpected trap with no debugger connected;
 * TRUE otherwise.
 */
boolean_t
kdp_i386_trap(
	unsigned int                trapno,
	x86_saved_state64_t *saved_state,
	kern_return_t       result,
	vm_offset_t         va
	)
{
	unsigned int exception, code, subcode = 0;
	boolean_t prev_interrupts_state;

	/*
	 * Only breakpoint (INT3) and debug traps are expected entry points;
	 * anything else is logged, and rejected unless a debugger is
	 * already connected.
	 */
	if (trapno != T_INT3 && trapno != T_DEBUG) {
		kprintf("Debugger: Unexpected kernel trap number: "
		    "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		    trapno, saved_state->isf.rip, saved_state->cr2);
		if (!kdp.is_conn) {
			return FALSE;
		}
	}

	prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/*
	 * NOTE(review): when the trap flag is set (single-step entry) the
	 * matching disable_preemption() happens at the bottom of this
	 * function instead, so the extra disable taken above is dropped
	 * here to keep the count balanced — confirm against the
	 * single-step resume path.
	 */
	if (saved_state->isf.rflags & EFL_TF) {
		enable_preemption_no_check();
	}

	/* Translate the hardware trap into a Mach exception triple. */
	switch (trapno) {
	case T_DIVIDE_ERROR:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_DIVERR;
		break;

	case T_OVERFLOW:
		exception = EXC_SOFTWARE;
		code = EXC_I386_INTOFLT;
		break;

	case T_OUT_OF_BOUNDS:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_BOUNDFLT;
		break;

	case T_INVALID_OPCODE:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOPFLT;
		break;

	case T_SEGMENT_NOT_PRESENT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_STACK_FAULT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_GENERAL_PROTECTION:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_PAGE_FAULT:
		exception = EXC_BAD_ACCESS;
		code = result;  /* kern_return_t from the fault handler */
		subcode = (unsigned int)va;     /* faulting virtual address */
		break;

	case T_WATCHPOINT:
		exception = EXC_SOFTWARE;
		code = EXC_I386_ALIGNFLT;
		break;

	case T_DEBUG:
	case T_INT3:
		exception = EXC_BREAKPOINT;
		code = EXC_I386_BPTFLT;
		break;

	default:
		exception = EXC_BAD_INSTRUCTION;
		code = trapno;
		break;
	}

	/*
	 * If this CPU recorded a fatal trap context, report that context
	 * to the debugger rather than the current (post-fatal) one, and
	 * preserve the current one for inspection.
	 */
	if (current_cpu_datap()->cpu_fatal_trap_state) {
		current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
		saved_state = current_cpu_datap()->cpu_fatal_trap_state;
	}

	handle_debugger_trap(exception, code, subcode, saved_state);

	enable_preemption();
	ml_set_interrupts_enabled(prev_interrupts_state);

	/* If the instruction single step bit is set, disable kernel preemption
	 */
	if (saved_state->isf.rflags & EFL_TF) {
		disable_preemption();
	}

	return TRUE;
}
456 
void
kdp_machine_get_breakinsn(
	uint8_t *bytes,
	uint32_t *size
	)
{
	/* The x86 breakpoint instruction is the one-byte INT3 opcode. */
	static const uint8_t int3_opcode = 0xcc;

	bytes[0] = int3_opcode;
	*size = sizeof(int3_opcode);
}
466 
467 #define RETURN_OFFSET64 8
468 /* Routine to encapsulate the 64-bit address read hack*/
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	/* Thin wrapper over the generic kdp VM read; presumably returns the
	 * number of bytes copied — see kdp_machine_vm_read(). */
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
474 
/*
 * Walk a thread's kernel stack via the saved frame-pointer chain, writing
 * one unslid 64-bit return address per frame into [tracepos, tracebound).
 * Returns the number of bytes written to tracepos.  If a frame cannot be
 * validated, kThreadTruncatedBT is OR-ed into *thread_trace_flags (when
 * non-NULL) and the walk stops.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
	extern bool machine_trace_thread_validate_kva(vm_offset_t addr);

	uint64_t * tracebuf = (uint64_t *)tracepos;
	unsigned framesize  = sizeof(addr64_t);

	uint32_t fence             = 0; /* sentinel frame-pointer value */
	int framecount             = 0;
	addr64_t prev_rip          = 0;
	addr64_t prevsp            = 0;
	vm_offset_t kern_virt_addr = 0;

	/* Clamp nframes to the number of 8-byte slots the buffer can hold. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	/* Seed the walk from the thread's saved kernel rbp and rip. */
	addr64_t stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
	prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
	prev_rip = VM_KERNEL_UNSLIDE(prev_rip);

	for (framecount = 0; framecount < nframes; framecount++) {
		*tracebuf++ = prev_rip;

		/* Stop on a NULL or fence frame pointer. */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Frame pointers must be 8-byte aligned. */
		if (stackptr & 0x0000007) {
			break;
		}
		/* Stacks grow down, so each frame pointer must be strictly
		 * greater than its predecessor; otherwise the chain loops. */
		if (stackptr <= prevsp) {
			break;
		}

		/* The return address sits just above the saved rbp. */
		kern_virt_addr = stackptr + RETURN_OFFSET64;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_rip = VM_KERNEL_UNSLIDE(*(uint64_t *)kern_virt_addr);
		prevsp = stackptr;

		/* Validate, then follow the saved rbp to the caller's frame. */
		kern_virt_addr = stackptr;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}
		stackptr = *(uint64_t *)kern_virt_addr;
	}

	/* Bytes consumed in the output buffer. */
	return (uint32_t) (((char *) tracebuf) - tracepos);
}
537 
void
kdp_ml_enter_debugger(void)
{
	/* Enter the debugger via a breakpoint trap on this CPU. */
	__asm__ __volatile__ ("int3");
}
543