xref: /xnu-11215.41.3/osfmk/kdp/ml/i386/kdp_x86_common.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <kern/hvg_hypercall.h>
32 #include <mach/mach_types.h>
33 #include <mach/vm_attributes.h>
34 #include <mach/vm_param.h>
35 #include <libsa/types.h>
36 
37 #include <vm/vm_map_xnu.h>
38 #include <i386/pmap.h>
39 #include <i386/pmap_internal.h> /* pmap_pde */
40 #include <i386/mp.h>
41 #include <i386/misc_protos.h>
42 #include <i386/pio.h>
43 #include <i386/proc_reg.h>
44 
45 #include <i386/pmap_internal.h>
46 
47 #include <kdp/kdp_internal.h>
48 #include <kdp/kdp_core.h>
49 #include <kdp/ml/i386/kdp_x86_common.h>
50 #include <mach/vm_map.h>
51 
52 #include <vm/vm_protos.h>
53 #include <vm/vm_kern_xnu.h>
54 
55 #include <machine/pal_routines.h>
56 #include <libkern/kernel_mach_header.h>
57 
58 // #define KDP_VM_READ_DEBUG 1
59 // #define KDP_VM_WRITE_DEBUG 1
60 
61 /*
62  * A (potentially valid) physical address is not a kernel address
63  * i.e. it's a user address.
64  */
65 #define IS_PHYS_ADDR(addr)              IS_USERADDR64_CANONICAL(addr)
66 
67 boolean_t kdp_read_io;
68 boolean_t kdp_trans_off;
69 
70 pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va);
71 
72 pmap_t kdp_pmap = 0;
73 
74 kdp_jtag_coredump_t kdp_jtag_coredump;
75 
76 pmap_paddr_t
kdp_vtophys(pmap_t pmap,vm_offset_t va)77 kdp_vtophys(
78 	pmap_t pmap,
79 	vm_offset_t va)
80 {
81 	pmap_paddr_t    pa;
82 
83 	pa = pmap_find_pa(pmap, va);
84 
85 	return pa;
86 }
87 
88 mach_vm_size_t
kdp_machine_vm_read(mach_vm_address_t src,caddr_t dst,mach_vm_size_t len)89 kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
90 {
91 	addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
92 	addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
93 	addr64_t cur_phys_dst, cur_phys_src;
94 	mach_vm_size_t resid = len;
95 	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
96 	pmap_t src_pmap = kernel_pmap;
97 
98 #ifdef KDP_VM_READ_DEBUG
99 	printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
100 #endif
101 
102 	if (kdp_trans_off && IS_PHYS_ADDR(src)) {
103 		kdp_readphysmem64_req_t rq;
104 		mach_vm_size_t ret;
105 
106 		rq.address = src;
107 		rq.nbytes = (uint32_t)len;
108 		ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
109 		return ret;
110 	}
111 
112 /* If a different pmap has been specified with kdp_pmap, use it to translate the
113  * source (cur_virt_src); otherwise, the source is translated using the
114  * kernel_pmap.
115  */
116 	if (kdp_pmap) {
117 		src_pmap = kdp_pmap;
118 	}
119 
120 	while (resid != 0) {
121 		if (!(cur_phys_src = kdp_vtophys(src_pmap,
122 		    cur_virt_src))) {
123 			goto exit;
124 		}
125 
126 /* Always translate the destination buffer using the kernel_pmap */
127 		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) {
128 			goto exit;
129 		}
130 
131 		/* Validate physical page numbers unless kdp_read_io is set */
132 		if (kdp_read_io == FALSE) {
133 			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) {
134 				goto exit;
135 			}
136 		}
137 
138 /* Get length left on page */
139 		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
140 		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
141 		if (cnt_src > cnt_dst) {
142 			cnt = cnt_dst;
143 		} else {
144 			cnt = cnt_src;
145 		}
146 		if (cnt > resid) {
147 			cnt = resid;
148 		}
149 
150 /* Do a physical copy */
151 		if (EFAULT == ml_copy_phys(cur_phys_src,
152 		    cur_phys_dst,
153 		    (vm_size_t)cnt)) {
154 			goto exit;
155 		}
156 		cur_virt_src += cnt;
157 		cur_virt_dst += cnt;
158 		resid -= cnt;
159 	}
160 exit:
161 	return len - resid;
162 }
163 
164 mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t * rq,caddr_t dst,uint16_t lcpu)165 kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
166     uint16_t lcpu)
167 {
168 	mach_vm_address_t src = rq->address;
169 	mach_vm_size_t    len = rq->nbytes;
170 
171 	addr64_t cur_virt_dst;
172 	addr64_t cur_phys_dst, cur_phys_src;
173 	mach_vm_size_t resid = len;
174 	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
175 
176 	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
177 		return (mach_vm_size_t)
178 		       kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst, 0);
179 	}
180 
181 #ifdef KDP_VM_READ_DEBUG
182 	printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
183 #endif
184 
185 	cur_virt_dst = (addr64_t)(intptr_t)dst;
186 	cur_phys_src = (addr64_t)src;
187 
188 	while (resid != 0) {
189 		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) {
190 			goto exit;
191 		}
192 
193 /* Get length left on page */
194 		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
195 		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
196 		if (cnt_src > cnt_dst) {
197 			cnt = cnt_dst;
198 		} else {
199 			cnt = cnt_src;
200 		}
201 		if (cnt > resid) {
202 			cnt = resid;
203 		}
204 
205 		/* Do a physical copy; use ml_copy_phys() in the event this is
206 		 * a short read with potential side effects.
207 		 */
208 		if (EFAULT == ml_copy_phys(cur_phys_src,
209 		    cur_phys_dst,
210 		    (vm_size_t)cnt)) {
211 			goto exit;
212 		}
213 		cur_phys_src += cnt;
214 		cur_virt_dst += cnt;
215 		resid -= cnt;
216 	}
217 exit:
218 	return len - resid;
219 }
220 
221 /*
222  *
223  */
224 mach_vm_size_t
kdp_machine_vm_write(caddr_t src,mach_vm_address_t dst,mach_vm_size_t len)225 kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
226 {
227 	addr64_t cur_virt_src, cur_virt_dst;
228 	addr64_t cur_phys_src, cur_phys_dst;
229 	unsigned resid, cnt, cnt_src, cnt_dst;
230 
231 #ifdef KDP_VM_WRITE_DEBUG
232 	printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
233 #endif
234 
235 	cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
236 	cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);
237 
238 	resid = (unsigned)len;
239 
240 	while (resid != 0) {
241 		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
242 			goto exit;
243 		}
244 
245 		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
246 			goto exit;
247 		}
248 
249 		/* Copy as many bytes as possible without crossing a page */
250 		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
251 		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
252 
253 		if (cnt_src > cnt_dst) {
254 			cnt = cnt_dst;
255 		} else {
256 			cnt = cnt_src;
257 		}
258 		if (cnt > resid) {
259 			cnt = resid;
260 		}
261 
262 		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) {
263 			goto exit;              /* Copy stuff over */
264 		}
265 		cur_virt_src += cnt;
266 		cur_virt_dst += cnt;
267 		resid -= cnt;
268 	}
269 exit:
270 	return len - resid;
271 }
272 
273 /*
274  *
275  */
276 mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t * rq,caddr_t src,uint16_t lcpu)277 kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
278     uint16_t lcpu)
279 {
280 	mach_vm_address_t dst = rq->address;
281 	mach_vm_size_t    len = rq->nbytes;
282 	addr64_t cur_virt_src;
283 	addr64_t cur_phys_src, cur_phys_dst;
284 	unsigned resid, cnt, cnt_src, cnt_dst;
285 
286 	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
287 		return (mach_vm_size_t)
288 		       kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src, 0);
289 	}
290 
291 #ifdef KDP_VM_WRITE_DEBUG
292 	printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
293 #endif
294 
295 	cur_virt_src = (addr64_t)(intptr_t)src;
296 	cur_phys_dst = (addr64_t)dst;
297 
298 	resid = (unsigned)len;
299 
300 	while (resid != 0) {
301 		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
302 			goto exit;
303 		}
304 
305 		/* Copy as many bytes as possible without crossing a page */
306 		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
307 		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
308 
309 		if (cnt_src > cnt_dst) {
310 			cnt = cnt_dst;
311 		} else {
312 			cnt = cnt_src;
313 		}
314 		if (cnt > resid) {
315 			cnt = resid;
316 		}
317 
318 		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) {
319 			goto exit;              /* Copy stuff over */
320 		}
321 		cur_virt_src += cnt;
322 		cur_phys_dst += cnt;
323 		resid -= cnt;
324 	}
325 
326 exit:
327 	return len - resid;
328 }
329 
330 #pragma clang diagnostic push
331 #pragma clang diagnostic ignored "-Wcast-function-type"
332 
333 int
kdp_machine_ioport_read(kdp_readioport_req_t * rq,caddr_t data,uint16_t lcpu)334 kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
335 {
336 	uint16_t addr = rq->address;
337 	uint16_t size = rq->nbytes;
338 
339 	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
340 		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data, 0);
341 	}
342 
343 	switch (size) {
344 	case 1:
345 		*((uint8_t *) data)  = inb(addr);
346 		break;
347 	case 2:
348 		*((uint16_t *) data) = inw(addr);
349 		break;
350 	case 4:
351 		*((uint32_t *) data) = inl(addr);
352 		break;
353 	default:
354 		return KDPERR_BADFLAVOR;
355 	}
356 
357 	return KDPERR_NO_ERROR;
358 }
359 
360 int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq,caddr_t data,uint16_t lcpu)361 kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
362 {
363 	uint16_t addr = rq->address;
364 	uint16_t size = rq->nbytes;
365 
366 	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
367 		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data, 0);
368 	}
369 
370 	switch (size) {
371 	case 1:
372 		outb(addr, *((uint8_t *) data));
373 		break;
374 	case 2:
375 		outw(addr, *((uint16_t *) data));
376 		break;
377 	case 4:
378 		outl(addr, *((uint32_t *) data));
379 		break;
380 	default:
381 		return KDPERR_BADFLAVOR;
382 	}
383 
384 	return KDPERR_NO_ERROR;
385 }
386 
387 int
kdp_machine_msr64_read(kdp_readmsr64_req_t * rq,caddr_t data,uint16_t lcpu)388 kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
389 {
390 	uint64_t *value = (uint64_t *) data;
391 	uint32_t msr    = rq->address;
392 
393 	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
394 		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data, 0);
395 	}
396 
397 	*value = rdmsr64(msr);
398 	return KDPERR_NO_ERROR;
399 }
400 
401 int
kdp_machine_msr64_write(kdp_writemsr64_req_t * rq,caddr_t data,uint16_t lcpu)402 kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
403 {
404 	uint64_t *value = (uint64_t *) data;
405 	uint32_t msr    = rq->address;
406 
407 	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
408 		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data, 0);
409 	}
410 
411 	wrmsr64(msr, *value);
412 	return KDPERR_NO_ERROR;
413 }
414 
415 #pragma clang diagnostic pop
416 
417 pt_entry_t *debugger_ptep;
418 vm_offset_t debugger_window_kva;
419 
420 /* Establish a pagetable window that can be remapped on demand.
421  * This is utilized by the debugger to address regions outside
422  * the physical map.
423  */
424 
425 void
kdp_map_debug_pagetable_window(void)426 kdp_map_debug_pagetable_window(void)
427 {
428 	kmem_alloc(kernel_map, &debugger_window_kva, PAGE_SIZE,
429 	    KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_PAGEABLE,
430 	    VM_KERN_MEMORY_OSFMK);
431 
432 	debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
433 
434 	if (debugger_ptep == NULL) {
435 		pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
436 		debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
437 	}
438 }
439 
440 /* initialize kdp_jtag_coredump with data needed for JTAG coredump extraction */
441 
442 void
kdp_jtag_coredump_init(void)443 kdp_jtag_coredump_init(void)
444 {
445 	kdp_jtag_coredump.version                   = (uint64_t) KDP_JTAG_COREDUMP_VERSION_1;
446 	kdp_jtag_coredump.kernel_map_start          = (uint64_t) kernel_map->min_offset;
447 	kdp_jtag_coredump.kernel_map_end            = (uint64_t) kernel_map->max_offset;
448 	kdp_jtag_coredump.kernel_pmap_pml4          = (uint64_t) kernel_pmap->pm_pml4;
449 	kdp_jtag_coredump.pmap_memory_regions       = (uint64_t) &pmap_memory_regions;
450 	kdp_jtag_coredump.pmap_memory_region_count  = (uint64_t) pmap_memory_region_count;
451 	kdp_jtag_coredump.pmap_memory_region_t_size = (uint64_t) sizeof(pmap_memory_region_t);
452 	kdp_jtag_coredump.physmap_base              = (uint64_t) &physmap_base;
453 
454 	/* update signature last so that JTAG can trust that structure has valid data */
455 	kdp_jtag_coredump.signature                 = (uint64_t) KDP_JTAG_COREDUMP_SIGNATURE;
456 }
457 
458 void
kdp_machine_init(void)459 kdp_machine_init(void)
460 {
461 	/*
462 	 * If the kernel is running on top of a hypervisor that supports AH#1, it will inform
463 	 * the hypervisor of its debugging info.
464 	 */
465 	if (hvg_is_hcall_available(HVG_HCALL_SET_COREDUMP_DATA)) {
466 		hvg_hcall_set_coredump_data();
467 	}
468 
469 	if (debug_boot_arg == 0) {
470 		return;
471 	}
472 
473 	kdp_map_debug_pagetable_window();
474 	kdp_jtag_coredump_init();
475 }
476