1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <kern/hvg_hypercall.h>
32 #include <mach/mach_types.h>
33 #include <mach/vm_attributes.h>
34 #include <mach/vm_param.h>
35 #include <libsa/types.h>
36
37 #include <vm/vm_map.h>
38 #include <i386/pmap.h>
39 #include <i386/pmap_internal.h> /* pmap_pde */
40 #include <i386/mp.h>
41 #include <i386/misc_protos.h>
42 #include <i386/pio.h>
43 #include <i386/proc_reg.h>
44
45 #include <i386/pmap_internal.h>
46
47 #include <kdp/kdp_internal.h>
48 #include <kdp/kdp_core.h>
49 #include <kdp/ml/i386/kdp_x86_common.h>
50 #include <mach/vm_map.h>
51
52 #include <vm/vm_protos.h>
53 #include <vm/vm_kern.h>
54
55 #include <machine/pal_routines.h>
56 #include <libkern/kernel_mach_header.h>
57
58 // #define KDP_VM_READ_DEBUG 1
59 // #define KDP_VM_WRITE_DEBUG 1
60
61 /*
62 * A (potentially valid) physical address is not a kernel address
63 * i.e. it'a a user address.
64 */
65 #define IS_PHYS_ADDR(addr) IS_USERADDR64_CANONICAL(addr)
66
/* Debugger-settable controls for KDP memory access. */
boolean_t kdp_read_io;   /* TRUE: skip physical-page validation in kdp_machine_vm_read (allows I/O space) */
boolean_t kdp_trans_off; /* TRUE: treat user-canonical source addresses as physical in kdp_machine_vm_read */

pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va);

/* Alternate pmap for translating read sources; NULL/0 means use kernel_pmap. */
pmap_t kdp_pmap = 0;

/* Filled in by kdp_jtag_coredump_init() for external JTAG coredump tools. */
kdp_jtag_coredump_t kdp_jtag_coredump;
75
76 pmap_paddr_t
kdp_vtophys(pmap_t pmap,vm_offset_t va)77 kdp_vtophys(
78 pmap_t pmap,
79 vm_offset_t va)
80 {
81 pmap_paddr_t pa;
82
83 pa = pmap_find_pa(pmap, va);
84
85 return pa;
86 }
87
/*
 * Debugger virtual-memory read: copy `len` bytes from virtual address
 * `src` into the kernel buffer `dst`, page by page via physical copies.
 * The source is translated through kdp_pmap when one has been set
 * (e.g. to inspect another address space), otherwise through
 * kernel_pmap.  Returns the number of bytes actually copied; a short
 * count indicates a failed translation, invalid page, or copy fault.
 */
mach_vm_size_t
kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
	addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
	pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	/* With translation off, a user-canonical source address is taken to
	 * be a physical address and routed to the physical-read path. */
	if (kdp_trans_off && IS_PHYS_ADDR(src)) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
		return ret;
	}

	/* If a different pmap has been specified with kdp_pmap, use it to translate the
	 * source (cur_virt_src); otherwise, the source is translated using the
	 * kernel_pmap.
	 */
	if (kdp_pmap) {
		src_pmap = kdp_pmap;
	}

	while (resid != 0) {
		/* Translate the source; stop (short read) on failure. */
		if (!(cur_phys_src = kdp_vtophys(src_pmap,
		    cur_virt_src))) {
			goto exit;
		}

		/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) {
			goto exit;
		}

		/* Validate physical page numbers unless kdp_read_io is set */
		if (kdp_read_io == FALSE) {
			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) {
				goto exit;
			}
		}

		/* Get length left on page; copy at most to the nearer of the
		 * two page boundaries, and never more than what remains. */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst) {
			cnt = cnt_dst;
		} else {
			cnt = cnt_src;
		}
		if (cnt > resid) {
			cnt = resid;
		}

		/* Do a physical copy */
		if (EFAULT == ml_copy_phys(cur_phys_src,
		    cur_phys_dst,
		    (vm_size_t)cnt)) {
			goto exit;
		}
		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return len - resid;
}
163
/*
 * Debugger physical-memory read: copy rq->nbytes bytes from physical
 * address rq->address into the kernel buffer `dst`.  When `lcpu` names
 * a logical CPU other than the current one, the entire request is
 * re-invoked on that CPU via kdp_x86_xcpu_invoke.  Returns the number
 * of bytes actually copied (short on translation failure or copy fault).
 */
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
    uint16_t lcpu)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

	/* Cross-CPU case: run this same function on the requested CPU. */
	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
		       kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst, 0);
	}

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	cur_virt_dst = (addr64_t)(intptr_t)dst;
	cur_phys_src = (addr64_t)src;

	while (resid != 0) {
		/* Only the destination (a kernel buffer) needs translation;
		 * the source is already physical. */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) {
			goto exit;
		}

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst) {
			cnt = cnt_dst;
		} else {
			cnt = cnt_src;
		}
		if (cnt > resid) {
			cnt = resid;
		}

		/* Do a physical copy; use ml_copy_phys() in the event this is
		 * a short read with potential side effects.
		 */
		if (EFAULT == ml_copy_phys(cur_phys_src,
		    cur_phys_dst,
		    (vm_size_t)cnt)) {
			goto exit;
		}
		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return len - resid;
}
220
221 /*
222 *
223 */
224 mach_vm_size_t
kdp_machine_vm_write(caddr_t src,mach_vm_address_t dst,mach_vm_size_t len)225 kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
226 {
227 addr64_t cur_virt_src, cur_virt_dst;
228 addr64_t cur_phys_src, cur_phys_dst;
229 unsigned resid, cnt, cnt_src, cnt_dst;
230
231 #ifdef KDP_VM_WRITE_DEBUG
232 printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
233 #endif
234
235 cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
236 cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);
237
238 resid = (unsigned)len;
239
240 while (resid != 0) {
241 if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
242 goto exit;
243 }
244
245 if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
246 goto exit;
247 }
248
249 /* Copy as many bytes as possible without crossing a page */
250 cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
251 cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
252
253 if (cnt_src > cnt_dst) {
254 cnt = cnt_dst;
255 } else {
256 cnt = cnt_src;
257 }
258 if (cnt > resid) {
259 cnt = resid;
260 }
261
262 if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) {
263 goto exit; /* Copy stuff over */
264 }
265 cur_virt_src += cnt;
266 cur_virt_dst += cnt;
267 resid -= cnt;
268 }
269 exit:
270 return len - resid;
271 }
272
273 /*
274 *
275 */
276 mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t * rq,caddr_t src,uint16_t lcpu)277 kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
278 uint16_t lcpu)
279 {
280 mach_vm_address_t dst = rq->address;
281 mach_vm_size_t len = rq->nbytes;
282 addr64_t cur_virt_src;
283 addr64_t cur_phys_src, cur_phys_dst;
284 unsigned resid, cnt, cnt_src, cnt_dst;
285
286 if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
287 return (mach_vm_size_t)
288 kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src, 0);
289 }
290
291 #ifdef KDP_VM_WRITE_DEBUG
292 printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
293 #endif
294
295 cur_virt_src = (addr64_t)(intptr_t)src;
296 cur_phys_dst = (addr64_t)dst;
297
298 resid = (unsigned)len;
299
300 while (resid != 0) {
301 if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
302 goto exit;
303 }
304
305 /* Copy as many bytes as possible without crossing a page */
306 cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
307 cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
308
309 if (cnt_src > cnt_dst) {
310 cnt = cnt_dst;
311 } else {
312 cnt = cnt_src;
313 }
314 if (cnt > resid) {
315 cnt = resid;
316 }
317
318 if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) {
319 goto exit; /* Copy stuff over */
320 }
321 cur_virt_src += cnt;
322 cur_phys_dst += cnt;
323 resid -= cnt;
324 }
325
326 exit:
327 return len - resid;
328 }
329
330 int
kdp_machine_ioport_read(kdp_readioport_req_t * rq,caddr_t data,uint16_t lcpu)331 kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
332 {
333 uint16_t addr = rq->address;
334 uint16_t size = rq->nbytes;
335
336 if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
337 return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data, 0);
338 }
339
340 switch (size) {
341 case 1:
342 *((uint8_t *) data) = inb(addr);
343 break;
344 case 2:
345 *((uint16_t *) data) = inw(addr);
346 break;
347 case 4:
348 *((uint32_t *) data) = inl(addr);
349 break;
350 default:
351 return KDPERR_BADFLAVOR;
352 }
353
354 return KDPERR_NO_ERROR;
355 }
356
357 int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq,caddr_t data,uint16_t lcpu)358 kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
359 {
360 uint16_t addr = rq->address;
361 uint16_t size = rq->nbytes;
362
363 if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
364 return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data, 0);
365 }
366
367 switch (size) {
368 case 1:
369 outb(addr, *((uint8_t *) data));
370 break;
371 case 2:
372 outw(addr, *((uint16_t *) data));
373 break;
374 case 4:
375 outl(addr, *((uint32_t *) data));
376 break;
377 default:
378 return KDPERR_BADFLAVOR;
379 }
380
381 return KDPERR_NO_ERROR;
382 }
383
384 int
kdp_machine_msr64_read(kdp_readmsr64_req_t * rq,caddr_t data,uint16_t lcpu)385 kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
386 {
387 uint64_t *value = (uint64_t *) data;
388 uint32_t msr = rq->address;
389
390 if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
391 return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data, 0);
392 }
393
394 *value = rdmsr64(msr);
395 return KDPERR_NO_ERROR;
396 }
397
398 int
kdp_machine_msr64_write(kdp_writemsr64_req_t * rq,caddr_t data,uint16_t lcpu)399 kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
400 {
401 uint64_t *value = (uint64_t *) data;
402 uint32_t msr = rq->address;
403
404 if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
405 return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data, 0);
406 }
407
408 wrmsr64(msr, *value);
409 return KDPERR_NO_ERROR;
410 }
411
pt_entry_t *debugger_ptep;          /* PTE backing the debugger window; retargeted on demand */
vm_map_offset_t debugger_window_kva; /* kernel VA of the one-page debugger window */

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */
419
/*
 * Reserve one page of kernel virtual address space and cache its page
 * table entry in debugger_ptep, so the debugger can later point that
 * PTE at arbitrary physical pages.  Panics if no VA space is available.
 */
void
kdp_map_debug_pagetable_window(void)
{
	vm_map_entry_t e;
	kern_return_t kr;

	/* Find a free one-page region of kernel VA for the window. */
	kr = vm_map_find_space(kernel_map,
	    &debugger_window_kva,
	    PAGE_SIZE, 0,
	    0,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_OSFMK,
	    &e);

	if (kr != KERN_SUCCESS) {
		panic("%s: vm_map_find_space failed with %d", __FUNCTION__, kr);
	}

	/* vm_map_find_space leaves the map locked on success; drop the lock. */
	vm_map_unlock(kernel_map);

	/* Cache the PTE for the window so the debugger can retarget it. */
	debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

	if (debugger_ptep == NULL) {
		/* No page table exists for this VA yet; create it and retry. */
		pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
		debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
	}
}
447
/* initialize kdp_jtag_coredump with data needed for JTAG coredump extraction */

void
kdp_jtag_coredump_init(void)
{
	/* Record the kernel VM layout and pmap bookkeeping an external
	 * JTAG tool needs to walk memory in a coredump. */
	kdp_jtag_coredump.version = (uint64_t) KDP_JTAG_COREDUMP_VERSION_1;
	kdp_jtag_coredump.kernel_map_start = (uint64_t) kernel_map->min_offset;
	kdp_jtag_coredump.kernel_map_end = (uint64_t) kernel_map->max_offset;
	kdp_jtag_coredump.kernel_pmap_pml4 = (uint64_t) kernel_pmap->pm_pml4;
	kdp_jtag_coredump.pmap_memory_regions = (uint64_t) &pmap_memory_regions;
	kdp_jtag_coredump.pmap_memory_region_count = (uint64_t) pmap_memory_region_count;
	kdp_jtag_coredump.pmap_memory_region_t_size = (uint64_t) sizeof(pmap_memory_region_t);
	kdp_jtag_coredump.physmap_base = (uint64_t) &physmap_base;

	/* update signature last so that JTAG can trust that structure has valid data */
	kdp_jtag_coredump.signature = (uint64_t) KDP_JTAG_COREDUMP_SIGNATURE;
}
465
466 void
kdp_machine_init(void)467 kdp_machine_init(void)
468 {
469 /*
470 * If the kernel is running on top of a hypervisor that supports AH#1, it will inform
471 * the hypervisor of its debugging info.
472 */
473 hvg_hcall_set_coredump_data();
474
475 if (debug_boot_arg == 0) {
476 return;
477 }
478
479 kdp_map_debug_pagetable_window();
480 kdp_jtag_coredump_init();
481 }
482