/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <vm/pmap.h>

#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_object_xnu.h>
#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>
#include <arm/misc_protos.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>

pmap_t kdp_pmap = 0;
boolean_t kdp_trans_off;
boolean_t kdp_read_io = 0;

pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va);

/*
 * kdp_vtophys
 *
 * Translate va within the given pmap to a physical address.
 * Returns 0 if there is no pmap, the address lies outside the
 * pmap's range, or no valid translation exists.
 */
pmap_paddr_t
kdp_vtophys(
	pmap_t pmap,
	vm_offset_t va)
{
	pmap_paddr_t pa;

#if HAS_MTE
	/* Strip any non-valid VA bits */
	if (pmap) {
		va = pmap_strip_addr(pmap, va);
	}
#endif /* HAS_MTE */

	/* Ensure that the provided va resides within the provided pmap range. */
	if (!pmap || ((pmap != kernel_pmap) && ((va < pmap->min) || (va >= pmap->max)))) {
#ifdef KDP_VTOPHYS_DEBUG
		printf("kdp_vtophys(%08x, %016lx) not in range %08x .. %08x\n", (unsigned int) pmap,
		    (unsigned long) va,
		    (unsigned int) (pmap ? pmap->min : 0),
		    (unsigned int) (pmap ? pmap->max : 0));
#endif
		return 0;       /* Just return if no translation */
	}

	pa = pmap_find_pa(pmap, va);    /* Get the physical address */
	return pa;
}

/*
 * kdp_machine_vm_read
 *
 * Verify that src is valid, and physically copy len bytes from src to
 * dst, translating if necessary. If translation is enabled
 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
 * when translating src.
 */

mach_vm_size_t
kdp_machine_vm_read(mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	mach_vm_size_t resid, cnt;
	pmap_t pmap;

#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_machine_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *) src)[0], ((unsigned long *) src)[1]);
#endif

	cur_virt_src = (addr64_t) src;
	cur_virt_dst = (addr64_t) dst;

	if (kdp_trans_off) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, 0 /* unused */);
		return ret;
	} else {
		resid = len;

		if (kdp_pmap) {
			pmap = kdp_pmap;        /* If special pmap, use it */
		} else {
			pmap = kernel_pmap;     /* otherwise, use kernel's */
		}
		while (resid != 0) {
			/*
			 * Always translate the destination using the
			 * kernel_pmap.
			 */
			if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
				goto exit;
			}

			if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0) {
				goto exit;
			}

			/* Attempt to ensure that there are valid translations for src and dst. */
			if (!kdp_read_io && ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src)))) {
				goto exit;
			}

			cnt = ARM_PGBYTES - (cur_virt_src & PAGE_MASK);         /* Get length left on page */
			if (cnt > (ARM_PGBYTES - (cur_virt_dst & PAGE_MASK))) {
				cnt = ARM_PGBYTES - (cur_virt_dst & PAGE_MASK);
			}

			if (cnt > resid) {
				cnt = resid;
			}

#ifdef KDP_VM_READ_DEBUG
			kprintf("kdp_machine_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
			    pmap, cur_virt_src, cur_phys_src);
#endif
			bcopy_phys(cur_phys_src, cur_phys_dst, cnt);

			cur_virt_src += cnt;
			cur_virt_dst += cnt;
			resid -= cnt;
		}
	}
exit:
#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_machine_vm_read: ret %08X\n", len - resid);
#endif
	return len - resid;
}

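/*
 * kdp_machine_phys_read
 *
 * Copy rq->nbytes bytes from the physical address rq->address to the
 * kernel-virtual buffer dst, a page at a time, translating dst through
 * the kernel_pmap. Returns the number of bytes actually copied.
 */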
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst, uint16_t lcpu __unused)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_phys_read src %x dst %p len %x\n", src, dst, len);
#endif

	cur_virt_dst = (addr64_t) dst;
	cur_phys_src = (addr64_t) src;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
			goto exit;
		}

		/* Get length left on page */

		cnt_src = ARM_PGBYTES - (cur_phys_src & PAGE_MASK);
		cnt_dst = ARM_PGBYTES - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst) {
			cnt = cnt_dst;
		} else {
			cnt = cnt_src;
		}
		if (cnt > resid) {
			cnt = resid;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */
		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}

exit:
	return len - resid;
}

/*
 * kdp_machine_vm_write
 *
 * Copy len bytes from the kernel-virtual buffer src to the
 * kernel-virtual address dst, translating both through the
 * kernel_pmap and maintaining the caches for the written range.
 */
mach_vm_size_t
kdp_machine_vm_write(caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	mach_vm_size_t resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *) src)[0], ((unsigned long *) src)[1]);
#endif

	cur_virt_src = (addr64_t) src;
	cur_virt_dst = (addr64_t) dst;

	resid = len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
			goto exit;
		}

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
			goto exit;
		}

		/* Attempt to ensure that there are valid translations for src and dst. */
		/* No support for enabling writes for an invalid translation at the moment. */
		if ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src))) {
			goto exit;
		}

		cnt_src = ((cur_phys_src + ARM_PGBYTES) & (-ARM_PGBYTES)) - cur_phys_src;
		cnt_dst = ((cur_phys_dst + ARM_PGBYTES) & (-ARM_PGBYTES)) - cur_phys_dst;

		if (cnt_src > cnt_dst) {
			cnt = cnt_dst;
		} else {
			cnt = cnt_src;
		}
		if (cnt > resid) {
			cnt = resid;
		}

#ifdef KDP_VM_WRITE_DEBUG
		printf("kdp_vm_write: cur_phys_src %llx cur_phys_dst %llx cnt %llx\n",
		    (unsigned long long)cur_phys_src, (unsigned long long)cur_phys_dst, (unsigned long long)cnt);
#endif
		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */
		flush_dcache64(cur_phys_dst, (unsigned int)cnt, TRUE);
		invalidate_icache64(cur_phys_dst, (unsigned int)cnt, TRUE);

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return len - resid;
}

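/*
 * kdp_machine_phys_write
 *
 * Physical memory writes are not supported here; the request is
 * ignored and 0 bytes are reported as written.
 */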
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq __unused, caddr_t src __unused,
    uint16_t lcpu __unused)
{
	return 0;       /* unimplemented */
}

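/*
 * kern_collectth_state_size
 *
 * Report how many LC_THREAD entries the kernel core file will contain
 * (one per possible CPU) and the size of each entry: a thread_command
 * followed by a single flavor header and its thread state words.
 */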
void
kern_collectth_state_size(uint64_t * tstate_count, uint64_t * tstate_size)
{
	uint64_t count = ml_get_max_cpu_number() + 1;

	*tstate_count = count;
	*tstate_size = sizeof(struct thread_command)
	    + (sizeof(arm_state_hdr_t)
#if defined(__arm64__)
	    + ARM_THREAD_STATE64_COUNT * sizeof(uint32_t));
#else
	    + ARM_THREAD_STATE32_COUNT * sizeof(uint32_t));
#endif
}

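/*
 * kern_collectth_state
 *
 * Fill one LC_THREAD command in the core file with the register state
 * of a single CPU. Each call consumes one entry of CpuDataEntries via
 * *iter; the thread argument is unused on this architecture.
 */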
void
kern_collectth_state(thread_t thread __unused, void *buffer, uint64_t size, void ** iter)
{
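	/* A NULL iterator means start with CPU 0; otherwise it points at the next CPU's entry. */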
	cpu_data_entry_t *cpuentryp = *iter;
	if (cpuentryp == NULL) {
		cpuentryp = &CpuDataEntries[0];
	}

	if (cpuentryp == &CpuDataEntries[ml_get_max_cpu_number()]) {
		*iter = NULL;
	} else {
		*iter = cpuentryp + 1;
	}

	struct cpu_data *cpudatap = cpuentryp->cpu_data_vaddr;

	struct thread_command *tc = (struct thread_command *)buffer;
	arm_state_hdr_t *hdr = (arm_state_hdr_t *)(void *)(tc + 1);
#if defined(__arm64__)
	hdr->flavor = ARM_THREAD_STATE64;
	hdr->count = ARM_THREAD_STATE64_COUNT;
	arm_thread_state64_t *state = (arm_thread_state64_t *)(void *)(hdr + 1);
#else
	hdr->flavor = ARM_THREAD_STATE;
	hdr->count = ARM_THREAD_STATE_COUNT;
	arm_thread_state_t *state = (arm_thread_state_t *)(void *)(hdr + 1);
#endif

	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t) size;

	if ((cpudatap != NULL) && (cpudatap->halt_status == CPU_HALTED_WITH_STATE)) {
		*state = cpudatap->halt_state;
		return;
	}

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpudatap);
	if ((cpudatap == NULL) || (processor->active_thread == NULL)) {
		bzero(state, hdr->count * sizeof(uint32_t));
		return;
	}

#if defined(__arm64__)
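	/*
	 * Prefer the full saved state captured at exception entry (kpcb) when it
	 * is available; otherwise reconstruct a minimal state from the kernel
	 * stack save area of the CPU's active thread.
	 */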
	void *kpcb = processor->active_thread->machine.kpcb;
	if (kpcb != NULL) {
		arm_saved_state_t *saved_state = (arm_saved_state_t *)kpcb;

		state->fp = saved_state->ss_64.fp;
		state->lr = saved_state->ss_64.lr;
		state->sp = saved_state->ss_64.sp;
		state->pc = saved_state->ss_64.pc;
		state->cpsr = saved_state->ss_64.cpsr;
		bcopy(&saved_state->ss_64.x[0], &state->x[0], sizeof(state->x));
	} else {
		vm_offset_t kstackptr = (vm_offset_t) processor->active_thread->machine.kstackptr;
		arm_kernel_saved_state_t *saved_state = (arm_kernel_saved_state_t *) kstackptr;

		state->fp = saved_state->fp;
		state->lr = saved_state->lr;
		state->sp = saved_state->sp;
		state->pc = saved_state->pc_was_in_userspace ? (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer) : 0;
		state->cpsr = PSR64_KERNEL_DEFAULT;
	}

#else /* __arm64__ */
	vm_offset_t kstackptr = (vm_offset_t) processor->active_thread->machine.kstackptr;
	arm_saved_state_t *saved_state = (arm_saved_state_t *) kstackptr;

	state->lr = saved_state->lr;
	state->sp = saved_state->sp;
	state->pc = saved_state->pc;
	state->cpsr = saved_state->cpsr;
	bcopy(&saved_state->r[0], &state->r[0], sizeof(state->r));

#endif /* !__arm64__ */
}

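/*
 * Thread state flavors emitted per user thread in a core file, for
 * 32-bit tasks and (on arm64) 64-bit tasks respectively.
 */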
static const arm_state_hdr_t user32_thread_flavor_array[] = {
	{ ARM_THREAD_STATE, ARM_UNIFIED_THREAD_STATE_COUNT },
};

#if defined(__arm64__)
static const arm_state_hdr_t user64_thread_flavor_array[] = {
	{ ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT },
	{ ARM_VFP_STATE, ARM_VFP_STATE_COUNT },
	{ ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT },
};
#endif

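/*
 * kern_collect_userth_state_size
 *
 * Report the number of LC_THREAD entries for a user task (one per
 * thread) and the per-thread size: a thread_command followed by one
 * flavor header and state block for each flavor in the task's array.
 */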
void
kern_collect_userth_state_size(task_t task, uint64_t * tstate_count, uint64_t * tstate_size)
{
	uint64_t per_thread_size = 0;
	uint64_t num_flavors = 0;
	const arm_state_hdr_t * flavors;
#if defined(__arm64__)
	bool is64bit = task_has_64Bit_addr(task);

	if (is64bit) {
		flavors = user64_thread_flavor_array;
		num_flavors = sizeof(user64_thread_flavor_array) / sizeof(user64_thread_flavor_array[0]);
	} else {
		flavors = user32_thread_flavor_array;
		num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
	}
#else
	flavors = user32_thread_flavor_array;
	num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
#endif

	for (size_t i = 0; i < num_flavors; i++) {
		per_thread_size += sizeof(arm_state_hdr_t) + (flavors[i].count * sizeof(natural_t));
	}

	*tstate_count = task->thread_count;
	*tstate_size = sizeof(struct thread_command) + per_thread_size;
}

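/*
 * kern_collect_userth_state
 *
 * Fill one LC_THREAD command with the state of a single user thread,
 * writing a flavor header followed by the corresponding thread state
 * for each flavor appropriate to the task's address-space size.
 */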
void
kern_collect_userth_state(task_t task, thread_t thread, void *buffer, uint64_t size)
{
	kern_return_t ret;
	uint64_t num_flavors = 0;
	const arm_state_hdr_t * flavors;
#if defined(__arm64__)
	bool is64bit = task_has_64Bit_addr(task);

	if (is64bit) {
		flavors = user64_thread_flavor_array;
		num_flavors = sizeof(user64_thread_flavor_array) / sizeof(user64_thread_flavor_array[0]);
	} else {
		flavors = user32_thread_flavor_array;
		num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
	}
#else
	(void)task;
	flavors = user32_thread_flavor_array;
	num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
#endif

	struct thread_command *tc = buffer;
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t)size;

	arm_state_hdr_t *hdr = (arm_state_hdr_t *)(tc + 1);

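	/* For each flavor, write the header, then the state words, then advance past both. */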
	for (size_t i = 0; i < num_flavors; i++) {
		hdr->flavor = flavors[i].flavor;
		hdr->count = flavors[i].count;
		/* Ensure we can't write past the end of the buffer */
		assert((hdr->count * sizeof(natural_t)) + sizeof(arm_state_hdr_t) + ((uintptr_t)hdr - (uintptr_t)buffer) <= size);
		ret = machine_thread_get_state(thread, hdr->flavor, (thread_state_t)(hdr + 1), &hdr->count);
		assert(ret == KERN_SUCCESS);

		hdr = (arm_state_hdr_t *)((uintptr_t)(hdr + 1) + hdr->count * sizeof(natural_t));
	}
}

/*
 * kdp_core_start_addr
 *
 * Return the lowest virtual address covered by the kernel core file.
 *
 * This is VM_MIN_KERNEL_AND_KEXT_ADDRESS unless the physical aperture
 * has been relocated below VM_MIN_KERNEL_AND_KEXT_ADDRESS, as on
 * ARM_LARGE_MEMORY systems.
 */
vm_map_offset_t
kdp_core_start_addr()
{
#if defined(__arm64__)
	extern const vm_map_address_t physmap_base;
	return MIN(physmap_base, VM_MIN_KERNEL_AND_KEXT_ADDRESS);
#else /* !defined(__arm64__) */
	return VM_MIN_KERNEL_AND_KEXT_ADDRESS;
#endif /* !defined(__arm64__) */
}