/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kdp/kdp_common.h>
#include <kdp/kdp_dyld.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_fault_xnu.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor_xnu.h>
#include <sys/errno.h>

/* not_in_kdp is zero only while the system is in debugger (KDP/panic) context. */
extern unsigned int not_in_kdp;
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
extern pmap_paddr_t kdp_vtophys(pmap_t pmap, addr64_t va);

/*
 * Sets the appropriate page mask and size to use for dealing with pages --
 * it's important that this is a "min" of page size to account for both K16/U4
 * (Rosetta) and K4/U16 (armv7k) environments.
 */
size_t
kdp_vm_map_get_page_size(vm_map_t map, size_t *effective_page_mask)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
		if (effective_page_mask) {
			*effective_page_mask = VM_MAP_PAGE_MASK(map);
		}
		return VM_MAP_PAGE_SIZE(map);
	} else {
		if (effective_page_mask) {
			*effective_page_mask = PAGE_MASK;
		}
		return PAGE_SIZE;
	}
}
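
/*
 * Illustrative sketch (not part of this file): splitting an address with the
 * reported mask, as kdp_find_phys() below does on its fault path --
 *
 *	size_t mask;
 *	size_t pgsize = kdp_vm_map_get_page_size(map, &mask);	// pgsize == mask + 1
 *	vm_offset_t page_base = addr & ~mask;	// page containing addr
 *	vm_offset_t pgoff     = addr & mask;	// offset within that page
 *
 * `map` and `addr` are assumed to be a valid vm_map_t and an address in it.
 */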

void
kdp_memcpy(void *dst, const void *src, size_t len)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

#if defined(__arm64__)
	/* Identify if destination buffer is in panic storage area */
	if (((vm_offset_t)dst >= gPanicBase) && ((vm_offset_t)dst < (gPanicBase + gPanicSize))) {
		/* Copy over bytes individually to prevent unaligned access */
		uint8_t *dest_bytes = (uint8_t *)dst;
		const uint8_t *src_bytes = (const uint8_t *)src;
		for (size_t i = 0; i < len; i++) {
			dest_bytes[i] = src_bytes[i];
		}
	} else
#endif
	memcpy(dst, src, len);
}

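/*
 * strlcpy() analogue that is safe in debugger context: the actual copy goes
 * through kdp_memcpy() above. Like strlcpy(), it returns strlen(src), so a
 * return value >= maxlen indicates the copy was truncated.
 */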
size_t
kdp_strlcpy(char *dst, const char *src, size_t maxlen)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	const size_t srclen = strlen(src);

	if (srclen < maxlen) {
		kdp_memcpy(dst, src, srclen + 1);
	} else if (maxlen != 0) {
		kdp_memcpy(dst, src, maxlen - 1);
		dst[maxlen - 1] = '\0';
	}

	return srclen;
}

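/*
 * Walks every page in the task's VM map and invokes the callback once per
 * resident page, passing either the virtual or the physical range depending
 * on KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL. Traversal stops as soon as the
 * callback returns anything other than KERN_SUCCESS.
 *
 * Illustrative sketch (hypothetical callback, assuming the callback type is
 * kern_return_t (*)(vm_offset_t, vm_offset_t, void *), as the calls below
 * suggest; 0 is assumed to mean "no flags" for both flag arguments):
 *
 *	static kern_return_t
 *	count_resident_bytes(vm_offset_t start, vm_offset_t end, void *ctx)
 *	{
 *		*(uint64_t *)ctx += end - start;
 *		return KERN_SUCCESS;
 *	}
 *
 *	uint64_t resident = 0;
 *	kdp_traverse_mappings(task, 0, 0, count_resident_bytes, &resident);
 */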
kern_return_t
kdp_traverse_mappings(
	task_t task,
	kdp_fault_flags_t fault_flags,
	kdp_traverse_mappings_flags_t traverse_mappings_flags,
	kdp_traverse_mappings_callback callback,
	void * context)
{
	vm_map_t map = task->map;
	vm_map_entry_t entry;
	vm_offset_t vcur;
	kern_return_t ret = KERN_SUCCESS;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	size_t effective_page_mask;
	size_t task_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	// Iterate vm map
	for (entry = vm_map_first_entry(map); ret == KERN_SUCCESS && entry != NULL && entry != vm_map_to_entry(map); entry = entry->vme_next) {
		// Found a region, iterate over pages in the region
		for (vcur = entry->vme_start; ret == KERN_SUCCESS && vcur < entry->vme_end; vcur += task_page_size) {
			vm_offset_t vphys = kdp_find_phys(map, vcur, fault_flags, NULL);
			if (vphys) {
				if (traverse_mappings_flags & KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL) {
					ret = callback(vphys, vphys + task_page_size, context);
				} else {
					ret = callback(vcur, vcur + task_page_size, context);
				}
			}
		}
	}

	return ret;
}

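/*
 * Translates a virtual address in `map` to a physical address. If the page
 * is not resident, returns 0 unless KDP_FAULT_FLAGS_ENABLE_FAULTING is set,
 * in which case a lightweight fault is attempted first. `fault_results`,
 * when non-NULL, accumulates fault timing and outcome flags.
 */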
vm_offset_t
kdp_find_phys(vm_map_t map, vm_offset_t target_addr, kdp_fault_flags_t fault_flags, struct kdp_fault_result * fault_results)
{
	vm_offset_t cur_phys_addr;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (map == VM_MAP_NULL) {
		return 0;
	}

	cur_phys_addr = (vm_offset_t)kdp_vtophys(map->pmap, target_addr);
	if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
		if (!(fault_flags & KDP_FAULT_FLAGS_ENABLE_FAULTING)) {
			if (fault_results) {
				fault_results->flags |= KDP_FAULT_RESULT_PAGED_OUT;
			}

			return 0;
		}

		/*
		 * The pmap doesn't have a valid page so we start at the top level
		 * vm map and try a lightweight fault. Update fault path usage stats.
		 */
		uint64_t fault_start_time = mach_absolute_time();
		uint64_t fault_end_time;
		size_t effective_page_mask;
		(void)kdp_vm_map_get_page_size(map, &effective_page_mask);

		cur_phys_addr = kdp_lightweight_fault(map, (target_addr & ~effective_page_mask), fault_flags & KDP_FAULT_FLAGS_MULTICPU);
		fault_end_time = mach_absolute_time();

		if (fault_results) {
			fault_results->time_spent_faulting += fault_end_time - fault_start_time;
		}

		cur_phys_addr += (target_addr & effective_page_mask);

		if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
			if (fault_results) {
				fault_results->flags |= (KDP_FAULT_RESULT_TRIED_FAULT | KDP_FAULT_RESULT_PAGED_OUT);
			}

			return 0;
		}

		if (fault_results) {
			fault_results->flags |= KDP_FAULT_RESULT_FAULTED_IN;
		}
	} else {
		/*
		 * This check is done in kdp_lightweight_fault for the fault path.
		 */
		unsigned int cur_wimg_bits = pmap_cache_attributes((ppnum_t) atop(cur_phys_addr));

		if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			return 0;
		}
	}

	return cur_phys_addr;
}

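/*
 * Generic page-by-page copyin. Each iteration translates the source and
 * destination addresses and copies MIN(bytes left in the source page, bytes
 * left in the destination page, bytes remaining overall), so no single copy
 * straddles a page boundary on either side. If either translation fails the
 * copy stops early; the only error reported is EINVAL, for a destination
 * that would overflow the arm64 panic buffer.
 */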
int
kdp_generic_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	size_t rem = size;
	char *kvaddr = dest;
	size_t effective_page_mask;
	size_t effective_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	/* must be called from debugger context */
	assert(!not_in_kdp);

#if defined(__arm64__)
	/* Identify if destination buffer is in panic storage area */
	if (!not_in_kdp && ((vm_offset_t)dest >= gPanicBase) && ((vm_offset_t)dest < (gPanicBase + gPanicSize))) {
		if (((vm_offset_t)dest + size) > (gPanicBase + gPanicSize)) {
			return EINVAL;
		}
	}
#endif

	while (rem) {
		uint64_t phys_src = (*find_phys_fn)(map, (vm_offset_t)uaddr, fault_flags, context);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (phys_src && phys_dest) {
#if defined(__arm64__)
			/*
			 * On arm devices the panic buffer is mapped as device memory and doesn't allow
			 * unaligned accesses. To prevent these, we copy over bytes individually here.
			 */
			if (!not_in_kdp) {
				kdp_memcpy(kvaddr, (const void *)phystokv((pmap_paddr_t)phys_src), cur_size);
			} else
#endif /* defined(__arm64__) */
			bcopy_phys(phys_src, phys_dest, cur_size);
		} else {
			break;
		}

		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}

	return 0;
}

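/*
 * Copies a pointer-width word from the task: a 64-bit task is read as a
 * uint64_t directly into *result, a 32-bit task is read as a uint32_t and
 * zero-extended.
 */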
int
kdp_generic_copyin_word(
	task_t task, uint64_t addr, uint64_t *result, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (task_has_64Bit_addr(task)) {
		return kdp_generic_copyin(task->map, addr, result, sizeof(uint64_t), fault_flags, find_phys_fn, context);
	} else {
		uint32_t buf;
		int r = kdp_generic_copyin(task->map, addr, &buf, sizeof(uint32_t), fault_flags, find_phys_fn, context);
		if (r == KERN_SUCCESS) {
			*result = buf;
		}
		return r;
	}
}

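/*
 * Byte-at-a-time string copy, used when the opportunistic fast path below
 * fails. On the first byte of a run it translates both addresses and records
 * how many further bytes stay within the same source and destination pages
 * (`validated`); the rest of the run reuses those translations at the
 * matching offset. Returns the string length including the NUL terminator,
 * 0 if a translation fails, or -1 if buf_sz is exhausted first.
 */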
static int
kdp_generic_copyin_string_slowpath(
	task_t task, uint64_t addr, char *buf, int buf_sz, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	int i;
	uint64_t validated = 0, valid_from;
	uint64_t phys_src, phys_dest;
	vm_map_t map = task->map;
	size_t effective_page_mask;
	size_t effective_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	/* must be called from debugger context */
	assert(!not_in_kdp);

	for (i = 0; i < buf_sz; i++) {
		if (validated == 0) {
			valid_from = i;
			phys_src = (*find_phys_fn)(map, (vm_offset_t)(addr + i), fault_flags, context);
			phys_dest = kvtophys((vm_offset_t)&buf[i]);
			uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
			uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
			if (phys_src && phys_dest) {
				validated = MIN(src_rem, dst_rem);
				if (validated) {
					bcopy_phys(phys_src, phys_dest, 1);
					validated--;
				} else {
					return 0;
				}
			} else {
				return 0;
			}
		} else {
			bcopy_phys(phys_src + (i - valid_from), phys_dest + (i - valid_from), 1);
			validated--;
		}

		if (buf[i] == '\0') {
			return i + 1;
		}
	}

	/* ran out of space */
	return -1;
}

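/*
 * Opportunistic fast path: most strings fit in 32 bytes, so try one bulk
 * copy first and fall back to the slowpath only when that copy fails or no
 * NUL terminator appears within the buffer. On success the return value
 * counts the terminating NUL, matching the slowpath's convention.
 */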
int
kdp_generic_copyin_string(
	task_t task, uint64_t addr, char *buf, int buf_sz, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	/* try to opportunistically copyin 32 bytes, most strings should fit */
	char optbuffer[32] = {0};
	int res;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	res = kdp_generic_copyin(task->map, addr, optbuffer, sizeof(optbuffer), fault_flags, find_phys_fn, context);
	if (res != KERN_SUCCESS || strnlen(optbuffer, sizeof(optbuffer)) == sizeof(optbuffer)) {
		/* try the slowpath */
		return kdp_generic_copyin_string_slowpath(task, addr, buf, buf_sz, fault_flags, find_phys_fn, context);
	}

	/* success */
	return (int) strlcpy(buf, optbuffer, buf_sz) + 1;
}
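
/*
 * Illustrative sketch (hypothetical callsite, not from this file):
 *
 *	char name[64];
 *	int len = kdp_generic_copyin_string(task, str_addr, name, sizeof(name),
 *	    KDP_FAULT_FLAGS_ENABLE_FAULTING, (find_phys_fn_t)kdp_find_phys, NULL);
 *	if (len > 0) {
 *		// name holds a NUL-terminated string of len - 1 characters
 *	}
 */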

static int
kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, kdp_fault_flags_t fault_flags)
{
	return kdp_generic_copyin(map, uaddr, dest, size, fault_flags, (find_phys_fn_t)kdp_find_phys, NULL);
}

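/*
 * Recovers dyld's load address and UUID for a task by reading the task's
 * dyld_all_image_infos structure (32- or 64-bit layout, as appropriate) and
 * scanning its UUID array for the entry whose load address matches dyld's.
 */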
kern_return_t
kdp_task_dyld_info(task_t task, kdp_fault_flags_t fault_flags, uint64_t * dyld_load_address, uuid_t dyld_uuid, size_t * task_page_size)
{
	uint32_t uuid_info_count = 0;
	mach_vm_address_t uuid_info_addr = 0;
	mach_vm_address_t dyld_load_addr = 0;
	boolean_t task_64bit_addr = task_has_64Bit_addr(task);

	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (dyld_uuid == NULL || dyld_load_address == NULL || task_page_size == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*task_page_size = kdp_vm_map_get_page_size(task->map, NULL);

	if (task_64bit_addr) {
		struct user64_dyld_all_image_infos task_image_infos;
		if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
		    sizeof(struct user64_dyld_all_image_infos), fault_flags) == KERN_SUCCESS) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
			dyld_load_addr = task_image_infos.dyldImageLoadAddress;
		}
	} else {
		struct user32_dyld_all_image_infos task_image_infos;
		if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
		    sizeof(struct user32_dyld_all_image_infos), fault_flags) == KERN_SUCCESS) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
			dyld_load_addr = task_image_infos.dyldImageLoadAddress;
		}
	}

	if (uuid_info_count == 0 || uuid_info_addr == 0 || dyld_load_addr == 0) {
		return KERN_NOT_FOUND;
	}

	// Find the UUID of dyld
	for (size_t i = 0; i < uuid_info_count; i++) {
		if (task_64bit_addr) {
			struct user64_dyld_uuid_info uuid_info;
			if (kdp_copyin(task->map, uuid_info_addr + (i * sizeof(struct user64_dyld_uuid_info)), &uuid_info, sizeof(struct user64_dyld_uuid_info), fault_flags) == KERN_SUCCESS) {
				if (uuid_info.imageLoadAddress == dyld_load_addr) {
					uuid_copy(dyld_uuid, uuid_info.imageUUID);
					*dyld_load_address = dyld_load_addr;
					return KERN_SUCCESS;
				}
			}
		} else {
			struct user32_dyld_uuid_info uuid_info;
			if (kdp_copyin(task->map, uuid_info_addr + (i * sizeof(struct user32_dyld_uuid_info)), &uuid_info, sizeof(struct user32_dyld_uuid_info), fault_flags) == KERN_SUCCESS) {
				if (uuid_info.imageLoadAddress == dyld_load_addr) {
					uuid_copy(dyld_uuid, uuid_info.imageUUID);
					*dyld_load_address = dyld_load_addr;
					return KERN_SUCCESS;
				}
			}
		}
	}

	return KERN_NOT_FOUND;
}
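
/*
 * Illustrative sketch (hypothetical callsite, not from this file; 0 is
 * assumed here to mean "no fault flags"):
 *
 *	uuid_t dyld_uuid;
 *	uint64_t dyld_addr;
 *	size_t task_pgsize;
 *	if (kdp_task_dyld_info(task, 0, &dyld_addr, dyld_uuid,
 *	    &task_pgsize) == KERN_SUCCESS) {
 *		// dyld_addr and dyld_uuid now describe the task's dyld image
 *	}
 */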