/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <kdp/kdp_common.h>
#include <kdp/kdp_dyld.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_fault_xnu.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor_xnu.h>
#include <sys/errno.h>

extern unsigned int not_in_kdp;
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
#if HAS_MTE
extern void bcopy_phys_with_options(addr64_t from, addr64_t to, vm_size_t nbytes, int options);
#endif /* HAS_MTE */
extern pmap_paddr_t kdp_vtophys(pmap_t pmap, addr64_t va);

/*
 * Sets the appropriate page mask and size to use for dealing with pages --
 * it's important that this is a "min" of page size to account for both K16/U4
 * (Rosetta) and K4/U16 (armv7k) environments.
 */
size_t
kdp_vm_map_get_page_size(vm_map_t map, size_t *effective_page_mask)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
		if (effective_page_mask) {
			*effective_page_mask = VM_MAP_PAGE_MASK(map);
		}
		return VM_MAP_PAGE_SIZE(map);
	} else {
		if (effective_page_mask) {
			*effective_page_mask = PAGE_MASK;
		}
		return PAGE_SIZE;
	}
}

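/*
 * memcpy variant that is safe for the panic storage area: on arm64,
 * destinations inside [gPanicBase, gPanicBase + gPanicSize) are copied one
 * byte at a time, since the panic buffer is mapped as device memory and
 * doesn't tolerate unaligned accesses. Everything else falls through to
 * the regular memcpy().
 */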
void
kdp_memcpy(void *dst, const void *src, size_t len)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

#if defined(__arm64__)
	/* Identify if destination buffer is in panic storage area */
	if (((vm_offset_t)dst >= gPanicBase) && ((vm_offset_t)dst < (gPanicBase + gPanicSize))) {
		/* Copy over bytes individually to prevent unaligned access */
		uint8_t *dest_bytes = (uint8_t *)dst;
		const uint8_t *src_bytes = (const uint8_t *)src;
		for (size_t i = 0; i < len; i++) {
			dest_bytes[i] = src_bytes[i];
		}
	} else
#endif
	memcpy(dst, src, len);
}

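/*
 * strlcpy variant built on kdp_memcpy() so that copies landing in the
 * panic storage area stay byte-aligned. Matches strlcpy semantics: the
 * destination is always NUL-terminated when maxlen is nonzero, and the
 * return value is strlen(src), so truncation shows up as a result >= maxlen.
 */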
size_t
kdp_strlcpy(char *dst, const char *src, size_t maxlen)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	const size_t srclen = strlen(src);

	if (srclen < maxlen) {
		kdp_memcpy(dst, src, srclen + 1);
	} else if (maxlen != 0) {
		kdp_memcpy(dst, src, maxlen - 1);
		dst[maxlen - 1] = '\0';
	}

	return srclen;
}

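/*
 * Walks every entry in the task's vm map and invokes the callback once per
 * resident page, passing either the virtual or the physical page range
 * depending on KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL. Pages that
 * kdp_find_phys() cannot resolve are skipped, and traversal stops as soon
 * as the callback returns anything other than KERN_SUCCESS.
 *
 * As a sketch (hypothetical callback; only the (start, end, context)
 * shape is taken from the calls below), a caller could total the resident
 * footprint like so:
 *
 *	static kern_return_t
 *	count_resident(vm_offset_t start, vm_offset_t end, void *ctx)
 *	{
 *		*(uint64_t *)ctx += end - start;
 *		return KERN_SUCCESS;
 *	}
 */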
kern_return_t
kdp_traverse_mappings(
	task_t task,
	kdp_fault_flags_t fault_flags,
	kdp_traverse_mappings_flags_t traverse_mappings_flags,
	kdp_traverse_mappings_callback callback,
	void * context)
{
	vm_map_t map = task->map;
	vm_map_entry_t entry;
	vm_offset_t vcur;
	kern_return_t ret = KERN_SUCCESS;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	size_t effective_page_mask;
	size_t task_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	// Iterate vm map
	for (entry = vm_map_first_entry(map); ret == KERN_SUCCESS && entry != NULL && entry != vm_map_to_entry(map); entry = entry->vme_next) {
		// Found a region, iterate over pages in the region
		for (vcur = entry->vme_start; ret == KERN_SUCCESS && vcur < entry->vme_end; vcur += task_page_size) {
			vm_offset_t vphys = kdp_find_phys(map, vcur, fault_flags, NULL);
			if (vphys) {
				if (traverse_mappings_flags & KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL) {
					ret = callback(vphys, vphys + task_page_size, context);
				} else {
					ret = callback(vcur, vcur + task_page_size, context);
				}
			}
		}
	}

	return ret;
}

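/*
 * Translates a task virtual address to a physical address from debugger
 * context. If the pmap has no valid translation and
 * KDP_FAULT_FLAGS_ENABLE_FAULTING is set, attempts a lightweight fault
 * against the top-level vm map, recording timing and outcome in
 * fault_results. Returns 0 when the page is paged out, when faulting is
 * disabled or fails, or when the page's cache attributes make it unsafe
 * to touch from the debugger.
 */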
vm_offset_t
kdp_find_phys(vm_map_t map, vm_offset_t target_addr, kdp_fault_flags_t fault_flags, struct kdp_fault_result * fault_results)
{
	vm_offset_t cur_phys_addr;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (map == VM_MAP_NULL) {
		return 0;
	}

#if HAS_MTE
	/*
	 * The address we want to find could be tagged, so strip it properly here.
	 */
	if (map->pmap) {
		target_addr = vm_memtag_canonicalize(map, target_addr);
	}
#endif /* HAS_MTE */

	cur_phys_addr = (vm_offset_t)kdp_vtophys(map->pmap, target_addr);
	if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
		if (!(fault_flags & KDP_FAULT_FLAGS_ENABLE_FAULTING)) {
			if (fault_results) {
				fault_results->flags |= KDP_FAULT_RESULT_PAGED_OUT;
			}

			return 0;
		}

		/*
		 * The pmap doesn't have a valid page so we start at the top level
		 * vm map and try a lightweight fault. Update fault path usage stats.
		 */
		uint64_t fault_start_time = mach_absolute_time();
		uint64_t fault_end_time;
		size_t effective_page_mask;
		(void)kdp_vm_map_get_page_size(map, &effective_page_mask);

		cur_phys_addr = kdp_lightweight_fault(map, (target_addr & ~effective_page_mask), fault_flags & KDP_FAULT_FLAGS_MULTICPU);
		fault_end_time = mach_absolute_time();

		if (fault_results) {
			fault_results->time_spent_faulting += fault_end_time - fault_start_time;
		}

		cur_phys_addr += (target_addr & effective_page_mask);

		if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
			if (fault_results) {
				fault_results->flags |= (KDP_FAULT_RESULT_TRIED_FAULT | KDP_FAULT_RESULT_PAGED_OUT);
			}

			return 0;
		}

		if (fault_results) {
			fault_results->flags |= KDP_FAULT_RESULT_FAULTED_IN;
		}
	} else {
		/*
		 * This check is done in kdp_lightweight_fault for the fault path.
		 */
		unsigned int cur_wimg_bits = pmap_cache_attributes((ppnum_t) atop(cur_phys_addr));

#if HAS_MTE
		if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT && (cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_MTE) {
			return 0;
		}
#else /* !HAS_MTE */
		if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			return 0;
		}
#endif /* HAS_MTE */
	}

	return cur_phys_addr;
}

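/*
 * Copies 'size' bytes from a task's address space into a kernel buffer,
 * one physically contiguous chunk at a time: each iteration translates
 * the source page via 'find_phys_fn' and the destination via kvtophys(),
 * then copies at most the smaller remaining span of the two pages. The
 * loop stops quietly (still returning 0) once either translation fails,
 * so callers that care must validate the destination contents. On arm64,
 * copies into the panic storage area are bounds-checked up front and
 * routed through kdp_memcpy() with MTE tag checking suspended.
 */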
int
kdp_generic_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	size_t rem = size;
	char *kvaddr = dest;
	size_t effective_page_mask;
	size_t effective_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	/* must be called from debugger context */
	assert(!not_in_kdp);

#if defined(__arm64__)
	/* Identify if destination buffer is in panic storage area */
	if (!not_in_kdp && ((vm_offset_t)dest >= gPanicBase) && ((vm_offset_t)dest < (gPanicBase + gPanicSize))) {
		if (((vm_offset_t)dest + size) > (gPanicBase + gPanicSize)) {
			return EINVAL;
		}
	}
#endif

	while (rem) {
		uint64_t phys_src = (*find_phys_fn)(map, (vm_offset_t)uaddr, fault_flags, context);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (phys_src && phys_dest) {
#if defined(__arm64__)
			/*
			 * On arm devices the panic buffer is mapped as device memory and doesn't allow
			 * unaligned accesses. To prevent these, we copy over bytes individually here.
			 */
			if (!not_in_kdp) {
#if HAS_MTE
				mte_disable_tag_checking();
#endif /* HAS_MTE */
				kdp_memcpy(kvaddr, (const void *)phystokv((pmap_paddr_t)phys_src), cur_size);
#if HAS_MTE
				mte_enable_tag_checking();
#endif /* HAS_MTE */
			} else
#endif /* defined(__arm64__) */

#if HAS_MTE
			bcopy_phys_with_options(phys_src, phys_dest, cur_size, cppvDisableTagCheck);
#else /* HAS_MTE */
			bcopy_phys(phys_src, phys_dest, cur_size);
#endif /* HAS_MTE */
		} else {
			break;
		}

		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}

	return 0;
}

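/*
 * Copies a pointer-sized value from the task: a 64-bit load for 64-bit
 * tasks, otherwise a 32-bit load zero-extended into *result.
 */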
int
kdp_generic_copyin_word(
	task_t task, uint64_t addr, uint64_t *result, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (task_has_64Bit_addr(task)) {
		return kdp_generic_copyin(task->map, addr, result, sizeof(uint64_t), fault_flags, find_phys_fn, context);
	} else {
		uint32_t buf;
		int r = kdp_generic_copyin(task->map, addr, &buf, sizeof(uint32_t), fault_flags, find_phys_fn, context);
		if (r == KERN_SUCCESS) {
			*result = buf;
		}
		return r;
	}
}

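/*
 * Byte-at-a-time string copy used when the fast path can't be used:
 * physical translations are re-validated at page-crossing boundaries and
 * reused for the run of bytes known to stay within the same pages.
 * Returns the string length including the NUL terminator, 0 if a
 * translation fails, or -1 if the string doesn't fit in buf_sz bytes.
 */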
static int
kdp_generic_copyin_string_slowpath(
	task_t task, uint64_t addr, char *buf, int buf_sz, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	int i;
	uint64_t validated = 0, valid_from;
	uint64_t phys_src, phys_dest;
	vm_map_t map = task->map;
	size_t effective_page_mask;
	size_t effective_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	/* must be called from debugger context */
	assert(!not_in_kdp);

	for (i = 0; i < buf_sz; i++) {
		if (validated == 0) {
			valid_from = i;
			phys_src = (*find_phys_fn)(map, (vm_offset_t)(addr + i), fault_flags, context);
			phys_dest = kvtophys((vm_offset_t)&buf[i]);
			uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
			uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
			if (phys_src && phys_dest) {
				validated = MIN(src_rem, dst_rem);
				if (validated) {
#if HAS_MTE
					bcopy_phys_with_options(phys_src, phys_dest, 1, cppvDisableTagCheck);
#else /* HAS_MTE */
					bcopy_phys(phys_src, phys_dest, 1);
#endif /* HAS_MTE */
					validated--;
				} else {
					return 0;
				}
			} else {
				return 0;
			}
		} else {
#if HAS_MTE
			bcopy_phys_with_options(phys_src + (i - valid_from),
			    phys_dest + (i - valid_from), 1, cppvDisableTagCheck);
#else /* HAS_MTE */
			bcopy_phys(phys_src + (i - valid_from), phys_dest + (i - valid_from), 1);
#endif /* HAS_MTE */
			validated--;
		}

		if (buf[i] == '\0') {
			return i + 1;
		}
	}

	/* ran out of space */
	return -1;
}

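/*
 * Copies a NUL-terminated string out of the task. First opportunistically
 * pulls in a single 32-byte chunk, which covers most strings; if that
 * copy fails or the string doesn't terminate within the chunk, falls back
 * to the byte-by-byte slowpath above. Returns the string length including
 * the NUL terminator on success.
 *
 * A hypothetical caller (buffer and address names are illustrative):
 *
 *	char path[256];
 *	int len = kdp_generic_copyin_string(task, str_addr, path,
 *	    sizeof(path), KDP_FAULT_FLAGS_ENABLE_FAULTING,
 *	    (find_phys_fn_t)kdp_find_phys, NULL);
 */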
int
kdp_generic_copyin_string(
	task_t task, uint64_t addr, char *buf, int buf_sz, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	/* try to opportunistically copyin 32 bytes, most strings should fit */
	char optbuffer[32] = {0};
	int res;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	res = kdp_generic_copyin(task->map, addr, optbuffer, sizeof(optbuffer), fault_flags, find_phys_fn, context);
	if (res != KERN_SUCCESS || strnlen(optbuffer, sizeof(optbuffer)) == sizeof(optbuffer)) {
		/* try the slowpath */
		return kdp_generic_copyin_string_slowpath(task, addr, buf, buf_sz, fault_flags, find_phys_fn, context);
	}

	/* success */
	return (int) strlcpy(buf, optbuffer, buf_sz) + 1;
}

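/*
 * Convenience wrapper around kdp_generic_copyin() that uses the default
 * kdp_find_phys() translation.
 */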
static int
kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, kdp_fault_flags_t fault_flags)
{
	return kdp_generic_copyin(map, uaddr, dest, size, fault_flags, (find_phys_fn_t)kdp_find_phys, NULL);
}

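/*
 * Recovers dyld's load address and UUID for a task by reading its
 * dyld_all_image_infos structure (32- or 64-bit layout as appropriate)
 * and scanning the UUID array for the entry whose load address matches
 * dyldImageLoadAddress. Also reports the task's effective page size.
 * Returns KERN_NOT_FOUND if the image infos can't be read or dyld's entry
 * isn't present.
 */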
kern_return_t
kdp_task_dyld_info(task_t task, kdp_fault_flags_t fault_flags, uint64_t * dyld_load_address, uuid_t dyld_uuid, size_t * task_page_size)
{
	uint32_t uuid_info_count = 0;
	mach_vm_address_t uuid_info_addr = 0;
	mach_vm_address_t dyld_load_addr = 0;
	boolean_t task_64bit_addr = task_has_64Bit_addr(task);

	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (dyld_uuid == NULL || dyld_load_address == NULL || task_page_size == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*task_page_size = kdp_vm_map_get_page_size(task->map, NULL);

	if (task_64bit_addr) {
		struct user64_dyld_all_image_infos task_image_infos;
		if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
		    sizeof(struct user64_dyld_all_image_infos), fault_flags) == KERN_SUCCESS) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
			dyld_load_addr = task_image_infos.dyldImageLoadAddress;
		}
	} else {
		struct user32_dyld_all_image_infos task_image_infos;
		if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
		    sizeof(struct user32_dyld_all_image_infos), fault_flags) == KERN_SUCCESS) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
			dyld_load_addr = task_image_infos.dyldImageLoadAddress;
		}
	}

	if (uuid_info_count == 0 || uuid_info_addr == 0 || dyld_load_addr == 0) {
		return KERN_NOT_FOUND;
	}

	// Find the UUID of dyld
	for (size_t i = 0; i < uuid_info_count; i++) {
		if (task_64bit_addr) {
			struct user64_dyld_uuid_info uuid_info;
			if (kdp_copyin(task->map, uuid_info_addr + (i * sizeof(struct user64_dyld_uuid_info)), &uuid_info, sizeof(struct user64_dyld_uuid_info), fault_flags) == KERN_SUCCESS) {
				if (uuid_info.imageLoadAddress == dyld_load_addr) {
					uuid_copy(dyld_uuid, uuid_info.imageUUID);
					*dyld_load_address = dyld_load_addr;
					return KERN_SUCCESS;
				}
			}
		} else {
			struct user32_dyld_uuid_info uuid_info;
			if (kdp_copyin(task->map, uuid_info_addr + (i * sizeof(struct user32_dyld_uuid_info)), &uuid_info, sizeof(struct user32_dyld_uuid_info), fault_flags) == KERN_SUCCESS) {
				if (uuid_info.imageLoadAddress == dyld_load_addr) {
					uuid_copy(dyld_uuid, uuid_info.imageUUID);
					*dyld_load_address = dyld_load_addr;
					return KERN_SUCCESS;
				}
			}
		}
	}

	return KERN_NOT_FOUND;
}
453