xref: /xnu-8019.80.24/osfmk/vm/vm_debug.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_debug.c.
60  *	Author:	Rich Draves
61  *	Date:	March, 1990
62  *
63  *	Exported kernel calls.  See mach_debug/mach_debug.defs.
64  */
65 #include <mach_vm_debug.h>
66 #include <mach/kern_return.h>
67 #include <mach/mach_host_server.h>
68 #include <mach_debug/vm_info.h>
69 #include <mach_debug/page_info.h>
70 #include <mach_debug/hash_info.h>
71 
72 #if MACH_VM_DEBUG
73 #include <mach/machine/vm_types.h>
74 #include <mach/memory_object_types.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_inherit.h>
77 #include <mach/vm_param.h>
78 #include <kern/thread.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_object.h>
82 #include <kern/task.h>
83 #include <kern/host.h>
84 #include <ipc/ipc_port.h>
85 #include <vm/vm_debug.h>
86 #endif
87 
88 #if !MACH_VM_DEBUG
89 #define __DEBUG_ONLY __unused
90 #else /* !MACH_VM_DEBUG */
91 #define __DEBUG_ONLY
92 #endif /* !MACH_VM_DEBUG */
93 
94 #ifdef VM32_SUPPORT
95 
96 #include <mach/vm32_map_server.h>
97 #include <mach/vm_map.h>
98 
99 /*
100  *	Routine:	mach_vm_region_info [kernel call]
101  *	Purpose:
102  *		Retrieve information about a VM region,
103  *		including info about the object chain.
104  *	Conditions:
105  *		Nothing locked.
106  *	Returns:
107  *		KERN_SUCCESS		Retrieve region/object info.
108  *		KERN_INVALID_TASK	The map is null.
109  *		KERN_NO_SPACE		There is no entry at/after the address.
110  *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
111  */
112 
kern_return_t
vm32_region_info(
	__DEBUG_ONLY vm_map_t                   map,
	__DEBUG_ONLY vm32_offset_t              address,
	__DEBUG_ONLY vm_info_region_t           *regionp,
	__DEBUG_ONLY vm_info_object_array_t     *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
{
#if !MACH_VM_DEBUG
	/* interface compiled out; MIG entry point kept but always fails */
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;   /* memory for OOL data */
	vm_size_t size;         /* size of the memory */
	unsigned int room;      /* room for this many objects */
	unsigned int used;      /* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

	/*
	 * Retry loop: each pass walks the shadow chain of the object
	 * mapped at `address`, filling the wired buffer at `addr` (once
	 * one has been allocated).  If the chain holds more objects than
	 * the buffer has room for, a larger buffer is allocated and the
	 * whole walk restarts, since the chain may have changed while
	 * nothing was locked.
	 */
	for (;;) {
		vm_map_t cmap;  /* current map in traversal */
		vm_map_t nmap;  /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
			    (vm_map_address_t)address, &entry)) {
				/*
				 * No entry covers `address`; report the next
				 * entry instead, failing if we ran off the
				 * end of the map.
				 */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		/* 32-bit interface: offset truncated to natural_t */
		region.vir_offset = (natural_t) VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* lock hand-off: take the object lock before dropping the map lock */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain lock hand-over-hand, recording each
		 * object while there is still room.  `used` keeps counting
		 * past `room` so a retry can size the next buffer.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				/* "temporary" attribute is obsolete; always reported FALSE */
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		/* double the needed size to leave slack for chain growth */
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

		/* wire the buffer so it can be filled while object locks are held */
		kr = vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		/* hand ownership of the used pages to a copy object for the reply */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
332 
/*
 *  Temporary call for 64-bit data path interface transition
 */
336 
kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t                   map,
	__DEBUG_ONLY vm32_offset_t              address,
	__DEBUG_ONLY vm_info_region_64_t        *regionp,
	__DEBUG_ONLY vm_info_object_array_t     *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
{
#if !MACH_VM_DEBUG
	/* interface compiled out; MIG entry point kept but always fails */
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;   /* memory for OOL data */
	vm_size_t size;         /* size of the memory */
	unsigned int room;      /* room for this many objects */
	unsigned int used;      /* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

	/*
	 * Retry loop (same protocol as vm32_region_info): walk the shadow
	 * chain into the wired buffer at `addr`; if it overflows, allocate
	 * a bigger buffer and restart, since the chain may have changed
	 * while nothing was locked.
	 */
	for (;;) {
		vm_map_t cmap;  /* current map in traversal */
		vm_map_t nmap;  /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				/*
				 * No entry covers `address`; report the next
				 * entry instead, failing if we ran off the
				 * end of the map.
				 */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		/* 64-bit variant: offset carried at full width, not truncated */
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* lock hand-off: take the object lock before dropping the map lock */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain lock hand-over-hand, recording each
		 * object while there is still room.  `used` keeps counting
		 * past `room` so a retry can size the next buffer.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				/* "temporary" attribute is obsolete; always reported FALSE */
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		/* double the needed size to leave slack for chain growth */
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

		/* wire the buffer so it can be filled while object locks are held */
		kr = vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		/* hand ownership of the used pages to a copy object for the reply */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
555 /*
556  * Return an array of virtual pages that are mapped to a task.
557  */
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t                   map,
	__DEBUG_ONLY page_address_array_t       *pages,
	__DEBUG_ONLY mach_msg_type_number_t     *pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#elif 1 /* pmap_resident_count is gone with rdar://68290810 */
	/*
	 * The implementation below depended on pmap_resident_count() /
	 * pmap_list_resident_pages(), which were removed; the MIG entry
	 * point is kept for interface compatibility but always fails.
	 */
	(void)map; (void)pages; (void)pages_count;
	return KERN_FAILURE;
#else
	pmap_t          pmap;
	vm_size_t       size, size_used;
	unsigned int    actual, space;
	page_address_array_t list;
	vm_offset_t     addr = 0;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pmap = map->pmap;
	/* initial estimate: one slot per currently-resident page */
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(ipc_kernel_map));

	for (;;) {
		(void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		(void) vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);

		list = (page_address_array_t) addr;
		space = (unsigned int) (size / sizeof(vm_offset_t));

		actual = pmap_list_resident_pages(pmap,
		    list,
		    space);
		if (actual <= space) {
			break;
		}

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, doubling the size
		 */
		size = vm_map_round_page(actual * sizeof(vm_offset_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
		/* no resident pages: return an empty array */
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t vmsize_used;
		*pages_count = actual;
		size_used = (actual * sizeof(vm_offset_t));
		vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		/* re-wire the pages before handing them to a copy object */
		(void) vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
		    (vm_map_address_t)addr,
		    (vm_map_size_t)size_used,
		    TRUE,
		    (vm_map_copy_t *)pages);
		/* return any page-rounding excess past the used portion */
		if (vmsize_used != size) {
			(void) kmem_free(ipc_kernel_map,
			    addr + vmsize_used,
			    size - vmsize_used);
		}
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
650 
651 #endif /* VM32_SUPPORT */
652 
653 /*
654  *	Routine:	host_virtual_physical_table_info
655  *	Purpose:
656  *		Return information about the VP table.
657  *	Conditions:
658  *		Nothing locked.  Obeys CountInOut protocol.
659  *	Returns:
660  *		KERN_SUCCESS		Returned information.
661  *		KERN_INVALID_HOST	The host is null.
662  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
663  */
664 
665 kern_return_t
host_virtual_physical_table_info(__DEBUG_ONLY host_t host,__DEBUG_ONLY hash_info_bucket_array_t * infop,__DEBUG_ONLY mach_msg_type_number_t * countp)666 host_virtual_physical_table_info(
667 	__DEBUG_ONLY host_t                     host,
668 	__DEBUG_ONLY hash_info_bucket_array_t   *infop,
669 	__DEBUG_ONLY mach_msg_type_number_t     *countp)
670 {
671 #if !MACH_VM_DEBUG
672 	return KERN_FAILURE;
673 #else
674 	vm_offset_t addr = 0;
675 	vm_size_t size = 0;
676 	hash_info_bucket_t *info;
677 	unsigned int potential, actual;
678 	kern_return_t kr;
679 
680 	if (host == HOST_NULL) {
681 		return KERN_INVALID_HOST;
682 	}
683 
684 	/* start with in-line data */
685 
686 	info = *infop;
687 	potential = *countp;
688 
689 	for (;;) {
690 		actual = vm_page_info(info, potential);
691 		if (actual <= potential) {
692 			break;
693 		}
694 
695 		/* allocate more memory */
696 
697 		if (info != *infop) {
698 			kmem_free(ipc_kernel_map, addr, size);
699 		}
700 
701 		size = vm_map_round_page(actual * sizeof *info,
702 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
703 		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size,
704 		    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
705 		if (kr != KERN_SUCCESS) {
706 			return KERN_RESOURCE_SHORTAGE;
707 		}
708 
709 		info = (hash_info_bucket_t *) addr;
710 		potential = (unsigned int) (size / sizeof(*info));
711 	}
712 
713 	if (info == *infop) {
714 		/* data fit in-line; nothing to deallocate */
715 
716 		*countp = actual;
717 	} else if (actual == 0) {
718 		kmem_free(ipc_kernel_map, addr, size);
719 
720 		*countp = 0;
721 	} else {
722 		vm_map_copy_t copy;
723 		vm_size_t used, vmused;
724 
725 		used = (actual * sizeof(*info));
726 		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
727 
728 		if (vmused != size) {
729 			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
730 		}
731 
732 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
733 		    (vm_map_size_t)used, TRUE, &copy);
734 		assert(kr == KERN_SUCCESS);
735 
736 		*infop = (hash_info_bucket_t *) copy;
737 		*countp = actual;
738 	}
739 
740 	return KERN_SUCCESS;
741 #endif /* MACH_VM_DEBUG */
742 }
743