xref: /xnu-12377.41.6/osfmk/vm/vm_debug.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_debug.c
60  *	Author:	Rich Draves
61  *	Date:	March, 1990
62  *
63  *	Exported kernel calls.  See mach_debug/mach_debug.defs.
64  */
65 #include <mach_vm_debug.h>
66 #include <mach/kern_return.h>
67 #include <mach/mach_host_server.h>
68 #include <mach_debug/vm_info.h>
69 #include <mach_debug/page_info.h>
70 #include <mach_debug/hash_info.h>
71 
72 #if MACH_VM_DEBUG
73 #include <mach/machine/vm_types.h>
74 #include <mach/memory_object_types.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_inherit.h>
77 #include <mach/vm_param.h>
78 #include <kern/thread.h>
79 #include <vm/vm_map_internal.h>
80 #include <vm/vm_kern_xnu.h>
81 #include <vm/vm_object_xnu.h>
82 #include <kern/task.h>
83 #include <kern/host.h>
84 #include <ipc/ipc_port.h>
85 #include <vm/vm_debug_internal.h>
86 #endif
87 
88 #if !MACH_VM_DEBUG
89 #define __DEBUG_ONLY __unused
90 #else /* !MACH_VM_DEBUG */
91 #define __DEBUG_ONLY
92 #endif /* !MACH_VM_DEBUG */
93 
94 #ifdef VM32_SUPPORT
95 
96 #include <mach/vm32_map_server.h>
97 #include <mach/vm_map.h>
98 #include <vm/vm_map_xnu.h>
99 #include <vm/vm_lock_perf.h>
100 
101 /*
102  *	Routine:	mach_vm_region_info [kernel call]
103  *	Purpose:
104  *		Retrieve information about a VM region,
105  *		including info about the object chain.
106  *	Conditions:
107  *		Nothing locked.
108  *	Returns:
109  *		KERN_SUCCESS		Retrieve region/object info.
110  *		KERN_INVALID_TASK	The map is null.
111  *		KERN_NO_SPACE		There is no entry at/after the address.
112  *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
113  */
114 
115 kern_return_t
vm32_mach_vm_region_info(__DEBUG_ONLY vm_map_t map,__DEBUG_ONLY vm32_offset_ut address_u,__DEBUG_ONLY vm_info_region_t * regionp,__DEBUG_ONLY vm_info_object_array_t * objectsp,__DEBUG_ONLY mach_msg_type_number_t * objectsCntp)116 vm32_mach_vm_region_info(
117 	__DEBUG_ONLY vm_map_t                   map,
118 	__DEBUG_ONLY vm32_offset_ut             address_u,
119 	__DEBUG_ONLY vm_info_region_t           *regionp,
120 	__DEBUG_ONLY vm_info_object_array_t     *objectsp,
121 	__DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
122 {
123 	vmlp_api_start(VM32_REGION_INFO);
124 
125 #if !MACH_VM_DEBUG
126 	vmlp_api_end(VM32_REGION_INFO, KERN_FAILURE);
127 	return KERN_FAILURE;
128 #else
129 	/* This unwrap is safe as this function is DEBUG only. */
130 	vm32_offset_t address = VM_SANITIZE_UNSAFE_UNWRAP(address_u);
131 	vm_map_copy_t copy;
132 	vm_offset_t addr = 0;   /* memory for OOL data */
133 	vm_size_t size;         /* size of the memory */
134 	unsigned int room;      /* room for this many objects */
135 	unsigned int used;      /* actually this many objects */
136 	vm_info_region_t region;
137 	kern_return_t kr;
138 
139 	if (map == VM_MAP_NULL) {
140 		vmlp_api_end(VM32_REGION_INFO, KERN_INVALID_TASK);
141 		return KERN_INVALID_TASK;
142 	}
143 
144 	size = 0;               /* no memory allocated yet */
145 
	/*
	 * Two-phase scheme: the first pass runs with room == 0 and only
	 * counts the objects in the shadow chain ("used"); if the buffer
	 * was too small, allocate a bigger one and redo the traversal
	 * from scratch (the chain may have changed while unlocked).
	 */
146 	for (;;) {
147 		vm_map_t cmap;  /* current map in traversal */
148 		vm_map_t nmap;  /* next map to look at */
149 		vm_map_entry_t entry;
150 		vm_object_t object, cobject, nobject;
151 
152 		/* nothing is locked */
153 
154 		vm_map_lock_read(map);
		/* Descend through submaps with hand-over-hand read locks. */
155 		for (cmap = map;; cmap = nmap) {
156 			/* cmap is read-locked */
157 
			/*
			 * No entry contains "address": try the next entry;
			 * if that is the map header, nothing exists at or
			 * after the address.
			 */
158 			if (!vm_map_lookup_entry(cmap, address, &entry)) {
159 				entry = entry->vme_next;
160 				if (entry == vm_map_to_entry(cmap)) {
161 					vm_map_unlock_read(cmap);
162 					if (size != 0) {
163 						kmem_free(ipc_kernel_map,
164 						    addr, size);
165 					}
166 					vmlp_api_end(VM32_REGION_INFO, KERN_NO_SPACE);
167 					return KERN_NO_SPACE;
168 				}
169 			}
170 
171 			if (entry->is_sub_map) {
172 				nmap = VME_SUBMAP(entry);
173 			} else {
174 				break;
175 			}
176 
177 			/* move down to the lower map */
178 
179 			vm_map_lock_read(nmap);
180 			vm_map_unlock_read(cmap);
181 		}
182 
183 		/* cmap is read-locked; we have a real entry */
184 		vmlp_range_event_entry(cmap, entry);
185 
		/* Snapshot the entry into the 32-bit region record. */
186 		object = VME_OBJECT(entry);
187 		region.vir_start = (natural_t) entry->vme_start;
188 		region.vir_end = (natural_t) entry->vme_end;
189 		region.vir_object = (natural_t)(uintptr_t) object;
190 		region.vir_offset = (natural_t) VME_OFFSET(entry);
191 		region.vir_needs_copy = entry->needs_copy;
192 		region.vir_protection = entry->protection;
193 		region.vir_max_protection = entry->max_protection;
194 		region.vir_inheritance = entry->inheritance;
195 		region.vir_wired_count = entry->wired_count;
196 		region.vir_user_wired_count = entry->user_wired_count;
197 
198 		used = 0;
199 		room = (unsigned int) (size / sizeof(vm_info_object_t));
200 
201 		if (object == VM_OBJECT_NULL) {
202 			vm_map_unlock_read(cmap);
203 			/* no memory needed */
204 			break;
205 		}
206 
		/* Take the object lock before dropping the map lock. */
207 		vm_object_lock(object);
208 		vm_map_unlock_read(cmap);
209 
		/*
		 * Walk the shadow chain with hand-over-hand object locks,
		 * recording one vm_info_object_t per object while there
		 * is room; always count every object in "used".
		 */
210 		for (cobject = object;; cobject = nobject) {
211 			/* cobject is locked */
212 
213 			if (used < room) {
214 				vm_info_object_t *vio =
215 				    &((vm_info_object_t *) addr)[used];
216 
217 				vio->vio_object =
218 				    (natural_t)(uintptr_t) cobject;
219 				vio->vio_size =
220 				    (natural_t) cobject->vo_size;
221 				vio->vio_ref_count =
222 				    cobject->ref_count;
223 				vio->vio_resident_page_count =
224 				    cobject->resident_page_count;
225 				vio->vio_copy =
226 				    (natural_t)(uintptr_t) cobject->vo_copy;
227 				vio->vio_shadow =
228 				    (natural_t)(uintptr_t) cobject->shadow;
229 				vio->vio_shadow_offset =
230 				    (natural_t) cobject->vo_shadow_offset;
231 				vio->vio_paging_offset =
232 				    (natural_t) cobject->paging_offset;
233 				vio->vio_copy_strategy =
234 				    cobject->copy_strategy;
235 				vio->vio_last_alloc =
236 				    (vm_offset_t) cobject->last_alloc;
237 				vio->vio_paging_in_progress =
238 				    cobject->paging_in_progress +
239 				    cobject->activity_in_progress;
240 				vio->vio_pager_created =
241 				    cobject->pager_created;
242 				vio->vio_pager_initialized =
243 				    cobject->pager_initialized;
244 				vio->vio_pager_ready =
245 				    cobject->pager_ready;
246 				vio->vio_can_persist =
247 				    cobject->can_persist;
248 				vio->vio_internal =
249 				    cobject->internal;
250 				vio->vio_temporary =
251 				    FALSE;
252 				vio->vio_alive =
253 				    cobject->alive;
254 				vio->vio_purgable =
255 				    (cobject->purgable != VM_PURGABLE_DENY);
256 				vio->vio_purgable_volatile =
257 				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
258 				    cobject->purgable == VM_PURGABLE_EMPTY);
259 			}
260 
261 			used++;
262 			nobject = cobject->shadow;
263 			if (nobject == VM_OBJECT_NULL) {
264 				vm_object_unlock(cobject);
265 				break;
266 			}
267 
268 			vm_object_lock(nobject);
269 			vm_object_unlock(cobject);
270 		}
271 
272 		/* nothing locked */
273 
		/* The buffer held the whole chain; done traversing. */
274 		if (used <= room) {
275 			break;
276 		}
277 
278 		/* must allocate more memory */
279 
280 		if (size != 0) {
281 			kmem_free(ipc_kernel_map, addr, size);
282 		}
		/*
		 * Double the observed count so a chain that grows between
		 * passes is still likely to fit on the retry.
		 */
283 		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
284 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
285 
286 		kr = kmem_alloc(ipc_kernel_map, &addr, size,
287 		    KMA_DATA, VM_KERN_MEMORY_IPC);
288 		if (kr != KERN_SUCCESS) {
289 			vmlp_api_end(VM32_REGION_INFO, KERN_RESOURCE_SHORTAGE);
290 			return KERN_RESOURCE_SHORTAGE;
291 		}
292 	}
293 
294 	/* free excess memory; make remaining memory pageable */
295 
296 	if (used == 0) {
297 		copy = VM_MAP_COPY_NULL;
298 
299 		if (size != 0) {
300 			kmem_free(ipc_kernel_map, addr, size);
301 		}
302 	} else {
303 		vm_size_t size_used = (used * sizeof(vm_info_object_t));
304 		vm_size_t vmsize_used = vm_map_round_page(size_used,
305 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
306 
		/* Zero the slack tail so no stale kernel data leaks out. */
307 		if (size_used < vmsize_used) {
308 			bzero((char *)addr + size_used, vmsize_used - size_used);
309 		}
310 
311 		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
312 		assert(kr == KERN_SUCCESS);
313 
		/* Wrap the data in a vm_map_copy_t for out-of-line return. */
314 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
315 		    (vm_map_size_t)size_used, TRUE, &copy);
316 		assert(kr == KERN_SUCCESS);
317 
318 		if (size != vmsize_used) {
319 			kmem_free(ipc_kernel_map,
320 			    addr + vmsize_used, size - vmsize_used);
321 		}
322 	}
323 
324 	*regionp = region;
325 	*objectsp = (vm_info_object_array_t) copy;
326 	*objectsCntp = used;
327 	vmlp_api_end(VM32_REGION_INFO, KERN_SUCCESS);
328 	return KERN_SUCCESS;
329 #endif /* MACH_VM_DEBUG */
330 }
331 
332 /*
333  *  Temporary call for the 64-bit data path interface transition.
334  */
335 
336 kern_return_t
vm32_mach_vm_region_info_64(__DEBUG_ONLY vm_map_t map,__DEBUG_ONLY vm32_offset_ut address_u,__DEBUG_ONLY vm_info_region_64_t * regionp,__DEBUG_ONLY vm_info_object_array_t * objectsp,__DEBUG_ONLY mach_msg_type_number_t * objectsCntp)337 vm32_mach_vm_region_info_64(
338 	__DEBUG_ONLY vm_map_t                   map,
339 	__DEBUG_ONLY vm32_offset_ut             address_u,
340 	__DEBUG_ONLY vm_info_region_64_t        *regionp,
341 	__DEBUG_ONLY vm_info_object_array_t     *objectsp,
342 	__DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
343 {
	/*
	 * 64-bit variant of vm32_mach_vm_region_info: identical logic,
	 * but fills a vm_info_region_64_t whose vir_offset keeps the
	 * entry's full-width offset instead of truncating to natural_t.
	 */
344 	vmlp_api_start(VM32_REGION_INFO_64);
345 
346 #if !MACH_VM_DEBUG
347 	vmlp_api_end(VM32_REGION_INFO_64, KERN_FAILURE);
348 	return KERN_FAILURE;
349 #else
350 	/* This unwrap is safe as this function is DEBUG only. */
351 	vm32_offset_t address = VM_SANITIZE_UNSAFE_UNWRAP(address_u);
352 	vm_map_copy_t copy;
353 	vm_offset_t addr = 0;   /* memory for OOL data */
354 	vm_size_t size;         /* size of the memory */
355 	unsigned int room;      /* room for this many objects */
356 	unsigned int used;      /* actually this many objects */
357 	vm_info_region_64_t region;
358 	kern_return_t kr;
359 
360 	if (map == VM_MAP_NULL) {
361 		vmlp_api_end(VM32_REGION_INFO_64, KERN_INVALID_TASK);
362 		return KERN_INVALID_TASK;
363 	}
364 
365 	size = 0;               /* no memory allocated yet */
366 
	/*
	 * Two-phase scheme: first pass counts the shadow chain with
	 * room == 0; if the buffer was too small, grow it and redo the
	 * whole traversal (the chain may change while unlocked).
	 */
367 	for (;;) {
368 		vm_map_t cmap;  /* current map in traversal */
369 		vm_map_t nmap;  /* next map to look at */
370 		vm_map_entry_t entry;
371 		vm_object_t object, cobject, nobject;
372 
373 		/* nothing is locked */
374 
375 		vm_map_lock_read(map);
		/* Descend through submaps with hand-over-hand read locks. */
376 		for (cmap = map;; cmap = nmap) {
377 			/* cmap is read-locked */
378 
			/*
			 * No entry at "address": use the next entry, or fail
			 * with KERN_NO_SPACE if it is the map header.
			 */
379 			if (!vm_map_lookup_entry(cmap, address, &entry)) {
380 				entry = entry->vme_next;
381 				if (entry == vm_map_to_entry(cmap)) {
382 					vm_map_unlock_read(cmap);
383 					if (size != 0) {
384 						kmem_free(ipc_kernel_map,
385 						    addr, size);
386 					}
387 					vmlp_api_end(VM32_REGION_INFO_64, KERN_NO_SPACE);
388 					return KERN_NO_SPACE;
389 				}
390 			}
391 
392 			if (entry->is_sub_map) {
393 				nmap = VME_SUBMAP(entry);
394 			} else {
395 				break;
396 			}
397 
398 			/* move down to the lower map */
399 
400 			vm_map_lock_read(nmap);
401 			vm_map_unlock_read(cmap);
402 		}
403 
404 		/* cmap is read-locked; we have a real entry */
405 		vmlp_range_event_entry(cmap, entry);
406 
		/* Snapshot the entry into the 64-bit region record. */
407 		object = VME_OBJECT(entry);
408 		region.vir_start = (natural_t) entry->vme_start;
409 		region.vir_end = (natural_t) entry->vme_end;
410 		region.vir_object = (natural_t)(uintptr_t) object;
		/* Full-width offset; the 32-bit variant truncates here. */
411 		region.vir_offset = VME_OFFSET(entry);
412 		region.vir_needs_copy = entry->needs_copy;
413 		region.vir_protection = entry->protection;
414 		region.vir_max_protection = entry->max_protection;
415 		region.vir_inheritance = entry->inheritance;
416 		region.vir_wired_count = entry->wired_count;
417 		region.vir_user_wired_count = entry->user_wired_count;
418 
419 		used = 0;
420 		room = (unsigned int) (size / sizeof(vm_info_object_t));
421 
422 		if (object == VM_OBJECT_NULL) {
423 			vm_map_unlock_read(cmap);
424 			/* no memory needed */
425 			break;
426 		}
427 
		/* Take the object lock before dropping the map lock. */
428 		vm_object_lock(object);
429 		vm_map_unlock_read(cmap);
430 
		/*
		 * Walk the shadow chain with hand-over-hand object locks,
		 * storing records while there is room; count all objects.
		 */
431 		for (cobject = object;; cobject = nobject) {
432 			/* cobject is locked */
433 
434 			if (used < room) {
435 				vm_info_object_t *vio =
436 				    &((vm_info_object_t *) addr)[used];
437 
438 				vio->vio_object =
439 				    (natural_t)(uintptr_t) cobject;
440 				vio->vio_size =
441 				    (natural_t) cobject->vo_size;
442 				vio->vio_ref_count =
443 				    cobject->ref_count;
444 				vio->vio_resident_page_count =
445 				    cobject->resident_page_count;
446 				vio->vio_copy =
447 				    (natural_t)(uintptr_t) cobject->vo_copy;
448 				vio->vio_shadow =
449 				    (natural_t)(uintptr_t) cobject->shadow;
450 				vio->vio_shadow_offset =
451 				    (natural_t) cobject->vo_shadow_offset;
452 				vio->vio_paging_offset =
453 				    (natural_t) cobject->paging_offset;
454 				vio->vio_copy_strategy =
455 				    cobject->copy_strategy;
456 				vio->vio_last_alloc =
457 				    (vm_offset_t) cobject->last_alloc;
458 				vio->vio_paging_in_progress =
459 				    cobject->paging_in_progress +
460 				    cobject->activity_in_progress;
461 				vio->vio_pager_created =
462 				    cobject->pager_created;
463 				vio->vio_pager_initialized =
464 				    cobject->pager_initialized;
465 				vio->vio_pager_ready =
466 				    cobject->pager_ready;
467 				vio->vio_can_persist =
468 				    cobject->can_persist;
469 				vio->vio_internal =
470 				    cobject->internal;
471 				vio->vio_temporary =
472 				    FALSE;
473 				vio->vio_alive =
474 				    cobject->alive;
475 				vio->vio_purgable =
476 				    (cobject->purgable != VM_PURGABLE_DENY);
477 				vio->vio_purgable_volatile =
478 				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
479 				    cobject->purgable == VM_PURGABLE_EMPTY);
480 			}
481 
482 			used++;
483 			nobject = cobject->shadow;
484 			if (nobject == VM_OBJECT_NULL) {
485 				vm_object_unlock(cobject);
486 				break;
487 			}
488 
489 			vm_object_lock(nobject);
490 			vm_object_unlock(cobject);
491 		}
492 
493 		/* nothing locked */
494 
		/* The buffer held the whole chain; done traversing. */
495 		if (used <= room) {
496 			break;
497 		}
498 
499 		/* must allocate more memory */
500 
501 		if (size != 0) {
502 			kmem_free(ipc_kernel_map, addr, size);
503 		}
		/* Double the observed count to leave slack for the retry. */
504 		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
505 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
506 
507 		kr = kmem_alloc(ipc_kernel_map, &addr, size,
508 		    KMA_DATA, VM_KERN_MEMORY_IPC);
509 		if (kr != KERN_SUCCESS) {
510 			vmlp_api_end(VM32_REGION_INFO_64, KERN_RESOURCE_SHORTAGE);
511 			return KERN_RESOURCE_SHORTAGE;
512 		}
513 	}
514 
515 	/* free excess memory; make remaining memory pageable */
516 
517 	if (used == 0) {
518 		copy = VM_MAP_COPY_NULL;
519 
520 		if (size != 0) {
521 			kmem_free(ipc_kernel_map, addr, size);
522 		}
523 	} else {
524 		vm_size_t size_used = (used * sizeof(vm_info_object_t));
525 		vm_size_t vmsize_used = vm_map_round_page(size_used,
526 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
527 
		/* Zero the slack tail so no stale kernel data leaks out. */
528 		if (size_used < vmsize_used) {
529 			bzero((char *)addr + size_used, vmsize_used - size_used);
530 		}
531 
532 		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
533 		assert(kr == KERN_SUCCESS);
534 
		/* Wrap the data in a vm_map_copy_t for out-of-line return. */
535 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
536 		    (vm_map_size_t)size_used, TRUE, &copy);
537 		assert(kr == KERN_SUCCESS);
538 
539 		if (size != vmsize_used) {
540 			kmem_free(ipc_kernel_map,
541 			    addr + vmsize_used, size - vmsize_used);
542 		}
543 	}
544 
545 	*regionp = region;
546 	*objectsp = (vm_info_object_array_t) copy;
547 	*objectsCntp = used;
548 	vmlp_api_end(VM32_REGION_INFO_64, KERN_SUCCESS);
549 	return KERN_SUCCESS;
550 #endif /* MACH_VM_DEBUG */
551 }
552 /*
553  * Return an array of virtual pages that are mapped to a task.
554  */
555 kern_return_t
vm32_vm_mapped_pages_info(__DEBUG_ONLY vm_map_t map,__DEBUG_ONLY page_address_array_t * pages,__DEBUG_ONLY mach_msg_type_number_t * pages_count)556 vm32_vm_mapped_pages_info(
557 	__DEBUG_ONLY vm_map_t                   map,
558 	__DEBUG_ONLY page_address_array_t       *pages,
559 	__DEBUG_ONLY mach_msg_type_number_t     *pages_count)
560 {
	/*
	 * Both live branches below return KERN_FAILURE: the call is
	 * disabled because pmap_resident_count was removed (see the
	 * rdar reference on the #elif).  The #else branch is retained,
	 * compiled-out, as the historical implementation.
	 */
561 #if !MACH_VM_DEBUG
562 	return KERN_FAILURE;
563 #elif 1 /* pmap_resident_count is gone with rdar://68290810 */
564 	(void)map; (void)pages; (void)pages_count;
565 	return KERN_FAILURE;
566 #else
567 	pmap_t          pmap;
568 	vm_size_t       size, size_used;
569 	unsigned int    actual, space;
570 	page_address_array_t list;
571 	mach_vm_offset_t addr = 0;
572 
573 	if (map == VM_MAP_NULL) {
574 		return KERN_INVALID_ARGUMENT;
575 	}
576 
	/* Initial guess: one slot per currently-resident page. */
577 	pmap = map->pmap;
578 	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
579 	size = vm_map_round_page(size,
580 	    VM_MAP_PAGE_MASK(ipc_kernel_map));
581 
	/* Retry loop: grow the buffer until the page list fits. */
582 	for (;;) {
583 		(void) mach_vm_allocate_kernel(ipc_kernel_map, &addr, size,
584 		    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IPC));
585 		(void) vm_map_unwire(
586 			ipc_kernel_map,
587 			vm_map_trunc_page(addr,
588 			VM_MAP_PAGE_MASK(ipc_kernel_map)),
589 			vm_map_round_page(addr + size,
590 			VM_MAP_PAGE_MASK(ipc_kernel_map)),
591 			FALSE);
592 
593 		list = (page_address_array_t) addr;
594 		space = (unsigned int) (size / sizeof(vm_offset_t));
595 
596 		actual = pmap_list_resident_pages(pmap,
597 		    list,
598 		    space);
599 		if (actual <= space) {
600 			break;
601 		}
602 
603 		/*
604 		 * Free memory if not enough
605 		 */
606 		(void) kmem_free(ipc_kernel_map, addr, size);
607 
608 		/*
609 		 * Try again, doubling the size
610 		 */
611 		size = vm_map_round_page(actual * sizeof(vm_offset_t),
612 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
613 	}
614 	if (actual == 0) {
615 		*pages = 0;
616 		*pages_count = 0;
617 		(void) kmem_free(ipc_kernel_map, addr, size);
618 	} else {
		/* Wire the used portion, then hand it back as a copy object. */
619 		vm_size_t vmsize_used;
620 		*pages_count = actual;
621 		size_used = (actual * sizeof(vm_offset_t));
622 		vmsize_used = vm_map_round_page(size_used,
623 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
624 		(void) vm_map_wire_kernel(
625 			ipc_kernel_map,
626 			vm_map_trunc_page(addr,
627 			VM_MAP_PAGE_MASK(ipc_kernel_map)),
628 			vm_map_round_page(addr + size,
629 			VM_MAP_PAGE_MASK(ipc_kernel_map)),
630 			VM_PROT_READ | VM_PROT_WRITE,
631 			VM_KERN_MEMORY_IPC,
632 			FALSE);
633 		(void) vm_map_copyin(ipc_kernel_map,
634 		    (vm_map_address_t)addr,
635 		    (vm_map_size_t)size_used,
636 		    TRUE,
637 		    (vm_map_copy_t *)pages);
638 		if (vmsize_used != size) {
639 			(void) kmem_free(ipc_kernel_map,
640 			    addr + vmsize_used,
641 			    size - vmsize_used);
642 		}
643 	}
644 
645 	return KERN_SUCCESS;
646 #endif /* MACH_VM_DEBUG */
647 }
648 
649 #endif /* VM32_SUPPORT */
650 
651 /*
652  *	Routine:	host_virtual_physical_table_info
653  *	Purpose:
654  *		Return information about the VP table.
655  *	Conditions:
656  *		Nothing locked.  Obeys CountInOut protocol.
657  *	Returns:
658  *		KERN_SUCCESS		Returned information.
659  *		KERN_INVALID_HOST	The host is null.
660  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
661  */
662 
663 kern_return_t
host_virtual_physical_table_info(__DEBUG_ONLY host_t host,__DEBUG_ONLY hash_info_bucket_array_t * infop,__DEBUG_ONLY mach_msg_type_number_t * countp)664 host_virtual_physical_table_info(
665 	__DEBUG_ONLY host_t                     host,
666 	__DEBUG_ONLY hash_info_bucket_array_t   *infop,
667 	__DEBUG_ONLY mach_msg_type_number_t     *countp)
668 {
669 #if !MACH_VM_DEBUG
670 	return KERN_FAILURE;
671 #else
	/*
	 * CountInOut protocol: the caller supplies an in-line buffer
	 * (*infop) holding *countp entries.  If the VP table needs
	 * more, switch to a pageable kernel buffer, retrying until it
	 * fits, and return that buffer as a vm_map_copy_t instead.
	 */
672 	vm_offset_t addr = 0;
673 	vm_size_t size = 0;
674 	hash_info_bucket_t *info;
675 	unsigned int potential, actual;
676 	kern_return_t kr;
677 
678 	if (host == HOST_NULL) {
679 		return KERN_INVALID_HOST;
680 	}
681 
682 	/* start with in-line data */
683 
684 	info = *infop;
685 	potential = *countp;
686 
687 	for (;;) {
		/* actual > potential means the buffer was too small. */
688 		actual = vm_page_info(info, potential);
689 		if (actual <= potential) {
690 			break;
691 		}
692 
693 		/* allocate more memory */
694 
		/* Free a previous kernel buffer, never the in-line one. */
695 		if (info != *infop) {
696 			kmem_free(ipc_kernel_map, addr, size);
697 		}
698 
699 		size = vm_map_round_page(actual * sizeof *info,
700 		    VM_MAP_PAGE_MASK(ipc_kernel_map));
701 		kr = kmem_alloc(ipc_kernel_map, &addr, size,
702 		    KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
703 		if (kr != KERN_SUCCESS) {
704 			return KERN_RESOURCE_SHORTAGE;
705 		}
706 
707 		info = (hash_info_bucket_t *) addr;
708 		potential = (unsigned int) (size / sizeof(*info));
709 	}
710 
711 	if (info == *infop) {
712 		/* data fit in-line; nothing to deallocate */
713 
714 		*countp = actual;
715 	} else if (actual == 0) {
716 		kmem_free(ipc_kernel_map, addr, size);
717 
718 		*countp = 0;
719 	} else {
		/* Trim the unused page-rounded tail, then return the
		 * kernel buffer out-of-line as a vm_map_copy_t. */
720 		vm_map_copy_t copy;
721 		vm_size_t used, vmused;
722 
723 		used = (actual * sizeof(*info));
724 		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
725 
726 		if (vmused != size) {
727 			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
728 		}
729 
730 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
731 		    (vm_map_size_t)used, TRUE, &copy);
732 		assert(kr == KERN_SUCCESS);
733 
734 		*infop = (hash_info_bucket_t *) copy;
735 		*countp = actual;
736 	}
737 
738 	return KERN_SUCCESS;
739 #endif /* MACH_VM_DEBUG */
740 }
741