/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the kernel
 * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
 * for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
 * the MIG glue should never call into vm_allocate directly, because the calling
 * task and kernel_task are unlikely to use the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
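/*
 * Illustrative sketch (editor's addition, not part of this file): what a
 * typical user-space caller of the preferred "wide" API looks like. After
 * MIG dispatch through subsystem 4800, such a call lands in
 * mach_vm_allocate_external() / mach_vm_deallocate() below. It assumes only
 * the public <mach/mach_vm.h> user API; the #if 0 keeps it out of the build.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_alloc_and_free(void)
{
	mach_vm_address_t addr = 0;
	kern_return_t kr;

	/* Let the kernel choose the address; memory comes back zero-filled. */
	kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
	    VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	return mach_vm_deallocate(mach_task_self(), addr, vm_page_size);
}
#endif
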

#include <debug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>
#include <mach/memory_entry.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object_xnu.h>
#include <vm/vm_kern.h>
#include <vm/vm_page_internal.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_kern_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_sanitize_internal.h>
#if CONFIG_DEFERRED_RECLAIM
#include <vm/vm_reclaim_internal.h>
#endif /* CONFIG_DEFERRED_RECLAIM */
#include <vm/vm_init_xnu.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>
#include <sys/kdebug_triage.h>

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
	vm_map_t                map,
	mach_vm_offset_ut      *addr,
	mach_vm_size_ut         size,
	int                     flags)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE) {
		ktriage_record(thread_tid(current_thread()),
		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
		    KDBG_TRIAGE_RESERVED,
		    KDBG_TRIAGE_VM_ALLOCATE_KERNEL_BADFLAGS_ERROR),
		    KERN_INVALID_ARGUMENT /* arg */);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);

	return mach_vm_allocate_kernel(map, addr, size, vmk_flags);
}

/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
	vm_map_t        map,
	vm_offset_ut   *addr,
	vm_size_ut      size,
	int             flags)
{
	return mach_vm_allocate_external(map, addr, size, flags);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_deallocate_sanitize(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	mach_vm_offset_t       *start,
	mach_vm_offset_t       *end,
	mach_vm_size_t         *size)
{
	vm_sanitize_flags_t     flags = VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS;

	return vm_sanitize_addr_size(start_u, size_u,
	           VM_SANITIZE_CALLER_VM_DEALLOCATE, map, flags,
	           start, end, size);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u)
{
	mach_vm_offset_t start, end;
	mach_vm_size_t   size;
	kern_return_t    kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_vm_deallocate_sanitize(map,
	    start_u,
	    size_u,
	    &start,
	    &end,
	    &size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	return vm_map_remove_guard(map, start, end,
	           VM_MAP_REMOVE_NO_FLAGS,
	           KMEM_GUARD_NONE).kmr_return;
}

/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size)
{
	return mach_vm_deallocate(map, start, size);
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_inherit_ut           new_inheritance_u)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_inherit(map,
	           start_u,
	           vm_sanitize_compute_ut_end(start_u, size_u),
	           new_inheritance_u);
}

/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
	vm_map_t                map,
	vm_offset_ut            start_u,
	vm_size_ut              size_u,
	vm_inherit_ut           new_inheritance_u)
{
	return mach_vm_inherit(map, start_u, size_u, new_inheritance_u);
}

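/*
 * Illustrative sketch (editor's addition, plain user-space code): marking a
 * sensitive buffer VM_INHERIT_NONE so a fork()ed child never receives a
 * copy of it. This is the public-API path into mach_vm_inherit() above.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_hide_from_children(mach_vm_address_t addr, mach_vm_size_t size)
{
	/* After this, the range is absent from any subsequently forked child. */
	return mach_vm_inherit(mach_task_self(), addr, size, VM_INHERIT_NONE);
}
#endif
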
/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */

kern_return_t
mach_vm_protect(
	vm_map_t                map,
	mach_vm_address_ut      start_u,
	mach_vm_size_ut         size_u,
	boolean_t               set_maximum,
	vm_prot_ut              new_protection_u)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_protect(map,
	           start_u,
	           vm_sanitize_compute_ut_end(start_u, size_u),
	           set_maximum,
	           new_protection_u);
}

/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */

kern_return_t
vm_protect(
	vm_map_t                map,
	vm_offset_ut            start_u,
	vm_size_ut              size_u,
	boolean_t               set_maximum,
	vm_prot_ut              new_protection_u)
{
	return mach_vm_protect(map, start_u, size_u, set_maximum, new_protection_u);
}

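/*
 * Illustrative sketch (editor's addition, plain user-space code): dropping
 * write access once a region has been initialized. set_maximum=FALSE changes
 * only the current protection; passing TRUE would also lower the maximum
 * protection, making the change irreversible for this mapping.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_seal_read_only(mach_vm_address_t addr, mach_vm_size_t size)
{
	return mach_vm_protect(mach_task_self(), addr, size,
	    FALSE /* set_maximum */, VM_PROT_READ);
}
#endif
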
/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t                map,
	mach_vm_address_ut      addr_u,
	mach_vm_size_ut         size_u,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value) /* IN/OUT */
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_machine_attribute(map,
	           addr_u,
	           vm_sanitize_compute_ut_end(addr_u, size_u),
	           attribute,
	           value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t                map,
	vm_address_ut           addr_u,
	vm_size_ut              size_u,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value) /* IN/OUT */
{
	return mach_vm_machine_attribute(map, addr_u, size_u, attribute, value);
}

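/*
 * Illustrative sketch (editor's addition; assumes the platform implements
 * this attribute at all, which many do not): requesting a data-cache flush
 * over a range. MATTR_CACHE and MATTR_VAL_CACHE_FLUSH come from
 * <mach/vm_attributes.h>.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_attributes.h>

static kern_return_t
example_flush_cache(mach_vm_address_t addr, mach_vm_size_t size)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return mach_vm_machine_attribute(mach_task_self(), addr, size,
	    MATTR_CACHE, &value);
}
#endif
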
/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 *
 */
kern_return_t
mach_vm_read(
	vm_map_t                map,
	mach_vm_address_ut      addr,
	mach_vm_size_ut         size,
	pointer_ut             *data,
	mach_msg_type_number_t *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   ipc_address;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * mach_msg_type_number_t is only 32 bits wide;
	 * make sure we do not overflow it.
	 */
	if (!VM_SANITIZE_UNSAFE_FITS(size, mach_msg_type_number_t)) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map, addr, size, FALSE, &ipc_address);

	if (KERN_SUCCESS == error) {
		VM_SANITIZE_UT_SET(*data, (pointer_t) ipc_address);
		/* On success we know size was validated by vm_map_copyin. */
		*data_size =
		    (mach_msg_type_number_t)VM_SANITIZE_UNSAFE_UNWRAP(size);
	}
	return error;
}

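/*
 * Illustrative sketch (editor's addition; assumes user-space code holding a
 * send right to another task): reading a page of remote memory. The returned
 * buffer arrives as out-of-line memory mapped into the caller by the reply
 * message, so it must be deallocated when done.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_peek_remote(task_t remote, mach_vm_address_t remote_addr)
{
	vm_offset_t data = 0;
	mach_msg_type_number_t count = 0;
	kern_return_t kr;

	kr = mach_vm_read(remote, remote_addr, vm_page_size, &data, &count);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... inspect the copy at (void *)data, count bytes ... */
	return mach_vm_deallocate(mach_task_self(), data, count);
}
#endif
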
/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t                map,
	vm_address_ut           addr,
	vm_size_ut              size,
	pointer_ut             *data,
	mach_msg_type_number_t *data_size)
{
	return mach_vm_read(map, addr, size, data, data_size);
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t                        map,
	mach_vm_read_entry_t            data_list,
	natural_t                       count)
{
	mach_msg_type_number_t  i;
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
			    map_addr,
			    map_size,
			    FALSE,              /* src_destroy */
			    &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
					current_task()->map,
					&map_addr,
					copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
	vm_map_t                map,
	vm_read_entry_t data_list,
	natural_t               count)
{
	mach_msg_type_number_t  i;
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
			    map_addr,
			    map_size,
			    FALSE,              /* src_destroy */
			    &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
				    &map_addr,
				    copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
					    CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
	vm_map_t                map,
	mach_vm_address_ut      address,
	mach_vm_size_ut         size,
	mach_vm_address_ut      data,
	mach_vm_size_ut        *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map, address, size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		if (copy) {
			assert(VM_SANITIZE_UNSAFE_IS_EQUAL(size, copy->size));
		}

		error = vm_map_copy_overwrite(current_thread()->map,
		    data,
		    copy,
		    size,
		    FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return error;
}

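/*
 * Illustrative sketch (editor's addition; assumes user-space code holding a
 * send right to another task): copying remote memory straight into a
 * caller-supplied buffer, avoiding the out-of-line reply mapping that
 * mach_vm_read() uses.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_read_into_buffer(task_t remote, mach_vm_address_t remote_addr,
    void *buf, mach_vm_size_t len)
{
	mach_vm_size_t outsize = 0;

	return mach_vm_read_overwrite(remote, remote_addr, len,
	    (mach_vm_address_t)buf, &outsize);
}
#endif
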
/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least
 * the ranges are in that first portion of the respective address
 * spaces).
 */

kern_return_t
vm_read_overwrite(
	vm_map_t                map,
	vm_address_ut           address,
	vm_size_ut              size,
	vm_address_ut           data,
	vm_size_ut             *data_size)
{
	return mach_vm_read_overwrite(map, address, size, data, data_size);
}

/*
 * mach_vm_update_pointers_with_remote_tags -
 */

kern_return_t
mach_vm_update_pointers_with_remote_tags(
	__unused vm_map_t map,
	__unused mach_vm_offset_list_t in_pointer_list,
	__unused mach_msg_type_number_t in_pointer_listCnt,
	__unused mach_vm_offset_list_t out_pointer_list,
	__unused mach_msg_type_number_t *out_pointer_listCnt)
{
	if (!in_pointer_list || !out_pointer_list || in_pointer_listCnt >= 512) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!map || !map->pmap) {
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_FAILURE;
}

/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t                map,
	mach_vm_address_ut      address,
	pointer_ut              data_u,
	mach_msg_type_number_t  size)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * data is created by the kernel's MIG server from a userspace buffer,
	 * so it is safe to unwrap.
	 */
	vm_map_copy_t data = (vm_map_copy_t) VM_SANITIZE_UNSAFE_UNWRAP(data_u);

	return vm_map_copy_overwrite(map,
	           address,
	           data,
	           size,
	           FALSE /* interruptible XXX */);
}

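/*
 * Illustrative sketch (editor's addition; assumes user-space code holding a
 * send right to another task): overwriting remote memory with a local
 * buffer. The buffer travels as out-of-line data in the MIG message, which
 * is how the kernel ends up holding it as a vm_map_copy_t here.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_poke_remote(task_t remote, mach_vm_address_t remote_addr,
    void *buf, mach_msg_type_number_t len)
{
	return mach_vm_write(remote, remote_addr, (vm_offset_t)buf, len);
}
#endif
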
/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t                map,
	vm_address_ut           address,
	pointer_ut              data,
	mach_msg_type_number_t  size)
{
	return mach_vm_write(map, address, data, size);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t                map,
	mach_vm_address_ut      source_address,
	mach_vm_size_ut         size,
	mach_vm_address_ut      dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_copyin(map, source_address, size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		if (copy) {
			assert(VM_SANITIZE_UNSAFE_IS_EQUAL(size, copy->size));
		}

		kr = vm_map_copy_overwrite(map,
		    dest_address,
		    copy,
		    size,
		    FALSE);

		if (KERN_SUCCESS != kr) {
			vm_map_copy_discard(copy);
		}
	}
	return kr;
}

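/*
 * Illustrative sketch (editor's addition, plain user-space code):
 * duplicating a range within one task's address space; both source and
 * destination are interpreted in the target map, per the comment above.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_dup_range(task_t task, mach_vm_address_t src,
    mach_vm_address_t dst, mach_vm_size_t len)
{
	/* dst must already be allocated and writable in `task'. */
	return mach_vm_copy(task, src, len, dst);
}
#endif
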
kern_return_t
vm_copy(
	vm_map_t                map,
	vm_address_ut           source_address,
	vm_size_ut              size,
	vm_address_ut           dest_address)
{
	return mach_vm_copy(map, source_address, size, dest_address);
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 *
 */
kern_return_t
mach_vm_map_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         initial_size,
	mach_vm_offset_ut       mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
	/* range_id is set by mach_vm_map_kernel */
	return mach_vm_map_kernel(target_map, address, initial_size, mask,
	           vmk_flags, port, offset, copy,
	           cur_protection, max_protection,
	           inheritance);
}

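/*
 * Illustrative sketch (editor's addition, plain user-space code): mapping
 * anonymous memory with mach_vm_map() by passing MACH_PORT_NULL as the
 * object, the first of the object types listed above. A named entry port
 * from mach_make_memory_entry_64() could be passed instead to map shared
 * memory.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_map_anonymous(mach_vm_address_t *addr, mach_vm_size_t size)
{
	*addr = 0;
	return mach_vm_map(mach_task_self(), addr, size,
	    0 /* mask */, VM_FLAGS_ANYWHERE,
	    MACH_PORT_NULL, 0 /* offset */, FALSE /* copy */,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
#endif
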
/* legacy interface */
__attribute__((always_inline))
kern_return_t
vm_map_64_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

/* temporary, until world build */
__attribute__((always_inline))
kern_return_t
vm_map_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	vm_offset_ut            offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_remap_new_external_sanitize(
	vm_map_t                target_map,
	vm_prot_ut              cur_protection_u,
	vm_prot_ut              max_protection_u,
	vm_prot_t              *cur_protection,
	vm_prot_t              *max_protection)
{
	return vm_sanitize_cur_and_max_prots(cur_protection_u, max_protection_u,
	           VM_SANITIZE_CALLER_VM_MAP_REMAP, target_map,
	           cur_protection, max_protection);
}

/*
 * mach_vm_remap_new -
 * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
mach_vm_remap_new_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	mach_port_t             src_tport,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection_u,   /* IN/OUT */
	vm_prot_ut             *max_protection_u,   /* IN/OUT */
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t   vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vm_map_t                src_map;
	vm_prot_t               cur_protection, max_protection;
	kern_return_t           kr;

	if (target_map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags,
	    flags | VM_FLAGS_RETURN_DATA_ADDR);

	/*
	 * We don't need cur_protection here, but sanitizing it before
	 * enforcing W^X below matches historical error codes better.
	 */
	kr = mach_vm_remap_new_external_sanitize(target_map,
	    *cur_protection_u,
	    *max_protection_u,
	    &cur_protection,
	    &max_protection);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	if ((max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
		/*
		 * XXX FBDP TODO
		 * enforce target's "wx" policies
		 */
		return KERN_PROTECTION_FAILURE;
	}

	if (copy || max_protection == VM_PROT_READ || max_protection == VM_PROT_NONE) {
		src_map = convert_port_to_map_read(src_tport);
	} else {
		src_map = convert_port_to_map(src_tport);
	}

	/* range_id is set by vm_map_remap */
	kr = vm_map_remap(target_map,
	    address,
	    size,
	    mask,
	    vmk_flags,
	    src_map,
	    memory_address,
	    copy,
	    cur_protection_u,    /* IN/OUT */
	    max_protection_u,    /* IN/OUT */
	    inheritance);

	vm_map_deallocate(src_map);

	if (kr == KERN_SUCCESS) {
		ipc_port_release_send(src_tport);  /* consume on success */
	}
	return kr;
}

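/*
 * Illustrative sketch (editor's addition, plain user-space code): creating a
 * second mapping of an existing range in the same task with the classic
 * mach_vm_remap() API (the legacy-mode path below). copy=FALSE makes the two
 * ranges share pages, and the kernel reports the resulting protections.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_alias_range(mach_vm_address_t src, mach_vm_size_t len,
    mach_vm_address_t *alias)
{
	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE; /* OUT here */

	*alias = 0;
	return mach_vm_remap(mach_task_self(), alias, len, 0 /* mask */,
	    VM_FLAGS_ANYWHERE, mach_task_self(), src, FALSE /* copy */,
	    &cur, &max, VM_INHERIT_DEFAULT);
}
#endif
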
/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	vm_map_t                src_map,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,    /* OUT */
	vm_prot_ut             *max_protection,    /* OUT */
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);

	*cur_protection = vm_sanitize_wrap_prot(VM_PROT_NONE);
	*max_protection = vm_sanitize_wrap_prot(VM_PROT_NONE);
	vmk_flags.vmkf_remap_legacy_mode = true;

	/* range_id is set by vm_map_remap */
	return vm_map_remap(target_map,
	           address,
	           size,
	           mask,
	           vmk_flags,
	           src_map,
	           memory_address,
	           copy,
	           cur_protection,
	           max_protection,
	           inheritance);
}

/*
 * vm_remap_new -
 * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
vm_remap_new_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	mach_port_t             src_tport,
	vm_offset_ut            memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,       /* IN/OUT */
	vm_prot_ut             *max_protection,       /* IN/OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_new_external(target_map,
	           address,
	           size,
	           mask,
	           flags,
	           src_tport,
	           memory_address,
	           copy,
	           cur_protection, /* IN/OUT */
	           max_protection, /* IN/OUT */
	           inheritance);
}

/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	vm_map_t                src_map,
	vm_offset_ut            memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,    /* OUT */
	vm_prot_ut             *max_protection,    /* OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_external(target_map, address,
	           size, mask, flags, src_map, memory_address, copy,
	           cur_protection, max_protection, inheritance);
}

/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
	host_priv_t             host_priv,
	vm_map_t                map,
	mach_vm_address_ut      start,
	mach_vm_size_ut         size,
	vm_prot_ut              access)
{
	kern_return_t     rc;
	mach_vm_offset_ut end;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_HOST;
	}

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	end = vm_sanitize_compute_ut_end(start, size);
	if (VM_SANITIZE_UNSAFE_IS_ZERO(access)) {
		rc = vm_map_unwire_impl(map, start, end, true,
		    VM_SANITIZE_CALLER_VM_UNWIRE_USER);
	} else {
		rc = vm_map_wire_impl(map, start, end, access,
		    VM_KERN_MEMORY_MLOCK, true, NULL, VM_SANITIZE_CALLER_VM_WIRE_USER);
	}

	return rc;
}

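/*
 * Illustrative sketch (editor's addition; assumes user-space code that has
 * obtained the host-privilege port, e.g. as root): wiring a buffer so it
 * cannot fault, then unwiring it by passing VM_PROT_NONE as the access
 * argument, per the comment above.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_wire_then_unwire(host_priv_t host_priv, mach_vm_address_t addr,
    mach_vm_size_t size)
{
	kern_return_t kr;

	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
	    VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... fault-free critical section ... */
	return mach_vm_wire(host_priv, mach_task_self(), addr, size,
	    VM_PROT_NONE);
}
#endif
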
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t             host_priv,
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size,
	vm_prot_ut              access)
{
	return mach_vm_wire_external(host_priv, map, start, size, access);
}

/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
	vm_map_t                map,
	mach_vm_address_ut      address_u,
	mach_vm_size_ut         size_u,
	vm_sync_t               sync_flags)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_msync(map, address_u, size_u, sync_flags);
}

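/*
 * Illustrative sketch (editor's addition; assumes plain user-space code over
 * a file-backed mapping): pushing dirty pages back to the pager
 * synchronously. Combining VM_SYNC_SYNCHRONOUS with VM_SYNC_ASYNCHRONOUS
 * would be rejected, per the RETURNS section above.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_flush_to_pager(mach_vm_address_t addr, mach_vm_size_t size)
{
	return mach_vm_msync(mach_task_self(), addr, size,
	    VM_SYNC_SYNCHRONOUS);
}
#endif
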
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
vm_msync(
	vm_map_t        map,
	vm_address_ut   address_u,
	vm_size_ut      size_u,
	vm_sync_t       sync_flags)
{
	return mach_vm_msync(map, address_u, size_u, sync_flags);
}


int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	assert(!map->is_nested_map);
	if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
		*old_value = map->disable_vmentry_reuse;
	} else if (toggle == VM_TOGGLE_SET) {
		vm_map_entry_t map_to_entry;

		vm_map_lock(map);
		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR) {
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_behavior_set_sanitize(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_behavior_ut          new_behavior_u,
	mach_vm_offset_t       *start,
	mach_vm_offset_t       *end,
	mach_vm_size_t         *size,
	vm_behavior_t          *new_behavior)
{
	mach_vm_offset_t align_mask;
	kern_return_t    kr;

	kr = vm_sanitize_behavior(new_behavior_u, VM_SANITIZE_CALLER_VM_BEHAVIOR_SET, new_behavior);
	if (__improbable(kr != KERN_SUCCESS)) {
		return kr;
	}

	/* Choose alignment of addr/size based on the behavior being set. */
	switch (*new_behavior) {
	case VM_BEHAVIOR_REUSABLE:
	case VM_BEHAVIOR_REUSE:
	case VM_BEHAVIOR_CAN_REUSE:
	case VM_BEHAVIOR_ZERO:
		/*
		 * Align to the hardware page size, to allow
		 * malloc() to maximize the amount of re-usability,
		 * even on systems with larger software page size.
		 */
		align_mask = PAGE_MASK;
		break;
	default:
		align_mask = VM_MAP_PAGE_MASK(map);
		break;
	}

	vm_sanitize_flags_t     flags = VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS;

	kr = vm_sanitize_addr_size(start_u, size_u, VM_SANITIZE_CALLER_VM_BEHAVIOR_SET,
	    align_mask, map, flags, start, end, size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return kr;
	}

	return KERN_SUCCESS;
}

/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_behavior_ut          new_behavior_u)
{
	kern_return_t    kr;
	mach_vm_offset_t start, end;
	mach_vm_size_t   size;
	vm_behavior_t    new_behavior;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_vm_behavior_set_sanitize(map,
	    start_u, size_u, new_behavior_u,
	    &start, &end, &size, &new_behavior);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	return vm_map_behavior_set(map,
	           start,
	           end,
	           new_behavior);
}

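/*
 * Illustrative sketch (editor's addition, plain user-space code): an
 * allocator telling the VM that a freed chunk's contents are disposable,
 * then reclaiming it before reuse. This is the pattern the hardware-page
 * alignment note in mach_vm_behavior_set_sanitize() above caters to.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static void
example_mark_free_chunk(mach_vm_address_t addr, mach_vm_size_t size)
{
	/* Pages may now be discarded by the VM under memory pressure. */
	(void)mach_vm_behavior_set(mach_task_self(), addr, size,
	    VM_BEHAVIOR_REUSABLE);
}

static void
example_reuse_chunk(mach_vm_address_t addr, mach_vm_size_t size)
{
	/* Tell the VM the contents matter again before touching them. */
	(void)mach_vm_behavior_set(mach_task_self(), addr, size,
	    VM_BEHAVIOR_REUSE);
}
#endif
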
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size,
	vm_behavior_ut          new_behavior)
{
	return mach_vm_behavior_set(map,
	           start,
	           size,
	           new_behavior);
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
	vm_map_t                map,
	mach_vm_offset_ut      *address_u,      /* IN/OUT */
	mach_vm_size_ut        *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor) {
		flavor = VM_REGION_BASIC_INFO_64;
	}

	return vm_map_region(map, address_u, size_u, flavor, info, count,
	           object_name);
}

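/*
 * Illustrative sketch (editor's addition, plain user-space code): looking up
 * the region containing an address. On return, address/size describe the
 * region found at or above the address passed in.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_query_region(mach_vm_address_t *addr, mach_vm_size_t *size)
{
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t object_name = MACH_PORT_NULL;

	return mach_vm_region(mach_task_self(), addr, size,
	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count,
	    &object_name);
}
#endif
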
static inline kern_return_t
vm_region_get_kern_return(
	kern_return_t           kr,
	vm_offset_ut            addr_u,
	vm_size_ut              size_u)
{
	vm_offset_ut end_u = vm_sanitize_compute_ut_end(addr_u, size_u);

	if (KERN_SUCCESS == kr && VM_SANITIZE_UNSAFE_UNWRAP(end_u) > VM_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}

/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
	vm_map_t                map,
	vm_offset_ut           *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	kern_return_t kr;

	kr = mach_vm_region(map, address_u, size_u, flavor, info, count,
	    object_name);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
vm_region(
	vm_map_t                map,
	vm_address_ut          *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	kern_return_t kr;

	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_region(map, address_u, size_u, flavor, info, count,
	    object_name);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

/*
 *	mach_vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t                map,
	mach_vm_address_ut     *address_u,
	mach_vm_size_ut        *size_u,
	uint32_t               *depth,
	vm_region_recurse_info_t info,
	mach_msg_type_number_t *infoCnt)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_region_recurse_64(map, address_u, size_u, depth,
	           (vm_region_submap_info_64_t)info, infoCnt);
}

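/*
 * Illustrative sketch (editor's addition, plain user-space code): walking
 * every entry in a task's address space, descending into submaps via the
 * depth bookkeeping.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static void
example_walk_map(task_t task)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 0;
	natural_t depth = 0;
	vm_region_submap_info_data_64_t info;
	mach_msg_type_number_t count;

	for (;;) {
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		if (mach_vm_region_recurse(task, &addr, &size, &depth,
		    (vm_region_recurse_info_t)&info, &count) != KERN_SUCCESS) {
			break;
		}
		if (info.is_submap) {
			depth++;        /* descend instead of skipping */
			continue;
		}
		/* ... record [addr, addr + size) ... */
		addr += size;
	}
}
#endif
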
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t                map,
	vm_address_ut          *address_u,
	vm_size_ut             *size_u,
	uint32_t               *depth,
	vm_region_recurse_info_64_t info,
	mach_msg_type_number_t *infoCnt)
{
	kern_return_t kr;

	kr = mach_vm_region_recurse(map, address_u, size_u, depth,
	    (vm_region_recurse_info_t)info, infoCnt);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
vm_region_recurse(
	vm_map_t                map,
	vm_offset_ut           *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	natural_t              *depth,          /* IN/OUT */
	vm_region_recurse_info_t info32,        /* IN/OUT */
	mach_msg_type_number_t *infoCnt)        /* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	kern_return_t           kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, address_u, size_u,
	    depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
mach_vm_purgable_control(
	vm_map_t                map,
	mach_vm_offset_ut       address_u,
	vm_purgable_t           control,
	int                    *state)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (control) {
	case VM_PURGABLE_SET_STATE:
	case VM_PURGABLE_GET_STATE:
	case VM_PURGABLE_PURGE_ALL:
		break;
	case VM_PURGABLE_SET_STATE_FROM_KERNEL:
	default:
		/* not allowed from user-space */
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_purgable_control(map, address_u, control, state);
}

kern_return_t
mach_vm_purgable_control_external(
	mach_port_t             target_tport,
	mach_vm_offset_ut       address_u,
	vm_purgable_t           control,
	int                    *state)
{
	vm_map_t map;
	kern_return_t kr;

	if (control == VM_PURGABLE_GET_STATE) {
		map = convert_port_to_map_read(target_tport);
	} else {
		map = convert_port_to_map(target_tport);
	}

	kr = mach_vm_purgable_control(map, address_u, control, state);
	vm_map_deallocate(map);

	return kr;
}

1594 kern_return_t
vm_purgable_control_external(mach_port_t target_tport,vm_offset_ut address,vm_purgable_t control,int * state)1595 vm_purgable_control_external(
1596 	mach_port_t             target_tport,
1597 	vm_offset_ut            address,
1598 	vm_purgable_t           control,
1599 	int                     *state)
1600 {
1601 	return mach_vm_purgable_control_external(target_tport, address, control, state);
1602 }


kern_return_t
mach_vm_page_query(
	vm_map_t                map,
	mach_vm_offset_ut       offset_u,
	int                    *disposition,
	int                    *ref_count)
{
	kern_return_t                   kr;
	vm_page_info_basic_data_t       info;
	mach_msg_type_number_t          count;

	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	count = VM_PAGE_INFO_BASIC_COUNT;
	kr = vm_map_page_info(map, offset_u, VM_PAGE_INFO_BASIC,
	    (vm_page_info_t) &info, &count);
	if (kr == KERN_SUCCESS) {
		*disposition = info.disposition;
		*ref_count = info.ref_count;
	} else {
		*disposition = 0;
		*ref_count = 0;
	}

	return kr;
}
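
/*
 * Illustrative user-space sketch (not part of this file): testing whether
 * a single page is resident.  The VM_PAGE_QUERY_* disposition bits are
 * declared with the Mach VM interfaces; disposition is filled from the
 * basic page info shown above.
 *
 *	int disposition = 0, ref_count = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_page_query(mach_task_self(),
 *	    (mach_vm_offset_t)addr, &disposition, &ref_count);
 *	if (kr == KERN_SUCCESS &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_PRESENT)) {
 *		// the page backing `addr` is resident in memory
 *	}
 */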

kern_return_t
vm_map_page_query(
	vm_map_t                map,
	vm_offset_ut            offset,
	int                    *disposition,
	int                    *ref_count)
{
	return mach_vm_page_query(map, offset, disposition, ref_count);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_page_range_query_sanitize(
	mach_vm_offset_ut       address_u,
	mach_vm_size_ut         size_u,
	int                     effective_page_mask,
	mach_vm_address_ut      dispositions_addr_u,
	mach_vm_size_ut         dispositions_count_u,
	mach_vm_offset_t       *start,
	mach_vm_size_t         *size,
	mach_vm_address_t      *dispositions_addr,
	mach_vm_size_t         *disp_buf_req_size)
{
	mach_vm_offset_t  end;
	mach_vm_size_t    dispositions_count;
	mach_vm_address_t discard;
	/*
	 * There are no alignment requirements on
	 * dispositions_addr_u/dispositions_count_u; they are only used to
	 * derive inputs to copyout, so it is safe to unwrap them. We do
	 * want to check that the range starting at dispositions_addr_u and
	 * ending after dispositions_count_u integers is sound (i.e.,
	 * doesn't wrap around due to integer overflow).
	 */
	*dispositions_addr = VM_SANITIZE_UNSAFE_UNWRAP(dispositions_addr_u);
	dispositions_count = VM_SANITIZE_UNSAFE_UNWRAP(dispositions_count_u);
	if (
		os_mul_overflow(
			dispositions_count,
			sizeof(int),
			disp_buf_req_size) ||
		os_add_overflow(
			*dispositions_addr,
			*disp_buf_req_size,
			&discard)) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_sanitize_addr_size(address_u, size_u,
	           VM_SANITIZE_CALLER_VM_MAP_PAGE_RANGE_QUERY,
	           effective_page_mask,
	           VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, start,
	           &end, size);
}
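
/*
 * The guard above is the standard checked-arithmetic pattern: compute
 * count * sizeof(int) and addr + bytes with explicit overflow detection
 * rather than trusting user-supplied values.  A minimal stand-alone
 * sketch of the same pattern, assuming only the compiler builtins that
 * os_mul_overflow()/os_add_overflow() wrap:
 *
 *	uint64_t count, addr;     // untrusted inputs
 *	uint64_t bytes, end;
 *
 *	if (__builtin_mul_overflow(count, sizeof(int), &bytes) ||
 *	    __builtin_add_overflow(addr, bytes, &end)) {
 *		return KERN_INVALID_ARGUMENT;   // range wraps: reject
 *	}
 */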

kern_return_t
mach_vm_page_range_query(
	vm_map_t                map,
	mach_vm_offset_ut       address_u,
	mach_vm_size_ut         size_u,
	mach_vm_address_ut      dispositions_addr_u,
	mach_vm_size_ut        *dispositions_count_u)
{
	kern_return_t           kr;
	int                     num_pages = 0, i = 0;
	mach_vm_size_t          curr_sz = 0, copy_sz = 0;
	mach_vm_size_t          disp_buf_req_size = 0, disp_buf_total_size = 0;
	mach_msg_type_number_t  count = 0;
	mach_vm_address_t       dispositions_addr;

	void                    *info = NULL;
	void                    *local_disp = NULL;
	vm_map_size_t           info_size = 0, local_disp_size = 0;
	mach_vm_offset_t        start = 0;
	vm_map_size_t           size;
	int                     effective_page_shift, effective_page_size, effective_page_mask;

	if (map == VM_MAP_NULL || dispositions_count_u == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	effective_page_shift = vm_self_region_page_shift_safely(map);
	if (effective_page_shift == -1) {
		return KERN_INVALID_ARGUMENT;
	}
	effective_page_size = (1 << effective_page_shift);
	effective_page_mask = effective_page_size - 1;

	kr = mach_vm_page_range_query_sanitize(address_u,
	    size_u,
	    effective_page_mask,
	    dispositions_addr_u,
	    *dispositions_count_u,
	    &start,
	    &size,
	    &dispositions_addr,
	    &disp_buf_req_size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	if (disp_buf_req_size == 0 || size == 0) {
		return KERN_SUCCESS;
	}

	/*
	 * For large requests, we process the range one
	 * MAX_PAGE_RANGE_QUERY-sized chunk at a time.
	 */

	curr_sz = MIN(size, MAX_PAGE_RANGE_QUERY);
	num_pages = (int) (curr_sz >> effective_page_shift);

	info_size = num_pages * sizeof(vm_page_info_basic_data_t);
	info = kalloc_data(info_size, Z_WAITOK);

	local_disp_size = num_pages * sizeof(int);
	local_disp = kalloc_data(local_disp_size, Z_WAITOK);

	if (info == NULL || local_disp == NULL) {
		kr = KERN_RESOURCE_SHORTAGE;
		goto out;
	}

	while (size) {
		count = VM_PAGE_INFO_BASIC_COUNT;
		kr = vm_map_page_range_info_internal(
			map,
			start,
			vm_map_round_page(start + curr_sz, effective_page_mask),
			effective_page_shift,
			VM_PAGE_INFO_BASIC,
			(vm_page_info_t) info,
			&count);

		assert(kr == KERN_SUCCESS);

		for (i = 0; i < num_pages; i++) {
			((int *)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
		}

		copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
		kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);

		start += curr_sz;
		disp_buf_req_size -= copy_sz;
		disp_buf_total_size += copy_sz;

		if (kr != 0) {
			break;
		}

		if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
			/*
			 * We might have inspected the full range, or even
			 * more than it, especially if the user passed in a
			 * non-page-aligned start/size and/or if we descended
			 * into a submap. We are done here.
			 */

			size = 0;
		} else {
			dispositions_addr += copy_sz;

			size -= curr_sz;

			curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY);
			num_pages = (int)(curr_sz >> effective_page_shift);
		}
	}

	VM_SANITIZE_UT_SET(
		*dispositions_count_u,
		disp_buf_total_size / sizeof(int));

out:
	kfree_data(local_disp, local_disp_size);
	kfree_data(info, info_size);
	return kr;
}
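
/*
 * Illustrative user-space sketch (not part of this file): fetching one
 * disposition int per page for a mapped range via the MIG-generated
 * mach_vm_page_range_query() in <mach/mach_vm.h>.  On return,
 * disp_count holds the number of ints actually written.
 *
 *	mach_vm_size_t npages = size / vm_page_size;
 *	int *disp = calloc(npages, sizeof(int));    // <stdlib.h>
 *	mach_vm_size_t disp_count = npages;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_page_range_query(mach_task_self(), addr, size,
 *	    (mach_vm_address_t)(uintptr_t)disp, &disp_count);
 *	// on success, disp[0..disp_count-1] hold VM_PAGE_QUERY_* bits,
 *	// one int per page, exactly as copied out above
 */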

kern_return_t
mach_vm_page_info(
	vm_map_t                map,
	mach_vm_address_ut      address,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count)
{
	kern_return_t   kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_page_info(map, address, flavor, info, count);
	return kr;
}
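
/*
 * Illustrative user-space sketch (not part of this file): the richer
 * flavor behind mach_vm_page_query().  VM_PAGE_INFO_BASIC additionally
 * reports the backing object's id, offset, and shadow depth (types come
 * from the Mach VM headers).
 *
 *	vm_page_info_basic_data_t info;
 *	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_page_info(mach_task_self(), addr,
 *	    VM_PAGE_INFO_BASIC, (vm_page_info_t)&info, &count);
 *	if (kr == KERN_SUCCESS) {
 *		// info.disposition, info.ref_count, info.object_id,
 *		// info.offset, and info.depth are now valid
 *	}
 */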

/*
 *	task_wire
 *
 *	Historically, this set or cleared the map's wiring_required flag,
 *	which caused all future virtual memory allocations to be user-wired
 *	(with unwiring done through the vm_wire interface).  The flag is no
 *	longer supported; the call now always returns KERN_NOT_SUPPORTED.
 */
kern_return_t
task_wire(
	vm_map_t        map,
	boolean_t       must_wire __unused)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_NOT_SUPPORTED;
}

kern_return_t
vm_map_exec_lockdown(
	vm_map_t        map)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_lock(map);
	map->map_disallow_new_exec = TRUE;
	vm_map_unlock(map);

	return KERN_SUCCESS;
}

#if XNU_PLATFORM_MacOSX
/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
	vm_map_t                target_map,
	vm_size_t               size,
	ipc_port_t              *object_handle)
{
	vm_named_entry_t        user_entry;
	vm_map_t                new_map;

	user_entry = mach_memory_entry_allocate(object_handle);

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create_options(PMAP_NULL, VM_MAP_MIN_ADDRESS,
	    vm_map_round_page(size, VM_MAP_PAGE_MASK(target_map)),
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;

	return KERN_SUCCESS;
}
#endif /* XNU_PLATFORM_MacOSX */

extern boolean_t proc_is_simulated(struct proc *p);

kern_return_t
mach_vm_deferred_reclamation_buffer_allocate(
	task_t           task,
	mach_vm_address_ut *address,
	uint32_t initial_capacity,
	uint32_t max_capacity)
{
#if CONFIG_DEFERRED_RECLAIM
	if (task != current_task()) {
		/* Remote buffer operations are not supported */
		return KERN_INVALID_TASK;
	}
	struct proc *p = task_get_proc_raw(task);
	if (proc_is_simulated(p)) {
		return KERN_NOT_SUPPORTED;
	}
	return vm_deferred_reclamation_buffer_allocate_internal(task, address, initial_capacity, max_capacity);
#else
	(void) task;
	(void) address;
	(void) initial_capacity;
	(void) max_capacity;
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_DEFERRED_RECLAIM */
}

kern_return_t
mach_vm_deferred_reclamation_buffer_flush(
	task_t task,
	uint32_t num_entries_to_reclaim)
{
#if CONFIG_DEFERRED_RECLAIM
	if (task != current_task()) {
		/* Remote buffer operations are not supported */
		return KERN_INVALID_TASK;
	}
	return vm_deferred_reclamation_buffer_flush_internal(task, num_entries_to_reclaim);
#else
	(void) task;
	(void) num_entries_to_reclaim;
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_DEFERRED_RECLAIM */
}

kern_return_t
mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes(
	task_t task,
	mach_vm_size_ut reclaimable_bytes_u)
{
#if CONFIG_DEFERRED_RECLAIM
	/*
	 * This unwrapping is safe because reclaimable_bytes is not
	 * interpreted as the size of a range of addresses.
	 */
	mach_vm_size_t reclaimable_bytes =
	    VM_SANITIZE_UNSAFE_UNWRAP(reclaimable_bytes_u);
	if (task != current_task()) {
		/* Remote buffer operations are not supported */
		return KERN_INVALID_TASK;
	}
	return vm_deferred_reclamation_buffer_update_reclaimable_bytes_internal(task, reclaimable_bytes);
#else
	(void) task;
	(void) reclaimable_bytes_u;
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_DEFERRED_RECLAIM */
}

kern_return_t
mach_vm_deferred_reclamation_buffer_resize(task_t task,
    uint32_t capacity)
{
#if CONFIG_DEFERRED_RECLAIM
	if (task != current_task()) {
		/* Remote buffer operations are not supported */
		return KERN_INVALID_TASK;
	}
	return vm_deferred_reclamation_buffer_resize_internal(task, capacity);
#else
	(void) task;
	(void) capacity;
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_DEFERRED_RECLAIM */
}

#if CONFIG_MAP_RANGES

extern void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

static int
vm_map_user_range_cmp(const void *e1, const void *e2)
{
	const struct vm_map_user_range *r1 = e1;
	const struct vm_map_user_range *r2 = e2;

	if (r1->vmur_min_address != r2->vmur_min_address) {
		return r1->vmur_min_address < r2->vmur_min_address ? -1 : 1;
	}

	return 0;
}

static int
mach_vm_range_recipe_v1_cmp(const void *e1, const void *e2)
{
	const mach_vm_range_recipe_v1_t *r1 = e1;
	const mach_vm_range_recipe_v1_t *r2 = e2;

	if (r1->range.min_address != r2->range.min_address) {
		return r1->range.min_address < r2->range.min_address ? -1 : 1;
	}

	return 0;
}

static inline __result_use_check kern_return_t
mach_vm_range_create_v1_sanitize(
	vm_map_t                map,
	mach_vm_range_recipe_v1_ut *recipe_u,
	uint32_t count,
	mach_vm_range_recipe_v1_t **recipe_p)
{
	kern_return_t kr;

	for (size_t i = 0; i < count; i++) {
		vm_map_offset_t start, end;
		vm_map_size_t size;
		mach_vm_range_ut *range_u = &recipe_u[i].range_u;
		kr = vm_sanitize_addr_end(
			range_u->min_address_u,
			range_u->max_address_u,
			VM_SANITIZE_CALLER_MACH_VM_RANGE_CREATE,
			map,
			VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS
			| VM_SANITIZE_FLAGS_CHECK_ALIGNED_START
			| VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE,
			&start, &end, &size); /* outputs unused: we only validate */
		if (__improbable(kr != KERN_SUCCESS)) {
			return kr;
		}
	}
	/*
	 * Sanitization only checked properties of recipe_u.
	 * We can now see it through the lens of the safe type.
	 * The cast is undefined behavior, but of the kind VM sanitization
	 * relies on anyway, so we don't expect this to cause issues.
	 */
	*recipe_p = (mach_vm_range_recipe_v1_t *)recipe_u;

	return KERN_SUCCESS;
}

/*!
 * @function mach_vm_range_create_v1()
 *
 * @brief
 * Handle the backend for mach_vm_range_create() for the
 * MACH_VM_RANGE_FLAVOR_V1 flavor.
 *
 * @description
 * This call allows the caller to create "ranges" in the map of a task
 * that have special semantics/policies around the placement of
 * new allocations (in the vm_map_locate_space() sense).
 *
 * @returns
 * - KERN_SUCCESS on success
 * - KERN_INVALID_ARGUMENT for incorrect arguments
 * - KERN_NO_SPACE if the maximum number of ranges would be exceeded
 * - KERN_MEMORY_PRESENT if any of the requested ranges
 *   overlaps with existing ranges or allocations in the map.
 */
static kern_return_t
mach_vm_range_create_v1(
	vm_map_t                   map,
	mach_vm_range_recipe_v1_ut *recipe_u,
	uint32_t                   new_count)
{
	mach_vm_range_recipe_v1_t *recipe;
	vm_map_user_range_t table;
	kern_return_t kr = KERN_SUCCESS;
	uint16_t count;

	struct mach_vm_range void1 = {
		.min_address = map->default_range.max_address,
		.max_address = map->data_range.min_address,
	};
	struct mach_vm_range void2 = {
		.min_address = map->data_range.max_address,
#if XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT
		.max_address = MACH_VM_JUMBO_ADDRESS,
#else /* !XNU_TARGET_OS_IOS || !EXTENDED_USER_VA_SUPPORT */
		.max_address = vm_map_max(map),
#endif /* XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT */
	};

	kr = mach_vm_range_create_v1_sanitize(map, recipe_u, new_count, &recipe);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	qsort(recipe, new_count, sizeof(mach_vm_range_recipe_v1_t),
	    mach_vm_range_recipe_v1_cmp);

	/*
	 * Step 1: Validate that the recipes have no intersections.
	 */

	for (size_t i = 0; i < new_count; i++) {
		mach_vm_range_t r = &recipe[i].range;
		mach_vm_size_t s;

		if (recipe[i].flags) {
			return KERN_INVALID_ARGUMENT;
		}

		static_assert((int)UMEM_RANGE_ID_FIXED == MACH_VM_RANGE_FIXED);
		switch (recipe[i].range_tag) {
		case MACH_VM_RANGE_FIXED:
			break;
		default:
			return KERN_INVALID_ARGUMENT;
		}

		s = mach_vm_range_size(r);
		if (!mach_vm_range_contains(&void1, r->min_address, s) &&
		    !mach_vm_range_contains(&void2, r->min_address, s)) {
			return KERN_INVALID_ARGUMENT;
		}

		if (i > 0 && recipe[i - 1].range.max_address >
		    recipe[i].range.min_address) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	vm_map_lock(map);

	table = map->extra_ranges;
	count = map->extra_ranges_count;

	if (count + new_count > VM_MAP_EXTRA_RANGES_MAX) {
		kr = KERN_NO_SPACE;
		goto out_unlock;
	}

	/*
	 * Step 2: Check that there is no intersection with existing ranges.
	 */

	for (size_t i = 0, j = 0; i < new_count && j < count;) {
		mach_vm_range_t     r1 = &recipe[i].range;
		vm_map_user_range_t r2 = &table[j];

		if (r1->max_address <= r2->vmur_min_address) {
			i++;
		} else if (r2->vmur_max_address <= r1->min_address) {
			j++;
		} else {
			kr = KERN_MEMORY_PRESENT;
			goto out_unlock;
		}
	}

	/*
	 * Step 3: Commit the new ranges.
	 */

	static_assert(VM_MAP_EXTRA_RANGES_MAX * sizeof(struct vm_map_user_range) <=
	    KALLOC_SAFE_ALLOC_SIZE);

	table = krealloc_data(table,
	    count * sizeof(struct vm_map_user_range),
	    (count + new_count) * sizeof(struct vm_map_user_range),
	    Z_ZERO | Z_WAITOK | Z_NOFAIL);

	for (size_t i = 0; i < new_count; i++) {
		static_assert(MACH_VM_MAX_ADDRESS < (1ull << 56));

		table[count + i] = (struct vm_map_user_range){
			.vmur_min_address = recipe[i].range.min_address,
			.vmur_max_address = recipe[i].range.max_address,
			.vmur_range_id    = (vm_map_range_id_t)recipe[i].range_tag,
		};
	}

	qsort(table, count + new_count,
	    sizeof(struct vm_map_user_range), vm_map_user_range_cmp);

	map->extra_ranges_count += new_count;
	map->extra_ranges = table;

out_unlock:
	vm_map_unlock(map);

	if (kr == KERN_SUCCESS) {
		for (size_t i = 0; i < new_count; i++) {
			vm_map_kernel_flags_t vmk_flags = {
				.vmf_fixed = true,
				.vmf_overwrite = true,
				.vmkf_overwrite_immutable = true,
				.vm_tag = recipe[i].vm_tag,
			};
			__assert_only kern_return_t kr2;

			kr2 = vm_map_enter(map, &recipe[i].range.min_address,
			    mach_vm_range_size(&recipe[i].range),
			    0, vmk_flags, VM_OBJECT_NULL, 0, FALSE,
			    VM_PROT_NONE, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
			assert(kr2 == KERN_SUCCESS);
		}
	}
	return kr;
}

kern_return_t
mach_vm_range_create(
	vm_map_t                map,
	mach_vm_range_flavor_t  flavor,
	mach_vm_range_recipes_raw_t recipe,
	natural_t               size)
{
	if (map != current_map()) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!map->uses_user_ranges) {
		return KERN_NOT_SUPPORTED;
	}

	if (size == 0) {
		return KERN_SUCCESS;
	}

	if (flavor == MACH_VM_RANGE_FLAVOR_V1) {
		mach_vm_range_recipe_v1_ut *array;

		if (size % sizeof(mach_vm_range_recipe_v1_ut)) {
			return KERN_INVALID_ARGUMENT;
		}

		size /= sizeof(mach_vm_range_recipe_v1_ut);
		if (size > VM_MAP_EXTRA_RANGES_MAX) {
			return KERN_NO_SPACE;
		}

		array = (mach_vm_range_recipe_v1_ut *)recipe;
		return mach_vm_range_create_v1(map, array, size);
	}

	return KERN_INVALID_ARGUMENT;
}
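
/*
 * Illustrative user-space sketch (not part of this file), assuming the
 * v1 recipe layout visible above (flags, range_tag, vm_tag, range) and
 * the MIG-generated mach_vm_range_create(); the header exposing
 * mach_vm_range_recipe_v1_t is an SPI detail.  Flags must be zero and
 * the range page-aligned, inside a gap between the map's built-in ranges.
 *
 *	mach_vm_range_recipe_v1_t recipe = {
 *		.range_tag = MACH_VM_RANGE_FIXED,
 *		.range = {
 *			.min_address = ...,  // page-aligned start
 *			.max_address = ...,  // page-aligned end
 *		},
 *	};
 *	kern_return_t kr;
 *
 *	kr = mach_vm_range_create(mach_task_self(),
 *	    MACH_VM_RANGE_FLAVOR_V1,
 *	    (mach_vm_range_recipes_raw_t)&recipe, sizeof(recipe));
 *	// KERN_NOT_SUPPORTED unless the task opted into user ranges
 */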

#else /* !CONFIG_MAP_RANGES */

kern_return_t
mach_vm_range_create(
	vm_map_t                map,
	mach_vm_range_flavor_t  flavor,
	mach_vm_range_recipes_raw_t recipe,
	natural_t               size)
{
#pragma unused(map, flavor, recipe, size)
	return KERN_NOT_SUPPORTED;
}

#endif /* !CONFIG_MAP_RANGES */

/*
 * These symbols are looked up at runtime by VMware and VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

extern typeof(mach_vm_remap_external) mach_vm_remap;
extern typeof(mach_vm_map_external) mach_vm_map;
extern typeof(vm_map_external) vm_map;

kern_return_t
mach_vm_map(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         initial_size,
	mach_vm_offset_ut       mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
	           offset, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	vm_map_t                src_map,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,   /* OUT */
	vm_prot_ut             *max_protection,   /* OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
	           copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	vm_offset_ut            offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

#endif /* __x86_64__ */