xref: /xnu-12377.61.12/osfmk/vm/vm_user.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can be either 32- or 64-bit, or
 * the kernel task can be 32- or 64-bit. mach_vm_allocate makes sense
 * everywhere, and is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters; they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly,
 * because the calling task and kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
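
/*
 * Example (user space; an illustrative sketch, not part of this file):
 * the preferred "wide" interface from subsystem 4800 as exposed through
 * <mach/mach_vm.h>. mach_vm_address_t and mach_vm_size_t are the largest
 * types for the platform, so the same code serves 32-bit and 64-bit tasks:
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t    size = 16 * 1024;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		((char *)addr)[0] = 1;	// zero-fill pages fault in on touch
 *		kr = mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */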

#include <debug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>
#include <mach/memory_entry.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object_xnu.h>
#include <vm/vm_kern.h>
#include <vm/vm_page_internal.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_kern_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_sanitize_internal.h>
#if CONFIG_DEFERRED_RECLAIM
#include <vm/vm_reclaim_internal.h>
#endif /* CONFIG_DEFERRED_RECLAIM */
#include <vm/vm_init_xnu.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>
#include <sys/kdebug_triage.h>

#include <sys/code_signing.h> /* for is_address_space_debugged */

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
	vm_map_t                map,
	mach_vm_offset_ut      *addr,
	mach_vm_size_ut         size,
	int                     flags)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE) {
		ktriage_record(thread_tid(current_thread()),
		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
		    KDBG_TRIAGE_RESERVED,
		    KDBG_TRIAGE_VM_ALLOCATE_KERNEL_BADFLAGS_ERROR),
		    KERN_INVALID_ARGUMENT /* arg */);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);

	return mach_vm_allocate_kernel(map, addr, size, vmk_flags);
}

/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
	vm_map_t        map,
	vm_offset_ut   *addr,
	vm_size_ut      size,
	int             flags)
{
	return mach_vm_allocate_external(map, addr, size, flags);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_deallocate_sanitize(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	mach_vm_offset_t       *start,
	mach_vm_offset_t       *end,
	mach_vm_size_t         *size)
{
	vm_sanitize_flags_t     flags = VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS;

#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	flags |= VM_SANITIZE_FLAGS_DENY_NON_CANONICAL_ADDR;
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

	return vm_sanitize_addr_size(start_u, size_u,
	           VM_SANITIZE_CALLER_VM_DEALLOCATE, map, flags,
	           start, end, size);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u)
{
	mach_vm_offset_t start, end;
	mach_vm_size_t   size;
	kern_return_t    kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_vm_deallocate_sanitize(map,
	    start_u,
	    size_u,
	    &start,
	    &end,
	    &size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	return vm_map_remove_guard(map, start, end,
	           VM_MAP_REMOVE_NO_FLAGS,
	           KMEM_GUARD_NONE).kmr_return;
}

/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size)
{
	return mach_vm_deallocate(map, start, size);
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_inherit_ut           new_inheritance_u)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_inherit(map,
	           start_u,
	           vm_sanitize_compute_ut_end(start_u, size_u),
	           new_inheritance_u);
}

/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_inherit(
	vm_map_t                map,
	vm_offset_ut            start_u,
	vm_size_ut              size_u,
	vm_inherit_ut           new_inheritance_u)
{
	return mach_vm_inherit(map, start_u, size_u, new_inheritance_u);
}

/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */

kern_return_t
mach_vm_protect(
	vm_map_t                map,
	mach_vm_address_ut      start_u,
	mach_vm_size_ut         size_u,
	boolean_t               set_maximum,
	vm_prot_ut              new_protection_u)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_protect(map,
	           start_u,
	           vm_sanitize_compute_ut_end(start_u, size_u),
	           set_maximum,
	           new_protection_u);
}

/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */

kern_return_t
vm_protect(
	vm_map_t                map,
	vm_offset_ut            start_u,
	vm_size_ut              size_u,
	boolean_t               set_maximum,
	vm_prot_ut              new_protection_u)
{
	return mach_vm_protect(map, start_u, size_u, set_maximum, new_protection_u);
}

/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t                map,
	mach_vm_address_ut      addr_u,
	mach_vm_size_ut         size_u,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value) /* IN/OUT */
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_machine_attribute(map,
	           addr_u,
	           vm_sanitize_compute_ut_end(addr_u, size_u),
	           attribute,
	           value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t                map,
	vm_address_ut           addr_u,
	vm_size_ut              size_u,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value) /* IN/OUT */
{
	return mach_vm_machine_attribute(map, addr_u, size_u, attribute, value);
}

/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 *
 */
kern_return_t
mach_vm_read(
	vm_map_t                map,
	mach_vm_address_ut      addr,
	mach_vm_size_ut         size,
	pointer_ut             *data,
	mach_msg_type_number_t *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   ipc_address;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * mach_msg_type_number_t is a 32-bit unsigned int,
	 * make sure we do not overflow it.
	 */
	if (!VM_SANITIZE_UNSAFE_FITS(size, mach_msg_type_number_t)) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map, addr, size, FALSE, &ipc_address);

	if (KERN_SUCCESS == error) {
		VM_SANITIZE_UT_SET(*data, (pointer_t) ipc_address);
		/* On success we know size was validated by vm_map_copyin. */
		*data_size =
		    (mach_msg_type_number_t)VM_SANITIZE_UNSAFE_UNWRAP(size);
	}
	return error;
}
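
/*
 * Example (user space; an illustrative sketch): the MIG-generated
 * mach_vm_read() returns the copied range as new out-of-line memory in
 * the caller's address space (the vm_map_copy_t above is materialized by
 * the IPC reply), so the caller must deallocate it when done. "task_port",
 * "remote_addr" and "remote_size" are placeholders:
 *
 *	vm_offset_t data = 0;
 *	mach_msg_type_number_t data_cnt = 0;
 *
 *	if (mach_vm_read(task_port, remote_addr, remote_size,
 *	    &data, &data_cnt) == KERN_SUCCESS) {
 *		// ... inspect data_cnt bytes at "data" ...
 *		(void)mach_vm_deallocate(mach_task_self(), data, data_cnt);
 *	}
 */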

/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t                map,
	vm_address_ut           addr,
	vm_size_ut              size,
	pointer_ut             *data,
	mach_msg_type_number_t *data_size)
{
	return mach_vm_read(map, addr, size, data, data_size);
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t                        map,
	mach_vm_read_entry_t            data_list,
	natural_t                       count)
{
	mach_msg_type_number_t  i;
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
			    map_addr,
			    map_size,
			    FALSE,              /* src_destroy */
			    &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
					current_task()->map,
					&map_addr,
					copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
	vm_map_t                map,
	vm_read_entry_t data_list,
	natural_t               count)
{
	mach_msg_type_number_t  i;
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
			    map_addr,
			    map_size,
			    FALSE,              /* src_destroy */
			    &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
				    &map_addr,
				    copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
					    CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore, so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
	vm_map_t                map,
	mach_vm_address_ut      address,
	mach_vm_size_ut         size,
	mach_vm_address_ut      data,
	mach_vm_size_ut        *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map, address, size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		if (copy) {
			assert(VM_SANITIZE_UNSAFE_IS_EQUAL(size, copy->size));
		}

		error = vm_map_copy_overwrite(current_thread()->map,
		    data,
		    copy,
		    size,
#if HAS_MTE
		    FALSE,
#endif
		    FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return error;
}
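
/*
 * Example (user space; an illustrative sketch): unlike mach_vm_read(),
 * mach_vm_read_overwrite() copies into a buffer the caller already owns,
 * avoiding the out-of-line mapping and the extra deallocation. "task_port"
 * and "remote_addr" are placeholders:
 *
 *	char buf[512];
 *	mach_vm_size_t out_size = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_read_overwrite(task_port, remote_addr, sizeof(buf),
 *	    (mach_vm_address_t)buf, &out_size);
 */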

/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least
 * the ranges are in that first portion of the respective address
 * spaces).
 */

kern_return_t
vm_read_overwrite(
	vm_map_t                map,
	vm_address_ut           address,
	vm_size_ut              size,
	vm_address_ut           data,
	vm_size_ut             *data_size)
{
	return mach_vm_read_overwrite(map, address, size, data, data_size);
}

/*
 * mach_vm_update_pointers_with_remote_tags -
 */
#if HAS_MTE
/*
 * Iterate a pointer list and rewrite the
 * pointers to contain the correct MTE tags.
 */
#endif /* HAS_MTE */

kern_return_t
mach_vm_update_pointers_with_remote_tags(
	__unused vm_map_t map,
	__unused mach_vm_offset_list_t in_pointer_list,
	__unused mach_msg_type_number_t in_pointer_listCnt,
	__unused mach_vm_offset_list_t out_pointer_list,
	__unused mach_msg_type_number_t *out_pointer_listCnt)
{
	if (!in_pointer_list
	    || !out_pointer_list
	    || in_pointer_listCnt > VM_OFFSET_LIST_MAX
	    /* The length of the output pointer list must match the input pointer list */
	    || !out_pointer_listCnt
	    || *out_pointer_listCnt != in_pointer_listCnt
	    ) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!map || !map->pmap) {
		return KERN_INVALID_ARGUMENT;
	}

#if HAS_MTE
	/* This API is intended for debuggers, so ensure the target is debugged */
	vm_map_lock_read(map);
	task_t map_task = map->owning_task;
	bool is_debugged = map_task && is_address_space_debugged(get_bsdtask_info(map_task));
	vm_map_unlock_read(map);
	if (!is_debugged) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_switch_context_t ctx = vm_map_switch_to(map);

	for (mach_msg_type_number_t i = 0; i < in_pointer_listCnt; i++) {
		mach_vm_offset_t unsigned_address = (mach_vm_offset_t)in_pointer_list[i];
		vm_map_address_t signed_address = 0;
		/* Note that inputs pointing to non-MTE objects safely return canonical tags */
		int ret = copyin_mte_load_tag(unsigned_address, &signed_address);
		if (ret != 0) {
			/* Perhaps an invalid address; just leave the output slot as zero. */
			continue;
		}
		out_pointer_list[i] = signed_address;
	}

	vm_map_switch_back(ctx);

	return KERN_SUCCESS;
#endif /* HAS_MTE */
	return KERN_FAILURE;
}

/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t                map,
	mach_vm_address_ut      address,
	pointer_ut              data_u,
	mach_msg_type_number_t  size)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * data is created by the kernel's MIG server from a userspace buffer,
	 * so it is safe to unwrap.
	 */
	vm_map_copy_t data = (vm_map_copy_t) VM_SANITIZE_UNSAFE_UNWRAP(data_u);

	return vm_map_copy_overwrite(map,
	           address,
	           data,
	           size,
#if HAS_MTE
	           TRUE, /* sec_override */
#endif
	           FALSE /* interruptible XXX */);
}

/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t                map,
	vm_address_ut           address,
	pointer_ut              data,
	mach_msg_type_number_t  size)
{
	return mach_vm_write(map, address, data, size);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t                map,
	mach_vm_address_ut      source_address,
	mach_vm_size_ut         size,
	mach_vm_address_ut      dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_copyin(map, source_address, size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		if (copy) {
			assert(VM_SANITIZE_UNSAFE_IS_EQUAL(size, copy->size));
		}

		kr = vm_map_copy_overwrite(map,
		    dest_address,
		    copy,
		    size,
#if HAS_MTE
		    FALSE, /* sec_override */
#endif
		    FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr) {
			vm_map_copy_discard(copy);
		}
	}
	return kr;
}

kern_return_t
vm_copy(
	vm_map_t                map,
	vm_address_ut           source_address,
	vm_size_ut              size,
	vm_address_ut           dest_address)
{
	return mach_vm_copy(map, source_address, size, dest_address);
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 *
 */
kern_return_t
mach_vm_map_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         initial_size,
	mach_vm_offset_ut       mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
	/* range_id is set by mach_vm_map_kernel */
	return mach_vm_map_kernel(target_map, address, initial_size, mask,
	           vmk_flags, port, offset, copy,
	           cur_protection, max_protection,
	           inheritance);
}

/* legacy interface */
__attribute__((always_inline))
kern_return_t
vm_map_64_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

/* temporary, until world build */
__attribute__((always_inline))
kern_return_t
vm_map_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	vm_offset_ut            offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_remap_new_external_sanitize(
	vm_map_t                target_map,
	vm_prot_ut              cur_protection_u,
	vm_prot_ut              max_protection_u,
	vm_prot_t              *cur_protection,
	vm_prot_t              *max_protection)
{
	return vm_sanitize_cur_and_max_prots(cur_protection_u, max_protection_u,
	           VM_SANITIZE_CALLER_VM_MAP_REMAP, target_map,
	           cur_protection, max_protection);
}

/*
 * mach_vm_remap_new -
 * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
mach_vm_remap_new_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	mach_port_t             src_tport,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection_u,   /* IN/OUT */
	vm_prot_ut             *max_protection_u,   /* IN/OUT */
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t   vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vm_map_t                src_map;
	vm_prot_t               cur_protection, max_protection;
	kern_return_t           kr;

	if (target_map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags,
	    flags | VM_FLAGS_RETURN_DATA_ADDR);

	/*
	 * We don't need cur_protection here, but sanitizing it before
	 * enforcing W^X below matches historical error codes better.
	 */
	kr = mach_vm_remap_new_external_sanitize(target_map,
	    *cur_protection_u,
	    *max_protection_u,
	    &cur_protection,
	    &max_protection);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	if ((max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
		/*
		 * XXX FBDP TODO
		 * enforce target's "wx" policies
		 */
		return KERN_PROTECTION_FAILURE;
	}

	if (copy || max_protection == VM_PROT_READ || max_protection == VM_PROT_NONE) {
		src_map = convert_port_to_map_read(src_tport);
	} else {
		src_map = convert_port_to_map(src_tport);
	}

	/* range_id is set by vm_map_remap */
	kr = vm_map_remap(target_map,
	    address,
	    size,
	    mask,
	    vmk_flags,
	    src_map,
	    memory_address,
	    copy,
	    cur_protection_u,    /* IN/OUT */
	    max_protection_u,    /* IN/OUT */
	    inheritance);

	vm_map_deallocate(src_map);

	if (kr == KERN_SUCCESS) {
		ipc_port_release_send(src_tport);  /* consume on success */
	}
	return kr;
}

/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	vm_map_t                src_map,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,    /* OUT */
	vm_prot_ut             *max_protection,    /* OUT */
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);

	*cur_protection = vm_sanitize_wrap_prot(VM_PROT_NONE);
	*max_protection = vm_sanitize_wrap_prot(VM_PROT_NONE);
	vmk_flags.vmkf_remap_legacy_mode = true;

	/* range_id is set by vm_map_remap */
	return vm_map_remap(target_map,
	           address,
	           size,
	           mask,
	           vmk_flags,
	           src_map,
	           memory_address,
	           copy,
	           cur_protection,
	           max_protection,
	           inheritance);
}
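
/*
 * Example (user space; an illustrative sketch): remapping one page of
 * another task into the current task as a copy. In the legacy interface
 * cur/max are pure out parameters reporting the protections obtained.
 * "src_task" and "src_addr" are placeholders:
 *
 *	mach_vm_address_t local = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_remap(mach_task_self(), &local, vm_page_size, 0,
 *	    VM_FLAGS_ANYWHERE, src_task, src_addr, TRUE /* copy */,
 *	    &cur, &max, VM_INHERIT_NONE);
 */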

/*
 * vm_remap_new -
 * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
vm_remap_new_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	mach_port_t             src_tport,
	vm_offset_ut            memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,       /* IN/OUT */
	vm_prot_ut             *max_protection,       /* IN/OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_new_external(target_map,
	           address,
	           size,
	           mask,
	           flags,
	           src_tport,
	           memory_address,
	           copy,
	           cur_protection, /* IN/OUT */
	           max_protection, /* IN/OUT */
	           inheritance);
}

/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	vm_map_t                src_map,
	vm_offset_ut            memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,    /* OUT */
	vm_prot_ut             *max_protection,    /* OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_external(target_map, address,
	           size, mask, flags, src_map, memory_address, copy,
	           cur_protection, max_protection, inheritance);
}

/*
 * NOTE: these routines (and this file) will no longer require mach_host_server.h
 * when mach_vm_wire and vm_wire are changed to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
	host_priv_t             host_priv,
	vm_map_t                map,
	mach_vm_address_ut      start,
	mach_vm_size_ut         size,
	vm_prot_ut              access)
{
	kern_return_t     rc;
	mach_vm_offset_ut end;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_HOST;
	}

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	end = vm_sanitize_compute_ut_end(start, size);
	if (VM_SANITIZE_UNSAFE_IS_ZERO(access)) {
		rc = vm_map_unwire_impl(map, start, end, true,
		    VM_SANITIZE_CALLER_VM_UNWIRE_USER);
	} else {
		rc = vm_map_wire_impl(map, start, end, access,
		    VM_KERN_MEMORY_MLOCK, true, NULL, VM_SANITIZE_CALLER_VM_WIRE_USER);
	}

	return rc;
}
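
/*
 * Example (user space; an illustrative sketch): wiring needs the host
 * privileged port, so this is effectively root-only. Passing VM_PROT_NONE
 * for the access argument unwires the same range. "host_priv", "addr" and
 * "size" are placeholders:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *	    VM_PROT_READ | VM_PROT_WRITE);	// wire
 *	...
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *	    VM_PROT_NONE);			// unwire
 */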

/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t             host_priv,
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size,
	vm_prot_ut              access)
{
	return mach_vm_wire_external(host_priv, map, start, size, access);
}

/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
	vm_map_t                map,
	mach_vm_address_ut      address_u,
	mach_vm_size_ut         size_u,
	vm_sync_t               sync_flags)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_msync(map, address_u, size_u, sync_flags);
}
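
/*
 * Example (user space; an illustrative sketch): push dirty pages of a
 * range back to their pager synchronously, and treat holes as an error
 * via VM_SYNC_CONTIGUOUS. "addr" and "size" are placeholders:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, size,
 *	    VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 *	if (kr == KERN_INVALID_ADDRESS) {
 *		// the range contained a hole
 *	}
 */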

/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
vm_msync(
	vm_map_t        map,
	vm_address_ut   address_u,
	vm_size_ut      size_u,
	vm_sync_t       sync_flags)
{
	return mach_vm_msync(map, address_u, size_u, sync_flags);
}


int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	vmlp_api_start(VM_TOGGLE_ENTRY_REUSE);

	assert(!map->is_nested_map);
	if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
		*old_value = map->disable_vmentry_reuse;
	} else if (toggle == VM_TOGGLE_SET) {
		vm_map_entry_t map_to_entry;

		vm_map_lock(map);
		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR) {
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else {
		vmlp_api_end(VM_TOGGLE_ENTRY_REUSE, KERN_INVALID_ARGUMENT);
		return KERN_INVALID_ARGUMENT;
	}

	vmlp_api_end(VM_TOGGLE_ENTRY_REUSE, KERN_SUCCESS);
	return KERN_SUCCESS;
}


static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_behavior_set_sanitize(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_behavior_ut          new_behavior_u,
	mach_vm_offset_t       *start,
	mach_vm_offset_t       *end,
	mach_vm_size_t         *size,
	vm_behavior_t          *new_behavior)
{
	mach_vm_offset_t align_mask;
	kern_return_t    kr;

	kr = vm_sanitize_behavior(new_behavior_u, VM_SANITIZE_CALLER_VM_BEHAVIOR_SET, new_behavior);
	if (__improbable(kr != KERN_SUCCESS)) {
		return kr;
	}

	/* Choose alignment of addr/size based on the behavior being set. */
	switch (*new_behavior) {
	case VM_BEHAVIOR_REUSABLE:
	case VM_BEHAVIOR_REUSE:
	case VM_BEHAVIOR_CAN_REUSE:
	case VM_BEHAVIOR_ZERO:
		/*
		 * Align to the hardware page size, to allow
		 * malloc() to maximize the amount of re-usability,
		 * even on systems with larger software page size.
		 */
		align_mask = PAGE_MASK;
		break;
	default:
		align_mask = VM_MAP_PAGE_MASK(map);
		break;
	}

	vm_sanitize_flags_t     flags = VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS;

#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	flags |= VM_SANITIZE_FLAGS_STRIP_ADDR;
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

	kr = vm_sanitize_addr_size(start_u, size_u, VM_SANITIZE_CALLER_VM_BEHAVIOR_SET,
	    align_mask, map, flags, start, end, size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return kr;
	}

	return KERN_SUCCESS;
}

/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_behavior_ut          new_behavior_u)
{
	kern_return_t    kr;
	mach_vm_offset_t start, end;
	mach_vm_size_t   size;
	vm_behavior_t    new_behavior;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_vm_behavior_set_sanitize(map,
	    start_u, size_u, new_behavior_u,
	    &start, &end, &size, &new_behavior);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	return vm_map_behavior_set(map,
	           start,
	           end,
	           new_behavior);
}
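
/*
 * Example (user space; an illustrative sketch): a malloc-style allocator
 * marking a freed run as reusable and then reclaiming it before handing
 * it out again. The hardware-page alignment chosen in the sanitizer above
 * is what lets this work even when the VM map uses a larger software page
 * size. "addr" and "size" are placeholders:
 *
 *	(void)mach_vm_behavior_set(mach_task_self(), addr, size,
 *	    VM_BEHAVIOR_REUSABLE);	// pages may be discarded
 *	...
 *	(void)mach_vm_behavior_set(mach_task_self(), addr, size,
 *	    VM_BEHAVIOR_REUSE);		// about to use the range again
 */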

/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size,
	vm_behavior_ut          new_behavior)
{
	return mach_vm_behavior_set(map,
	           start,
	           size,
	           new_behavior);
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
	vm_map_t                map,
	mach_vm_offset_ut      *address_u,      /* IN/OUT */
	mach_vm_size_ut        *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor) {
		flavor = VM_REGION_BASIC_INFO_64;
	}

	return vm_map_region(map, address_u, size_u, flavor, info, count,
	           object_name);
}
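
/*
 * Example (user space; an illustrative sketch): querying the basic info
 * of the region containing (or following) a probe address. On return,
 * "addr" and "size" are updated to describe the region actually found:
 *
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *	mach_vm_address_t addr = probe_addr;	// "probe_addr": placeholder
 *	mach_vm_size_t size = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_region(mach_task_self(), &addr, &size,
 *	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count,
 *	    &object_name);
 */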

static inline kern_return_t
vm_region_get_kern_return(
	kern_return_t           kr,
	vm_offset_ut            addr_u,
	vm_size_ut              size_u)
{
	vm_offset_ut end_u = vm_sanitize_compute_ut_end(addr_u, size_u);

	if (KERN_SUCCESS == kr && VM_SANITIZE_UNSAFE_UNWRAP(end_u) > VM_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}

/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
	vm_map_t                map,
	vm_offset_ut           *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	kern_return_t kr;

	kr = mach_vm_region(map, address_u, size_u, flavor, info, count,
	    object_name);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
vm_region(
	vm_map_t                map,
	vm_address_ut          *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	kern_return_t kr;

	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_region(map, address_u, size_u, flavor, info, count,
	    object_name);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

/*
 *	mach_vm_region_recurse: A form of mach_vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t                map,
	mach_vm_address_ut     *address_u,
	mach_vm_size_ut        *size_u,
	uint32_t               *depth,
	vm_region_recurse_info_t info,
	mach_msg_type_number_t *infoCnt)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_region_recurse_64(map, address_u, size_u, depth,
	           (vm_region_submap_info_64_t)info, infoCnt);
}
1541 
1542 /*
1543  *	vm_region_recurse: A form of vm_region which follows the
1544  *	submaps in a target map
1545  *
1546  */
1547 kern_return_t
vm_region_recurse_64(vm_map_t map,vm_address_ut * address_u,vm_size_ut * size_u,uint32_t * depth,vm_region_recurse_info_64_t info,mach_msg_type_number_t * infoCnt)1548 vm_region_recurse_64(
1549 	vm_map_t                map,
1550 	vm_address_ut          *address_u,
1551 	vm_size_ut             *size_u,
1552 	uint32_t               *depth,
1553 	vm_region_recurse_info_64_t info,
1554 	mach_msg_type_number_t *infoCnt)
1555 {
1556 	kern_return_t kr;
1557 
1558 	kr = mach_vm_region_recurse(map, address_u, size_u, depth,
1559 	    (vm_region_recurse_info_t)info, infoCnt);
1560 
1561 	return vm_region_get_kern_return(kr, *address_u, *size_u);
1562 }
1563 
1564 kern_return_t
vm_region_recurse(vm_map_t map,vm_offset_ut * address_u,vm_size_ut * size_u,natural_t * depth,vm_region_recurse_info_t info32,mach_msg_type_number_t * infoCnt)1565 vm_region_recurse(
1566 	vm_map_t                map,
1567 	vm_offset_ut           *address_u,      /* IN/OUT */
1568 	vm_size_ut             *size_u,         /* OUT */
1569 	natural_t              *depth,          /* IN/OUT */
1570 	vm_region_recurse_info_t info32,        /* IN/OUT */
1571 	mach_msg_type_number_t *infoCnt)        /* IN/OUT */
1572 {
1573 	vm_region_submap_info_data_64_t info64;
1574 	vm_region_submap_info_t info;
1575 	kern_return_t           kr;
1576 
1577 	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
1578 		return KERN_INVALID_ARGUMENT;
1579 	}
1580 
1581 	info = (vm_region_submap_info_t)info32;
1582 	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;
1583 
1584 	kr = vm_map_region_recurse_64(map, address_u, size_u,
1585 	    depth, &info64, infoCnt);
1586 
1587 	info->protection = info64.protection;
1588 	info->max_protection = info64.max_protection;
1589 	info->inheritance = info64.inheritance;
1590 	info->offset = (uint32_t)info64.offset; /* trouble-maker */
1591 	info->user_tag = info64.user_tag;
1592 	info->pages_resident = info64.pages_resident;
1593 	info->pages_shared_now_private = info64.pages_shared_now_private;
1594 	info->pages_swapped_out = info64.pages_swapped_out;
1595 	info->pages_dirtied = info64.pages_dirtied;
1596 	info->ref_count = info64.ref_count;
1597 	info->shadow_depth = info64.shadow_depth;
1598 	info->external_pager = info64.external_pager;
1599 	info->share_mode = info64.share_mode;
1600 	info->is_submap = info64.is_submap;
1601 	info->behavior = info64.behavior;
1602 	info->object_id = info64.object_id;
1603 	info->user_wired_count = info64.user_wired_count;
1604 
1605 	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;
1606 
1607 	return vm_region_get_kern_return(kr, *address_u, *size_u);
1608 }
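
/*
 * User-space usage sketch (not compiled here): walking a map with the
 * 64-bit submap flavor, which avoids the offset truncation noted above.
 * Assumes the standard MIG interface from <mach/mach_vm.h>; `kr' and the
 * task port are the only inputs.
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 0;
 *	natural_t depth = 0;
 *	vm_region_submap_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 *
 *	kern_return_t kr = mach_vm_region_recurse(mach_task_self(), &addr,
 *	    &size, &depth, (vm_region_recurse_info_t)&info, &count);
 *	// on success, [addr, addr + size) describes the next region at
 *	// nesting level `depth'
 */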
1609 
1610 kern_return_t
1611 mach_vm_purgable_control(
1612 	vm_map_t                map,
1613 	mach_vm_offset_ut       address_u,
1614 	vm_purgable_t           control,
1615 	int                    *state)
1616 {
1617 	if (VM_MAP_NULL == map) {
1618 		return KERN_INVALID_ARGUMENT;
1619 	}
1620 
1621 	switch (control) {
1622 	case VM_PURGABLE_SET_STATE:
1623 	case VM_PURGABLE_GET_STATE:
1624 	case VM_PURGABLE_PURGE_ALL:
1625 		break;
1626 	case VM_PURGABLE_SET_STATE_FROM_KERNEL:
1627 	default:
1628 		/* not allowed from user-space */
1629 		return KERN_INVALID_ARGUMENT;
1630 	}
1631 
1632 	return vm_map_purgable_control(map, address_u, control, state);
1633 }
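
/*
 * User-space usage sketch (not compiled here), assuming memory that was
 * allocated with VM_FLAGS_PURGABLE; `addr' is a placeholder for its base.
 * SET_STATE hands back the previous state through `state', so re-arming a
 * volatile region reveals whether it was purged in the meantime.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	// ... later, take the region back ...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY) {
 *		// contents were reclaimed; reinitialize them
 *	}
 */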
1634 
1635 kern_return_t
1636 mach_vm_purgable_control_external(
1637 	mach_port_t             target_tport,
1638 	mach_vm_offset_ut       address_u,
1639 	vm_purgable_t           control,
1640 	int                    *state)
1641 {
1642 	vm_map_t map;
1643 	kern_return_t kr;
1644 
1645 	if (control == VM_PURGABLE_GET_STATE) {
1646 		map = convert_port_to_map_read(target_tport);
1647 	} else {
1648 		map = convert_port_to_map(target_tport);
1649 	}
1650 
1651 	kr = mach_vm_purgable_control(map, address_u, control, state);
1652 	vm_map_deallocate(map);
1653 
1654 	return kr;
1655 }
1656 
1657 kern_return_t
1658 vm_purgable_control_external(
1659 	mach_port_t             target_tport,
1660 	vm_offset_ut            address,
1661 	vm_purgable_t           control,
1662 	int                     *state)
1663 {
1664 	return mach_vm_purgable_control_external(target_tport, address, control, state);
1665 }
1666 
1667 
1668 kern_return_t
1669 mach_vm_page_query(
1670 	vm_map_t                map,
1671 	mach_vm_offset_ut       offset_u,
1672 	int                    *disposition,
1673 	int                    *ref_count)
1674 {
1675 	kern_return_t                   kr;
1676 	vm_page_info_basic_data_t       info;
1677 	mach_msg_type_number_t          count;
1678 
1679 	if (VM_MAP_NULL == map) {
1680 		return KERN_INVALID_ARGUMENT;
1681 	}
1682 
1683 	count = VM_PAGE_INFO_BASIC_COUNT;
1684 	kr = vm_map_page_info(map, offset_u, VM_PAGE_INFO_BASIC,
1685 	    (vm_page_info_t) &info, &count);
1686 	if (kr == KERN_SUCCESS) {
1687 		*disposition = info.disposition;
1688 		*ref_count = info.ref_count;
1689 	} else {
1690 		*disposition = 0;
1691 		*ref_count = 0;
1692 	}
1693 
1694 	return kr;
1695 }
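
/*
 * User-space usage sketch (not compiled here): a one-page analogue of
 * mincore(). Assumes the VM_PAGE_QUERY_PAGE_* disposition bits from the
 * Mach VM headers; `addr' is a placeholder address in the target map.
 *
 *	int disposition = 0, ref_count = 0;
 *	kr = mach_vm_page_query(mach_task_self(), addr,
 *	    &disposition, &ref_count);
 *	if (kr == KERN_SUCCESS &&
 *	    (disposition & VM_PAGE_QUERY_PAGE_PRESENT)) {
 *		// the page is resident
 *	}
 */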
1696 
1697 kern_return_t
1698 vm_map_page_query(
1699 	vm_map_t                map,
1700 	vm_offset_ut            offset,
1701 	int                    *disposition,
1702 	int                    *ref_count)
1703 {
1704 	return mach_vm_page_query(map, offset, disposition, ref_count);
1705 }
1706 
1707 static __attribute__((always_inline, warn_unused_result))
1708 kern_return_t
1709 mach_vm_page_range_query_sanitize(
1710 	mach_vm_offset_ut       address_u,
1711 	mach_vm_size_ut         size_u,
1712 	int                     effective_page_mask,
1713 	mach_vm_address_ut      dispositions_addr_u,
1714 	mach_vm_size_ut         dispositions_count_u,
1715 	mach_vm_offset_t       *start,
1716 	mach_vm_size_t         *size,
1717 	mach_vm_address_t      *dispositions_addr,
1718 	mach_vm_size_t         *disp_buf_req_size)
1719 {
1720 	mach_vm_offset_t  end;
1721 	mach_vm_size_t    dispositions_count;
1722 	mach_vm_address_t discard;
1723 
1724 	/*
1725 	 * There are no alignment requirements on
1726 	 * dispositions_addr_u/dispositions_count_u; they are only used to
1727 	 * derive inputs to copyout, so it is safe to unwrap them. We do want to
1728 	 * check that the range starting at dispositions_addr_u and ending
1729 	 * after dispositions_count_u integers is sound (i.e., doesn't wrap
1730 	 * around due to integer overflow).
1731 	 */
1732 	*dispositions_addr = VM_SANITIZE_UNSAFE_UNWRAP(dispositions_addr_u);
1733 	dispositions_count = VM_SANITIZE_UNSAFE_UNWRAP(dispositions_count_u);
1734 	if (
1735 		os_mul_overflow(
1736 			dispositions_count,
1737 			sizeof(int),
1738 			disp_buf_req_size) ||
1739 		os_add_overflow(
1740 			*dispositions_addr,
1741 			*disp_buf_req_size,
1742 			&discard)) {
1743 		return KERN_INVALID_ARGUMENT;
1744 	}
1745 
1746 	return vm_sanitize_addr_size(address_u, size_u,
1747 	           VM_SANITIZE_CALLER_VM_MAP_PAGE_RANGE_QUERY,
1748 	           effective_page_mask,
1749 	           VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, start,
1750 	           &end, size);
1751 }
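
/*
 * The overflow check above is the generic <os/overflow.h> pattern for
 * validating a user-supplied (address, element count) pair. A standalone
 * sketch of the same logic (hypothetical helper name):
 *
 *	#include <stdbool.h>
 *	#include <os/overflow.h>
 *
 *	static bool
 *	range_is_sound(uint64_t addr, uint64_t count, size_t elem_size)
 *	{
 *		uint64_t bytes, end;
 *
 *		// both checks must pass: count * elem_size must not
 *		// overflow, and neither must addr + bytes
 *		return !os_mul_overflow(count, elem_size, &bytes) &&
 *		    !os_add_overflow(addr, bytes, &end);
 *	}
 */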
1752 
1753 kern_return_t
1754 mach_vm_page_range_query(
1755 	vm_map_t                map,
1756 	mach_vm_offset_ut       address_u,
1757 	mach_vm_size_ut         size_u,
1758 	mach_vm_address_ut      dispositions_addr_u,
1759 	mach_vm_size_ut        *dispositions_count_u)
1760 {
1761 	kern_return_t           kr;
1762 	int                     num_pages = 0, i = 0;
1763 	mach_vm_size_t          curr_sz = 0, copy_sz = 0;
1764 	mach_vm_size_t          disp_buf_req_size = 0, disp_buf_total_size = 0;
1765 	mach_msg_type_number_t  count = 0;
1766 	mach_vm_address_t       dispositions_addr;
1767 
1768 	void                    *info = NULL;
1769 	void                    *local_disp = NULL;
1770 	vm_map_size_t           info_size = 0, local_disp_size = 0;
1771 	mach_vm_offset_t        start = 0;
1772 	vm_map_size_t           size;
1773 	int                     effective_page_shift, effective_page_size, effective_page_mask;
1774 
1775 	if (map == VM_MAP_NULL || dispositions_count_u == NULL) {
1776 		return KERN_INVALID_ARGUMENT;
1777 	}
1778 
1779 	effective_page_shift = vm_self_region_page_shift_safely(map);
1780 	if (effective_page_shift == -1) {
1781 		return KERN_INVALID_ARGUMENT;
1782 	}
1783 	effective_page_size = (1 << effective_page_shift);
1784 	effective_page_mask = effective_page_size - 1;
1785 
1786 	kr = mach_vm_page_range_query_sanitize(address_u,
1787 	    size_u,
1788 	    effective_page_mask,
1789 	    dispositions_addr_u,
1790 	    *dispositions_count_u,
1791 	    &start,
1792 	    &size,
1793 	    &dispositions_addr,
1794 	    &disp_buf_req_size);
1795 	if (__improbable(kr != KERN_SUCCESS)) {
1796 		return vm_sanitize_get_kr(kr);
1797 	}
1798 
1799 	if (disp_buf_req_size == 0 || size == 0) {
1800 		return KERN_SUCCESS;
1801 	}
1802 
1803 	/*
1804 	 * For large requests, we will go through them one
1805 	 * MAX_PAGE_RANGE_QUERY-sized chunk at a time.
1806 	 */
1807 
1808 	curr_sz = MIN(size, MAX_PAGE_RANGE_QUERY);
1809 	num_pages = (int) (curr_sz >> effective_page_shift);
1810 
1811 	info_size = num_pages * sizeof(vm_page_info_basic_data_t);
1812 	info = kalloc_data(info_size, Z_WAITOK);
1813 
1814 	local_disp_size = num_pages * sizeof(int);
1815 	local_disp = kalloc_data(local_disp_size, Z_WAITOK);
1816 
1817 	if (info == NULL || local_disp == NULL) {
1818 		kr = KERN_RESOURCE_SHORTAGE;
1819 		goto out;
1820 	}
1821 
1822 	while (size) {
1823 		count = VM_PAGE_INFO_BASIC_COUNT;
1824 		kr = vm_map_page_range_info_internal(
1825 			map,
1826 			start,
1827 			vm_map_round_page(start + curr_sz, effective_page_mask),
1828 			effective_page_shift,
1829 			VM_PAGE_INFO_BASIC,
1830 			(vm_page_info_t) info,
1831 			&count);
1832 
1833 		assert(kr == KERN_SUCCESS);
1834 
1835 		for (i = 0; i < num_pages; i++) {
1836 			((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
1837 		}
1838 
1839 		copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
1840 		kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
1841 
1842 		start += curr_sz;
1843 		disp_buf_req_size -= copy_sz;
1844 		disp_buf_total_size += copy_sz;
1845 
1846 		if (kr != 0) {
1847 			break;
1848 		}
1849 
1850 		if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
1851 			/*
1852 			 * We might have inspected the full range, or even
1853 			 * more than it, especially if the user passed in a
1854 			 * non-page-aligned start/size and/or if we
1855 			 * descended into a submap. We are done here.
1856 			 */
1857 
1858 			size = 0;
1859 		} else {
1860 			dispositions_addr += copy_sz;
1861 
1862 			size -= curr_sz;
1863 
1864 			curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY);
1865 			num_pages = (int)(curr_sz >> effective_page_shift);
1866 		}
1867 	}
1868 
1869 	VM_SANITIZE_UT_SET(
1870 		*dispositions_count_u,
1871 		disp_buf_total_size / sizeof(int));
1872 
1873 out:
1874 	kfree_data(local_disp, local_disp_size);
1875 	kfree_data(info, info_size);
1876 	return kr;
1877 }
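
/*
 * User-space usage sketch (not compiled here): fetching one disposition
 * word per page for a range. `addr' and `len' are placeholders; the
 * kernel chunks the work internally, so `len' may be large.
 *
 *	mach_vm_size_t count = len / vm_page_size;
 *	int *disps = calloc(count, sizeof(int));
 *
 *	kr = mach_vm_page_range_query(mach_task_self(), addr, len,
 *	    (mach_vm_address_t)disps, &count);
 *	// on success, count holds the number of dispositions written
 */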
1878 
1879 kern_return_t
1880 mach_vm_page_info(
1881 	vm_map_t                map,
1882 	mach_vm_address_ut      address,
1883 	vm_page_info_flavor_t   flavor,
1884 	vm_page_info_t          info,
1885 	mach_msg_type_number_t  *count)
1886 {
1887 	kern_return_t   kr;
1888 
1889 	if (map == VM_MAP_NULL) {
1890 		return KERN_INVALID_ARGUMENT;
1891 	}
1892 
1893 	kr = vm_map_page_info(map, address, flavor, info, count);
1894 	return kr;
1895 }
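
/*
 * User-space usage sketch (not compiled here): the flavored variant of
 * the page query, using the basic info flavor. `addr' is a placeholder
 * address in the caller's map.
 *
 *	vm_page_info_basic_data_t basic;
 *	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
 *
 *	kr = mach_vm_page_info(mach_task_self(), addr, VM_PAGE_INFO_BASIC,
 *	    (vm_page_info_t)&basic, &count);
 *	// basic.disposition and basic.ref_count are now valid
 */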
1896 
1897 /*
1898  *	task_wire
1899  *
1900  *	Set or clear the map's wiring_required flag.  This flag, if set,
1901  *	will cause all future virtual memory allocation to allocate
1902  *	user wired memory.  Unwiring pages wired down as a result of
1903  *	this routine is done with the vm_wire interface.
1904  */
1905 kern_return_t
1906 task_wire(
1907 	vm_map_t        map,
1908 	boolean_t       must_wire __unused)
1909 {
1910 	if (map == VM_MAP_NULL) {
1911 		return KERN_INVALID_ARGUMENT;
1912 	}
1913 
1914 	return KERN_NOT_SUPPORTED;
1915 }
1916 
1917 kern_return_t
1918 vm_map_exec_lockdown(
1919 	vm_map_t        map)
1920 {
1921 	vmlp_api_start(VM_MAP_EXEC_LOCKDOWN);
1922 
1923 	if (map == VM_MAP_NULL) {
1924 		vmlp_api_end(VM_MAP_EXEC_LOCKDOWN, KERN_INVALID_ARGUMENT);
1925 		return KERN_INVALID_ARGUMENT;
1926 	}
1927 
1928 	vm_map_lock(map);
1929 	map->map_disallow_new_exec = TRUE;
1930 	vm_map_unlock(map);
1931 
1932 	vmlp_api_end(VM_MAP_EXEC_LOCKDOWN, KERN_SUCCESS);
1933 	return KERN_SUCCESS;
1934 }
1935 
1936 #if XNU_PLATFORM_MacOSX
1937 kern_return_t
1938 vm_region_object_create(
1939 	__unused vm_map_t target_map,
1940 	__unused vm_size_t size,
1941 	__unused ipc_port_t *object_handle)
1942 {
1943 	return KERN_NOT_SUPPORTED;
1944 }
1945 #endif /* XNU_PLATFORM_MacOSX */
1946 
1947 extern boolean_t proc_is_simulated(struct proc *p);
1948 
1949 kern_return_t
1950 mach_vm_deferred_reclamation_buffer_allocate(
1951 	task_t           task,
1952 	mach_vm_address_ut *address,
1953 	uint64_t *sampling_period,
1954 	uint32_t initial_capacity,
1955 	uint32_t max_capacity)
1956 {
1957 #if CONFIG_DEFERRED_RECLAIM
1958 	if (task != current_task()) {
1959 		/* Remote buffer operations are not supported */
1960 		return KERN_INVALID_TASK;
1961 	}
1962 	struct proc *p = task_get_proc_raw(task);
1963 	if (proc_is_simulated(p)) {
1964 		return KERN_NOT_SUPPORTED;
1965 	}
1966 	return vm_deferred_reclamation_buffer_allocate_internal(task, address, sampling_period, initial_capacity, max_capacity);
1967 #else
1968 	(void) task;
1969 	(void) address;
1970 	(void) sampling_period; (void) initial_capacity; (void) max_capacity;
1971 	return KERN_NOT_SUPPORTED;
1972 #endif /* CONFIG_DEFERRED_RECLAIM */
1973 }
1974 
1975 kern_return_t
1976 mach_vm_deferred_reclamation_buffer_flush(
1977 	task_t task,
1978 	uint32_t num_entries_to_reclaim,
1979 	mach_vm_size_ut *bytes_reclaimed_out)
1980 {
1981 #if CONFIG_DEFERRED_RECLAIM
1982 	kern_return_t kr;
1983 	mach_vm_size_t bytes_reclaimed = 0;
1984 	if (task != current_task()) {
1985 		/* Remote buffer operations are not supported */
1986 		return KERN_INVALID_TASK;
1987 	}
1988 	if (bytes_reclaimed_out == NULL) {
1989 		return KERN_INVALID_ARGUMENT;
1990 	}
1991 	kr = vm_deferred_reclamation_buffer_flush_internal(task, num_entries_to_reclaim, &bytes_reclaimed);
1992 	*bytes_reclaimed_out = vm_sanitize_wrap_size(bytes_reclaimed);
1993 	return kr;
1994 #else
1995 	(void) task;
1996 	(void) num_entries_to_reclaim; (void) bytes_reclaimed_out;
1997 	return KERN_NOT_SUPPORTED;
1998 #endif /* CONFIG_DEFERRED_RECLAIM */
1999 }
2000 
2001 kern_return_t
2002 mach_vm_deferred_reclamation_buffer_resize(task_t task,
2003     uint32_t new_len,
2004     mach_vm_size_ut *bytes_reclaimed_out)
2005 {
2006 #if CONFIG_DEFERRED_RECLAIM
2007 	mach_error_t err;
2008 	mach_vm_size_t bytes_reclaimed = 0;
2009 
2010 	if (task != current_task()) {
2011 		/* Remote buffer operations are not supported */
2012 		return KERN_INVALID_TASK;
2013 	}
2014 	if (bytes_reclaimed_out == NULL) {
2015 		return KERN_INVALID_ARGUMENT;
2016 	}
2017 
2018 	err = vm_deferred_reclamation_buffer_resize_internal(task, new_len, &bytes_reclaimed);
2019 	*bytes_reclaimed_out = vm_sanitize_wrap_size(bytes_reclaimed);
2020 	return err;
2021 #else
2022 	(void) task;
2023 	(void) new_len; (void) bytes_reclaimed_out;
2024 	return KERN_NOT_SUPPORTED;
2025 #endif /* CONFIG_DEFERRED_RECLAIM */
2026 }
2027 
2028 kern_return_t
2029 mach_vm_deferred_reclamation_buffer_query(task_t task,
2030     mach_vm_address_ut *addr_out_ut,
2031     mach_vm_size_ut *size_out_ut)
2032 {
2033 #if CONFIG_DEFERRED_RECLAIM
2034 	return vm_deferred_reclamation_buffer_query_internal(task, addr_out_ut, size_out_ut);
2035 #else /* CONFIG_DEFERRED_RECLAIM */
2036 	(void) task;
2037 	(void) addr_out_ut;
2038 	(void) size_out_ut;
2039 	return KERN_NOT_SUPPORTED;
2040 #endif /* !CONFIG_DEFERRED_RECLAIM */
2041 }
2042 
2043 #if CONFIG_MAP_RANGES
2044 
2045 extern void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
2046 
2047 static int
2048 vm_map_user_range_cmp(const void *e1, const void *e2)
2049 {
2050 	const struct vm_map_user_range *r1 = e1;
2051 	const struct vm_map_user_range *r2 = e2;
2052 
2053 	if (r1->vmur_min_address != r2->vmur_min_address) {
2054 		return r1->vmur_min_address < r2->vmur_min_address ? -1 : 1;
2055 	}
2056 
2057 	return 0;
2058 }
2059 
2060 static int
2061 mach_vm_range_recipe_v1_cmp(const void *e1, const void *e2)
2062 {
2063 	const mach_vm_range_recipe_v1_t *r1 = e1;
2064 	const mach_vm_range_recipe_v1_t *r2 = e2;
2065 
2066 	if (r1->range.min_address != r2->range.min_address) {
2067 		return r1->range.min_address < r2->range.min_address ? -1 : 1;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
2073 static inline __result_use_check kern_return_t
2074 mach_vm_range_create_v1_sanitize(
2075 	vm_map_t                map,
2076 	mach_vm_range_recipe_v1_ut *recipe_u,
2077 	uint32_t count,
2078 	mach_vm_range_recipe_v1_t **recipe_p)
2079 {
2080 	kern_return_t kr;
2081 
2082 	for (size_t i = 0; i < count; i++) {
2083 		vm_map_offset_t start, end;
2084 		vm_map_size_t size;
2085 		mach_vm_range_ut *range_u = &recipe_u[i].range_u;
2086 		kr = vm_sanitize_addr_end(
2087 			range_u->min_address_u,
2088 			range_u->max_address_u,
2089 			VM_SANITIZE_CALLER_MACH_VM_RANGE_CREATE,
2090 			map,
2091 			VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS
2092 #if HAS_MTE || HAS_MTE_EMULATION_SHIMS
2093 			| VM_SANITIZE_FLAGS_DENY_NON_CANONICAL_ADDR
2094 #endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
2095 			| VM_SANITIZE_FLAGS_CHECK_ALIGNED_START
2096 			| VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE,
2097 			&start, &end, &size); // out values unused; only the sanitize check matters
2098 		if (__improbable(kr != KERN_SUCCESS)) {
2099 			return kr;
2100 		}
2101 	}
2102 	/*
2103 	 * Sanitization only checked properties of recipe_u.
2104 	 * We can now see it through the lens of the safe type.
2105 	 * The cast is undefined behavior, but of the kind VM sanitization
2106 	 * relies on anyway, so we don't expect this to cause issues.
2107 	 */
2108 	*recipe_p = (mach_vm_range_recipe_v1_t *)recipe_u;
2109 
2110 	return KERN_SUCCESS;
2111 }
2112 
2113 /*!
2114  * @function mach_vm_range_create_v1()
2115  *
2116  * @brief
2117  * Handle the backend for mach_vm_range_create() for the
2118  * MACH_VM_RANGE_FLAVOR_V1 flavor.
2119  *
2120  * @description
2121  * This call allows creating "ranges" in the map of a task
2122  * that have special semantics/policies around placement of
2123  * new allocations (in the vm_map_locate_space() sense).
2124  *
2125  * @returns
2126  * - KERN_SUCCESS on success
2127  * - KERN_INVALID_ARGUMENT for incorrect arguments
2128  * - KERN_NO_SPACE if the maximum amount of ranges would be exceeded
2129  * - KERN_MEMORY_PRESENT if any of the requested ranges
2130  *   overlaps with existing ranges or allocations in the map.
2131  */
2132 static kern_return_t
2133 mach_vm_range_create_v1(
2134 	vm_map_t                   map,
2135 	mach_vm_range_recipe_v1_ut *recipe_u,
2136 	uint32_t                   new_count)
2137 {
2138 	mach_vm_range_recipe_v1_t *recipe;
2139 	vm_map_user_range_t table;
2140 	kern_return_t kr = KERN_SUCCESS;
2141 	uint16_t count;
2142 
2143 	vmlp_api_start(MACH_VM_RANGE_CREATE_V1);
2144 
2145 	struct mach_vm_range void1 = {
2146 		.min_address = map->default_range.max_address,
2147 		.max_address = map->data_range.min_address,
2148 	};
2149 	struct mach_vm_range void2 = {
2150 		.min_address = map->data_range.max_address,
2151 #if XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT
2152 		.max_address = MACH_VM_JUMBO_ADDRESS,
2153 #else /* !XNU_TARGET_OS_IOS || !EXTENDED_USER_VA_SUPPORT */
2154 		.max_address = vm_map_max(map),
2155 #endif /* XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT */
2156 	};
2157 
2158 	kr = mach_vm_range_create_v1_sanitize(map, recipe_u, new_count, &recipe);
2159 	if (__improbable(kr != KERN_SUCCESS)) {
2160 		kr = vm_sanitize_get_kr(kr);
2161 		vmlp_api_end(MACH_VM_RANGE_CREATE_V1, kr);
2162 		return kr;
2163 	}
2164 
2165 	qsort(recipe, new_count, sizeof(mach_vm_range_recipe_v1_t),
2166 	    mach_vm_range_recipe_v1_cmp);
2167 
2168 	/*
2169 	 * Step 1: Validate that the recipes have no intersections.
2170 	 */
2171 
2172 	for (size_t i = 0; i < new_count; i++) {
2173 		mach_vm_range_t r = &recipe[i].range;
2174 		mach_vm_size_t s;
2175 
2176 		if (recipe[i].flags) {
2177 			vmlp_api_end(MACH_VM_RANGE_CREATE_V1, KERN_INVALID_ARGUMENT);
2178 			return KERN_INVALID_ARGUMENT;
2179 		}
2180 
2181 		static_assert((int)UMEM_RANGE_ID_FIXED == MACH_VM_RANGE_FIXED);
2182 		switch (recipe[i].range_tag) {
2183 		case MACH_VM_RANGE_FIXED:
2184 			break;
2185 		default:
2186 			vmlp_api_end(MACH_VM_RANGE_CREATE_V1, KERN_INVALID_ARGUMENT);
2187 			return KERN_INVALID_ARGUMENT;
2188 		}
2189 
2190 		s = mach_vm_range_size(r);
2191 		if (!mach_vm_range_contains(&void1, r->min_address, s) &&
2192 		    !mach_vm_range_contains(&void2, r->min_address, s)) {
2193 			vmlp_api_end(MACH_VM_RANGE_CREATE_V1, KERN_INVALID_ARGUMENT);
2194 			return KERN_INVALID_ARGUMENT;
2195 		}
2196 
2197 		if (i > 0 && recipe[i - 1].range.max_address >
2198 		    recipe[i].range.min_address) {
2199 			vmlp_api_end(MACH_VM_RANGE_CREATE_V1, KERN_INVALID_ARGUMENT);
2200 			return KERN_INVALID_ARGUMENT;
2201 		}
2202 	}
2203 
2204 	vm_map_lock(map);
2205 
2206 	table = map->extra_ranges;
2207 	count = map->extra_ranges_count;
2208 
2209 	if (count + new_count > VM_MAP_EXTRA_RANGES_MAX) {
2210 		kr = KERN_NO_SPACE;
2211 		goto out_unlock;
2212 	}
2213 
2214 	/*
2215 	 * Step 2: Check that there is no intersection with existing ranges.
2216 	 */
2217 
2218 	for (size_t i = 0, j = 0; i < new_count && j < count;) {
2219 		mach_vm_range_t     r1 = &recipe[i].range;
2220 		vm_map_user_range_t r2 = &table[j];
2221 
2222 		if (r1->max_address <= r2->vmur_min_address) {
2223 			i++;
2224 		} else if (r2->vmur_max_address <= r1->min_address) {
2225 			j++;
2226 		} else {
2227 			kr = KERN_MEMORY_PRESENT;
2228 			goto out_unlock;
2229 		}
2230 	}
2231 
2232 	/*
2233 	 * Step 3: commit the new ranges.
2234 	 */
2235 
2236 	static_assert(VM_MAP_EXTRA_RANGES_MAX * sizeof(struct vm_map_user_range) <=
2237 	    KALLOC_SAFE_ALLOC_SIZE);
2238 
2239 	table = krealloc_data(table,
2240 	    count * sizeof(struct vm_map_user_range),
2241 	    (count + new_count) * sizeof(struct vm_map_user_range),
2242 	    Z_ZERO | Z_WAITOK | Z_NOFAIL);
2243 
2244 	for (size_t i = 0; i < new_count; i++) {
2245 		static_assert(MACH_VM_MAX_ADDRESS < (1ull << 56));
2246 
2247 		table[count + i] = (struct vm_map_user_range){
2248 			.vmur_min_address = recipe[i].range.min_address,
2249 			.vmur_max_address = recipe[i].range.max_address,
2250 			.vmur_range_id    = (vm_map_range_id_t)recipe[i].range_tag,
2251 		};
2252 	}
2253 
2254 	qsort(table, count + new_count,
2255 	    sizeof(struct vm_map_user_range), vm_map_user_range_cmp);
2256 
2257 	map->extra_ranges_count += new_count;
2258 	map->extra_ranges = table;
2259 
2260 out_unlock:
2261 	vm_map_unlock(map);
2262 
2263 	if (kr == KERN_SUCCESS) {
2264 		for (size_t i = 0; i < new_count; i++) {
2265 			vm_map_kernel_flags_t vmk_flags = {
2266 				.vmf_fixed = true,
2267 				.vmf_overwrite = true,
2268 				.vmkf_overwrite_immutable = true,
2269 				.vm_tag = recipe[i].vm_tag,
2270 			};
2271 			__assert_only kern_return_t kr2;
2272 
2273 			vmlp_range_event(map, recipe[i].range.min_address, recipe[i].range.max_address - recipe[i].range.min_address);
2274 
2275 			kr2 = vm_map_enter(map, &recipe[i].range.min_address,
2276 			    mach_vm_range_size(&recipe[i].range),
2277 			    0, vmk_flags, VM_OBJECT_NULL, 0, FALSE,
2278 			    VM_PROT_NONE, VM_PROT_ALL,
2279 			    VM_INHERIT_DEFAULT);
2280 			assert(kr2 == KERN_SUCCESS);
2281 		}
2282 	}
2283 	vmlp_api_end(MACH_VM_RANGE_CREATE_V1, kr);
2284 	return kr;
2285 }
2286 
2287 kern_return_t
2288 mach_vm_range_create(
2289 	vm_map_t                map,
2290 	mach_vm_range_flavor_t  flavor,
2291 	mach_vm_range_recipes_raw_t recipe,
2292 	natural_t               size)
2293 {
2294 	if (map != current_map()) {
2295 		return KERN_INVALID_ARGUMENT;
2296 	}
2297 
2298 	if (!map->uses_user_ranges) {
2299 		return KERN_NOT_SUPPORTED;
2300 	}
2301 
2302 	if (size == 0) {
2303 		return KERN_SUCCESS;
2304 	}
2305 
2306 	if (flavor == MACH_VM_RANGE_FLAVOR_V1) {
2307 		mach_vm_range_recipe_v1_ut *array;
2308 
2309 		if (size % sizeof(mach_vm_range_recipe_v1_ut)) {
2310 			return KERN_INVALID_ARGUMENT;
2311 		}
2312 
2313 		size /= sizeof(mach_vm_range_recipe_v1_ut);
2314 		if (size > VM_MAP_EXTRA_RANGES_MAX) {
2315 			return KERN_NO_SPACE;
2316 		}
2317 
2318 		array = (mach_vm_range_recipe_v1_ut *)recipe;
2319 		return mach_vm_range_create_v1(map, array, size);
2320 	}
2321 
2322 	return KERN_INVALID_ARGUMENT;
2323 }
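
/*
 * User-space usage sketch (not compiled here): registering one fixed
 * range with the v1 recipe. Field names follow the v1 recipe layout;
 * `lo'/`hi' are placeholders that must land in one of the unallocated
 * voids validated by mach_vm_range_create_v1() above.
 *
 *	mach_vm_range_recipe_v1_t recipe = {
 *		.range_tag = MACH_VM_RANGE_FIXED,
 *		.range     = { .min_address = lo, .max_address = hi },
 *	};
 *
 *	kr = mach_vm_range_create(mach_task_self(), MACH_VM_RANGE_FLAVOR_V1,
 *	    (mach_vm_range_recipes_raw_t)&recipe, sizeof(recipe));
 */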
2324 
2325 #else /* !CONFIG_MAP_RANGES */
2326 
2327 kern_return_t
2328 mach_vm_range_create(
2329 	vm_map_t                map,
2330 	mach_vm_range_flavor_t  flavor,
2331 	mach_vm_range_recipes_raw_t recipe,
2332 	natural_t               size)
2333 {
2334 #pragma unused(map, flavor, recipe, size)
2335 	return KERN_NOT_SUPPORTED;
2336 }
2337 
2338 #endif /* !CONFIG_MAP_RANGES */
2339 
2340 /*
2341  * These symbols are looked up at runtime by vmware, VirtualBox,
2342  * despite not being exported in the symbol sets.
2343  */
2344 
2345 #if defined(__x86_64__)
2346 
2347 extern typeof(mach_vm_remap_external) mach_vm_remap;
2348 extern typeof(mach_vm_map_external) mach_vm_map;
2349 extern typeof(vm_map_external) vm_map;
2350 
2351 kern_return_t
2352 mach_vm_map(
2353 	vm_map_t                target_map,
2354 	mach_vm_offset_ut      *address,
2355 	mach_vm_size_ut         initial_size,
2356 	mach_vm_offset_ut       mask,
2357 	int                     flags,
2358 	ipc_port_t              port,
2359 	memory_object_offset_ut offset,
2360 	boolean_t               copy,
2361 	vm_prot_ut              cur_protection,
2362 	vm_prot_ut              max_protection,
2363 	vm_inherit_ut           inheritance)
2364 {
2365 	return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
2366 	           offset, copy, cur_protection, max_protection, inheritance);
2367 }
2368 
2369 kern_return_t
2370 mach_vm_remap(
2371 	vm_map_t                target_map,
2372 	mach_vm_offset_ut      *address,
2373 	mach_vm_size_ut         size,
2374 	mach_vm_offset_ut       mask,
2375 	int                     flags,
2376 	vm_map_t                src_map,
2377 	mach_vm_offset_ut       memory_address,
2378 	boolean_t               copy,
2379 	vm_prot_ut             *cur_protection,   /* OUT */
2380 	vm_prot_ut             *max_protection,   /* OUT */
2381 	vm_inherit_ut           inheritance)
2382 {
2383 	return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
2384 	           copy, cur_protection, max_protection, inheritance);
2385 }
2386 
2387 kern_return_t
2388 vm_map(
2389 	vm_map_t                target_map,
2390 	vm_offset_ut           *address,
2391 	vm_size_ut              size,
2392 	vm_offset_ut            mask,
2393 	int                     flags,
2394 	ipc_port_t              port,
2395 	vm_offset_ut            offset,
2396 	boolean_t               copy,
2397 	vm_prot_ut              cur_protection,
2398 	vm_prot_ut              max_protection,
2399 	vm_inherit_ut           inheritance)
2400 {
2401 	return mach_vm_map(target_map, address,
2402 	           size, mask, flags, port, offset, copy,
2403 	           cur_protection, max_protection, inheritance);
2404 }
2405 
2406 #endif /* __x86_64__ */
2407