/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters; they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly,
 * because the calling task and kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
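
/*
 * Usage sketch (userspace, illustrative only; this block is not part of the
 * kernel build): allocating and releasing anonymous zero-fill memory through
 * the subsystem 4800 ("wide") interface declared in <mach/mach_vm.h>.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *	#include <string.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t    size = vm_page_size;
 *	kern_return_t     kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		memset((void *)(uintptr_t)addr, 0x5a, (size_t)size);
 *		(void)mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */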

#include <debug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>
#include <mach/memory_entry.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object_xnu.h>
#include <vm/vm_kern.h>
#include <vm/vm_page_internal.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_kern_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_sanitize_internal.h>
#if CONFIG_DEFERRED_RECLAIM
#include <vm/vm_reclaim_internal.h>
#endif /* CONFIG_DEFERRED_RECLAIM */
#include <vm/vm_init_xnu.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>
#include <sys/kdebug_triage.h>

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate_external(
	vm_map_t                map,
	mach_vm_offset_ut      *addr,
	mach_vm_size_ut         size,
	int                     flags)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE) {
		ktriage_record(thread_tid(current_thread()),
		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
		    KDBG_TRIAGE_RESERVED,
		    KDBG_TRIAGE_VM_ALLOCATE_KERNEL_BADFLAGS_ERROR),
		    KERN_INVALID_ARGUMENT /* arg */);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);

	return mach_vm_allocate_kernel(map, addr, size, vmk_flags);
}

/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
	vm_map_t        map,
	vm_offset_ut   *addr,
	vm_size_ut      size,
	int             flags)
{
	return mach_vm_allocate_external(map, addr, size, flags);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_deallocate_sanitize(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	mach_vm_offset_t       *start,
	mach_vm_offset_t       *end,
	mach_vm_size_t         *size)
{
	return vm_sanitize_addr_size(start_u, size_u,
	           VM_SANITIZE_CALLER_VM_DEALLOCATE, map,
	           VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, start,
	           end, size);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u)
{
	mach_vm_offset_t start, end;
	mach_vm_size_t   size;
	kern_return_t    kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_vm_deallocate_sanitize(map,
	    start_u,
	    size_u,
	    &start,
	    &end,
	    &size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	return vm_map_remove_guard(map, start, end,
	           VM_MAP_REMOVE_NO_FLAGS,
	           KMEM_GUARD_NONE).kmr_return;
}

/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size)
{
	return mach_vm_deallocate(map, start, size);
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_inherit_ut           new_inheritance_u)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_inherit(map,
	           start_u,
	           vm_sanitize_compute_ut_end(start_u, size_u),
	           new_inheritance_u);
}

/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses that can
 *	be described by a vm_address_t).
 */
kern_return_t
vm_inherit(
	vm_map_t                map,
	vm_offset_ut            start_u,
	vm_size_ut              size_u,
	vm_inherit_ut           new_inheritance_u)
{
	return mach_vm_inherit(map, start_u, size_u, new_inheritance_u);
}

/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */

kern_return_t
mach_vm_protect(
	vm_map_t                map,
	mach_vm_address_ut      start_u,
	mach_vm_size_ut         size_u,
	boolean_t               set_maximum,
	vm_prot_ut              new_protection_u)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_protect(map,
	           start_u,
	           vm_sanitize_compute_ut_end(start_u, size_u),
	           set_maximum,
	           new_protection_u);
}
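
/*
 * Usage sketch (userspace, illustrative only): making a freshly allocated
 * page read-only and then restoring write access. With set_maximum == FALSE
 * only the current protection changes, so the write permission can be
 * re-added later.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr,
 *	    vm_page_size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_protect(mach_task_self(), addr, vm_page_size,
 *		    FALSE, VM_PROT_READ);
 *		kr = mach_vm_protect(mach_task_self(), addr, vm_page_size,
 *		    FALSE, VM_PROT_READ | VM_PROT_WRITE);
 *	}
 */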

/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map. Addressability of the range limited
 *	to the same size as the kernel.
 */

kern_return_t
vm_protect(
	vm_map_t                map,
	vm_offset_ut            start_u,
	vm_size_ut              size_u,
	boolean_t               set_maximum,
	vm_prot_ut              new_protection_u)
{
	return mach_vm_protect(map, start_u, size_u, set_maximum, new_protection_u);
}

/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t                map,
	mach_vm_address_ut      addr_u,
	mach_vm_size_ut         size_u,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value) /* IN/OUT */
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_machine_attribute(map,
	           addr_u,
	           vm_sanitize_compute_ut_end(addr_u, size_u),
	           attribute,
	           value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cachability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t                map,
	vm_address_ut           addr_u,
	vm_size_ut              size_u,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value) /* IN/OUT */
{
	return mach_vm_machine_attribute(map, addr_u, size_u, attribute, value);
}

/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 *
 */
kern_return_t
mach_vm_read(
	vm_map_t                map,
	mach_vm_address_ut      addr,
	mach_vm_size_ut         size,
	pointer_ut             *data,
	mach_msg_type_number_t *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   ipc_address;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * mach_msg_type_number_t is only 32 bits wide,
	 * make sure we do not overflow it.
	 */
	if (!VM_SANITIZE_UNSAFE_FITS(size, mach_msg_type_number_t)) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map, addr, size, FALSE, &ipc_address);

	if (KERN_SUCCESS == error) {
		VM_SANITIZE_UT_SET(*data, (pointer_t) ipc_address);
		/* On success we know size was validated by vm_map_copyin. */
		*data_size =
		    (mach_msg_type_number_t)VM_SANITIZE_UNSAFE_UNWRAP(size);
	}
	return error;
}
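
/*
 * Usage sketch (userspace, illustrative only): reading a page from a task.
 * On the user side the out-of-line reply data arrives as a vm_offset_t
 * buffer in the caller's address space and must be released with
 * mach_vm_deallocate when done. (some_addr stands for any mapped address
 * in the target task and is not defined here.)
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	vm_offset_t data = 0;
 *	mach_msg_type_number_t count = 0;
 *	kern_return_t kr = mach_vm_read(mach_task_self(), some_addr,
 *	    vm_page_size, &data, &count);
 *	if (kr == KERN_SUCCESS) {
 *		(void)mach_vm_deallocate(mach_task_self(), data, count);
 *	}
 */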

/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t                map,
	vm_address_ut           addr,
	vm_size_ut              size,
	pointer_ut             *data,
	mach_msg_type_number_t *data_size)
{
	return mach_vm_read(map, addr, size, data, data_size);
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t                        map,
	mach_vm_read_entry_t            data_list,
	natural_t                       count)
{
	mach_msg_type_number_t  i;
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
			    map_addr,
			    map_size,
			    FALSE,              /* src_destroy */
			    &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
					current_task()->map,
					&map_addr,
					copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
	vm_map_t                map,
	vm_read_entry_t data_list,
	natural_t               count)
{
	mach_msg_type_number_t  i;
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	error = KERN_SUCCESS;
	for (i = 0; i < count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if (map_size != 0) {
			error = vm_map_copyin(map,
			    map_addr,
			    map_size,
			    FALSE,              /* src_destroy */
			    &copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
				    &map_addr,
				    copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
					    CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return error;
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore, so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
	vm_map_t                map,
	mach_vm_address_ut      address,
	mach_vm_size_ut         size,
	mach_vm_address_ut      data,
	mach_vm_size_ut        *data_size)
{
	kern_return_t   error;
	vm_map_copy_t   copy;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map, address, size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		if (copy) {
			assert(VM_SANITIZE_UNSAFE_IS_EQUAL(size, copy->size));
		}

		error = vm_map_copy_overwrite(current_thread()->map,
		    data, copy, size, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return error;
}

/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least
 * the ranges are in that first portion of the respective address
 * spaces).
 */

kern_return_t
vm_read_overwrite(
	vm_map_t                map,
	vm_address_ut           address,
	vm_size_ut              size,
	vm_address_ut           data,
	vm_size_ut             *data_size)
{
	return mach_vm_read_overwrite(map, address, size, data, data_size);
}

/*
 * mach_vm_update_pointers_with_remote_tags -
 */

kern_return_t
mach_vm_update_pointers_with_remote_tags(
	__unused vm_map_t map,
	__unused mach_vm_offset_list_t in_pointer_list,
	__unused mach_msg_type_number_t in_pointer_listCnt,
	__unused mach_vm_offset_list_t out_pointer_list,
	__unused mach_msg_type_number_t *out_pointer_listCnt)
{
	if (!in_pointer_list || !out_pointer_list || in_pointer_listCnt >= 512) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!map || !map->pmap) {
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_FAILURE;
}

/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t                map,
	mach_vm_address_ut      address,
	pointer_ut              data_u,
	mach_msg_type_number_t  size)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * data is created by the kernel's MIG server from a userspace buffer,
	 * so it is safe to unwrap.
	 */
	vm_map_copy_t data = (vm_map_copy_t) VM_SANITIZE_UNSAFE_UNWRAP(data_u);

	return vm_map_copy_overwrite(map, address,
	           data, size, FALSE /* interruptible XXX */);
}

/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t                map,
	vm_address_ut           address,
	pointer_ut              data,
	mach_msg_type_number_t  size)
{
	return mach_vm_write(map, address, data, size);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t                map,
	mach_vm_address_ut      source_address,
	mach_vm_size_ut         size,
	mach_vm_address_ut      dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_copyin(map, source_address, size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		if (copy) {
			assert(VM_SANITIZE_UNSAFE_IS_EQUAL(size, copy->size));
		}

		kr = vm_map_copy_overwrite(map, dest_address,
		    copy, size, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr) {
			vm_map_copy_discard(copy);
		}
	}
	return kr;
}
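
/*
 * Usage sketch (userspace, illustrative only): duplicating one page of the
 * current task over another already-mapped, writable page of the same task.
 * (src and dst stand for two suitably mapped page-aligned addresses and are
 * not defined here.)
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t kr = mach_vm_copy(mach_task_self(), src,
 *	    vm_page_size, dst);
 */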

kern_return_t
vm_copy(
	vm_map_t                map,
	vm_address_ut           source_address,
	vm_size_ut              size,
	vm_address_ut           dest_address)
{
	return mach_vm_copy(map, source_address, size, dest_address);
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *	NULL - anonymous memory
 *	a named entry - a range within another address space
 *	                or a range within a memory object
 *	a whole memory object
 *
 */
kern_return_t
mach_vm_map_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         initial_size,
	mach_vm_offset_ut       mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
	/* range_id is set by mach_vm_map_kernel */
	return mach_vm_map_kernel(target_map, address, initial_size, mask,
	           vmk_flags, port, offset, copy,
	           cur_protection, max_protection,
	           inheritance);
}
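
/*
 * Usage sketch (userspace, illustrative only): mapping memory described by a
 * named entry with mach_vm_map; passing MACH_PORT_NULL as the object would
 * map fresh zero-fill anonymous memory instead. MAP_MEM_NAMED_CREATE is
 * assumed here to be visible to the caller (it comes from
 * <mach/memory_object_types.h>).
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	memory_object_size_t entry_size = vm_page_size;
 *	mach_port_t entry = MACH_PORT_NULL;
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, 0,
 *	    MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, &entry, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_map(mach_task_self(), &addr, entry_size, 0,
 *		    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *		    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
 *	}
 */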

/* legacy interface */
__attribute__((always_inline))
kern_return_t
vm_map_64_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	memory_object_offset_ut offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

/* temporary, until world build */
__attribute__((always_inline))
kern_return_t
vm_map_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	ipc_port_t              port,
	vm_offset_ut            offset,
	boolean_t               copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance)
{
	return mach_vm_map_external(target_map, address,
	           size, mask, flags, port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_remap_new_external_sanitize(
	vm_map_t                target_map,
	vm_prot_ut              cur_protection_u,
	vm_prot_ut              max_protection_u,
	vm_prot_t              *cur_protection,
	vm_prot_t              *max_protection)
{
	return vm_sanitize_cur_and_max_prots(cur_protection_u, max_protection_u,
	           VM_SANITIZE_CALLER_VM_MAP_REMAP, target_map,
	           cur_protection, max_protection);
}

/*
 * mach_vm_remap_new -
 * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
mach_vm_remap_new_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	mach_port_t             src_tport,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection_u,   /* IN/OUT */
	vm_prot_ut             *max_protection_u,   /* IN/OUT */
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t   vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vm_map_t                src_map;
	vm_prot_t               cur_protection, max_protection;
	kern_return_t           kr;

	if (target_map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags,
	    flags | VM_FLAGS_RETURN_DATA_ADDR);

	/*
	 * We don't need cur_protection here, but sanitizing it before
	 * enforcing W^X below matches historical error codes better.
	 */
	kr = mach_vm_remap_new_external_sanitize(target_map,
	    *cur_protection_u,
	    *max_protection_u,
	    &cur_protection,
	    &max_protection);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	if ((max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
		/*
		 * XXX FBDP TODO
		 * enforce target's "wx" policies
		 */
		return KERN_PROTECTION_FAILURE;
	}

	if (copy || max_protection == VM_PROT_READ || max_protection == VM_PROT_NONE) {
		src_map = convert_port_to_map_read(src_tport);
	} else {
		src_map = convert_port_to_map(src_tport);
	}

	/* range_id is set by vm_map_remap */
	kr = vm_map_remap(target_map,
	    address,
	    size,
	    mask,
	    vmk_flags,
	    src_map,
	    memory_address,
	    copy,
	    cur_protection_u,    /* IN/OUT */
	    max_protection_u,    /* IN/OUT */
	    inheritance);

	vm_map_deallocate(src_map);

	if (kr == KERN_SUCCESS) {
		ipc_port_release_send(src_tport);  /* consume on success */
	}
	return kr;
}
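
/*
 * Usage sketch (userspace, illustrative only): creating a second, read-only,
 * copied view of an existing page in the current task with mach_vm_remap_new
 * (assumed available via <mach/mach_vm.h> on recent systems). The protection
 * arguments are in/out: on return they hold what was actually granted.
 * (src_addr stands for an existing mapped address and is not defined here.)
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t alias = 0;
 *	vm_prot_t cur = VM_PROT_READ, max = VM_PROT_READ;
 *	kern_return_t kr = mach_vm_remap_new(mach_task_self(), &alias,
 *	    vm_page_size, 0, VM_FLAGS_ANYWHERE, mach_task_self(), src_addr,
 *	    TRUE, &cur, &max, VM_INHERIT_DEFAULT);
 */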

/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
	vm_map_t                target_map,
	mach_vm_offset_ut      *address,
	mach_vm_size_ut         size,
	mach_vm_offset_ut       mask,
	int                     flags,
	vm_map_t                src_map,
	mach_vm_offset_ut       memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,    /* OUT */
	vm_prot_ut             *max_protection,    /* OUT */
	vm_inherit_ut           inheritance)
{
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP) {
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);

	*cur_protection = vm_sanitize_wrap_prot(VM_PROT_NONE);
	*max_protection = vm_sanitize_wrap_prot(VM_PROT_NONE);
	vmk_flags.vmkf_remap_legacy_mode = true;

	/* range_id is set by vm_map_remap */
	return vm_map_remap(target_map,
	           address,
	           size,
	           mask,
	           vmk_flags,
	           src_map,
	           memory_address,
	           copy,
	           cur_protection,
	           max_protection,
	           inheritance);
}

/*
 * vm_remap_new -
 * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
vm_remap_new_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	mach_port_t             src_tport,
	vm_offset_ut            memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,       /* IN/OUT */
	vm_prot_ut             *max_protection,       /* IN/OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_new_external(target_map,
	           address,
	           size,
	           mask,
	           flags,
	           src_tport,
	           memory_address,
	           copy,
	           cur_protection, /* IN/OUT */
	           max_protection, /* IN/OUT */
	           inheritance);
}

/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
	vm_map_t                target_map,
	vm_offset_ut           *address,
	vm_size_ut              size,
	vm_offset_ut            mask,
	int                     flags,
	vm_map_t                src_map,
	vm_offset_ut            memory_address,
	boolean_t               copy,
	vm_prot_ut             *cur_protection,    /* OUT */
	vm_prot_ut             *max_protection,    /* OUT */
	vm_inherit_ut           inheritance)
{
	return mach_vm_remap_external(target_map, address,
	           size, mask, flags, src_map, memory_address, copy,
	           cur_protection, max_protection, inheritance);
}

/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed to use
 * ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
	host_priv_t             host_priv,
	vm_map_t                map,
	mach_vm_address_ut      start,
	mach_vm_size_ut         size,
	vm_prot_ut              access)
{
	kern_return_t     rc;
	mach_vm_offset_ut end;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_HOST;
	}

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	end = vm_sanitize_compute_ut_end(start, size);
	if (VM_SANITIZE_UNSAFE_IS_ZERO(access)) {
		rc = vm_map_unwire_impl(map, start, end, true,
		    VM_SANITIZE_CALLER_VM_UNWIRE_USER);
	} else {
		rc = vm_map_wire_impl(map, start, end, access,
		    VM_KERN_MEMORY_MLOCK, true, NULL, VM_SANITIZE_CALLER_VM_WIRE_USER);
	}

	return rc;
}
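
/*
 * Usage sketch (userspace, illustrative only): wiring a page so it cannot
 * fault, then unwiring it by passing VM_PROT_NONE. Assumes the caller has
 * already obtained the privileged host port (host_priv), which requires
 * root; addr stands for the base of a mapped region.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t kr = mach_vm_wire(host_priv, mach_task_self(),
 *	    addr, vm_page_size, VM_PROT_READ | VM_PROT_WRITE);
 *	if (kr == KERN_SUCCESS) {
 *		kr = mach_vm_wire(host_priv, mach_task_self(),
 *		    addr, vm_page_size, VM_PROT_NONE);
 *	}
 */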

/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t             host_priv,
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size,
	vm_prot_ut              access)
{
	return mach_vm_wire_external(host_priv, map, start, size, access);
}

/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
	vm_map_t                map,
	mach_vm_address_ut      address_u,
	mach_vm_size_ut         size_u,
	vm_sync_t               sync_flags)
{
	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	if (VM_SANITIZE_UNSAFE_IS_ZERO(size_u)) {
		return KERN_SUCCESS;
	}

	return vm_map_msync(map, address_u, size_u, sync_flags);
}
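
/*
 * Usage sketch (userspace, illustrative only): synchronously flushing any
 * dirty pages in a range back to their pager without discarding them.
 * (addr stands for the base of a mapped region.)
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t kr = mach_vm_msync(mach_task_self(), addr,
 *	    vm_page_size, VM_SYNC_SYNCHRONOUS);
 */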

/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
vm_msync(
	vm_map_t        map,
	vm_address_ut   address_u,
	vm_size_ut      size_u,
	vm_sync_t       sync_flags)
{
	return mach_vm_msync(map, address_u, size_u, sync_flags);
}


int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	assert(!map->is_nested_map);
	if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
		*old_value = map->disable_vmentry_reuse;
	} else if (toggle == VM_TOGGLE_SET) {
		vm_map_entry_t map_to_entry;

		vm_map_lock(map);
		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR) {
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


static __attribute__((always_inline, warn_unused_result))
kern_return_t
mach_vm_behavior_set_sanitize(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_behavior_ut          new_behavior_u,
	mach_vm_offset_t       *start,
	mach_vm_offset_t       *end,
	mach_vm_size_t         *size,
	vm_behavior_t          *new_behavior)
{
	mach_vm_offset_t align_mask;
	kern_return_t    kr;

	kr = vm_sanitize_behavior(new_behavior_u, VM_SANITIZE_CALLER_VM_BEHAVIOR_SET, new_behavior);
	if (__improbable(kr != KERN_SUCCESS)) {
		return kr;
	}

	/* Choose alignment of addr/size based on the behavior being set. */
	switch (*new_behavior) {
	case VM_BEHAVIOR_REUSABLE:
	case VM_BEHAVIOR_REUSE:
	case VM_BEHAVIOR_CAN_REUSE:
	case VM_BEHAVIOR_ZERO:
		/*
		 * Align to the hardware page size, to allow
		 * malloc() to maximize the amount of re-usability,
		 * even on systems with larger software page size.
		 */
		align_mask = PAGE_MASK;
		break;
	default:
		align_mask = VM_MAP_PAGE_MASK(map);
		break;
	}

	kr = vm_sanitize_addr_size(start_u, size_u, VM_SANITIZE_CALLER_VM_BEHAVIOR_SET,
	    align_mask, map,
	    VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS,
	    start, end, size);
	if (__improbable(kr != KERN_SUCCESS)) {
		return kr;
	}

	return KERN_SUCCESS;
}

/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t                map,
	mach_vm_offset_ut       start_u,
	mach_vm_size_ut         size_u,
	vm_behavior_ut          new_behavior_u)
{
	kern_return_t    kr;
	mach_vm_offset_t start, end;
	mach_vm_size_t   size;
	vm_behavior_t    new_behavior;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_vm_behavior_set_sanitize(map,
	    start_u, size_u, new_behavior_u,
	    &start, &end, &size, &new_behavior);
	if (__improbable(kr != KERN_SUCCESS)) {
		return vm_sanitize_get_kr(kr);
	}

	return vm_map_behavior_set(map,
	           start,
	           end,
	           new_behavior);
}
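
/*
 * Usage sketch (userspace, illustrative only): hinting that a buffer's
 * contents are no longer needed (similar in spirit to
 * madvise(MADV_FREE_REUSABLE)), so its pages can be reclaimed cheaply and
 * refaulted as zero-fill. (addr stands for the base of a mapped region.)
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	kern_return_t kr = mach_vm_behavior_set(mach_task_self(), addr,
 *	    vm_page_size, VM_BEHAVIOR_REUSABLE);
 */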

/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t                map,
	vm_offset_ut            start,
	vm_size_ut              size,
	vm_behavior_ut          new_behavior)
{
	return mach_vm_behavior_set(map,
	           start,
	           size,
	           new_behavior);
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
	vm_map_t                map,
	mach_vm_offset_ut      *address_u,      /* IN/OUT */
	mach_vm_size_ut        *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor) {
		flavor = VM_REGION_BASIC_INFO_64;
	}

	return vm_map_region(map, address_u, size_u, flavor, info, count,
	           object_name);
}
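
/*
 * Usage sketch (userspace, illustrative only): querying the region that
 * contains a given address. On return address/size describe the enclosing
 * region and the basic info structure holds its protections. (some_addr
 * stands for any address of interest.)
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = some_addr;
 *	mach_vm_size_t    rsize = 0;
 *	vm_region_basic_info_data_64_t binfo;
 *	mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t obj = MACH_PORT_NULL;
 *	kern_return_t kr = mach_vm_region(mach_task_self(), &addr, &rsize,
 *	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&binfo, &cnt, &obj);
 */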

static inline kern_return_t
vm_region_get_kern_return(
	kern_return_t           kr,
	vm_offset_ut            addr_u,
	vm_size_ut              size_u)
{
	vm_offset_ut end_u = vm_sanitize_compute_ut_end(addr_u, size_u);

	if (KERN_SUCCESS == kr && VM_SANITIZE_UNSAFE_UNWRAP(end_u) > VM_MAX_ADDRESS) {
		return KERN_INVALID_ADDRESS;
	}
	return kr;
}

/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
	vm_map_t                map,
	vm_offset_ut           *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	kern_return_t kr;

	kr = mach_vm_region(map, address_u, size_u, flavor, info, count,
	    object_name);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
vm_region(
	vm_map_t                map,
	vm_address_ut          *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	vm_region_flavor_t      flavor,         /* IN */
	vm_region_info_t        info,           /* OUT */
	mach_msg_type_number_t *count,          /* IN/OUT */
	mach_port_t            *object_name)    /* OUT */
{
	kern_return_t kr;

	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_region(map, address_u, size_u, flavor, info, count,
	    object_name);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

/*
 *	mach_vm_region_recurse: A form of mach_vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t                map,
	mach_vm_address_ut     *address_u,
	mach_vm_size_ut        *size_u,
	uint32_t               *depth,
	vm_region_recurse_info_t info,
	mach_msg_type_number_t *infoCnt)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_region_recurse_64(map, address_u, size_u, depth,
	           (vm_region_submap_info_64_t)info, infoCnt);
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t                map,
	vm_address_ut          *address_u,
	vm_size_ut             *size_u,
	uint32_t               *depth,
	vm_region_recurse_info_64_t info,
	mach_msg_type_number_t *infoCnt)
{
	kern_return_t kr;

	kr = mach_vm_region_recurse(map, address_u, size_u, depth,
	    (vm_region_recurse_info_t)info, infoCnt);

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
vm_region_recurse(
	vm_map_t                map,
	vm_offset_ut           *address_u,      /* IN/OUT */
	vm_size_ut             *size_u,         /* OUT */
	natural_t              *depth,          /* IN/OUT */
	vm_region_recurse_info_t info32,        /* IN/OUT */
	mach_msg_type_number_t *infoCnt)        /* IN/OUT */
{
	vm_region_submap_info_data_64_t info64;
	vm_region_submap_info_t info;
	kern_return_t           kr;

	if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	info = (vm_region_submap_info_t)info32;
	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64;

	kr = vm_map_region_recurse_64(map, address_u, size_u,
	    depth, &info64, infoCnt);

	info->protection = info64.protection;
	info->max_protection = info64.max_protection;
	info->inheritance = info64.inheritance;
	info->offset = (uint32_t)info64.offset; /* trouble-maker */
	info->user_tag = info64.user_tag;
	info->pages_resident = info64.pages_resident;
	info->pages_shared_now_private = info64.pages_shared_now_private;
	info->pages_swapped_out = info64.pages_swapped_out;
	info->pages_dirtied = info64.pages_dirtied;
	info->ref_count = info64.ref_count;
	info->shadow_depth = info64.shadow_depth;
	info->external_pager = info64.external_pager;
	info->share_mode = info64.share_mode;
	info->is_submap = info64.is_submap;
	info->behavior = info64.behavior;
	info->object_id = info64.object_id;
	info->user_wired_count = info64.user_wired_count;

	*infoCnt = VM_REGION_SUBMAP_INFO_COUNT;

	return vm_region_get_kern_return(kr, *address_u, *size_u);
}

kern_return_t
mach_vm_purgable_control(
	vm_map_t                map,
	mach_vm_offset_ut       address_u,
	vm_purgable_t           control,
	int                    *state)
{
	if (VM_MAP_NULL == map) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (control) {
	case VM_PURGABLE_SET_STATE:
	case VM_PURGABLE_GET_STATE:
	case VM_PURGABLE_PURGE_ALL:
		break;
	case VM_PURGABLE_SET_STATE_FROM_KERNEL:
	default:
		/* not allowed from user-space */
		return KERN_INVALID_ARGUMENT;
	}

	return vm_map_purgable_control(map, address_u, control, state);
}

kern_return_t
mach_vm_purgable_control_external(
	mach_port_t             target_tport,
	mach_vm_offset_ut       address_u,
	vm_purgable_t           control,
	int                    *state)
{
	vm_map_t map;
	kern_return_t kr;

	if (control == VM_PURGABLE_GET_STATE) {
		map = convert_port_to_map_read(target_tport);
	} else {
		map = convert_port_to_map(target_tport);
	}

	kr = mach_vm_purgable_control(map, address_u, control, state);
	vm_map_deallocate(map);

	return kr;
}

kern_return_t
vm_purgable_control_external(
	mach_port_t             target_tport,
	vm_offset_ut            address,
	vm_purgable_t           control,
	int                     *state)
{
	return mach_vm_purgable_control_external(target_tport, address, control, state);
}
1591 
1592 
1593 kern_return_t
1594 mach_vm_page_query(
1595 	vm_map_t                map,
1596 	mach_vm_offset_ut       offset_u,
1597 	int                    *disposition,
1598 	int                    *ref_count)
1599 {
1600 	kern_return_t                   kr;
1601 	vm_page_info_basic_data_t       info;
1602 	mach_msg_type_number_t          count;
1603 
1604 	if (VM_MAP_NULL == map) {
1605 		return KERN_INVALID_ARGUMENT;
1606 	}
1607 
1608 	count = VM_PAGE_INFO_BASIC_COUNT;
1609 	kr = vm_map_page_info(map, offset_u, VM_PAGE_INFO_BASIC,
1610 	    (vm_page_info_t) &info, &count);
1611 	if (kr == KERN_SUCCESS) {
1612 		*disposition = info.disposition;
1613 		*ref_count = info.ref_count;
1614 	} else {
1615 		*disposition = 0;
1616 		*ref_count = 0;
1617 	}
1618 
1619 	return kr;
1620 }
1621 
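/*
 * [Editor's note] Minimal user-space sketch of the single-page query
 * wrapped above; illustrative only. The disposition is a bitmask of
 * VM_PAGE_QUERY_PAGE_* flags (declared in <mach/vm_statistics.h>).
 */
#if 0 /* example only, not compiled */
#include <stdbool.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

static bool
page_is_resident(mach_vm_address_t addr)
{
	int disposition = 0;
	int ref_count = 0;

	if (mach_vm_page_query(mach_task_self(), addr,
	    &disposition, &ref_count) != KERN_SUCCESS) {
		return false;
	}
	return (disposition & VM_PAGE_QUERY_PAGE_PRESENT) != 0;
}
#endif
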
1622 kern_return_t
1623 vm_map_page_query(
1624 	vm_map_t                map,
1625 	vm_offset_ut            offset,
1626 	int                    *disposition,
1627 	int                    *ref_count)
1628 {
1629 	return mach_vm_page_query(map, offset, disposition, ref_count);
1630 }
1631 
1632 static __attribute__((always_inline, warn_unused_result))
1633 kern_return_t
1634 mach_vm_page_range_query_sanitize(
1635 	mach_vm_offset_ut       address_u,
1636 	mach_vm_size_ut         size_u,
1637 	int                     effective_page_mask,
1638 	mach_vm_address_ut      dispositions_addr_u,
1639 	mach_vm_size_ut         dispositions_count_u,
1640 	mach_vm_offset_t       *start,
1641 	mach_vm_size_t         *size,
1642 	mach_vm_address_t      *dispositions_addr,
1643 	mach_vm_size_t         *disp_buf_req_size)
1644 {
1645 	mach_vm_offset_t  end;
1646 	mach_vm_size_t    dispositions_count;
1647 	mach_vm_address_t discard;
1648 
1649 	/*
1650 	 * There are no alignment requirements on
1651 	 * dispositions_addr_u/dispositions_count_u; they are only used to
1652 	 * derive the inputs to copyout, so it is safe to unwrap them. We
1653 	 * do want to check that the range starting at dispositions_addr_u
1654 	 * and ending after dispositions_count_u integers is sound (i.e.,
1655 	 * doesn't wrap around due to integer overflow).
1656 	 */
1657 	*dispositions_addr = VM_SANITIZE_UNSAFE_UNWRAP(dispositions_addr_u);
1658 	dispositions_count = VM_SANITIZE_UNSAFE_UNWRAP(dispositions_count_u);
1659 	if (
1660 		os_mul_overflow(
1661 			dispositions_count,
1662 			sizeof(int),
1663 			disp_buf_req_size) ||
1664 		os_add_overflow(
1665 			*dispositions_addr,
1666 			*disp_buf_req_size,
1667 			&discard)) {
1668 		return KERN_INVALID_ARGUMENT;
1669 	}
1670 
1671 	return vm_sanitize_addr_size(address_u, size_u,
1672 	           VM_SANITIZE_CALLER_VM_MAP_PAGE_RANGE_QUERY,
1673 	           effective_page_mask,
1674 	           VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, start,
1675 	           &end, size);
1676 }
1677 
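/*
 * [Editor's note] The sanitizer above leans on the checked-arithmetic
 * idiom from <os/overflow.h>: both count * sizeof(int) and addr + bytes
 * must be computed without wrapping before the buffer is trusted. A
 * standalone sketch of the same pattern (hypothetical helper, for
 * illustration only):
 */
#if 0 /* example only, not compiled */
#include <stdbool.h>
#include <stdint.h>
#include <os/overflow.h>

static bool
range_is_sound(uint64_t addr, uint64_t count, uint64_t *bytes_out)
{
	uint64_t bytes;
	uint64_t end;

	/* reject count values whose byte size overflows... */
	if (os_mul_overflow(count, sizeof(int), &bytes)) {
		return false;
	}
	/* ...and buffers that wrap around the address space */
	if (os_add_overflow(addr, bytes, &end)) {
		return false;
	}
	*bytes_out = bytes;
	return true;
}
#endif
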
1678 kern_return_t
1679 mach_vm_page_range_query(
1680 	vm_map_t                map,
1681 	mach_vm_offset_ut       address_u,
1682 	mach_vm_size_ut         size_u,
1683 	mach_vm_address_ut      dispositions_addr_u,
1684 	mach_vm_size_ut        *dispositions_count_u)
1685 {
1686 	kern_return_t           kr;
1687 	int                     num_pages = 0, i = 0;
1688 	mach_vm_size_t          curr_sz = 0, copy_sz = 0;
1689 	mach_vm_size_t          disp_buf_req_size = 0, disp_buf_total_size = 0;
1690 	mach_msg_type_number_t  count = 0;
1691 	mach_vm_address_t       dispositions_addr;
1692 
1693 	void                    *info = NULL;
1694 	void                    *local_disp = NULL;
1695 	vm_map_size_t           info_size = 0, local_disp_size = 0;
1696 	mach_vm_offset_t        start = 0;
1697 	vm_map_size_t           size;
1698 	int                     effective_page_shift, effective_page_size, effective_page_mask;
1699 
1700 	if (map == VM_MAP_NULL || dispositions_count_u == NULL) {
1701 		return KERN_INVALID_ARGUMENT;
1702 	}
1703 
1704 	effective_page_shift = vm_self_region_page_shift_safely(map);
1705 	if (effective_page_shift == -1) {
1706 		return KERN_INVALID_ARGUMENT;
1707 	}
1708 	effective_page_size = (1 << effective_page_shift);
1709 	effective_page_mask = effective_page_size - 1;
1710 
1711 	kr = mach_vm_page_range_query_sanitize(address_u,
1712 	    size_u,
1713 	    effective_page_mask,
1714 	    dispositions_addr_u,
1715 	    *dispositions_count_u,
1716 	    &start,
1717 	    &size,
1718 	    &dispositions_addr,
1719 	    &disp_buf_req_size);
1720 	if (__improbable(kr != KERN_SUCCESS)) {
1721 		return vm_sanitize_get_kr(kr);
1722 	}
1723 
1724 	if (disp_buf_req_size == 0 || size == 0) {
1725 		return KERN_SUCCESS;
1726 	}
1727 
1728 	/*
1729 	 * For large requests, we will go through them
1730 	 * one MAX_PAGE_RANGE_QUERY chunk at a time.
1731 	 */
1732 
1733 	curr_sz = MIN(size, MAX_PAGE_RANGE_QUERY);
1734 	num_pages = (int) (curr_sz >> effective_page_shift);
1735 
1736 	info_size = num_pages * sizeof(vm_page_info_basic_data_t);
1737 	info = kalloc_data(info_size, Z_WAITOK);
1738 
1739 	local_disp_size = num_pages * sizeof(int);
1740 	local_disp = kalloc_data(local_disp_size, Z_WAITOK);
1741 
1742 	if (info == NULL || local_disp == NULL) {
1743 		kr = KERN_RESOURCE_SHORTAGE;
1744 		goto out;
1745 	}
1746 
1747 	while (size) {
1748 		count = VM_PAGE_INFO_BASIC_COUNT;
1749 		kr = vm_map_page_range_info_internal(
1750 			map,
1751 			start,
1752 			vm_map_round_page(start + curr_sz, effective_page_mask),
1753 			effective_page_shift,
1754 			VM_PAGE_INFO_BASIC,
1755 			(vm_page_info_t) info,
1756 			&count);
1757 
1758 		assert(kr == KERN_SUCCESS);
1759 
1760 		for (i = 0; i < num_pages; i++) {
1761 			((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition;
1762 		}
1763 
1764 		copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */);
1765 		kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz);
1766 
1767 		start += curr_sz;
1768 		disp_buf_req_size -= copy_sz;
1769 		disp_buf_total_size += copy_sz;
1770 
1771 		if (kr != 0) {
1772 			break;
1773 		}
1774 
1775 		if ((disp_buf_req_size == 0) || (curr_sz >= size)) {
1776 			/*
1777 			 * We might have inspected the full range, or even
1778 			 * more than it, especially if the user passed in a
1779 			 * non-page-aligned start/size and/or if we
1780 			 * descended into a submap. We are done here.
1781 			 */
1782 
1783 			size = 0;
1784 		} else {
1785 			dispositions_addr += copy_sz;
1786 
1787 			size -= curr_sz;
1788 
1789 			curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY);
1790 			num_pages = (int)(curr_sz >> effective_page_shift);
1791 		}
1792 	}
1793 
1794 	VM_SANITIZE_UT_SET(
1795 		*dispositions_count_u,
1796 		disp_buf_total_size / sizeof(int));
1797 
1798 out:
1799 	kfree_data(local_disp, local_disp_size);
1800 	kfree_data(info, info_size);
1801 	return kr;
1802 }
1803 
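/*
 * [Editor's note] Hedged user-space sketch of the chunked query above:
 * one int of disposition flags comes back per page, and the in/out
 * count is the buffer capacity going in and the number of entries
 * filled coming out. Illustrative only.
 */
#if 0 /* example only, not compiled */
#include <stdint.h>
#include <stdlib.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
count_resident_pages(mach_vm_address_t addr, mach_vm_size_t size,
    mach_vm_size_t *resident_out)
{
	mach_vm_size_t npages = size / vm_page_size;
	mach_vm_size_t count = npages; /* IN: capacity, OUT: entries filled */
	int *disp = calloc(npages, sizeof(int));
	kern_return_t kr;

	if (disp == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	kr = mach_vm_page_range_query(mach_task_self(), addr, size,
	    (mach_vm_address_t)(uintptr_t)disp, &count);
	if (kr == KERN_SUCCESS) {
		mach_vm_size_t resident = 0;
		for (mach_vm_size_t i = 0; i < count; i++) {
			if (disp[i] & VM_PAGE_QUERY_PAGE_PRESENT) {
				resident++;
			}
		}
		*resident_out = resident;
	}
	free(disp);
	return kr;
}
#endif
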
1804 kern_return_t
1805 mach_vm_page_info(
1806 	vm_map_t                map,
1807 	mach_vm_address_ut      address,
1808 	vm_page_info_flavor_t   flavor,
1809 	vm_page_info_t          info,
1810 	mach_msg_type_number_t  *count)
1811 {
1812 	kern_return_t   kr;
1813 
1814 	if (map == VM_MAP_NULL) {
1815 		return KERN_INVALID_ARGUMENT;
1816 	}
1817 
1818 	kr = vm_map_page_info(map, address, flavor, info, count);
1819 	return kr;
1820 }
1821 
1822 /*
1823  *	task_wire
1824  *
1825  *	Set or clear the map's wiring_required flag.  This flag, if set,
1826  *	will cause all future virtual memory allocation to allocate
1827  *	user wired memory.  Unwiring pages wired down as a result of
1828  *	this routine is done with the vm_wire interface.
1829  */
1830 kern_return_t
1831 task_wire(
1832 	vm_map_t        map,
1833 	boolean_t       must_wire __unused)
1834 {
1835 	if (map == VM_MAP_NULL) {
1836 		return KERN_INVALID_ARGUMENT;
1837 	}
1838 
1839 	return KERN_NOT_SUPPORTED;
1840 }
1841 
1842 kern_return_t
1843 vm_map_exec_lockdown(
1844 	vm_map_t        map)
1845 {
1846 	if (map == VM_MAP_NULL) {
1847 		return KERN_INVALID_ARGUMENT;
1848 	}
1849 
1850 	vm_map_lock(map);
1851 	map->map_disallow_new_exec = TRUE;
1852 	vm_map_unlock(map);
1853 
1854 	return KERN_SUCCESS;
1855 }
1856 
1857 #if XNU_PLATFORM_MacOSX
1858 /*
1859  * Now a kernel-private interface (for BootCache
1860  * use only).  Need a cleaner way to create an
1861  * empty vm_map() and return a handle to it.
1862  */
1863 
1864 kern_return_t
1865 vm_region_object_create(
1866 	vm_map_t                target_map,
1867 	vm_size_t               size,
1868 	ipc_port_t              *object_handle)
1869 {
1870 	vm_named_entry_t        user_entry;
1871 	vm_map_t                new_map;
1872 
1873 	user_entry = mach_memory_entry_allocate(object_handle);
1874 
1875 	/* Create a named object based on a submap of specified size */
1876 
1877 	new_map = vm_map_create_options(PMAP_NULL, VM_MAP_MIN_ADDRESS,
1878 	    vm_map_round_page(size, VM_MAP_PAGE_MASK(target_map)),
1879 	    VM_MAP_CREATE_PAGEABLE);
1880 	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));
1881 
1882 	user_entry->backing.map = new_map;
1883 	user_entry->internal = TRUE;
1884 	user_entry->is_sub_map = TRUE;
1885 	user_entry->offset = 0;
1886 	user_entry->protection = VM_PROT_ALL;
1887 	user_entry->size = size;
1888 
1889 	return KERN_SUCCESS;
1890 }
1891 #endif /* XNU_PLATFORM_MacOSX */
1892 
1893 extern boolean_t proc_is_simulated(struct proc *p);
1894 
1895 kern_return_t
1896 mach_vm_deferred_reclamation_buffer_allocate(
1897 	task_t           task,
1898 	mach_vm_address_ut *address,
1899 	uint32_t initial_capacity,
1900 	uint32_t max_capacity)
1901 {
1902 #if CONFIG_DEFERRED_RECLAIM
1903 	if (task != current_task()) {
1904 		/* Remote buffer operations are not supported */
1905 		return KERN_INVALID_TASK;
1906 	}
1907 	struct proc *p = task_get_proc_raw(task);
1908 	if (proc_is_simulated(p)) {
1909 		return KERN_NOT_SUPPORTED;
1910 	}
1911 	return vm_deferred_reclamation_buffer_allocate_internal(task, address, initial_capacity, max_capacity);
1912 #else
1913 	(void) task;
1914 	(void) address;
1915 	(void) initial_capacity; (void) max_capacity;
1916 	return KERN_NOT_SUPPORTED;
1917 #endif /* CONFIG_DEFERRED_RECLAIM */
1918 }
1919 
1920 kern_return_t
1921 mach_vm_deferred_reclamation_buffer_flush(
1922 	task_t task,
1923 	uint32_t num_entries_to_reclaim)
1924 {
1925 #if CONFIG_DEFERRED_RECLAIM
1926 	if (task != current_task()) {
1927 		/* Remote buffer operations are not supported */
1928 		return KERN_INVALID_TASK;
1929 	}
1930 	return vm_deferred_reclamation_buffer_flush_internal(task, num_entries_to_reclaim);
1931 #else
1932 	(void) task;
1933 	(void) num_entries_to_reclaim;
1934 	return KERN_NOT_SUPPORTED;
1935 #endif /* CONFIG_DEFERRED_RECLAIM */
1936 }
1937 
1938 kern_return_t
1939 mach_vm_deferred_reclamation_buffer_update_reclaimable_bytes(
1940 	task_t task,
1941 	mach_vm_size_ut reclaimable_bytes_u)
1942 {
1943 #if CONFIG_DEFERRED_RECLAIM
1944 	/*
1945 	 * This unwrapping is safe as reclaimable_bytes is not to be
1946 	 * interpreted as the size of range of addresses.
1947 	 */
1948 	mach_vm_size_t reclaimable_bytes =
1949 	    VM_SANITIZE_UNSAFE_UNWRAP(reclaimable_bytes_u);
1950 	if (task != current_task()) {
1951 		/* Remote buffer operations are not supported */
1952 		return KERN_INVALID_TASK;
1953 	}
1954 	return vm_deferred_reclamation_buffer_update_reclaimable_bytes_internal(task, reclaimable_bytes);
1955 #else
1956 	(void) task;
1957 	(void) reclaimable_bytes_u;
1958 	return KERN_NOT_SUPPORTED;
1959 #endif /* CONFIG_DEFERRED_RECLAIM */
1960 }
1961 
1962 kern_return_t
1963 mach_vm_deferred_reclamation_buffer_resize(task_t task,
1964     uint32_t capacity)
1965 {
1966 #if CONFIG_DEFERRED_RECLAIM
1967 	if (task != current_task()) {
1968 		/* Remote buffer operations are not supported */
1969 		return KERN_INVALID_TASK;
1970 	}
1971 	return vm_deferred_reclamation_buffer_resize_internal(task, capacity);
1972 #else
1973 	(void) task;
1974 	(void) capacity;
1975 	return KERN_NOT_SUPPORTED;
1976 #endif /* CONFIG_DEFERRED_RECLAIM */
1977 }
1978 
1979 #if CONFIG_MAP_RANGES
1980 
1981 extern void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
1982 
1983 static int
1984 vm_map_user_range_cmp(const void *e1, const void *e2)
1985 {
1986 	const struct vm_map_user_range *r1 = e1;
1987 	const struct vm_map_user_range *r2 = e2;
1988 
1989 	if (r1->vmur_min_address != r2->vmur_min_address) {
1990 		return r1->vmur_min_address < r2->vmur_min_address ? -1 : 1;
1991 	}
1992 
1993 	return 0;
1994 }
1995 
1996 static int
1997 mach_vm_range_recipe_v1_cmp(const void *e1, const void *e2)
1998 {
1999 	const mach_vm_range_recipe_v1_t *r1 = e1;
2000 	const mach_vm_range_recipe_v1_t *r2 = e2;
2001 
2002 	if (r1->range.min_address != r2->range.min_address) {
2003 		return r1->range.min_address < r2->range.min_address ? -1 : 1;
2004 	}
2005 
2006 	return 0;
2007 }
2008 
2009 static inline __result_use_check kern_return_t
2010 mach_vm_range_create_v1_sanitize(
2011 	vm_map_t                map,
2012 	mach_vm_range_recipe_v1_ut *recipe_u,
2013 	uint32_t count,
2014 	mach_vm_range_recipe_v1_t **recipe_p)
2015 {
2016 	kern_return_t kr;
2017 
2018 	for (size_t i = 0; i < count; i++) {
2019 		vm_map_offset_t start, end;
2020 		vm_map_size_t size;
2021 		mach_vm_range_ut *range_u = &recipe_u[i].range_u;
2022 		kr = vm_sanitize_addr_end(
2023 			range_u->min_address_u,
2024 			range_u->max_address_u,
2025 			VM_SANITIZE_CALLER_MACH_VM_RANGE_CREATE,
2026 			map,
2027 			VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS
2028 			| VM_SANITIZE_FLAGS_CHECK_ALIGNED_START
2029 			| VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE,
2030 			&start, &end, &size); // out values unused; we only validate
2031 		if (__improbable(kr != KERN_SUCCESS)) {
2032 			return kr;
2033 		}
2034 	}
2035 	/*
2036 	 * Sanitization only checked properties of recipe_u.
2037 	 * We can now see it through the lens of the safe type.
2038 	 * The cast is undefined behavior, but of the kind VM sanitization
2039 	 * relies on anyway, so we don't expect this to cause issues.
2040 	 */
2041 	*recipe_p = (mach_vm_range_recipe_v1_t *)recipe_u;
2042 
2043 	return KERN_SUCCESS;
2044 }
2045 
2046 /*!
2047  * @function mach_vm_range_create_v1()
2048  *
2049  * @brief
2050  * Handle the backend for mach_vm_range_create() for the
2051  * MACH_VM_RANGE_FLAVOR_V1 flavor.
2052  *
2053  * @description
2054  * This call allows the caller to create "ranges" in the map of a task
2055  * that have special semantics/policies around placement of
2056  * new allocations (in the vm_map_locate_space() sense).
2057  *
2058  * @returns
2059  * - KERN_SUCCESS on success
2060  * - KERN_INVALID_ARGUMENT for incorrect arguments
2061  * - KERN_NO_SPACE if the maximum number of ranges would be exceeded
2062  * - KERN_MEMORY_PRESENT if any of the requested ranges
2063  *   overlaps with existing ranges or allocations in the map.
2064  */
2065 static kern_return_t
2066 mach_vm_range_create_v1(
2067 	vm_map_t                   map,
2068 	mach_vm_range_recipe_v1_ut *recipe_u,
2069 	uint32_t                   new_count)
2070 {
2071 	mach_vm_range_recipe_v1_t *recipe;
2072 	vm_map_user_range_t table;
2073 	kern_return_t kr = KERN_SUCCESS;
2074 	uint16_t count;
2075 
2076 	struct mach_vm_range void1 = {
2077 		.min_address = map->default_range.max_address,
2078 		.max_address = map->data_range.min_address,
2079 	};
2080 	struct mach_vm_range void2 = {
2081 		.min_address = map->data_range.max_address,
2082 #if XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT
2083 		.max_address = MACH_VM_JUMBO_ADDRESS,
2084 #else /* !XNU_TARGET_OS_IOS || !EXTENDED_USER_VA_SUPPORT */
2085 		.max_address = vm_map_max(map),
2086 #endif /* XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT */
2087 	};
2088 
2089 	kr = mach_vm_range_create_v1_sanitize(map, recipe_u, new_count, &recipe);
2090 	if (__improbable(kr != KERN_SUCCESS)) {
2091 		return vm_sanitize_get_kr(kr);
2092 	}
2093 
2094 	qsort(recipe, new_count, sizeof(mach_vm_range_recipe_v1_t),
2095 	    mach_vm_range_recipe_v1_cmp);
2096 
2097 	/*
2098 	 * Step 1: Validate that the recipes have no intersections.
2099 	 */
2100 
2101 	for (size_t i = 0; i < new_count; i++) {
2102 		mach_vm_range_t r = &recipe[i].range;
2103 		mach_vm_size_t s;
2104 
2105 		if (recipe[i].flags) {
2106 			return KERN_INVALID_ARGUMENT;
2107 		}
2108 
2109 		static_assert((int)UMEM_RANGE_ID_FIXED == MACH_VM_RANGE_FIXED);
2110 		switch (recipe[i].range_tag) {
2111 		case MACH_VM_RANGE_FIXED:
2112 			break;
2113 		default:
2114 			return KERN_INVALID_ARGUMENT;
2115 		}
2116 
2117 		s = mach_vm_range_size(r);
2118 		if (!mach_vm_range_contains(&void1, r->min_address, s) &&
2119 		    !mach_vm_range_contains(&void2, r->min_address, s)) {
2120 			return KERN_INVALID_ARGUMENT;
2121 		}
2122 
2123 		if (i > 0 && recipe[i - 1].range.max_address >
2124 		    recipe[i].range.min_address) {
2125 			return KERN_INVALID_ARGUMENT;
2126 		}
2127 	}
2128 
2129 	vm_map_lock(map);
2130 
2131 	table = map->extra_ranges;
2132 	count = map->extra_ranges_count;
2133 
2134 	if (count + new_count > VM_MAP_EXTRA_RANGES_MAX) {
2135 		kr = KERN_NO_SPACE;
2136 		goto out_unlock;
2137 	}
2138 
2139 	/*
2140 	 * Step 2: Check that there is no intersection with existing ranges.
2141 	 */
2142 
2143 	for (size_t i = 0, j = 0; i < new_count && j < count;) {
2144 		mach_vm_range_t     r1 = &recipe[i].range;
2145 		vm_map_user_range_t r2 = &table[j];
2146 
2147 		if (r1->max_address <= r2->vmur_min_address) {
2148 			i++;
2149 		} else if (r2->vmur_max_address <= r1->min_address) {
2150 			j++;
2151 		} else {
2152 			kr = KERN_MEMORY_PRESENT;
2153 			goto out_unlock;
2154 		}
2155 	}
2156 
2157 	/*
2158 	 * Step 3: commit the new ranges.
2159 	 */
2160 
2161 	static_assert(VM_MAP_EXTRA_RANGES_MAX * sizeof(struct vm_map_user_range) <=
2162 	    KALLOC_SAFE_ALLOC_SIZE);
2163 
2164 	table = krealloc_data(table,
2165 	    count * sizeof(struct vm_map_user_range),
2166 	    (count + new_count) * sizeof(struct vm_map_user_range),
2167 	    Z_ZERO | Z_WAITOK | Z_NOFAIL);
2168 
2169 	for (size_t i = 0; i < new_count; i++) {
2170 		static_assert(MACH_VM_MAX_ADDRESS < (1ull << 56));
2171 
2172 		table[count + i] = (struct vm_map_user_range){
2173 			.vmur_min_address = recipe[i].range.min_address,
2174 			.vmur_max_address = recipe[i].range.max_address,
2175 			.vmur_range_id    = (vm_map_range_id_t)recipe[i].range_tag,
2176 		};
2177 	}
2178 
2179 	qsort(table, count + new_count,
2180 	    sizeof(struct vm_map_user_range), vm_map_user_range_cmp);
2181 
2182 	map->extra_ranges_count += new_count;
2183 	map->extra_ranges = table;
2184 
2185 out_unlock:
2186 	vm_map_unlock(map);
2187 
2188 	if (kr == KERN_SUCCESS) {
2189 		for (size_t i = 0; i < new_count; i++) {
2190 			vm_map_kernel_flags_t vmk_flags = {
2191 				.vmf_fixed = true,
2192 				.vmf_overwrite = true,
2193 				.vmkf_overwrite_immutable = true,
2194 				.vm_tag = recipe[i].vm_tag,
2195 			};
2196 			__assert_only kern_return_t kr2;
2197 
2198 			kr2 = vm_map_enter(map, &recipe[i].range.min_address,
2199 			    mach_vm_range_size(&recipe[i].range),
2200 			    0, vmk_flags, VM_OBJECT_NULL, 0, FALSE,
2201 			    VM_PROT_NONE, VM_PROT_ALL,
2202 			    VM_INHERIT_DEFAULT);
2203 			assert(kr2 == KERN_SUCCESS);
2204 		}
2205 	}
2206 	return kr;
2207 }
2208 
2209 kern_return_t
2210 mach_vm_range_create(
2211 	vm_map_t                map,
2212 	mach_vm_range_flavor_t  flavor,
2213 	mach_vm_range_recipes_raw_t recipe,
2214 	natural_t               size)
2215 {
2216 	if (map != current_map()) {
2217 		return KERN_INVALID_ARGUMENT;
2218 	}
2219 
2220 	if (!map->uses_user_ranges) {
2221 		return KERN_NOT_SUPPORTED;
2222 	}
2223 
2224 	if (size == 0) {
2225 		return KERN_SUCCESS;
2226 	}
2227 
2228 	if (flavor == MACH_VM_RANGE_FLAVOR_V1) {
2229 		mach_vm_range_recipe_v1_ut *array;
2230 
2231 		if (size % sizeof(mach_vm_range_recipe_v1_ut)) {
2232 			return KERN_INVALID_ARGUMENT;
2233 		}
2234 
2235 		size /= sizeof(mach_vm_range_recipe_v1_ut);
2236 		if (size > VM_MAP_EXTRA_RANGES_MAX) {
2237 			return KERN_NO_SPACE;
2238 		}
2239 
2240 		array = (mach_vm_range_recipe_v1_ut *)recipe;
2241 		return mach_vm_range_create_v1(map, array, size);
2242 	}
2243 
2244 	return KERN_INVALID_ARGUMENT;
2245 }
2246 
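/*
 * [Editor's note] A hedged user-space sketch of feeding one v1 recipe
 * to mach_vm_range_create(); illustrative only. The bounds below are
 * placeholders: real callers must pick page-aligned addresses inside
 * one of the map's unused voids, and the call only succeeds on the
 * current task when its map was created with user ranges enabled
 * (otherwise KERN_NOT_SUPPORTED comes back).
 */
#if 0 /* example only, not compiled */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
reserve_fixed_range(mach_vm_address_t min_addr, mach_vm_address_t max_addr)
{
	mach_vm_range_recipe_v1_t recipe = {
		.range_tag = MACH_VM_RANGE_FIXED,
		.range = {
			.min_address = min_addr, /* page aligned */
			.max_address = max_addr, /* page aligned */
		},
	};

	/* cast assumes the raw recipe type is a byte pointer */
	return mach_vm_range_create(mach_task_self(),
	           MACH_VM_RANGE_FLAVOR_V1,
	           (mach_vm_range_recipes_raw_t)&recipe,
	           sizeof(recipe));
}
#endif
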
2247 #else /* !CONFIG_MAP_RANGES */
2248 
2249 kern_return_t
2250 mach_vm_range_create(
2251 	vm_map_t                map,
2252 	mach_vm_range_flavor_t  flavor,
2253 	mach_vm_range_recipes_raw_t recipe,
2254 	natural_t               size)
2255 {
2256 #pragma unused(map, flavor, recipe, size)
2257 	return KERN_NOT_SUPPORTED;
2258 }
2259 
2260 #endif /* !CONFIG_MAP_RANGES */
2261 
2262 /*
2263  * These symbols are looked up at runtime by vmware, VirtualBox,
2264  * despite not being exported in the symbol sets.
2265  */
2266 
2267 #if defined(__x86_64__)
2268 
2269 extern typeof(mach_vm_remap_external) mach_vm_remap;
2270 extern typeof(mach_vm_map_external) mach_vm_map;
2271 extern typeof(vm_map_external) vm_map;
2272 
2273 kern_return_t
2274 mach_vm_map(
2275 	vm_map_t                target_map,
2276 	mach_vm_offset_ut      *address,
2277 	mach_vm_size_ut         initial_size,
2278 	mach_vm_offset_ut       mask,
2279 	int                     flags,
2280 	ipc_port_t              port,
2281 	memory_object_offset_ut offset,
2282 	boolean_t               copy,
2283 	vm_prot_ut              cur_protection,
2284 	vm_prot_ut              max_protection,
2285 	vm_inherit_ut           inheritance)
2286 {
2287 	return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
2288 	           offset, copy, cur_protection, max_protection, inheritance);
2289 }
2290 
2291 kern_return_t
2292 mach_vm_remap(
2293 	vm_map_t                target_map,
2294 	mach_vm_offset_ut      *address,
2295 	mach_vm_size_ut         size,
2296 	mach_vm_offset_ut       mask,
2297 	int                     flags,
2298 	vm_map_t                src_map,
2299 	mach_vm_offset_ut       memory_address,
2300 	boolean_t               copy,
2301 	vm_prot_ut             *cur_protection,   /* OUT */
2302 	vm_prot_ut             *max_protection,   /* OUT */
2303 	vm_inherit_ut           inheritance)
2304 {
2305 	return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
2306 	           copy, cur_protection, max_protection, inheritance);
2307 }
2308 
2309 kern_return_t
2310 vm_map(
2311 	vm_map_t                target_map,
2312 	vm_offset_ut           *address,
2313 	vm_size_ut              size,
2314 	vm_offset_ut            mask,
2315 	int                     flags,
2316 	ipc_port_t              port,
2317 	vm_offset_ut            offset,
2318 	boolean_t               copy,
2319 	vm_prot_ut              cur_protection,
2320 	vm_prot_ut              max_protection,
2321 	vm_inherit_ut           inheritance)
2322 {
2323 	return mach_vm_map(target_map, address,
2324 	           size, mask, flags, port, offset, copy,
2325 	           cur_protection, max_protection, inheritance);
2326 }
2327 
2328 #endif /* __x86_64__ */
2329