xref: /xnu-12377.1.9/osfmk/vm/vm_map_internal.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifndef _VM_VM_MAP_INTERNAL_H_
58 #define _VM_VM_MAP_INTERNAL_H_
59 
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_kern_xnu.h>
62 #include <mach/vm_types_unsafe.h>
63 #include <vm/vm_sanitize_internal.h>
64 #include <kern/thread_test_context.h>
65 #ifdef MACH_KERNEL_PRIVATE
66 #include <vm/vm_object_internal.h>
67 #endif /* MACH_KERNEL_PRIVATE */
68 
69 __BEGIN_DECLS
70 
71 #ifdef XNU_KERNEL_PRIVATE
72 
/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	vm_prot_ut              protection_u,
	vm_sanitize_caller_t    vm_sanitize_caller);

/*
 * Wire (resident + pinned) a range of the map.
 * Common implementation behind the public wiring entry points; the
 * "_u"/"_ut" parameters are unsanitized values vetted via the
 * vm_sanitize machinery on behalf of "vm_sanitize_caller".
 */
extern kern_return_t vm_map_wire_impl(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	vm_prot_ut              prot_u,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                *physpage_p,
	vm_sanitize_caller_t    vm_sanitize_caller);

/* Unwire a range of the map (counterpart of vm_map_wire_impl()). */
extern kern_return_t vm_map_unwire_impl(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	boolean_t               user_wire,
	vm_sanitize_caller_t    vm_sanitize_caller);
97 
98 #endif /* XNU_KERNEL_PRIVATE */
99 #ifdef MACH_KERNEL_PRIVATE
100 #pragma GCC visibility push(hidden)
101 
/* definitions related to overriding the NX behavior */
#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

/*
 * This file contains interfaces that are private to the VM
 */

/* Byte <-> KiB/MiB conversion helpers. */
#define KiB(kb) ((kb) << 10ull)
#define BtoKiB(b) ((b) >> 10)
#define MiB(mb) ((mb) << 20ull)
#define BtoMiB(b) ((b) >> 20)

/* Size threshold separating "small" kmem mappings from large ones. */
#if __LP64__
#define KMEM_SMALLMAP_THRESHOLD     (MiB(1))
#else
#define KMEM_SMALLMAP_THRESHOLD     (KiB(256))
#endif

struct kmem_page_meta;


/* We can't extern this from vm_kern.h because we can't include pmap.h */
extern void kernel_memory_populate_object_and_unlock(
	vm_object_t             object, /* must be locked */
	vm_address_t            addr,
	vm_offset_t             offset,
	vm_size_t               size,
	struct vm_page         *page_list,
	kma_flags_t             flags,
	vm_tag_t                tag,
	vm_prot_t               prot,
	pmap_mapping_type_t     mapping_type);
135 
/* Initialize the module */
extern void vm_map_init(void);

/*!
 * @function vm_map_locate_space_anywhere()
 *
 * @brief
 * Locate (no reservation) a range in the specified VM map.
 *
 * @param map           the map to scan for memory, must be locked.
 * @param size          the size of the allocation to make.
 * @param mask          an alignment mask the allocation must respect.
 *                      (takes vmk_flags.vmkf_guard_before into account).
 * @param vmk_flags     the vm map kernel flags to influence this call.
 *                      vmk_flags.vmf_anywhere must be set.
 * @param start_inout   in: an optional address to start scanning from, or 0
 * @param entry_out     the entry right before the hole.
 *
 * @returns
 * - KERN_SUCCESS in case of success, in which case:
 *   o the address pointed at by @c start_inout is updated to the start
 *     of the range located
 *   o entry_out is set to the entry right before the hole in the map.
 *
 * - KERN_INVALID_ARGUMENT if some of the parameters aren't right
 *   (typically invalid vmk_flags).
 *
 * - KERN_NO_SPACE if no space was found with the specified constraints.
 */
extern kern_return_t vm_map_locate_space_anywhere(
	vm_map_t                map,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_offset_t        *start_inout,
	vm_map_entry_t         *entry_out);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        hint_addr,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_entry_t          *o_entry);                              /* OUT */

/* Clip the given entry at the specified address, start side. */
extern void vm_map_clip_start(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_offset_t         endaddr);

/* Clip the given entry at the specified address, end side. */
extern void vm_map_clip_end(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_offset_t         endaddr);

/* Should this entry be copied (COW) rather than truly shared? */
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t          entry);

/* Seal the map (nested_pmap selects whether the nested pmap is sealed too). */
extern void vm_map_seal(
	vm_map_t                 map,
	bool                     nested_pmap);
199 
/*!
 * @typedef vmr_flags_t
 *
 * @brief
 * Flags for vm_map_remove() and vm_map_delete()
 *
 * @const VM_MAP_REMOVE_NO_FLAGS
 * When no special flags are to be passed.
 *
 * @const VM_MAP_REMOVE_KUNWIRE
 * Unwire memory as a side effect.
 *
 * @const VM_MAP_REMOVE_INTERRUPTIBLE
 * Whether the call is interruptible if it needs to wait for a vm map
 * entry to quiesce (interruption leads to KERN_ABORTED).
 *
 * @const VM_MAP_REMOVE_NOKUNWIRE_LAST
 * Do not unwire the last page of this entry during remove.
 * (Used by kmem_realloc()).
 *
 * @const VM_MAP_REMOVE_IMMUTABLE
 * Allow permanent entries to be removed.
 *
 * @const VM_MAP_REMOVE_GAPS_FAIL
 * Return KERN_INVALID_VALUE when a gap is being removed instead of panicking.
 *
 * @const VM_MAP_REMOVE_NO_YIELD
 * Try to avoid yielding during this call.
 *
 * @const VM_MAP_REMOVE_GUESS_SIZE
 * The caller doesn't know the precise size of the entry,
 * but the address must match an atomic entry.
 *
 * @const VM_MAP_REMOVE_IMMUTABLE_CODE
 * Allow executables entries to be removed (for VM_PROT_COPY),
 * which is used by debuggers.
 *
 * @const VM_MAP_REMOVE_TO_OVERWRITE
 * The removal is performed in order to overwrite the range
 * (NOTE(review): inferred from the name — confirm against callers).
 */
__options_decl(vmr_flags_t, uint32_t, {
	VM_MAP_REMOVE_NO_FLAGS          = 0x000,
	VM_MAP_REMOVE_KUNWIRE           = 0x001,
	VM_MAP_REMOVE_INTERRUPTIBLE     = 0x002,
	VM_MAP_REMOVE_NOKUNWIRE_LAST    = 0x004,
	VM_MAP_REMOVE_IMMUTABLE         = 0x008,
	VM_MAP_REMOVE_GAPS_FAIL         = 0x010,
	VM_MAP_REMOVE_NO_YIELD          = 0x020,
	VM_MAP_REMOVE_GUESS_SIZE        = 0x040,
	VM_MAP_REMOVE_IMMUTABLE_CODE    = 0x080,
	VM_MAP_REMOVE_TO_OVERWRITE      = 0x100,
});
249 
/*
 * Deallocate a region.
 * "guard" is checked against the entries being removed
 * (use KMEM_GUARD_NONE for unguarded removals).
 */
extern kmem_return_t vm_map_remove_guard(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vmr_flags_t             flags,
	kmem_guard_t            guard) __result_use_check;

/*
 * Deallocate a region and unlock the map as part of the operation
 * (NOTE(review): map presumably entered locked — confirm in vm_map.c).
 */
extern kmem_return_t vm_map_remove_and_unlock(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vmr_flags_t     flags,
	kmem_guard_t    guard) __result_use_check;
264 
265 /* Deallocate a region */
266 static inline void
vm_map_remove(vm_map_t map,vm_map_offset_t start,vm_map_offset_t end)267 vm_map_remove(
268 	vm_map_t                map,
269 	vm_map_offset_t         start,
270 	vm_map_offset_t         end)
271 {
272 	vmr_flags_t  flags = VM_MAP_REMOVE_NO_FLAGS;
273 	kmem_guard_t guard = KMEM_GUARD_NONE;
274 
275 	(void)vm_map_remove_guard(map, start, end, flags, guard);
276 }
277 
/* Does the given kmem range id designate a pointer range? */
extern bool kmem_is_ptr_range(vm_map_range_id_t range_id);

/* Validate that [addr, addr+size) is legal to overwrite; returns its range. */
extern mach_vm_range_t kmem_validate_range_for_overwrite(
	vm_map_offset_t         addr,
	vm_map_size_t           size);

/* Locate the kmem slot metadata/index covering [start, end). */
extern uint32_t kmem_addr_get_slot_idx(
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_range_id_t       range_id,
	struct kmem_page_meta **meta,
	uint32_t               *size_idx,
	mach_vm_range_t         slot);

/* Check that "addr" corresponds to the given kmem slot. */
extern void kmem_validate_slot(
	vm_map_offset_t         addr,
	struct kmem_page_meta  *meta,
	uint32_t                size_idx,
	uint32_t                slot_idx);

/*
 * Function used to allocate VA from kmem pointer ranges
 */
extern kern_return_t kmem_locate_space(
	vm_map_size_t           size,
	vm_map_range_id_t       range_id,
	bool                    direction,
	vm_map_offset_t        *start_inout,
	vm_map_entry_t         *entry_out);

/*
 * Function used to free VA to kmem pointer ranges
 */
extern void kmem_free_space(
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_range_id_t       range_id,
	mach_vm_range_t         slot);

/* Physical page number backing "offset" in "map", if any. */
ppnum_t vm_map_get_phys_page(
	vm_map_t        map,
	vm_offset_t     offset);

/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_ut        start,
	vm_map_offset_ut        end,
	vm_inherit_ut           new_inheritance);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	boolean_t               set_max,
	vm_prot_ut              new_prot_u);
335 
336 #pragma GCC visibility pop
337 
/*
 * Set the VM object backing a map entry.
 *
 * Also resets the per-object entry flags (submap, resilient-codesign,
 * used_for_jit), records whether the new object is the kernel object,
 * and, when VM_BTLOG_TAGS is enabled, maintains the wired-tag
 * backtrace reference stored in vme_tag_btref.
 */
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	/* vme_object_or_delta packs the pointer: the low 2 bits must be clear */
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	/* the context is only meaningful for atomic entries */
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object) {
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
		/* keep an existing tag btref if the entry already held the kernel object */
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		/* moving off the kernel object: release the tag backtrace ref */
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}
377 
378 
/*
 * Set the entry's offset within its backing object.
 * The offset is stored right-shifted by VME_OFFSET_SHIFT, so it must be
 * aligned such that no low bits are lost (verified by the assert).
 */
static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
387 
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	/* the alias must fit in the vme_alias bitfield */
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}
402 
/*
 * Push a shadow object in front of the entry's current object
 * (copy-on-write setup).  If vm_object_shadow() produced a new object
 * or moved the offset, the entry is updated to match.
 */
static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length,
	bool always)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length, always);
	if (object != VME_OBJECT(entry)) {
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}
423 
424 extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
425 
426 static inline bool
vmtaglog_matches(vm_tag_t tag)427 vmtaglog_matches(vm_tag_t tag)
428 {
429 	switch (vmtaglog_tag) {
430 	case VM_KERN_MEMORY_NONE:
431 		return false;
432 	case VM_KERN_MEMORY_FIRST_DYNAMIC:
433 		return tag >= VM_KERN_MEMORY_FIRST_DYNAMIC;
434 	case VM_KERN_MEMORY_ANY:
435 		return tag != VM_KERN_MEMORY_NONE;
436 	default:
437 		return tag == vmtaglog_tag;
438 	}
439 }
440 
/*
 * When tag logging (VM_BTLOG_TAGS) is enabled: if this wired
 * kernel-object entry's tag matches the vmtaglog tunable, capture a
 * backtrace reference starting at frame pointer "fp".
 * No-op when VM_BTLOG_TAGS is disabled.
 */
static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
{
#if VM_BTLOG_TAGS
	if (vmtaglog_matches(VME_ALIAS(entry)) && entry->vme_kernel_object && entry->wired_count) {
		assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
	}
#endif /* VM_BTLOG_TAGS */
}
451 
/*
 * Release the entry's tag backtrace reference once the kernel-object
 * entry is fully unwired (both kernel and user wire counts are zero).
 * No-op when VM_BTLOG_TAGS is disabled.
 */
static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)
{
#if VM_BTLOG_TAGS
	if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
		btref_put(entry->vme_tag_btref);
		entry->vme_tag_btref = 0;
	}
#endif /* VM_BTLOG_TAGS */
}
462 
/*
 * Adjust "copy_map" so that it can be mapped into "target_map"
 * (e.g. differing page sizes); reports how much was over-mapped at
 * either end and how much was trimmed from the start.
 */
extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_ut        offset,
	vm_map_size_ut          size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);


/* Try to upgrade the map lock from shared (read) to exclusive (write). */
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

/* Non-blocking acquisition of the map lock, exclusive. */
__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

/* Non-blocking acquisition of the map lock, shared. */
__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

/* Page shift to report for region info on "target_map" ("_safely" validates the map). */
int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);
487 
/* Lookup map entry containing, or the next one after, the specified address */
extern boolean_t        vm_map_lookup_entry_or_next(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* Re-map the entries of "copy" into "map" after "where", shifted by "adjustment". */
extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_and_lock_object(
	vm_map_t                *var_map,                               /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,                           /* OUT */
	vm_object_t             *object,                                /* OUT */
	vm_object_offset_t      *offset,                                /* OUT */
	vm_prot_t               *out_prot,                              /* OUT */
	boolean_t               *wired,                                 /* OUT */
	vm_object_fault_info_t  fault_info,                             /* OUT */
	vm_map_t                *real_map,                              /* OUT */
	bool                    *contended);                            /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);                              /* REF */


/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);
532 
#if __arm64__
/* vm_map_enter() variant for 4K ("fourk") mappings — arm64 only. */
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */


/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t        *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);


/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_ut       *address,
	vm_map_size_ut          size,
	vm_map_offset_ut        mask,
	vm_map_kernel_flags_t   vmk_flags,
	ipc_port_t              port,
	vm_object_offset_ut     offset,
	boolean_t               needs_copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Remap a range of "src_map" into "target_map". */
extern kern_return_t    vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_ut       *address,
	vm_map_size_ut          size,
	vm_map_offset_ut        mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_t                src_map,
	vm_map_offset_ut        memory_address,
	boolean_t               copy,
	vm_prot_ut              *cur_protection,
	vm_prot_ut              *max_protection,
	vm_inherit_ut           inheritance);
592 
593 
594 /* Add or remove machine-dependent attributes from map regions */
595 extern kern_return_t    vm_map_machine_attribute(
596 	vm_map_t                map,
597 	vm_map_offset_ut        start,
598 	vm_map_offset_ut        end,
599 	vm_machine_attribute_t  attribute,
600 	vm_machine_attribute_val_t *value); /* IN/OUT */
601 
602 extern kern_return_t    vm_map_msync(
603 	vm_map_t                map,
604 	vm_map_address_ut       address,
605 	vm_map_size_ut          size,
606 	vm_sync_t               sync_flags);
607 
608 /* Set paging behavior */
609 extern kern_return_t    vm_map_behavior_set(
610 	vm_map_t                map,
611 	vm_map_offset_t         start,
612 	vm_map_offset_t         end,
613 	vm_behavior_t           new_behavior);
614 
615 extern kern_return_t vm_map_region(
616 	vm_map_t                 map,
617 	vm_map_offset_ut        *address,
618 	vm_map_size_ut          *size,
619 	vm_region_flavor_t       flavor,
620 	vm_region_info_t         info,
621 	mach_msg_type_number_t  *count,
622 	mach_port_t             *object_name);
623 
624 extern kern_return_t vm_map_region_recurse_64(
625 	vm_map_t                 map,
626 	vm_map_offset_ut        *address,
627 	vm_map_size_ut          *size,
628 	natural_t               *nesting_depth,
629 	vm_region_submap_info_64_t info,
630 	mach_msg_type_number_t  *count);
631 
632 /* definitions related to overriding the NX behavior */
633 
634 extern int override_nx(vm_map_t map, uint32_t user_tag);
635 
636 extern void vm_map_region_top_walk(
637 	vm_map_entry_t entry,
638 	vm_region_top_info_t top);
639 extern void vm_map_region_walk(
640 	vm_map_t map,
641 	vm_map_offset_t va,
642 	vm_map_entry_t entry,
643 	vm_object_offset_t offset,
644 	vm_object_size_t range,
645 	vm_region_extended_info_t extended,
646 	boolean_t look_for_pages,
647 	mach_msg_type_number_t count);
648 
649 extern void vm_map_copy_ledger(
650 	task_t  old_task,
651 	task_t  new_task,
652 	int     ledger_entry);
653 
654 #endif /* MACH_KERNEL_PRIVATE */
655 
/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map);

/* Validate that "map" is a genuine vm_map (see vm_map.c). */
extern void             vm_map_require(
	vm_map_t                map);

/* Validate that "copy" is a genuine vm_map_copy (see vm_map.c). */
extern void             vm_map_copy_require(
	vm_map_copy_t           copy);


/* Extract (optionally copying) a range of src_map into a vm_map_copy. */
extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);

/* Flags for vm_map_copyin_internal() */
#define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_FORK               0x00000010
#define VM_MAP_COPYIN_ALL_FLAGS              0x0000001F

/* Copy a range of src_map into a new vm_map_copy, per VM_MAP_COPYIN_* flags. */
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_ut       src_addr_u,
	vm_map_size_ut          len_u,
	int                     flags,
	vm_map_copy_t          *copy_result);   /* OUT */

/* Whether TPRO enforcement applies to this map. */
extern boolean_t        vm_map_tpro_enforcement(
	vm_map_t                map);

/* Account for "bytes" of IOKit-mapped memory in this map. */
extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

/* Undo the accounting done by vm_map_iokit_mapped_region(). */
extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

/* Debug check of the map's "first free" hint. */
extern boolean_t first_free_is_valid(vm_map_t);

/* Propagate user ranges from old_map to new_map during fork. */
extern void             vm_map_range_fork(
	vm_map_t                new_map,
	vm_map_t                old_map);

/* Fetch the user range with the given id into *range. */
extern int              vm_map_get_user_range(
	vm_map_t                map,
	vm_map_range_id_t       range_id,
	mach_vm_range_t         range);
713 
714 
715 #ifdef MACH_KERNEL_PRIVATE
716 
/*
 * An "exotic" map uses a page size smaller than the kernel's, or sits
 * on a pmap that pmap_is_exotic() reports as such.
 * Only possible on arm64; always false elsewhere.
 */
static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}
729 
/*
 * On macOS, "alien" is an explicit per-map flag; on every other
 * target all maps are treated as alien.
 */
static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}
747 
748 static inline bool
VM_MAP_POLICY_WX_FAIL(vm_map_t map __unused)749 VM_MAP_POLICY_WX_FAIL(
750 	vm_map_t map __unused)
751 {
752 	if (VM_MAP_IS_ALIEN(map)) {
753 		return false;
754 	}
755 	return true;
756 }
757 
758 static inline bool
VM_MAP_POLICY_WX_STRIP_X(vm_map_t map __unused)759 VM_MAP_POLICY_WX_STRIP_X(
760 	vm_map_t map __unused)
761 {
762 	if (VM_MAP_IS_ALIEN(map)) {
763 		return true;
764 	}
765 	return false;
766 }
767 
768 static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(vm_map_t map __unused)769 VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
770 	vm_map_t map __unused)
771 {
772 	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
773 		return false;
774 	}
775 	return true;
776 }
777 
778 static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(vm_map_t map)779 VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
780 	vm_map_t map)
781 {
782 	return VM_MAP_IS_ALIEN(map);
783 }
784 
785 static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(vm_map_t map __unused)786 VM_MAP_POLICY_ALLOW_JIT_INHERIT(
787 	vm_map_t map __unused)
788 {
789 	if (VM_MAP_IS_ALIEN(map)) {
790 		return false;
791 	}
792 	return true;
793 }
794 
795 static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(vm_map_t map __unused)796 VM_MAP_POLICY_ALLOW_JIT_SHARING(
797 	vm_map_t map __unused)
798 {
799 	if (VM_MAP_IS_ALIEN(map)) {
800 		return false;
801 	}
802 	return true;
803 }
804 
805 static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(vm_map_t map __unused)806 VM_MAP_POLICY_ALLOW_JIT_COPY(
807 	vm_map_t map __unused)
808 {
809 	if (VM_MAP_IS_ALIEN(map)) {
810 		return false;
811 	}
812 	return true;
813 }
814 
815 static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(vm_map_t map __unused)816 VM_MAP_POLICY_WRITABLE_SHARED_REGION(
817 	vm_map_t map __unused)
818 {
819 #if __x86_64__
820 	return true;
821 #else /* __x86_64__ */
822 	if (VM_MAP_IS_EXOTIC(map)) {
823 		return true;
824 	}
825 	return false;
826 #endif /* __x86_64__ */
827 }
828 
/*
 * Translate a MAP_MEM_* caching mode into the corresponding VM_WIMG_*
 * attribute.  *wimg is left untouched for MAP_MEM_NOOP and for any
 * unrecognized value.
 */
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
846 
847 static inline boolean_t
vm_map_always_shadow(vm_map_t map)848 vm_map_always_shadow(vm_map_t map)
849 {
850 	if (map->mapped_in_other_pmaps) {
851 		/*
852 		 * This is a submap, mapped in other maps.
853 		 * Even if a VM object is mapped only once in this submap,
854 		 * the submap itself could be mapped multiple times,
855 		 * so vm_object_shadow() should always create a shadow
856 		 * object, even if the object has only 1 reference.
857 		 */
858 		return TRUE;
859 	}
860 	return FALSE;
861 }
862 
/* Report the map's total size, free space, and largest free range. */
extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

/* Deliver a VM guard exception for the given address and reason. */
extern void vm_map_guard_exception(
	vm_map_offset_t         address,
	unsigned                reason);
872 
873 #endif /* MACH_KERNEL_PRIVATE */
874 
875 __END_DECLS
876 
877 #endif  /* _VM_VM_MAP_INTERNAL_H_ */
878