xref: /xnu-11215.41.3/osfmk/vm/vm_map_internal.h (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifndef _VM_VM_MAP_INTERNAL_H_
58 #define _VM_VM_MAP_INTERNAL_H_
59 
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_kern_xnu.h>
62 #include <mach/vm_types_unsafe.h>
63 #include <vm/vm_sanitize_internal.h>
64 #ifdef MACH_KERNEL_PRIVATE
65 #include <vm/vm_object_internal.h>
66 #endif /* MACH_KERNEL_PRIVATE */
67 
68 __BEGIN_DECLS
69 
70 #ifdef XNU_KERNEL_PRIVATE
71 
72 /* Check protection */
73 extern boolean_t vm_map_check_protection(
74 	vm_map_t                map,
75 	vm_map_offset_t         start,
76 	vm_map_offset_t         end,
77 	vm_prot_t               protection);
78 
79 extern kern_return_t vm_map_wire_impl(
80 	vm_map_t                map,
81 	vm_map_offset_ut        start_u,
82 	vm_map_offset_ut        end_u,
83 	vm_prot_ut              prot_u,
84 	vm_tag_t                tag,
85 	boolean_t               user_wire,
86 	ppnum_t                *physpage_p,
87 	vm_sanitize_caller_t    vm_sanitize_caller);
88 
89 extern kern_return_t vm_map_unwire_impl(
90 	vm_map_t                map,
91 	vm_map_offset_ut        start_u,
92 	vm_map_offset_ut        end_u,
93 	boolean_t               user_wire,
94 	vm_sanitize_caller_t    vm_sanitize_caller);
95 
96 #endif /* XNU_KERNEL_PRIVATE */
97 #ifdef MACH_KERNEL_PRIVATE
98 #pragma GCC visibility push(hidden)
99 
100 /* definitions related to overriding the NX behavior */
101 #define VM_ABI_32       0x1
102 #define VM_ABI_64       0x2
103 
104 /*
105  * This file contains interfaces that are private to the VM
106  */
107 
108 #define KiB(x) (1024 * (x))
109 #define MeB(x) (1024 * 1024 * (x))
110 
111 #if __LP64__
112 #define KMEM_SMALLMAP_THRESHOLD     (MeB(1))
113 #else
114 #define KMEM_SMALLMAP_THRESHOLD     (KiB(256))
115 #endif
116 
117 struct kmem_page_meta;
118 
119 
120 /* We can't extern this from vm_kern.h because we can't include pmap.h */
121 extern void kernel_memory_populate_object_and_unlock(
122 	vm_object_t             object, /* must be locked */
123 	vm_address_t            addr,
124 	vm_offset_t             offset,
125 	vm_size_t               size,
126 	struct vm_page         *page_list,
127 	kma_flags_t             flags,
128 	vm_tag_t                tag,
129 	vm_prot_t               prot,
130 	pmap_mapping_type_t     mapping_type);
131 
132 /* Initialize the module */
133 extern void vm_map_init(void);
134 
135 /*!
136  * @function vm_map_locate_space_anywhere()
137  *
138  * @brief
139  * Locate (no reservation) a range in the specified VM map.
140  *
141  * @param map           the map to scan for memory, must be locked.
142  * @param size          the size of the allocation to make.
143  * @param mask          an alignment mask the allocation must respect.
144  *                      (takes vmk_flags.vmkf_guard_before into account).
145  * @param vmk_flags     the vm map kernel flags to influence this call.
146  *                      vmk_flags.vmf_anywhere must be set.
147  * @param start_inout   in: an optional address to start scanning from, or 0
148  * @param entry_out     the entry right before the hole.
149  *
150  * @returns
151  * - KERN_SUCCESS in case of success, in which case:
152  *   o the address pointed at by @c start_inout is updated to the start
153  *     of the range located
154  *   o entry_out is set to the entry right before the hole in the map.
155  *
156  * - KERN_INVALID_ARGUMENT if some of the parameters aren't right
157  *   (typically invalid vmk_flags).
158  *
159  * - KERN_NO_SPACE if no space was found with the specified constraints.
160  */
161 extern kern_return_t vm_map_locate_space_anywhere(
162 	vm_map_t                map,
163 	vm_map_size_t           size,
164 	vm_map_offset_t         mask,
165 	vm_map_kernel_flags_t   vmk_flags,
166 	vm_map_offset_t        *start_inout,
167 	vm_map_entry_t         *entry_out);
168 
169 /* Allocate a range in the specified virtual address map and
170  * return the entry allocated for that range. */
171 extern kern_return_t vm_map_find_space(
172 	vm_map_t                map,
173 	vm_map_address_t        hint_addr,
174 	vm_map_size_t           size,
175 	vm_map_offset_t         mask,
176 	vm_map_kernel_flags_t   vmk_flags,
177 	vm_map_entry_t          *o_entry);                              /* OUT */
178 
179 extern void vm_map_clip_start(
180 	vm_map_t                map,
181 	vm_map_entry_t          entry,
182 	vm_map_offset_t         endaddr);
183 
184 extern void vm_map_clip_end(
185 	vm_map_t                map,
186 	vm_map_entry_t          entry,
187 	vm_map_offset_t         endaddr);
188 
189 extern boolean_t vm_map_entry_should_cow_for_true_share(
190 	vm_map_entry_t          entry);
191 
192 /*!
193  * @typedef vmr_flags_t
194  *
195  * @brief
196  * Flags for vm_map_remove() and vm_map_delete()
197  *
198  * @const VM_MAP_REMOVE_NO_FLAGS
199  * When no special flags is to be passed.
200  *
201  * @const VM_MAP_REMOVE_KUNWIRE
202  * Unwire memory as a side effect.
203  *
204  * @const VM_MAP_REMOVE_INTERRUPTIBLE
205  * Whether the call is interruptible if it needs to wait for a vm map
206  * entry to quiesce (interruption leads to KERN_ABORTED).
207  *
208  * @const VM_MAP_REMOVE_NOKUNWIRE_LAST
209  * Do not unwire the last page of this entry during remove.
210  * (Used by kmem_realloc()).
211  *
212  * @const VM_MAP_REMOVE_IMMUTABLE
213  * Allow permanent entries to be removed.
214  *
215  * @const VM_MAP_REMOVE_GAPS_FAIL
216  * Return KERN_INVALID_VALUE when a gap is being removed instead of panicking.
217  *
218  * @const VM_MAP_REMOVE_NO_YIELD.
219  * Try to avoid yielding during this call.
220  *
221  * @const VM_MAP_REMOVE_GUESS_SIZE
222  * The caller doesn't know the precise size of the entry,
223  * but the address must match an atomic entry.
224  *
225  * @const VM_MAP_REMOVE_IMMUTABLE_CODE
226  * Allow executables entries to be removed (for VM_PROT_COPY),
227  * which is used by debuggers.
228  */
__options_decl(vmr_flags_t, uint32_t, {
	VM_MAP_REMOVE_NO_FLAGS          = 0x000,
	VM_MAP_REMOVE_KUNWIRE           = 0x001,
	VM_MAP_REMOVE_INTERRUPTIBLE     = 0x002,
	VM_MAP_REMOVE_NOKUNWIRE_LAST    = 0x004,
	/* not in the doc block above; presumably skips map-alignment of the range -- verify against vm_map_delete() */
	VM_MAP_REMOVE_NO_MAP_ALIGN      = 0x008,
	VM_MAP_REMOVE_IMMUTABLE         = 0x010,
	VM_MAP_REMOVE_GAPS_FAIL         = 0x020,
	VM_MAP_REMOVE_NO_YIELD          = 0x040,
	VM_MAP_REMOVE_GUESS_SIZE        = 0x080,
	VM_MAP_REMOVE_IMMUTABLE_CODE    = 0x100,
	/* not in the doc block above; name suggests removal in preparation for an overwrite -- verify against callers */
	VM_MAP_REMOVE_TO_OVERWRITE      = 0x200,
});
242 
243 /* Deallocate a region */
244 extern kmem_return_t vm_map_remove_guard(
245 	vm_map_t                map,
246 	vm_map_offset_t         start,
247 	vm_map_offset_t         end,
248 	vmr_flags_t             flags,
249 	kmem_guard_t            guard) __result_use_check;
250 
251 extern kmem_return_t vm_map_remove_and_unlock(
252 	vm_map_t        map,
253 	vm_map_offset_t start,
254 	vm_map_offset_t end,
255 	vmr_flags_t     flags,
256 	kmem_guard_t    guard) __result_use_check;
257 
258 /* Deallocate a region */
259 static inline void
vm_map_remove(vm_map_t map,vm_map_offset_t start,vm_map_offset_t end)260 vm_map_remove(
261 	vm_map_t                map,
262 	vm_map_offset_t         start,
263 	vm_map_offset_t         end)
264 {
265 	vmr_flags_t  flags = VM_MAP_REMOVE_NO_FLAGS;
266 	kmem_guard_t guard = KMEM_GUARD_NONE;
267 
268 	(void)vm_map_remove_guard(map, start, end, flags, guard);
269 }
270 
271 extern bool kmem_is_ptr_range(vm_map_range_id_t range_id);
272 
273 extern mach_vm_range_t kmem_validate_range_for_overwrite(
274 	vm_map_offset_t         addr,
275 	vm_map_size_t           size);
276 
277 extern uint32_t kmem_addr_get_slot_idx(
278 	vm_map_offset_t         start,
279 	vm_map_offset_t         end,
280 	vm_map_range_id_t       range_id,
281 	struct kmem_page_meta **meta,
282 	uint32_t               *size_idx,
283 	mach_vm_range_t         slot);
284 
285 extern void kmem_validate_slot(
286 	vm_map_offset_t         addr,
287 	struct kmem_page_meta  *meta,
288 	uint32_t                size_idx,
289 	uint32_t                slot_idx);
290 
291 /*
292  * Function used to allocate VA from kmem pointer ranges
293  */
294 extern kern_return_t kmem_locate_space(
295 	vm_map_size_t           size,
296 	vm_map_range_id_t       range_id,
297 	bool                    direction,
298 	vm_map_offset_t        *start_inout,
299 	vm_map_entry_t         *entry_out);
300 
301 /*
302  * Function used to free VA to kmem pointer ranges
303  */
304 extern void kmem_free_space(
305 	vm_map_offset_t         start,
306 	vm_map_offset_t         end,
307 	vm_map_range_id_t       range_id,
308 	mach_vm_range_t         slot);
309 
310 ppnum_t vm_map_get_phys_page(
311 	vm_map_t        map,
312 	vm_offset_t     offset);
313 
314 #pragma GCC visibility pop
315 
/*
 * Install `object` as the VM object backing `entry`, and reset the
 * entry state that depends on the backing object (sub-map flag,
 * atomic context, resilient-codesign and JIT markers).
 *
 * `context` is only recorded when `atomic` is set; otherwise it is
 * cleared.
 */
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	/* VM objects are at least 4-byte aligned: tells the optimizer the
	 * low two bits are clear (they are used by VM_OBJECT_PACK()). */
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object) {
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
		/*
		 * The kernel object is represented by vme_kernel_object
		 * (set below), not by a packed pointer.  On VM_BTLOG_TAGS
		 * builds an existing tag btref is preserved here --
		 * vme_tag_btref appears to share storage with
		 * vme_object_or_delta (TODO: confirm in vm_map_xnu.h).
		 */
#if VM_BTLOG_TAGS
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		/* Switching away from the kernel object: release any tag btref. */
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}
355 
356 
/*
 * Set the entry's offset within its backing object.  The offset is
 * stored right-shifted by VME_OFFSET_SHIFT, so its low bits must be
 * zero; the assert verifies the value round-trips through VME_OFFSET()
 * without losing bits.
 */
static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
365 
366 /*
367  * IMPORTANT:
368  * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
370  * updated without the VM map "exclusive" lock.
371  */
/*
 * Set the entry's alias (user tag).  The value must fit within
 * VME_ALIAS_MASK; see the locking note above about updating this
 * field under the shared map lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	/* alias must not have bits outside VME_ALIAS_MASK */
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}
380 
381 static inline void
VME_OBJECT_SHADOW(vm_map_entry_t entry,vm_object_size_t length,bool always)382 VME_OBJECT_SHADOW(
383 	vm_map_entry_t entry,
384 	vm_object_size_t length,
385 	bool always)
386 {
387 	vm_object_t object;
388 	vm_object_offset_t offset;
389 
390 	object = VME_OBJECT(entry);
391 	offset = VME_OFFSET(entry);
392 	vm_object_shadow(&object, &offset, length, always);
393 	if (object != VME_OBJECT(entry)) {
394 		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
395 		entry->use_pmap = true;
396 	}
397 	if (offset != VME_OFFSET(entry)) {
398 		VME_OFFSET_SET(entry, offset);
399 	}
400 }
401 
402 extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
403 static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry,__unused void * fp)404 vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
405 {
406 #if VM_BTLOG_TAGS
407 	if (vmtaglog_tag && (VME_ALIAS(entry) == vmtaglog_tag) && entry->vme_kernel_object && entry->wired_count) {
408 		assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
409 		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
410 	}
411 #endif /* VM_BTLOG_TAGS */
412 }
413 
414 static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)415 vme_btref_consider_and_put(__unused vm_map_entry_t entry)
416 {
417 #if VM_BTLOG_TAGS
418 	if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
419 		btref_put(entry->vme_tag_btref);
420 		entry->vme_tag_btref = 0;
421 	}
422 #endif /* VM_BTLOG_TAGS */
423 }
424 
425 extern kern_return_t
426 vm_map_copy_adjust_to_target(
427 	vm_map_copy_t           copy_map,
428 	vm_map_offset_ut        offset,
429 	vm_map_size_ut          size,
430 	vm_map_t                target_map,
431 	boolean_t               copy,
432 	vm_map_copy_t           *target_copy_map_p,
433 	vm_map_offset_t         *overmap_start_p,
434 	vm_map_offset_t         *overmap_end_p,
435 	vm_map_offset_t         *trimmed_start_p);
436 
437 
438 __attribute__((always_inline))
439 int vm_map_lock_read_to_write(vm_map_t map);
440 
441 __attribute__((always_inline))
442 boolean_t vm_map_try_lock(vm_map_t map);
443 
444 __attribute__((always_inline))
445 boolean_t vm_map_try_lock_read(vm_map_t map);
446 
447 int vm_self_region_page_shift(vm_map_t target_map);
448 int vm_self_region_page_shift_safely(vm_map_t target_map);
449 
450 /* Lookup map entry containing or the specified address in the given map */
451 extern boolean_t        vm_map_lookup_entry_or_next(
452 	vm_map_t                map,
453 	vm_map_address_t        address,
454 	vm_map_entry_t          *entry);                                /* OUT */
455 
456 /* like vm_map_lookup_entry without the PGZ bear trap */
457 #if CONFIG_PROB_GZALLOC
458 extern boolean_t        vm_map_lookup_entry_allow_pgz(
459 	vm_map_t                map,
460 	vm_map_address_t        address,
461 	vm_map_entry_t          *entry);                                /* OUT */
462 #else /* !CONFIG_PROB_GZALLOC */
463 #define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
464 #endif /* !CONFIG_PROB_GZALLOC */
465 
466 
467 extern void             vm_map_copy_remap(
468 	vm_map_t                map,
469 	vm_map_entry_t          where,
470 	vm_map_copy_t           copy,
471 	vm_map_offset_t         adjustment,
472 	vm_prot_t               cur_prot,
473 	vm_prot_t               max_prot,
474 	vm_inherit_t            inheritance);
475 
476 /* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
478 extern kern_return_t    vm_map_lookup_and_lock_object(
479 	vm_map_t                *var_map,                               /* IN/OUT */
480 	vm_map_address_t        vaddr,
481 	vm_prot_t               fault_type,
482 	int                     object_lock_type,
483 	vm_map_version_t        *out_version,                           /* OUT */
484 	vm_object_t             *object,                                /* OUT */
485 	vm_object_offset_t      *offset,                                /* OUT */
486 	vm_prot_t               *out_prot,                              /* OUT */
487 	boolean_t               *wired,                                 /* OUT */
488 	vm_object_fault_info_t  fault_info,                             /* OUT */
489 	vm_map_t                *real_map,                              /* OUT */
490 	bool                    *contended);                            /* OUT */
491 
492 /* Verifies that the map has not changed since the given version. */
493 extern boolean_t        vm_map_verify(
494 	vm_map_t                map,
495 	vm_map_version_t        *version);                              /* REF */
496 
497 
498 /* simplify map entries */
499 extern void             vm_map_simplify_entry(
500 	vm_map_t        map,
501 	vm_map_entry_t  this_entry);
502 extern void             vm_map_simplify(
503 	vm_map_t                map,
504 	vm_map_offset_t         start);
505 
506 #if __arm64__
507 extern kern_return_t    vm_map_enter_fourk(
508 	vm_map_t                map,
509 	vm_map_offset_t         *address,
510 	vm_map_size_t           size,
511 	vm_map_offset_t         mask,
512 	vm_map_kernel_flags_t   vmk_flags,
513 	vm_object_t             object,
514 	vm_object_offset_t      offset,
515 	boolean_t               needs_copy,
516 	vm_prot_t               cur_protection,
517 	vm_prot_t               max_protection,
518 	vm_inherit_t            inheritance);
519 #endif /* __arm64__ */
520 
521 
522 /* Enter a mapping */
523 extern kern_return_t    vm_map_enter(
524 	vm_map_t                map,
525 	vm_map_offset_t        *address,
526 	vm_map_size_t           size,
527 	vm_map_offset_t         mask,
528 	vm_map_kernel_flags_t   vmk_flags,
529 	vm_object_t             object,
530 	vm_object_offset_t      offset,
531 	boolean_t               needs_copy,
532 	vm_prot_t               cur_protection,
533 	vm_prot_t               max_protection,
534 	vm_inherit_t            inheritance);
535 
536 
537 /* Enter a mapping of a memory object */
538 extern kern_return_t    vm_map_enter_mem_object(
539 	vm_map_t                map,
540 	vm_map_offset_ut       *address,
541 	vm_map_size_ut          size,
542 	vm_map_offset_ut        mask,
543 	vm_map_kernel_flags_t   vmk_flags,
544 	ipc_port_t              port,
545 	vm_object_offset_ut     offset,
546 	boolean_t               needs_copy,
547 	vm_prot_ut              cur_protection,
548 	vm_prot_ut              max_protection,
549 	vm_inherit_ut           inheritance,
550 	upl_page_list_ptr_t     page_list,
551 	unsigned int            page_list_count);
552 
553 extern kern_return_t    vm_map_remap(
554 	vm_map_t                target_map,
555 	vm_map_offset_ut       *address,
556 	vm_map_size_ut          size,
557 	vm_map_offset_ut        mask,
558 	vm_map_kernel_flags_t   vmk_flags,
559 	vm_map_t                src_map,
560 	vm_map_offset_ut        memory_address,
561 	boolean_t               copy,
562 	vm_prot_ut              *cur_protection,
563 	vm_prot_ut              *max_protection,
564 	vm_inherit_ut           inheritance);
565 
566 
567 /* Add or remove machine-dependent attributes from map regions */
568 extern kern_return_t    vm_map_machine_attribute(
569 	vm_map_t                map,
570 	vm_map_offset_t         start,
571 	vm_map_offset_t         end,
572 	vm_machine_attribute_t  attribute,
573 	vm_machine_attribute_val_t* value);                         /* IN/OUT */
574 
575 extern kern_return_t    vm_map_msync(
576 	vm_map_t                map,
577 	vm_map_address_t        address,
578 	vm_map_size_t           size,
579 	vm_sync_t               sync_flags);
580 
581 /* Set paging behavior */
582 extern kern_return_t    vm_map_behavior_set(
583 	vm_map_t                map,
584 	vm_map_offset_t         start,
585 	vm_map_offset_t         end,
586 	vm_behavior_t           new_behavior);
587 
588 extern kern_return_t vm_map_region(
589 	vm_map_t                 map,
590 	vm_map_offset_t         *address,
591 	vm_map_size_t           *size,
592 	vm_region_flavor_t       flavor,
593 	vm_region_info_t         info,
594 	mach_msg_type_number_t  *count,
595 	mach_port_t             *object_name);
596 
597 extern kern_return_t vm_map_region_recurse_64(
598 	vm_map_t                 map,
599 	vm_map_offset_t         *address,
600 	vm_map_size_t           *size,
601 	natural_t               *nesting_depth,
602 	vm_region_submap_info_64_t info,
603 	mach_msg_type_number_t  *count);
604 
605 extern kern_return_t vm_map_page_query_internal(
606 	vm_map_t                map,
607 	vm_map_offset_t         offset,
608 	int                     *disposition,
609 	int                     *ref_count);
610 
611 /* definitions related to overriding the NX behavior */
612 
613 extern int override_nx(vm_map_t map, uint32_t user_tag);
614 
615 extern void vm_map_region_top_walk(
616 	vm_map_entry_t entry,
617 	vm_region_top_info_t top);
618 extern void vm_map_region_walk(
619 	vm_map_t map,
620 	vm_map_offset_t va,
621 	vm_map_entry_t entry,
622 	vm_object_offset_t offset,
623 	vm_object_size_t range,
624 	vm_region_extended_info_t extended,
625 	boolean_t look_for_pages,
626 	mach_msg_type_number_t count);
627 
628 extern void vm_map_copy_ledger(
629 	task_t  old_task,
630 	task_t  new_task,
631 	int     ledger_entry);
632 
633 #endif /* MACH_KERNEL_PRIVATE */
634 
635 /* Get rid of a map */
636 extern void             vm_map_destroy(
637 	vm_map_t                map);
638 
639 extern void             vm_map_require(
640 	vm_map_t                map);
641 
642 extern void             vm_map_copy_require(
643 	vm_map_copy_t           copy);
644 
645 extern kern_return_t    vm_map_copy_extract(
646 	vm_map_t                src_map,
647 	vm_map_address_t        src_addr,
648 	vm_map_size_t           len,
649 	boolean_t               copy,
650 	vm_map_copy_t           *copy_result,   /* OUT */
651 	vm_prot_t               *cur_prot,      /* OUT */
652 	vm_prot_t               *max_prot,      /* OUT */
653 	vm_inherit_t            inheritance,
654 	vm_map_kernel_flags_t   vmk_flags);
655 
656 #define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
657 #define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
658 #define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
659 #define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
660 #define VM_MAP_COPYIN_FORK              0x00000010
661 #define VM_MAP_COPYIN_ALL_FLAGS         0x0000001F
662 
663 extern kern_return_t    vm_map_copyin_internal(
664 	vm_map_t                src_map,
665 	vm_map_address_ut       src_addr_u,
666 	vm_map_size_ut          len_u,
667 	int                     flags,
668 	vm_map_copy_t          *copy_result);   /* OUT */
669 
670 extern boolean_t        vm_map_tpro_enforcement(
671 	vm_map_t                map);
672 
673 extern void vm_map_iokit_mapped_region(
674 	vm_map_t                map,
675 	vm_size_t               bytes);
676 
677 extern void vm_map_iokit_unmapped_region(
678 	vm_map_t                map,
679 	vm_size_t               bytes);
680 
681 extern boolean_t first_free_is_valid(vm_map_t);
682 
683 extern void             vm_map_range_fork(
684 	vm_map_t                new_map,
685 	vm_map_t                old_map);
686 
687 extern int              vm_map_get_user_range(
688 	vm_map_t                map,
689 	vm_map_range_id_t       range_id,
690 	mach_vm_range_t         range);
691 
692 
693 #ifdef MACH_KERNEL_PRIVATE
694 
695 static inline bool
VM_MAP_IS_EXOTIC(vm_map_t map __unused)696 VM_MAP_IS_EXOTIC(
697 	vm_map_t map __unused)
698 {
699 #if __arm64__
700 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
701 	    pmap_is_exotic(map->pmap)) {
702 		return true;
703 	}
704 #endif /* __arm64__ */
705 	return false;
706 }
707 
708 static inline bool
VM_MAP_IS_ALIEN(vm_map_t map __unused)709 VM_MAP_IS_ALIEN(
710 	vm_map_t map __unused)
711 {
712 	/*
713 	 * An "alien" process/task/map/pmap should mostly behave
714 	 * as it currently would on iOS.
715 	 */
716 #if XNU_TARGET_OS_OSX
717 	if (map->is_alien) {
718 		return true;
719 	}
720 	return false;
721 #else /* XNU_TARGET_OS_OSX */
722 	return true;
723 #endif /* XNU_TARGET_OS_OSX */
724 }
725 
726 static inline bool
VM_MAP_POLICY_WX_FAIL(vm_map_t map __unused)727 VM_MAP_POLICY_WX_FAIL(
728 	vm_map_t map __unused)
729 {
730 	if (VM_MAP_IS_ALIEN(map)) {
731 		return false;
732 	}
733 	return true;
734 }
735 
736 static inline bool
VM_MAP_POLICY_WX_STRIP_X(vm_map_t map __unused)737 VM_MAP_POLICY_WX_STRIP_X(
738 	vm_map_t map __unused)
739 {
740 	if (VM_MAP_IS_ALIEN(map)) {
741 		return true;
742 	}
743 	return false;
744 }
745 
746 static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(vm_map_t map __unused)747 VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
748 	vm_map_t map __unused)
749 {
750 	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
751 		return false;
752 	}
753 	return true;
754 }
755 
/*
 * Whether a JIT mapping may be placed at a random address in this
 * map: allowed only for alien maps.
 */
static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}
762 
763 static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(vm_map_t map __unused)764 VM_MAP_POLICY_ALLOW_JIT_INHERIT(
765 	vm_map_t map __unused)
766 {
767 	if (VM_MAP_IS_ALIEN(map)) {
768 		return false;
769 	}
770 	return true;
771 }
772 
773 static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(vm_map_t map __unused)774 VM_MAP_POLICY_ALLOW_JIT_SHARING(
775 	vm_map_t map __unused)
776 {
777 	if (VM_MAP_IS_ALIEN(map)) {
778 		return false;
779 	}
780 	return true;
781 }
782 
783 static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(vm_map_t map __unused)784 VM_MAP_POLICY_ALLOW_JIT_COPY(
785 	vm_map_t map __unused)
786 {
787 	if (VM_MAP_IS_ALIEN(map)) {
788 		return false;
789 	}
790 	return true;
791 }
792 
793 static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(vm_map_t map __unused)794 VM_MAP_POLICY_WRITABLE_SHARED_REGION(
795 	vm_map_t map __unused)
796 {
797 #if __x86_64__
798 	return true;
799 #else /* __x86_64__ */
800 	if (VM_MAP_IS_EXOTIC(map)) {
801 		return true;
802 	}
803 	return false;
804 #endif /* __x86_64__ */
805 }
806 
807 static inline void
vm_prot_to_wimg(unsigned int prot,unsigned int * wimg)808 vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
809 {
810 	switch (prot) {
811 	case MAP_MEM_NOOP:                      break;
812 	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
813 	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
814 	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
815 	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
816 	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
817 	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
818 	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
819 	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
820 	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
821 	default:                                break;
822 	}
823 }
824 
825 static inline boolean_t
vm_map_always_shadow(vm_map_t map)826 vm_map_always_shadow(vm_map_t map)
827 {
828 	if (map->mapped_in_other_pmaps) {
829 		/*
830 		 * This is a submap, mapped in other maps.
831 		 * Even if a VM object is mapped only once in this submap,
832 		 * the submap itself could be mapped multiple times,
833 		 * so vm_object_shadow() should always create a shadow
834 		 * object, even if the object has only 1 reference.
835 		 */
836 		return TRUE;
837 	}
838 	return FALSE;
839 }
840 
841 extern void
842 vm_map_sizes(vm_map_t map,
843     vm_map_size_t * psize,
844     vm_map_size_t * pfree,
845     vm_map_size_t * plargest_free);
846 
847 #endif /* MACH_KERNEL_PRIVATE */
848 
849 __END_DECLS
850 
851 #endif  /* _VM_VM_MAP_INTERNAL_H_ */
852