xref: /xnu-12377.41.6/osfmk/vm/vm_map_internal.h (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifndef _VM_VM_MAP_INTERNAL_H_
58 #define _VM_VM_MAP_INTERNAL_H_
59 
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_kern_xnu.h>
62 #include <mach/vm_types_unsafe.h>
63 #include <vm/vm_sanitize_internal.h>
64 #include <kern/thread_test_context.h>
65 #ifdef MACH_KERNEL_PRIVATE
66 #include <vm/vm_object_internal.h>
67 #endif /* MACH_KERNEL_PRIVATE */
68 
69 __BEGIN_DECLS
70 
71 #ifdef XNU_KERNEL_PRIVATE
72 
73 /* Check protection */
74 extern boolean_t vm_map_check_protection(
75 	vm_map_t                map,
76 	vm_map_offset_ut        start_u,
77 	vm_map_offset_ut        end_u,
78 	vm_prot_ut              protection_u,
79 	vm_sanitize_caller_t    vm_sanitize_caller);
80 
81 extern kern_return_t vm_map_wire_impl(
82 	vm_map_t                map,
83 	vm_map_offset_ut        start_u,
84 	vm_map_offset_ut        end_u,
85 	vm_prot_ut              prot_u,
86 	vm_tag_t                tag,
87 	boolean_t               user_wire,
88 	ppnum_t                *physpage_p,
89 	vm_sanitize_caller_t    vm_sanitize_caller);
90 
91 extern kern_return_t vm_map_unwire_impl(
92 	vm_map_t                map,
93 	vm_map_offset_ut        start_u,
94 	vm_map_offset_ut        end_u,
95 	boolean_t               user_wire,
96 	vm_sanitize_caller_t    vm_sanitize_caller);
97 
98 #endif /* XNU_KERNEL_PRIVATE */
99 #ifdef MACH_KERNEL_PRIVATE
100 #pragma GCC visibility push(hidden)
101 
102 /* definitions related to overriding the NX behavior */
103 #define VM_ABI_32       0x1
104 #define VM_ABI_64       0x2
105 
106 /*
107  * This file contains interfaces that are private to the VM
108  */
109 
110 #define KiB(kb) ((kb) << 10ull)
111 #define BtoKiB(b) ((b) >> 10)
112 #define MiB(mb) ((mb) << 20ull)
113 #define BtoMiB(b) ((b) >> 20)
114 
115 #if __LP64__
116 #define KMEM_SMALLMAP_THRESHOLD     (MiB(1))
117 #else
118 #define KMEM_SMALLMAP_THRESHOLD     (KiB(256))
119 #endif
120 
121 struct kmem_page_meta;
122 
123 
124 /* We can't extern this from vm_kern.h because we can't include pmap.h */
125 extern void kernel_memory_populate_object_and_unlock(
126 	vm_object_t             object, /* must be locked */
127 	vm_address_t            addr,
128 	vm_offset_t             offset,
129 	vm_size_t               size,
130 	struct vm_page         *page_list,
131 	kma_flags_t             flags,
132 	vm_tag_t                tag,
133 	vm_prot_t               prot,
134 	pmap_mapping_type_t     mapping_type);
135 
136 /* Initialize the module */
137 extern void vm_map_init(void);
138 
139 /*!
140  * @function vm_map_locate_space_anywhere()
141  *
142  * @brief
143  * Locate (no reservation) a range in the specified VM map.
144  *
145  * @param map           the map to scan for memory, must be locked.
146  * @param size          the size of the allocation to make.
147  * @param mask          an alignment mask the allocation must respect.
148  *                      (takes vmk_flags.vmkf_guard_before into account).
149  * @param vmk_flags     the vm map kernel flags to influence this call.
150  *                      vmk_flags.vmf_anywhere must be set.
151  * @param start_inout   in: an optional address to start scanning from, or 0
152  * @param entry_out     the entry right before the hole.
153  *
154  * @returns
155  * - KERN_SUCCESS in case of success, in which case:
156  *   o the address pointed at by @c start_inout is updated to the start
157  *     of the range located
158  *   o entry_out is set to the entry right before the hole in the map.
159  *
160  * - KERN_INVALID_ARGUMENT if some of the parameters aren't right
161  *   (typically invalid vmk_flags).
162  *
163  * - KERN_NO_SPACE if no space was found with the specified constraints.
164  */
165 extern kern_return_t vm_map_locate_space_anywhere(
166 	vm_map_t                map,
167 	vm_map_size_t           size,
168 	vm_map_offset_t         mask,
169 	vm_map_kernel_flags_t   vmk_flags,
170 	vm_map_offset_t        *start_inout,
171 	vm_map_entry_t         *entry_out);
172 
173 /* Allocate a range in the specified virtual address map and
174  * return the entry allocated for that range. */
175 extern kern_return_t vm_map_find_space(
176 	vm_map_t                map,
177 	vm_map_address_t        hint_addr,
178 	vm_map_size_t           size,
179 	vm_map_offset_t         mask,
180 	vm_map_kernel_flags_t   vmk_flags,
181 	vm_map_entry_t          *o_entry);                              /* OUT */
182 
183 extern void vm_map_clip_start(
184 	vm_map_t                map,
185 	vm_map_entry_t          entry,
186 	vm_map_offset_t         endaddr);
187 
188 extern void vm_map_clip_end(
189 	vm_map_t                map,
190 	vm_map_entry_t          entry,
191 	vm_map_offset_t         endaddr);
192 
193 extern boolean_t vm_map_entry_should_cow_for_true_share(
194 	vm_map_entry_t          entry);
195 
196 extern void vm_map_seal(
197 	vm_map_t                 map,
198 	bool                     nested_pmap);
199 
200 /*!
201  * @typedef vmr_flags_t
202  *
203  * @brief
204  * Flags for vm_map_remove() and vm_map_delete()
205  *
206  * @const VM_MAP_REMOVE_NO_FLAGS
207  * When no special flags is to be passed.
208  *
209  * @const VM_MAP_REMOVE_KUNWIRE
210  * Unwire memory as a side effect.
211  *
212  * @const VM_MAP_REMOVE_INTERRUPTIBLE
213  * Whether the call is interruptible if it needs to wait for a vm map
214  * entry to quiesce (interruption leads to KERN_ABORTED).
215  *
216  * @const VM_MAP_REMOVE_NOKUNWIRE_LAST
217  * Do not unwire the last page of this entry during remove.
218  * (Used by kmem_realloc()).
219  *
220  * @const VM_MAP_REMOVE_IMMUTABLE
221  * Allow permanent entries to be removed.
222  *
223  * @const VM_MAP_REMOVE_GAPS_FAIL
224  * Return KERN_INVALID_VALUE when a gap is being removed instead of panicking.
225  *
226  * @const VM_MAP_REMOVE_NO_YIELD.
227  * Try to avoid yielding during this call.
228  *
229  * @const VM_MAP_REMOVE_GUESS_SIZE
230  * The caller doesn't know the precise size of the entry,
231  * but the address must match an atomic entry.
232  *
 * @const VM_MAP_REMOVE_IMMUTABLE_CODE
 * Allow executables entries to be removed (for VM_PROT_COPY),
 * which is used by debuggers.
 *
 * @const VM_MAP_REMOVE_TO_OVERWRITE
 * The removal is being performed as part of overwriting the
 * range with a new mapping.
 */
237 __options_decl(vmr_flags_t, uint32_t, {
238 	VM_MAP_REMOVE_NO_FLAGS          = 0x000,
239 	VM_MAP_REMOVE_KUNWIRE           = 0x001,
240 	VM_MAP_REMOVE_INTERRUPTIBLE     = 0x002,
241 	VM_MAP_REMOVE_NOKUNWIRE_LAST    = 0x004,
242 	VM_MAP_REMOVE_IMMUTABLE         = 0x008,
243 	VM_MAP_REMOVE_GAPS_FAIL         = 0x010,
244 	VM_MAP_REMOVE_NO_YIELD          = 0x020,
245 	VM_MAP_REMOVE_GUESS_SIZE        = 0x040,
246 	VM_MAP_REMOVE_IMMUTABLE_CODE    = 0x080,
247 	VM_MAP_REMOVE_TO_OVERWRITE      = 0x100,
248 });
249 
250 /* Deallocate a region */
251 extern kmem_return_t vm_map_remove_guard(
252 	vm_map_t                map,
253 	vm_map_offset_t         start,
254 	vm_map_offset_t         end,
255 	vmr_flags_t             flags,
256 	kmem_guard_t            guard) __result_use_check;
257 
258 extern kmem_return_t vm_map_remove_and_unlock(
259 	vm_map_t        map,
260 	vm_map_offset_t start,
261 	vm_map_offset_t end,
262 	vmr_flags_t     flags,
263 	kmem_guard_t    guard) __result_use_check;
264 
265 /* Deallocate a region */
266 static inline void
vm_map_remove(vm_map_t map,vm_map_offset_t start,vm_map_offset_t end)267 vm_map_remove(
268 	vm_map_t                map,
269 	vm_map_offset_t         start,
270 	vm_map_offset_t         end)
271 {
272 	vmr_flags_t  flags = VM_MAP_REMOVE_NO_FLAGS;
273 	kmem_guard_t guard = KMEM_GUARD_NONE;
274 
275 	(void)vm_map_remove_guard(map, start, end, flags, guard);
276 }
277 
278 extern bool kmem_is_ptr_range(vm_map_range_id_t range_id);
279 
280 extern mach_vm_range_t kmem_validate_range_for_overwrite(
281 	vm_map_offset_t         addr,
282 	vm_map_size_t           size);
283 
284 extern uint32_t kmem_addr_get_slot_idx(
285 	vm_map_offset_t         start,
286 	vm_map_offset_t         end,
287 	vm_map_range_id_t       range_id,
288 	struct kmem_page_meta **meta,
289 	uint32_t               *size_idx,
290 	mach_vm_range_t         slot);
291 
292 extern void kmem_validate_slot(
293 	vm_map_offset_t         addr,
294 	struct kmem_page_meta  *meta,
295 	uint32_t                size_idx,
296 	uint32_t                slot_idx);
297 
298 /*
299  * Function used to allocate VA from kmem pointer ranges
300  */
301 extern kern_return_t kmem_locate_space(
302 	vm_map_size_t           size,
303 	vm_map_range_id_t       range_id,
304 	bool                    direction,
305 	vm_map_offset_t        *start_inout,
306 	vm_map_entry_t         *entry_out);
307 
308 /*
309  * Function used to free VA to kmem pointer ranges
310  */
311 extern void kmem_free_space(
312 	vm_map_offset_t         start,
313 	vm_map_offset_t         end,
314 	vm_map_range_id_t       range_id,
315 	mach_vm_range_t         slot);
316 
317 ppnum_t vm_map_get_phys_page(
318 	vm_map_t        map,
319 	vm_offset_t     offset);
320 
321 /* Change inheritance */
322 extern kern_return_t    vm_map_inherit(
323 	vm_map_t                map,
324 	vm_map_offset_ut        start,
325 	vm_map_offset_ut        end,
326 	vm_inherit_ut           new_inheritance);
327 
328 /* Change protection */
329 extern kern_return_t    vm_map_protect(
330 	vm_map_t                map,
331 	vm_map_offset_ut        start_u,
332 	vm_map_offset_ut        end_u,
333 	boolean_t               set_max,
334 	vm_prot_ut              new_prot_u);
335 
336 #pragma GCC visibility pop
337 
/*
 * Install "object" as the VM object backing "entry".
 *
 * Clears the submap bit, records the atomic context (or zeroes it when
 * the entry is not atomic), and packs the object pointer into
 * vme_object_or_delta.  The kernel object is never stored explicitly:
 * entries backed by it keep a zero object field and rely on the
 * vme_kernel_object flag instead (except, with VM_BTLOG_TAGS, while a
 * tag btref is still live in that field).  Per-mapping attributes that
 * cannot carry over to a new backing object (resilient codesign, JIT)
 * are reset.
 */
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	/* packed object pointers must be at least 4-byte aligned */
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object) {
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
		/*
		 * A live tag btref occupies vme_object_or_delta for
		 * kernel-object entries; preserve it, otherwise zero.
		 */
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		/* switching away from the kernel object: release its btref */
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
#if HAS_MTE
	/* track whether the backing kernel object is the MTE-tagged one */
	if (object == kernel_object_tagged) {
		entry->vme_is_tagged = TRUE;
	} else if (object == kernel_object_default) {
		entry->vme_is_tagged = FALSE;
	}
#endif /* HAS_MTE */
}
384 
385 
386 static inline void
VME_OFFSET_SET(vm_map_entry_t entry,vm_object_offset_t offset)387 VME_OFFSET_SET(
388 	vm_map_entry_t entry,
389 	vm_object_offset_t offset)
390 {
391 	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
392 	assert3u(VME_OFFSET(entry), ==, offset);
393 }
394 
395 /*
396  * IMPORTANT:
397  * The "alias" field can be updated while holding the VM map lock
398  * "shared".  It's OK as along as it's the only field that can be
399  * updated without the VM map "exclusive" lock.
400  */
/*
 * Set the entry's alias (user tag).  This is the one field that may be
 * updated while holding the VM map lock shared (see note above).
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	/* the alias value must fit in the vme_alias bitfield */
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}
409 
410 static inline void
VME_OBJECT_SHADOW(vm_map_entry_t entry,vm_object_size_t length,bool always)411 VME_OBJECT_SHADOW(
412 	vm_map_entry_t entry,
413 	vm_object_size_t length,
414 	bool always)
415 {
416 	vm_object_t object;
417 	vm_object_offset_t offset;
418 
419 	object = VME_OBJECT(entry);
420 	offset = VME_OFFSET(entry);
421 	vm_object_shadow(&object, &offset, length, always);
422 	if (object != VME_OBJECT(entry)) {
423 		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
424 		entry->use_pmap = true;
425 	}
426 	if (offset != VME_OFFSET(entry)) {
427 		VME_OFFSET_SET(entry, offset);
428 	}
429 }
430 
431 extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
432 
433 static inline bool
vmtaglog_matches(vm_tag_t tag)434 vmtaglog_matches(vm_tag_t tag)
435 {
436 	switch (vmtaglog_tag) {
437 	case VM_KERN_MEMORY_NONE:
438 		return false;
439 	case VM_KERN_MEMORY_FIRST_DYNAMIC:
440 		return tag >= VM_KERN_MEMORY_FIRST_DYNAMIC;
441 	case VM_KERN_MEMORY_ANY:
442 		return tag != VM_KERN_MEMORY_NONE;
443 	default:
444 		return tag == vmtaglog_tag;
445 	}
446 }
447 
448 static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry,__unused void * fp)449 vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
450 {
451 #if VM_BTLOG_TAGS
452 	if (vmtaglog_matches(VME_ALIAS(entry)) && entry->vme_kernel_object && entry->wired_count) {
453 		assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
454 		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
455 	}
456 #endif /* VM_BTLOG_TAGS */
457 }
458 
459 static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)460 vme_btref_consider_and_put(__unused vm_map_entry_t entry)
461 {
462 #if VM_BTLOG_TAGS
463 	if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
464 		btref_put(entry->vme_tag_btref);
465 		entry->vme_tag_btref = 0;
466 	}
467 #endif /* VM_BTLOG_TAGS */
468 }
469 
470 extern kern_return_t
471 vm_map_copy_adjust_to_target(
472 	vm_map_copy_t           copy_map,
473 	vm_map_offset_ut        offset,
474 	vm_map_size_ut          size,
475 	vm_map_t                target_map,
476 	boolean_t               copy,
477 	vm_map_copy_t           *target_copy_map_p,
478 	vm_map_offset_t         *overmap_start_p,
479 	vm_map_offset_t         *overmap_end_p,
480 	vm_map_offset_t         *trimmed_start_p);
481 
482 
483 __attribute__((always_inline))
484 int vm_map_lock_read_to_write(vm_map_t map);
485 
486 __attribute__((always_inline))
487 boolean_t vm_map_try_lock(vm_map_t map);
488 
489 __attribute__((always_inline))
490 boolean_t vm_map_try_lock_read(vm_map_t map);
491 
492 int vm_self_region_page_shift(vm_map_t target_map);
493 int vm_self_region_page_shift_safely(vm_map_t target_map);
494 
495 /* Lookup map entry containing or the specified address in the given map */
496 extern boolean_t        vm_map_lookup_entry_or_next(
497 	vm_map_t                map,
498 	vm_map_address_t        address,
499 	vm_map_entry_t          *entry);                                /* OUT */
500 
501 extern void             vm_map_copy_remap(
502 	vm_map_t                map,
503 	vm_map_entry_t          where,
504 	vm_map_copy_t           copy,
505 	vm_map_offset_t         adjustment,
506 	vm_prot_t               cur_prot,
507 	vm_prot_t               max_prot,
508 	vm_inherit_t            inheritance);
509 
510 /* Find the VM object, offset, and protection for a given virtual address
511  * in the specified map, assuming a page fault of the	type specified. */
512 extern kern_return_t    vm_map_lookup_and_lock_object(
513 	vm_map_t                *var_map,                               /* IN/OUT */
514 	vm_map_address_t        vaddr,
515 	vm_prot_t               fault_type,
516 	int                     object_lock_type,
517 	vm_map_version_t        *out_version,                           /* OUT */
518 	vm_object_t             *object,                                /* OUT */
519 	vm_object_offset_t      *offset,                                /* OUT */
520 	vm_prot_t               *out_prot,                              /* OUT */
521 	boolean_t               *wired,                                 /* OUT */
522 	vm_object_fault_info_t  fault_info,                             /* OUT */
523 	vm_map_t                *real_map,                              /* OUT */
524 	bool                    *contended);                            /* OUT */
525 
526 /* Verifies that the map has not changed since the given version. */
527 extern boolean_t        vm_map_verify(
528 	vm_map_t                map,
529 	vm_map_version_t        *version);                              /* REF */
530 
531 
532 /* simplify map entries */
533 extern void             vm_map_simplify_entry(
534 	vm_map_t        map,
535 	vm_map_entry_t  this_entry);
536 extern void             vm_map_simplify(
537 	vm_map_t                map,
538 	vm_map_offset_t         start);
539 
540 #if __arm64__
541 extern kern_return_t    vm_map_enter_fourk(
542 	vm_map_t                map,
543 	vm_map_offset_t         *address,
544 	vm_map_size_t           size,
545 	vm_map_offset_t         mask,
546 	vm_map_kernel_flags_t   vmk_flags,
547 	vm_object_t             object,
548 	vm_object_offset_t      offset,
549 	boolean_t               needs_copy,
550 	vm_prot_t               cur_protection,
551 	vm_prot_t               max_protection,
552 	vm_inherit_t            inheritance);
553 #endif /* __arm64__ */
554 
555 
556 /* Enter a mapping */
557 extern kern_return_t    vm_map_enter(
558 	vm_map_t                map,
559 	vm_map_offset_t        *address,
560 	vm_map_size_t           size,
561 	vm_map_offset_t         mask,
562 	vm_map_kernel_flags_t   vmk_flags,
563 	vm_object_t             object,
564 	vm_object_offset_t      offset,
565 	boolean_t               needs_copy,
566 	vm_prot_t               cur_protection,
567 	vm_prot_t               max_protection,
568 	vm_inherit_t            inheritance);
569 
570 
571 /* Enter a mapping of a memory object */
572 extern kern_return_t    vm_map_enter_mem_object(
573 	vm_map_t                map,
574 	vm_map_offset_ut       *address,
575 	vm_map_size_ut          size,
576 	vm_map_offset_ut        mask,
577 	vm_map_kernel_flags_t   vmk_flags,
578 	ipc_port_t              port,
579 	vm_object_offset_ut     offset,
580 	boolean_t               needs_copy,
581 	vm_prot_ut              cur_protection,
582 	vm_prot_ut              max_protection,
583 	vm_inherit_ut           inheritance,
584 	upl_page_list_ptr_t     page_list,
585 	unsigned int            page_list_count);
586 
587 extern kern_return_t    vm_map_remap(
588 	vm_map_t                target_map,
589 	vm_map_offset_ut       *address,
590 	vm_map_size_ut          size,
591 	vm_map_offset_ut        mask,
592 	vm_map_kernel_flags_t   vmk_flags,
593 	vm_map_t                src_map,
594 	vm_map_offset_ut        memory_address,
595 	boolean_t               copy,
596 	vm_prot_ut              *cur_protection,
597 	vm_prot_ut              *max_protection,
598 	vm_inherit_ut           inheritance);
599 
600 
601 /* Add or remove machine-dependent attributes from map regions */
602 extern kern_return_t    vm_map_machine_attribute(
603 	vm_map_t                map,
604 	vm_map_offset_ut        start,
605 	vm_map_offset_ut        end,
606 	vm_machine_attribute_t  attribute,
607 	vm_machine_attribute_val_t *value); /* IN/OUT */
608 
609 extern kern_return_t    vm_map_msync(
610 	vm_map_t                map,
611 	vm_map_address_ut       address,
612 	vm_map_size_ut          size,
613 	vm_sync_t               sync_flags);
614 
615 /* Set paging behavior */
616 extern kern_return_t    vm_map_behavior_set(
617 	vm_map_t                map,
618 	vm_map_offset_t         start,
619 	vm_map_offset_t         end,
620 	vm_behavior_t           new_behavior);
621 
622 extern kern_return_t vm_map_region(
623 	vm_map_t                 map,
624 	vm_map_offset_ut        *address,
625 	vm_map_size_ut          *size,
626 	vm_region_flavor_t       flavor,
627 	vm_region_info_t         info,
628 	mach_msg_type_number_t  *count,
629 	mach_port_t             *object_name);
630 
631 extern kern_return_t vm_map_region_recurse_64(
632 	vm_map_t                 map,
633 	vm_map_offset_ut        *address,
634 	vm_map_size_ut          *size,
635 	natural_t               *nesting_depth,
636 	vm_region_submap_info_64_t info,
637 	mach_msg_type_number_t  *count);
638 
639 /* definitions related to overriding the NX behavior */
640 
641 extern int override_nx(vm_map_t map, uint32_t user_tag);
642 
643 extern void vm_map_region_top_walk(
644 	vm_map_entry_t entry,
645 	vm_region_top_info_t top);
646 extern void vm_map_region_walk(
647 	vm_map_t map,
648 	vm_map_offset_t va,
649 	vm_map_entry_t entry,
650 	vm_object_offset_t offset,
651 	vm_object_size_t range,
652 	vm_region_extended_info_t extended,
653 	boolean_t look_for_pages,
654 	mach_msg_type_number_t count);
655 
656 extern void vm_map_copy_ledger(
657 	task_t  old_task,
658 	task_t  new_task,
659 	int     ledger_entry);
660 
661 #endif /* MACH_KERNEL_PRIVATE */
662 
663 /* Get rid of a map */
664 extern void             vm_map_destroy(
665 	vm_map_t                map);
666 
667 extern void             vm_map_require(
668 	vm_map_t                map);
669 
670 extern void             vm_map_copy_require(
671 	vm_map_copy_t           copy);
672 
673 #if HAS_MTE
674 __options_closed_decl(vm_mte_operation_flags_t, uint32_t, {
675 	/* all operations must have exactly one of these: */
676 	VM_MTE_OPERATION_TYPE_COPY = 0x1,
677 	VM_MTE_OPERATION_TYPE_SHARE = 0x2,
678 	VM_MTE_OPERATION_TYPE_INHERIT_SHARE = 0x4,
679 	VM_MTE_OPERATION_TYPE_CREATE_UPL = 0x8,
680 	VM_MTE_OPERATION_TYPE_MASK = VM_MTE_OPERATION_TYPE_COPY | VM_MTE_OPERATION_TYPE_SHARE | VM_MTE_OPERATION_TYPE_INHERIT_SHARE | VM_MTE_OPERATION_TYPE_CREATE_UPL,
681 
682 	/* all operations except CREATE_UPL require exactly one of these: */
683 	VM_MTE_OPERATION_DEST_USER = 0x10,
684 	VM_MTE_OPERATION_DEST_KERNEL = 0x20,
685 	VM_MTE_OPERATION_DEST_UNKNOWN = 0x40,
686 	VM_MTE_OPERATION_DEST_INTERNAL = 0x80,
687 	VM_MTE_OPERATION_DEST_MASK = VM_MTE_OPERATION_DEST_USER | VM_MTE_OPERATION_DEST_KERNEL | VM_MTE_OPERATION_DEST_UNKNOWN | VM_MTE_OPERATION_DEST_INTERNAL,
688 
689 	/* these flags can be additionally added to any of the above: */
690 	VM_MTE_OPERATION_IOKIT = 0x100, /* don't throw guard exceptions; IOKit will handle errors */
691 	VM_MTE_OPERATION_FORK = 0x200, /* apply policies for fork() instead of generic userspace policies */
692 	VM_MTE_OPERATION_REMAP_EXTRACT = 0x400, /* apply policies for vm_map_remap_extract() */
693 	VM_MTE_OPERATION_MAKE_MEMORY_ENTRY = 0x800 /* apply policies for mach_make_memory_entry() */
694 });
695 
696 __options_closed_decl(option_variant_t, uint8_t, {
697 	OPTIONAL_NONE,
698 	OPTIONAL_SOME,
699 });
700 
701 #define OPTIONAL_IS_NONE(var) ((var).discriminant == OPTIONAL_NONE)
702 #define OPTIONAL_IS_SOME(var) ((var).discriminant == OPTIONAL_SOME)
703 
/*
 * DEFINE_OPTIONAL_TYPE(name, T) declares optional_<name>_t, a tagged
 * option type wrapping a value of type T, plus its constructors
 * (none/some) and accessors (expect/unwrap).
 *
 * expect() panics with the caller-supplied message when the optional is
 * None; unwrap() is expect() with a generic message.
 *
 * Fixes vs. previous revision:
 * - the struct tag now includes "name", so the macro can be
 *   instantiated more than once without redefining the tag "option";
 * - the panic messages use '#name' stringizing (token pasting with
 *   '##' does not occur inside string literals, so they previously
 *   printed the literal text "##name##");
 * - none() zero-initializes the payload via designated-initializer
 *   semantics, which works for non-pointer T as well as pointers.
 */
#define DEFINE_OPTIONAL_TYPE(name, T) \
	typedef struct optional_##name {\
	        option_variant_t discriminant;\
	        T payload;\
	} optional_##name##_t;\
\
	static inline optional_##name##_t optional_##name##_none(void) {\
	        /* unnamed members (payload) are implicitly zeroed */\
	        return (optional_##name##_t){\
	                .discriminant = OPTIONAL_NONE,\
	        };\
	}\
\
	static inline optional_##name##_t optional_##name##_some(T payload) {\
	        return (optional_##name##_t){\
	                .discriminant = OPTIONAL_SOME,\
	                .payload = payload,\
	        };\
	}\
\
	static inline T optional_##name##_expect(optional_##name##_t optional, const char* message) {\
	        if (!OPTIONAL_IS_SOME(optional)) {\
	                panic("EXPECT(" #name ") failed: %s", message);\
	        }\
	        return optional.payload;\
	}\
\
	static inline T optional_##name##_unwrap(optional_##name##_t optional) {\
	        return optional_##name##_expect(optional, "Unwrapped a None " #name);\
	}
734 
735 
736 DEFINE_OPTIONAL_TYPE(vm_object, vm_object_t);
737 
738 
739 /*
740  * Since these macro are used in expression contexts, it's not easy to
741  * drop in an assertion when an unsupported type is passed in. However, the
742  * default error message is pretty clear.
743  */
744 #define OPTIONAL_NONE(var) _Generic((var),\
745     vm_object_t:  optional_vm_object_none((var))\
746 )
747 
748 #define OPTIONAL_SOME(var) _Generic((var),\
749 	vm_object_t:  optional_vm_object_some((var))\
750 )
751 
752 #define OPTIONAL_UNWRAP(var) _Generic((var),\
753 	optional_vm_object_t:  optional_vm_object_unwrap((var))\
754 )
755 
756 #define OPTIONAL_EXPECT(var, msg) _Generic((var),\
757 	optional_vm_object_t:  optional_vm_object_expect((var), (msg))\
758 )
759 
760 bool vm_map_allow_mte_operation(vm_map_t source_map, vm_map_offset_t addr, vm_size_t size, vm_mte_operation_flags_t flags,
761     optional_vm_object_t maybe_source_vm_object);
762 #endif /* HAS_MTE */
763 
764 extern kern_return_t    vm_map_copy_extract(
765 	vm_map_t                src_map,
766 	vm_map_address_t        src_addr,
767 	vm_map_size_t           len,
768 	boolean_t               copy,
769 	vm_map_copy_t           *copy_result,   /* OUT */
770 	vm_prot_t               *cur_prot,      /* OUT */
771 	vm_prot_t               *max_prot,      /* OUT */
772 	vm_inherit_t            inheritance,
773 	vm_map_kernel_flags_t   vmk_flags);
774 
775 #define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
776 #define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
777 #define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
778 #define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
779 #define VM_MAP_COPYIN_FORK               0x00000010
780 #if HAS_MTE
781 #define VM_MAP_COPYIN_IOKIT                  0x00000020
782 #define VM_MAP_COPYIN_DEST_USER              0x00000040
783 #define VM_MAP_COPYIN_DEST_KERNEL            0x00000080
784 #define VM_MAP_COPYIN_DEST_UNKNOWN           0x00000100
785 #define VM_MAP_COPYIN_ALL_FLAGS              0x000001FF
786 #else /* !HAS_MTE */
787 #define VM_MAP_COPYIN_ALL_FLAGS              0x0000001F
788 #endif /* HAS_MTE */
789 
790 extern kern_return_t    vm_map_copyin_internal(
791 	vm_map_t                src_map,
792 	vm_map_address_ut       src_addr_u,
793 	vm_map_size_ut          len_u,
794 	int                     flags,
795 	vm_map_copy_t          *copy_result);   /* OUT */
796 
797 extern boolean_t        vm_map_tpro_enforcement(
798 	vm_map_t                map);
799 
800 extern void vm_map_iokit_mapped_region(
801 	vm_map_t                map,
802 	vm_size_t               bytes);
803 
804 extern void vm_map_iokit_unmapped_region(
805 	vm_map_t                map,
806 	vm_size_t               bytes);
807 
808 extern boolean_t first_free_is_valid(vm_map_t);
809 
810 extern void             vm_map_range_fork(
811 	vm_map_t                new_map,
812 	vm_map_t                old_map);
813 
814 extern int              vm_map_get_user_range(
815 	vm_map_t                map,
816 	vm_map_range_id_t       range_id,
817 	mach_vm_range_t         range);
818 
819 
820 #ifdef MACH_KERNEL_PRIVATE
821 
822 static inline bool
VM_MAP_IS_EXOTIC(vm_map_t map __unused)823 VM_MAP_IS_EXOTIC(
824 	vm_map_t map __unused)
825 {
826 #if __arm64__
827 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
828 	    pmap_is_exotic(map->pmap)) {
829 		return true;
830 	}
831 #endif /* __arm64__ */
832 	return false;
833 }
834 
835 static inline bool
VM_MAP_IS_ALIEN(vm_map_t map __unused)836 VM_MAP_IS_ALIEN(
837 	vm_map_t map __unused)
838 {
839 	/*
840 	 * An "alien" process/task/map/pmap should mostly behave
841 	 * as it currently would on iOS.
842 	 */
843 #if XNU_TARGET_OS_OSX
844 	if (map->is_alien) {
845 		return true;
846 	}
847 	return false;
848 #else /* XNU_TARGET_OS_OSX */
849 	return true;
850 #endif /* XNU_TARGET_OS_OSX */
851 }
852 
853 static inline bool
VM_MAP_POLICY_WX_FAIL(vm_map_t map __unused)854 VM_MAP_POLICY_WX_FAIL(
855 	vm_map_t map __unused)
856 {
857 	if (VM_MAP_IS_ALIEN(map)) {
858 		return false;
859 	}
860 	return true;
861 }
862 
863 static inline bool
VM_MAP_POLICY_WX_STRIP_X(vm_map_t map __unused)864 VM_MAP_POLICY_WX_STRIP_X(
865 	vm_map_t map __unused)
866 {
867 	if (VM_MAP_IS_ALIEN(map)) {
868 		return true;
869 	}
870 	return false;
871 }
872 
873 static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(vm_map_t map __unused)874 VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
875 	vm_map_t map __unused)
876 {
877 	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
878 		return false;
879 	}
880 	return true;
881 }
882 
/*
 * Policy: may a JIT region be placed at a random address in this map?
 * Only for alien maps.
 */
static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}
889 
890 static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(vm_map_t map __unused)891 VM_MAP_POLICY_ALLOW_JIT_INHERIT(
892 	vm_map_t map __unused)
893 {
894 	if (VM_MAP_IS_ALIEN(map)) {
895 		return false;
896 	}
897 	return true;
898 }
899 
900 static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(vm_map_t map __unused)901 VM_MAP_POLICY_ALLOW_JIT_SHARING(
902 	vm_map_t map __unused)
903 {
904 	if (VM_MAP_IS_ALIEN(map)) {
905 		return false;
906 	}
907 	return true;
908 }
909 
910 static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(vm_map_t map __unused)911 VM_MAP_POLICY_ALLOW_JIT_COPY(
912 	vm_map_t map __unused)
913 {
914 	if (VM_MAP_IS_ALIEN(map)) {
915 		return false;
916 	}
917 	return true;
918 }
919 
920 static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(vm_map_t map __unused)921 VM_MAP_POLICY_WRITABLE_SHARED_REGION(
922 	vm_map_t map __unused)
923 {
924 #if __x86_64__
925 	return true;
926 #else /* __x86_64__ */
927 	if (VM_MAP_IS_EXOTIC(map)) {
928 		return true;
929 	}
930 	return false;
931 #endif /* __x86_64__ */
932 }
933 
/*
 * Translate a MAP_MEM_* caching mode into the matching VM_WIMG_*
 * attribute, written through *wimg.  MAP_MEM_NOOP and unrecognized
 * values leave *wimg untouched, so the caller's prior value acts as
 * the default.
 */
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
951 
952 static inline boolean_t
vm_map_always_shadow(vm_map_t map)953 vm_map_always_shadow(vm_map_t map)
954 {
955 	if (map->mapped_in_other_pmaps) {
956 		/*
957 		 * This is a submap, mapped in other maps.
958 		 * Even if a VM object is mapped only once in this submap,
959 		 * the submap itself could be mapped multiple times,
960 		 * so vm_object_shadow() should always create a shadow
961 		 * object, even if the object has only 1 reference.
962 		 */
963 		return TRUE;
964 	}
965 	return FALSE;
966 }
967 
968 extern void
969 vm_map_sizes(vm_map_t map,
970     vm_map_size_t * psize,
971     vm_map_size_t * pfree,
972     vm_map_size_t * plargest_free);
973 
974 extern void vm_map_guard_exception(
975 	vm_map_offset_t         address,
976 	unsigned                reason);
977 
978 #endif /* MACH_KERNEL_PRIVATE */
979 
980 __END_DECLS
981 
982 #endif  /* _VM_VM_MAP_INTERNAL_H_ */
983