/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#ifndef _VM_VM_MAP_INTERNAL_H_
#define _VM_VM_MAP_INTERNAL_H_

#include <vm/vm_map_xnu.h>
#include <vm/vm_kern_xnu.h>
#include <mach/vm_types_unsafe.h>
#include <vm/vm_sanitize_internal.h>
#include <kern/thread_test_context.h>
#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_object_internal.h>
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	vm_prot_ut              protection_u,
	vm_sanitize_caller_t    vm_sanitize_caller);

extern kern_return_t vm_map_wire_impl(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	vm_prot_ut              prot_u,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p,
	vm_sanitize_caller_t    vm_sanitize_caller);

extern kern_return_t vm_map_unwire_impl(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	boolean_t               user_wire,
	vm_sanitize_caller_t    vm_sanitize_caller);

#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/* definitions related to overriding the NX behavior */
#define VM_ABI_32 0x1
#define VM_ABI_64 0x2

/*
 * This file contains interfaces that are private to the VM
 */

#define KiB(kb) ((kb) << 10ull)
#define BtoKiB(b) ((b) >> 10)
#define MiB(mb) ((mb) << 20ull)
#define BtoMiB(b) ((b) >> 20)

#if __LP64__
#define KMEM_SMALLMAP_THRESHOLD (MiB(1))
#else
#define KMEM_SMALLMAP_THRESHOLD (KiB(256))
#endif

struct kmem_page_meta;


/* We can't extern this from vm_kern.h because we can't include pmap.h */
extern void kernel_memory_populate_object_and_unlock(
	vm_object_t             object, /* must be locked */
	vm_address_t            addr,
	vm_offset_t             offset,
	vm_size_t               size,
	struct vm_page          *page_list,
	kma_flags_t             flags,
	vm_tag_t                tag,
	vm_prot_t               prot,
	pmap_mapping_type_t     mapping_type);

/* Initialize the module */
extern void vm_map_init(void);
/*!
 * @function vm_map_locate_space_anywhere()
 *
 * @brief
 * Locate (without reserving it) a range in the specified VM map.
 *
 * @param map           the map to scan for memory, must be locked.
 * @param size          the size of the allocation to make.
 * @param mask          an alignment mask the allocation must respect
 *                      (takes vmk_flags.vmkf_guard_before into account).
 * @param vmk_flags     the vm map kernel flags to influence this call.
 *                      vmk_flags.vmf_anywhere must be set.
 * @param start_inout   in: an optional address to start scanning from, or 0.
 * @param entry_out     the entry right before the hole.
 *
 * @returns
 * - KERN_SUCCESS in case of success, in which case:
 *   o the address pointed at by @c start_inout is updated to the start
 *     of the range located,
 *   o entry_out is set to the entry right before the hole in the map.
 *
 * - KERN_INVALID_ARGUMENT if some of the parameters aren't right
 *   (typically invalid vmk_flags).
 *
 * - KERN_NO_SPACE if no space was found with the specified constraints.
 */
extern kern_return_t vm_map_locate_space_anywhere(
	vm_map_t                map,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_offset_t         *start_inout,
	vm_map_entry_t          *entry_out);
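/*
 * Usage sketch (illustrative only; `map`, `size`, and `vmk_flags` are
 * assumed to be set up by the caller, with vmk_flags.vmf_anywhere set):
 *
 *	vm_map_offset_t start = 0;
 *	vm_map_entry_t  entry;
 *	kern_return_t   kr;
 *
 *	vm_map_lock(map);
 *	kr = vm_map_locate_space_anywhere(map, size, 0, vmk_flags,
 *	    &start, &entry);
 *	if (kr == KERN_SUCCESS) {
 *		// `start` now points at a hole large enough for `size`,
 *		// and `entry` precedes that hole; nothing was reserved,
 *		// so the range is only valid while the map stays locked.
 *	}
 *	vm_map_unlock(map);
 */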

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        hint_addr,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_entry_t          *o_entry); /* OUT */

extern void vm_map_clip_start(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_offset_t         endaddr);

extern void vm_map_clip_end(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_map_offset_t         endaddr);

extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t          entry);

extern void vm_map_seal(
	vm_map_t                map,
	bool                    nested_pmap);

/*!
 * @typedef vmr_flags_t
 *
 * @brief
 * Flags for vm_map_remove() and vm_map_delete()
 *
 * @const VM_MAP_REMOVE_NO_FLAGS
 * When no special flags are to be passed.
 *
 * @const VM_MAP_REMOVE_KUNWIRE
 * Unwire memory as a side effect.
 *
 * @const VM_MAP_REMOVE_INTERRUPTIBLE
 * Whether the call is interruptible if it needs to wait for a vm map
 * entry to quiesce (interruption leads to KERN_ABORTED).
 *
 * @const VM_MAP_REMOVE_NOKUNWIRE_LAST
 * Do not unwire the last page of this entry during remove.
 * (Used by kmem_realloc()).
 *
 * @const VM_MAP_REMOVE_IMMUTABLE
 * Allow permanent entries to be removed.
 *
 * @const VM_MAP_REMOVE_GAPS_FAIL
 * Return KERN_INVALID_VALUE when a gap is being removed instead of panicking.
 *
 * @const VM_MAP_REMOVE_NO_YIELD
 * Try to avoid yielding during this call.
 *
 * @const VM_MAP_REMOVE_GUESS_SIZE
 * The caller doesn't know the precise size of the entry,
 * but the address must match an atomic entry.
 *
 * @const VM_MAP_REMOVE_IMMUTABLE_CODE
 * Allow executable entries to be removed (for VM_PROT_COPY),
 * which is used by debuggers.
 *
 * @const VM_MAP_REMOVE_TO_OVERWRITE
 * The removal is performed on behalf of an operation that will
 * overwrite the range.
 */
__options_decl(vmr_flags_t, uint32_t, {
	VM_MAP_REMOVE_NO_FLAGS          = 0x000,
	VM_MAP_REMOVE_KUNWIRE           = 0x001,
	VM_MAP_REMOVE_INTERRUPTIBLE     = 0x002,
	VM_MAP_REMOVE_NOKUNWIRE_LAST    = 0x004,
	VM_MAP_REMOVE_IMMUTABLE         = 0x008,
	VM_MAP_REMOVE_GAPS_FAIL         = 0x010,
	VM_MAP_REMOVE_NO_YIELD          = 0x020,
	VM_MAP_REMOVE_GUESS_SIZE        = 0x040,
	VM_MAP_REMOVE_IMMUTABLE_CODE    = 0x080,
	VM_MAP_REMOVE_TO_OVERWRITE      = 0x100,
});

/* Deallocate a region */
extern kmem_return_t vm_map_remove_guard(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vmr_flags_t             flags,
	kmem_guard_t            guard) __result_use_check;

extern kmem_return_t vm_map_remove_and_unlock(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vmr_flags_t             flags,
	kmem_guard_t            guard) __result_use_check;

/* Deallocate a region */
static inline void
vm_map_remove(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end)
{
	vmr_flags_t flags = VM_MAP_REMOVE_NO_FLAGS;
	kmem_guard_t guard = KMEM_GUARD_NONE;

	(void)vm_map_remove_guard(map, start, end, flags, guard);
}
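
/*
 * Illustrative sketch (not a declaration from this file): the flags
 * combine with bitwise OR. A hypothetical kernel caller that wants
 * wired pages unwired on removal, and an error rather than a panic
 * when the range contains a gap, might do:
 *
 *	kmem_return_t kmr;
 *
 *	kmr = vm_map_remove_guard(map, start, end,
 *	    VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_GAPS_FAIL,
 *	    KMEM_GUARD_NONE);
 */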

extern bool kmem_is_ptr_range(vm_map_range_id_t range_id);

extern mach_vm_range_t kmem_validate_range_for_overwrite(
	vm_map_offset_t         addr,
	vm_map_size_t           size);

extern uint32_t kmem_addr_get_slot_idx(
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_range_id_t       range_id,
	struct kmem_page_meta   **meta,
	uint32_t                *size_idx,
	mach_vm_range_t         slot);

extern void kmem_validate_slot(
	vm_map_offset_t         addr,
	struct kmem_page_meta   *meta,
	uint32_t                size_idx,
	uint32_t                slot_idx);

/*
 * Function used to allocate VA from kmem pointer ranges
 */
extern kern_return_t kmem_locate_space(
	vm_map_size_t           size,
	vm_map_range_id_t       range_id,
	bool                    direction,
	vm_map_offset_t         *start_inout,
	vm_map_entry_t          *entry_out);

/*
 * Function used to free VA to kmem pointer ranges
 */
extern void kmem_free_space(
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_range_id_t       range_id,
	mach_vm_range_t         slot);

extern ppnum_t vm_map_get_phys_page(
	vm_map_t                map,
	vm_offset_t             offset);

/* Change inheritance */
extern kern_return_t vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_ut        start,
	vm_map_offset_ut        end,
	vm_inherit_ut           new_inheritance);

/* Change protection */
extern kern_return_t vm_map_protect(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	boolean_t               set_max,
	vm_prot_ut              new_prot_u);

#pragma GCC visibility pop

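/*
 * Set the VM object of a map entry. VM object pointers are at least
 * 4-byte aligned, which is what allows VM_OBJECT_PACK() to reuse the
 * low bits; the __builtin_assume() below states that invariant for
 * the compiler.
 */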
static inline void
VME_OBJECT_SET(
	vm_map_entry_t          entry,
	vm_object_t             object,
	bool                    atomic,
	uint32_t                context)
{
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object) {
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
#if HAS_MTE
	if (object == kernel_object_tagged) {
		entry->vme_is_tagged = TRUE;
	} else if (object == kernel_object_default) {
		entry->vme_is_tagged = FALSE;
	}
#endif /* HAS_MTE */
}


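/*
 * The offset is stored shifted right by VME_OFFSET_SHIFT, so callers
 * must pass an offset whose low VME_OFFSET_SHIFT bits are clear; the
 * assert below checks that the stored value round-trips exactly.
 */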
static inline void
VME_OFFSET_SET(
	vm_map_entry_t          entry,
	vm_object_offset_t      offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared". It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t          entry,
	unsigned int            alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t          entry,
	vm_object_size_t        length,
	bool                    always)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length, always);
	if (object != VME_OBJECT(entry)) {
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}

extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */

static inline bool
vmtaglog_matches(vm_tag_t tag)
{
	switch (vmtaglog_tag) {
	case VM_KERN_MEMORY_NONE:
		return false;
	case VM_KERN_MEMORY_FIRST_DYNAMIC:
		return tag >= VM_KERN_MEMORY_FIRST_DYNAMIC;
	case VM_KERN_MEMORY_ANY:
		return tag != VM_KERN_MEMORY_NONE;
	default:
		return tag == vmtaglog_tag;
	}
}
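
/*
 * For example, when the tunable selects VM_KERN_MEMORY_ANY, every
 * allocation with a real tag (anything but VM_KERN_MEMORY_NONE)
 * matches; VM_KERN_MEMORY_FIRST_DYNAMIC restricts matching to
 * dynamically registered tags; any other value matches only itself.
 */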

static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
{
#if VM_BTLOG_TAGS
	if (vmtaglog_matches(VME_ALIAS(entry)) && entry->vme_kernel_object && entry->wired_count) {
		assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
	}
#endif /* VM_BTLOG_TAGS */
}

static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)
{
#if VM_BTLOG_TAGS
	if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
		btref_put(entry->vme_tag_btref);
		entry->vme_tag_btref = 0;
	}
#endif /* VM_BTLOG_TAGS */
}

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_ut        offset,
	vm_map_size_ut          size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);


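/*
 * Upgrade a read (shared) hold on the map lock to a write (exclusive)
 * one. A sketch of the expected contract (mirroring
 * lck_rw_lock_shared_to_exclusive()): returns 0 when the upgrade
 * succeeds; on failure the lock is no longer held at all and the
 * caller must re-take it and re-validate any state it derived.
 */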
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

/* Look up the map entry containing the specified address, or,
 * failing that, the next entry after it, in the given map. */
extern boolean_t vm_map_lookup_entry_or_next(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry); /* OUT */

extern void vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_and_lock_object(
	vm_map_t                *var_map, /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version, /* OUT */
	vm_object_t             *object, /* OUT */
	vm_object_offset_t      *offset, /* OUT */
	vm_prot_t               *out_prot, /* OUT */
	boolean_t               *wired, /* OUT */
	vm_object_fault_info_t  fault_info, /* OUT */
	vm_map_t                *real_map, /* OUT */
	bool                    *contended); /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version); /* REF */


/* simplify map entries */
extern void vm_map_simplify_entry(
	vm_map_t                map,
	vm_map_entry_t          this_entry);
extern void vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* Enter a mapping */
extern kern_return_t vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);


/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_ut        *address,
	vm_map_size_ut          size,
	vm_map_offset_ut        mask,
	vm_map_kernel_flags_t   vmk_flags,
	ipc_port_t              port,
	vm_object_offset_ut     offset,
	boolean_t               needs_copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_ut        *address,
	vm_map_size_ut          size,
	vm_map_offset_ut        mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_t                src_map,
	vm_map_offset_ut        memory_address,
	boolean_t               copy,
	vm_prot_ut              *cur_protection,
	vm_prot_ut              *max_protection,
	vm_inherit_ut           inheritance);


/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_ut        start,
	vm_map_offset_ut        end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t vm_map_msync(
	vm_map_t                map,
	vm_map_address_ut       address,
	vm_map_size_ut          size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                map,
	vm_map_offset_ut        *address,
	vm_map_size_ut          *size,
	vm_region_flavor_t      flavor,
	vm_region_info_t        info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                map,
	vm_map_offset_ut        *address,
	vm_map_size_ut          *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

/* definitions related to overriding the NX behavior */

extern int override_nx(vm_map_t map, uint32_t user_tag);

extern void vm_map_region_top_walk(
	vm_map_entry_t          entry,
	vm_region_top_info_t    top);
extern void vm_map_region_walk(
	vm_map_t                map,
	vm_map_offset_t         va,
	vm_map_entry_t          entry,
	vm_object_offset_t      offset,
	vm_object_size_t        range,
	vm_region_extended_info_t extended,
	boolean_t               look_for_pages,
	mach_msg_type_number_t  count);

extern void vm_map_copy_ledger(
	task_t                  old_task,
	task_t                  new_task,
	int                     ledger_entry);

#endif /* MACH_KERNEL_PRIVATE */

/* Get rid of a map */
extern void vm_map_destroy(
	vm_map_t                map);

extern void vm_map_require(
	vm_map_t                map);

extern void vm_map_copy_require(
	vm_map_copy_t           copy);

#if HAS_MTE
__options_closed_decl(vm_mte_operation_flags_t, uint32_t, {
	/* all operations must have exactly one of these: */
	VM_MTE_OPERATION_TYPE_COPY = 0x1,
	VM_MTE_OPERATION_TYPE_SHARE = 0x2,
	VM_MTE_OPERATION_TYPE_INHERIT_SHARE = 0x4,
	VM_MTE_OPERATION_TYPE_CREATE_UPL = 0x8,
	VM_MTE_OPERATION_TYPE_MASK = VM_MTE_OPERATION_TYPE_COPY | VM_MTE_OPERATION_TYPE_SHARE | VM_MTE_OPERATION_TYPE_INHERIT_SHARE | VM_MTE_OPERATION_TYPE_CREATE_UPL,

	/* all operations except CREATE_UPL require exactly one of these: */
	VM_MTE_OPERATION_DEST_USER = 0x10,
	VM_MTE_OPERATION_DEST_KERNEL = 0x20,
	VM_MTE_OPERATION_DEST_UNKNOWN = 0x40,
	VM_MTE_OPERATION_DEST_INTERNAL = 0x80,
	VM_MTE_OPERATION_DEST_MASK = VM_MTE_OPERATION_DEST_USER | VM_MTE_OPERATION_DEST_KERNEL | VM_MTE_OPERATION_DEST_UNKNOWN | VM_MTE_OPERATION_DEST_INTERNAL,

	/* these flags can be additionally added to any of the above: */
	VM_MTE_OPERATION_IOKIT = 0x100, /* don't throw guard exceptions; IOKit will handle errors */
	VM_MTE_OPERATION_FORK = 0x200, /* apply policies for fork() instead of generic userspace policies */
	VM_MTE_OPERATION_REMAP_EXTRACT = 0x400, /* apply policies for vm_map_remap_extract() */
	VM_MTE_OPERATION_MAKE_MEMORY_ENTRY = 0x800 /* apply policies for mach_make_memory_entry() */
});

__options_closed_decl(option_variant_t, uint8_t, {
	OPTIONAL_NONE,
	OPTIONAL_SOME,
});

#define OPTIONAL_IS_NONE(var) ((var).discriminant == OPTIONAL_NONE)
#define OPTIONAL_IS_SOME(var) ((var).discriminant == OPTIONAL_SOME)

#define DEFINE_OPTIONAL_TYPE(name, T) \
typedef struct {\
	option_variant_t discriminant;\
	T payload;\
} optional_##name##_t;\
\
static inline optional_##name##_t optional_##name##_none(void) {\
	return (optional_##name##_t){\
		.discriminant = OPTIONAL_NONE,\
		.payload = NULL,\
	};\
}\
\
static inline optional_##name##_t optional_##name##_some(T payload) {\
	return (optional_##name##_t){\
		.discriminant = OPTIONAL_SOME,\
		.payload = payload,\
	};\
}\
\
static inline T optional_##name##_expect(optional_##name##_t optional, const char* message) {\
	if (!OPTIONAL_IS_SOME(optional)) {\
		panic("EXPECT(" #name ") failed: %s", message);\
	}\
	return optional.payload;\
}\
\
static inline T optional_##name##_unwrap(optional_##name##_t optional) {\
	return optional_##name##_expect(optional, "Unwrapped a None " #name);\
}


DEFINE_OPTIONAL_TYPE(vm_object, vm_object_t);


/*
 * Since these macros are used in expression contexts, it's not easy to
 * drop in an assertion when an unsupported type is passed in. However, the
 * default error message is pretty clear.
 */
#define OPTIONAL_NONE(var) _Generic((var),\
	vm_object_t: optional_vm_object_none()\
)

#define OPTIONAL_SOME(var) _Generic((var),\
	vm_object_t: optional_vm_object_some((var))\
)

#define OPTIONAL_UNWRAP(var) _Generic((var),\
	optional_vm_object_t: optional_vm_object_unwrap((var))\
)

#define OPTIONAL_EXPECT(var, msg) _Generic((var),\
	optional_vm_object_t: optional_vm_object_expect((var), (msg))\
)
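
/*
 * Usage sketch (hypothetical locals; not declarations from this file):
 *
 *	vm_object_t obj = VME_OBJECT(entry);
 *	optional_vm_object_t maybe_obj = OPTIONAL_SOME(obj);
 *	optional_vm_object_t no_obj = OPTIONAL_NONE(obj);
 *
 *	if (OPTIONAL_IS_SOME(maybe_obj)) {
 *		obj = OPTIONAL_EXPECT(maybe_obj, "object vanished");
 *	}
 *
 * Note that the argument to OPTIONAL_NONE() is only used by _Generic
 * for type dispatch; it is never evaluated.
 */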

extern bool vm_map_allow_mte_operation(
	vm_map_t                source_map,
	vm_map_offset_t         addr,
	vm_size_t               size,
	vm_mte_operation_flags_t flags,
	optional_vm_object_t    maybe_source_vm_object);
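
/*
 * Illustrative sketch (hypothetical caller state): checking whether a
 * share into a user destination is allowed under MTE policy before
 * proceeding, without a source VM object in hand:
 *
 *	vm_object_t obj = VM_OBJECT_NULL;
 *
 *	if (!vm_map_allow_mte_operation(src_map, addr, size,
 *	    VM_MTE_OPERATION_TYPE_SHARE | VM_MTE_OPERATION_DEST_USER,
 *	    OPTIONAL_NONE(obj))) {
 *		// reject the operation
 *	}
 */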
#endif /* HAS_MTE */

extern kern_return_t vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result, /* OUT */
	vm_prot_t               *cur_prot, /* OUT */
	vm_prot_t               *max_prot, /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);

#define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_FORK               0x00000010
#if HAS_MTE
#define VM_MAP_COPYIN_IOKIT              0x00000020
#define VM_MAP_COPYIN_DEST_USER          0x00000040
#define VM_MAP_COPYIN_DEST_KERNEL        0x00000080
#define VM_MAP_COPYIN_DEST_UNKNOWN       0x00000100
#define VM_MAP_COPYIN_ALL_FLAGS          0x000001FF
#else /* !HAS_MTE */
#define VM_MAP_COPYIN_ALL_FLAGS          0x0000001F
#endif /* HAS_MTE */

extern kern_return_t vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_ut       src_addr_u,
	vm_map_size_ut          len_u,
	int                     flags,
	vm_map_copy_t           *copy_result); /* OUT */

extern boolean_t vm_map_tpro_enforcement(
	vm_map_t                map);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern boolean_t first_free_is_valid(vm_map_t);

extern void vm_map_range_fork(
	vm_map_t                new_map,
	vm_map_t                old_map);

extern int vm_map_get_user_range(
	vm_map_t                map,
	vm_map_range_id_t       range_id,
	mach_vm_range_t         range);


#ifdef MACH_KERNEL_PRIVATE

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t                map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t                map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t                map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t                map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t                map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t                map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t                map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t                map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t                map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t                map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
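
/*
 * Illustrative sketch (`memattr` is assumed caller-provided): translate
 * a MAP_MEM_* cache attribute into a WIMG value, keeping the existing
 * default when the attribute is MAP_MEM_NOOP or unrecognized:
 *
 *	unsigned int wimg = VM_WIMG_USE_DEFAULT;
 *
 *	vm_prot_to_wimg(memattr, &wimg);
 */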

static inline boolean_t
vm_map_always_shadow(vm_map_t map)
{
	if (map->mapped_in_other_pmaps) {
		/*
		 * This is a submap, mapped in other maps.
		 * Even if a VM object is mapped only once in this submap,
		 * the submap itself could be mapped multiple times,
		 * so vm_object_shadow() should always create a shadow
		 * object, even if the object has only 1 reference.
		 */
		return TRUE;
	}
	return FALSE;
}

extern void vm_map_sizes(
	vm_map_t                map,
	vm_map_size_t           *psize,
	vm_map_size_t           *pfree,
	vm_map_size_t           *plargest_free);

extern void vm_map_guard_exception(
	vm_map_offset_t         address,
	unsigned                reason);

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_MAP_INTERNAL_H_ */
982