/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#ifndef _VM_VM_MAP_INTERNAL_H_
#define _VM_VM_MAP_INTERNAL_H_

#include <vm/vm_map_xnu.h>
#include <vm/vm_kern_xnu.h>
#include <mach/vm_types_unsafe.h>
#include <vm/vm_sanitize_internal.h>
#include <kern/thread_test_context.h>
#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_object_internal.h>
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t map,
	vm_map_offset_ut start_u,
	vm_map_offset_ut end_u,
	vm_prot_ut protection_u,
	vm_sanitize_caller_t vm_sanitize_caller);

extern kern_return_t vm_map_wire_impl(
	vm_map_t map,
	vm_map_offset_ut start_u,
	vm_map_offset_ut end_u,
	vm_prot_ut prot_u,
	vm_tag_t tag,
	boolean_t user_wire,
	ppnum_t *physpage_p,
	vm_sanitize_caller_t vm_sanitize_caller);

extern kern_return_t vm_map_unwire_impl(
	vm_map_t map,
	vm_map_offset_ut start_u,
	vm_map_offset_ut end_u,
	boolean_t user_wire,
	vm_sanitize_caller_t vm_sanitize_caller);

#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/* definitions related to overriding the NX behavior */
#define VM_ABI_32 0x1
#define VM_ABI_64 0x2

/*
 * This file contains interfaces that are private to the VM
 */

#define KiB(x) (1024 * (x))
#define MeB(x) (1024 * 1024 * (x))

#if __LP64__
#define KMEM_SMALLMAP_THRESHOLD (MeB(1))
#else
#define KMEM_SMALLMAP_THRESHOLD (KiB(256))
#endif
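
/*
 * Worked example: MeB(1) expands to (1024 * 1024 * 1) = 1048576 bytes,
 * so the threshold is 1MiB on LP64 kernels, while 32-bit kernels use
 * KiB(256) = 262144 bytes (256KiB).
 */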

struct kmem_page_meta;


/* We can't extern this from vm_kern.h because we can't include pmap.h */
extern void kernel_memory_populate_object_and_unlock(
	vm_object_t object, /* must be locked */
	vm_address_t addr,
	vm_offset_t offset,
	vm_size_t size,
	struct vm_page *page_list,
	kma_flags_t flags,
	vm_tag_t tag,
	vm_prot_t prot,
	pmap_mapping_type_t mapping_type);

/* Initialize the module */
extern void vm_map_init(void);

/*!
 * @function vm_map_locate_space_anywhere()
 *
 * @brief
 * Locate (without reserving) a range in the specified VM map.
 *
 * @param map           the map to scan for memory, must be locked.
 * @param size          the size of the allocation to make.
 * @param mask          an alignment mask the allocation must respect
 *                      (takes vmk_flags.vmkf_guard_before into account).
 * @param vmk_flags     the vm map kernel flags to influence this call.
 *                      vmk_flags.vmf_anywhere must be set.
 * @param start_inout   in: an optional address to start scanning from, or 0.
 * @param entry_out     the entry right before the hole.
 *
 * @returns
 * - KERN_SUCCESS in case of success, in which case:
 *   o the address pointed at by @c start_inout is updated to the start
 *     of the range located,
 *   o entry_out is set to the entry right before the hole in the map.
 *
 * - KERN_INVALID_ARGUMENT if some of the parameters aren't valid
 *   (typically invalid vmk_flags).
 *
 * - KERN_NO_SPACE if no space was found with the specified constraints.
 */
extern kern_return_t vm_map_locate_space_anywhere(
	vm_map_t map,
	vm_map_size_t size,
	vm_map_offset_t mask,
	vm_map_kernel_flags_t vmk_flags,
	vm_map_offset_t *start_inout,
	vm_map_entry_t *entry_out);
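
/*
 * A minimal usage sketch (illustrative only, not part of this interface;
 * assumes the VM_MAP_KERNEL_FLAGS_ANYWHERE() initializer and the
 * vm_map_lock()/vm_map_unlock() macros from vm_map.h):
 *
 *     vm_map_offset_t start = 0;
 *     vm_map_entry_t  entry;
 *     kern_return_t   kr;
 *
 *     vm_map_lock(map);
 *     kr = vm_map_locate_space_anywhere(map, size, 0,
 *         VM_MAP_KERNEL_FLAGS_ANYWHERE(), &start, &entry);
 *     if (kr == KERN_SUCCESS) {
 *         // start: beginning of a hole large enough for `size` bytes
 *         // entry: the entry right before that hole
 *     }
 *     vm_map_unlock(map);
 */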

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t map,
	vm_map_address_t hint_addr,
	vm_map_size_t size,
	vm_map_offset_t mask,
	vm_map_kernel_flags_t vmk_flags,
	vm_map_entry_t *o_entry); /* OUT */

extern void vm_map_clip_start(
	vm_map_t map,
	vm_map_entry_t entry,
	vm_map_offset_t endaddr);

extern void vm_map_clip_end(
	vm_map_t map,
	vm_map_entry_t entry,
	vm_map_offset_t endaddr);

extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t entry);

/*!
 * @typedef vmr_flags_t
 *
 * @brief
 * Flags for vm_map_remove() and vm_map_delete().
 *
 * @const VM_MAP_REMOVE_NO_FLAGS
 * When no special flags are to be passed.
 *
 * @const VM_MAP_REMOVE_KUNWIRE
 * Unwire memory as a side effect.
 *
 * @const VM_MAP_REMOVE_INTERRUPTIBLE
 * Whether the call is interruptible if it needs to wait for a vm map
 * entry to quiesce (interruption leads to KERN_ABORTED).
 *
 * @const VM_MAP_REMOVE_NOKUNWIRE_LAST
 * Do not unwire the last page of this entry during remove
 * (used by kmem_realloc()).
 *
 * @const VM_MAP_REMOVE_NO_MAP_ALIGN
 * Do not map-align the range being removed.
 *
 * @const VM_MAP_REMOVE_IMMUTABLE
 * Allow permanent entries to be removed.
 *
 * @const VM_MAP_REMOVE_GAPS_FAIL
 * Return KERN_INVALID_VALUE when a gap is being removed instead of panicking.
 *
 * @const VM_MAP_REMOVE_NO_YIELD
 * Try to avoid yielding during this call.
 *
 * @const VM_MAP_REMOVE_GUESS_SIZE
 * The caller doesn't know the precise size of the entry,
 * but the address must match an atomic entry.
 *
 * @const VM_MAP_REMOVE_IMMUTABLE_CODE
 * Allow executable entries to be removed (for VM_PROT_COPY),
 * which is used by debuggers.
 *
 * @const VM_MAP_REMOVE_TO_OVERWRITE
 * The removal is performed on behalf of an overwriting mapping operation.
 */
__options_decl(vmr_flags_t, uint32_t, {
	VM_MAP_REMOVE_NO_FLAGS       = 0x000,
	VM_MAP_REMOVE_KUNWIRE        = 0x001,
	VM_MAP_REMOVE_INTERRUPTIBLE  = 0x002,
	VM_MAP_REMOVE_NOKUNWIRE_LAST = 0x004,
	VM_MAP_REMOVE_NO_MAP_ALIGN   = 0x008,
	VM_MAP_REMOVE_IMMUTABLE      = 0x010,
	VM_MAP_REMOVE_GAPS_FAIL      = 0x020,
	VM_MAP_REMOVE_NO_YIELD       = 0x040,
	VM_MAP_REMOVE_GUESS_SIZE     = 0x080,
	VM_MAP_REMOVE_IMMUTABLE_CODE = 0x100,
	VM_MAP_REMOVE_TO_OVERWRITE   = 0x200,
});

/* Deallocate a region */
extern kmem_return_t vm_map_remove_guard(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vmr_flags_t flags,
	kmem_guard_t guard) __result_use_check;

extern kmem_return_t vm_map_remove_and_unlock(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vmr_flags_t flags,
	kmem_guard_t guard) __result_use_check;

/* Deallocate a region */
static inline void
vm_map_remove(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end)
{
	vmr_flags_t flags = VM_MAP_REMOVE_NO_FLAGS;
	kmem_guard_t guard = KMEM_GUARD_NONE;

	(void)vm_map_remove_guard(map, start, end, flags, guard);
}
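
/*
 * A usage sketch for the guarded variant (illustrative; the flag
 * combination is hypothetical and the kmr_return field name is an
 * assumption about kmem_return_t from vm_kern_xnu.h):
 *
 *     kmem_return_t kmr;
 *
 *     kmr = vm_map_remove_guard(map, start, end,
 *         VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_INTERRUPTIBLE,
 *         KMEM_GUARD_NONE);
 *     if (kmr.kmr_return == KERN_ABORTED) {
 *         // interrupted while waiting for an in-transition entry
 *     }
 */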

extern bool kmem_is_ptr_range(vm_map_range_id_t range_id);

extern mach_vm_range_t kmem_validate_range_for_overwrite(
	vm_map_offset_t addr,
	vm_map_size_t size);

extern uint32_t kmem_addr_get_slot_idx(
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_range_id_t range_id,
	struct kmem_page_meta **meta,
	uint32_t *size_idx,
	mach_vm_range_t slot);

extern void kmem_validate_slot(
	vm_map_offset_t addr,
	struct kmem_page_meta *meta,
	uint32_t size_idx,
	uint32_t slot_idx);

/*
 * Function used to allocate VA from kmem pointer ranges
 */
extern kern_return_t kmem_locate_space(
	vm_map_size_t size,
	vm_map_range_id_t range_id,
	bool direction,
	vm_map_offset_t *start_inout,
	vm_map_entry_t *entry_out);

/*
 * Function used to free VA to kmem pointer ranges
 */
extern void kmem_free_space(
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_range_id_t range_id,
	mach_vm_range_t slot);

ppnum_t vm_map_get_phys_page(
	vm_map_t map,
	vm_offset_t offset);

/* Change inheritance */
extern kern_return_t vm_map_inherit(
	vm_map_t map,
	vm_map_offset_ut start,
	vm_map_offset_ut end,
	vm_inherit_ut new_inheritance);

/* Change protection */
extern kern_return_t vm_map_protect(
	vm_map_t map,
	vm_map_offset_ut start_u,
	vm_map_offset_ut end_u,
	boolean_t set_max,
	vm_prot_ut new_prot_u);

#pragma GCC visibility pop

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object,
	bool atomic,
	uint32_t context)
{
	/* vm_object_t pointers are at least 4-byte aligned */
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object) {
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
		/* preserve a wired-tag backtrace ref already held for the kernel object */
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}


static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
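
/*
 * Offsets are stored shifted right by VME_OFFSET_SHIFT, so only offsets
 * with the low VME_OFFSET_SHIFT bits clear (i.e. page-aligned offsets)
 * survive the round trip; the assert3u() above catches any offset that
 * would lose bits. For example (illustrative):
 *
 *     VME_OFFSET_SET(entry, 0x4000);  // OK: low bits clear
 *     VME_OFFSET_SET(entry, 0x4001);  // asserts: bits lost in the shift
 */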

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared". It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length,
	bool always)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length, always);
	if (object != VME_OBJECT(entry)) {
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}
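
/*
 * Illustrative sketch of the typical copy-on-write use (the caller shape
 * is an assumption, not mandated by this header): when a mapping still
 * marked needs_copy is about to be modified, the entry gets a shadow
 * object; see vm_map_always_shadow() below for when `always` must be
 * true even for singly-referenced objects:
 *
 *     if (entry->needs_copy) {
 *         VME_OBJECT_SHADOW(entry,
 *             entry->vme_end - entry->vme_start,
 *             vm_map_always_shadow(map));
 *         entry->needs_copy = FALSE;
 *     }
 */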

extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
{
#if VM_BTLOG_TAGS
	if (vmtaglog_tag && (VME_ALIAS(entry) == vmtaglog_tag) &&
	    entry->vme_kernel_object && entry->wired_count) {
		/* We should have already zeroed and freed the btref if we're here. */
		assert(!entry->vme_tag_btref);
		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
	}
#endif /* VM_BTLOG_TAGS */
}

static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)
{
#if VM_BTLOG_TAGS
	if (entry->vme_tag_btref && entry->vme_kernel_object &&
	    (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
		btref_put(entry->vme_tag_btref);
		entry->vme_tag_btref = 0;
	}
#endif /* VM_BTLOG_TAGS */
}

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t copy_map,
	vm_map_offset_ut offset,
	vm_map_size_ut size,
	vm_map_t target_map,
	boolean_t copy,
	vm_map_copy_t *target_copy_map_p,
	vm_map_offset_t *overmap_start_p,
	vm_map_offset_t *overmap_end_p,
	vm_map_offset_t *trimmed_start_p);


__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

/* Look up the map entry containing the specified address, or the next
 * entry after it, in the given map */
extern boolean_t vm_map_lookup_entry_or_next(
	vm_map_t map,
	vm_map_address_t address,
	vm_map_entry_t *entry); /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t vm_map_lookup_entry_allow_pgz(
	vm_map_t map,
	vm_map_address_t address,
	vm_map_entry_t *entry); /* OUT */
#else /* !CONFIG_PROB_GZALLOC */
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif /* !CONFIG_PROB_GZALLOC */


extern void vm_map_copy_remap(
	vm_map_t map,
	vm_map_entry_t where,
	vm_map_copy_t copy,
	vm_map_offset_t adjustment,
	vm_prot_t cur_prot,
	vm_prot_t max_prot,
	vm_inherit_t inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_and_lock_object(
	vm_map_t *var_map, /* IN/OUT */
	vm_map_address_t vaddr,
	vm_prot_t fault_type,
	int object_lock_type,
	vm_map_version_t *out_version, /* OUT */
	vm_object_t *object, /* OUT */
	vm_object_offset_t *offset, /* OUT */
	vm_prot_t *out_prot, /* OUT */
	boolean_t *wired, /* OUT */
	vm_object_fault_info_t fault_info, /* OUT */
	vm_map_t *real_map, /* OUT */
	bool *contended); /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
	vm_map_t map,
	vm_map_version_t *version); /* REF */


/* simplify map entries */
extern void vm_map_simplify_entry(
	vm_map_t map,
	vm_map_entry_t this_entry);
extern void vm_map_simplify(
	vm_map_t map,
	vm_map_offset_t start);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
	vm_map_t map,
	vm_map_offset_t *address,
	vm_map_size_t size,
	vm_map_offset_t mask,
	vm_map_kernel_flags_t vmk_flags,
	vm_object_t object,
	vm_object_offset_t offset,
	boolean_t needs_copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance);
#endif /* __arm64__ */


/* Enter a mapping */
extern kern_return_t vm_map_enter(
	vm_map_t map,
	vm_map_offset_t *address,
	vm_map_size_t size,
	vm_map_offset_t mask,
	vm_map_kernel_flags_t vmk_flags,
	vm_object_t object,
	vm_object_offset_t offset,
	boolean_t needs_copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance);


/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
	vm_map_t map,
	vm_map_offset_ut *address,
	vm_map_size_ut size,
	vm_map_offset_ut mask,
	vm_map_kernel_flags_t vmk_flags,
	ipc_port_t port,
	vm_object_offset_ut offset,
	boolean_t needs_copy,
	vm_prot_ut cur_protection,
	vm_prot_ut max_protection,
	vm_inherit_ut inheritance,
	upl_page_list_ptr_t page_list,
	unsigned int page_list_count);

extern kern_return_t vm_map_remap(
	vm_map_t target_map,
	vm_map_offset_ut *address,
	vm_map_size_ut size,
	vm_map_offset_ut mask,
	vm_map_kernel_flags_t vmk_flags,
	vm_map_t src_map,
	vm_map_offset_ut memory_address,
	boolean_t copy,
	vm_prot_ut *cur_protection,
	vm_prot_ut *max_protection,
	vm_inherit_ut inheritance);


/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
	vm_map_t map,
	vm_map_offset_ut start,
	vm_map_offset_ut end,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t vm_map_msync(
	vm_map_t map,
	vm_map_address_ut address,
	vm_map_size_ut size,
	vm_sync_t sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_behavior_t new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t map,
	vm_map_offset_ut *address,
	vm_map_size_ut *size,
	vm_region_flavor_t flavor,
	vm_region_info_t info,
	mach_msg_type_number_t *count,
	mach_port_t *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t map,
	vm_map_offset_ut *address,
	vm_map_size_ut *size,
	natural_t *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t *count);

/* definitions related to overriding the NX behavior */

extern int override_nx(vm_map_t map, uint32_t user_tag);

extern void vm_map_region_top_walk(
	vm_map_entry_t entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t map,
	vm_map_offset_t va,
	vm_map_entry_t entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t look_for_pages,
	mach_msg_type_number_t count);

extern void vm_map_copy_ledger(
	task_t old_task,
	task_t new_task,
	int ledger_entry);

#endif /* MACH_KERNEL_PRIVATE */

/* Get rid of a map */
extern void vm_map_destroy(
	vm_map_t map);

extern void vm_map_require(
	vm_map_t map);

extern void vm_map_copy_require(
	vm_map_copy_t copy);

extern kern_return_t vm_map_copy_extract(
	vm_map_t src_map,
	vm_map_address_t src_addr,
	vm_map_size_t len,
	boolean_t copy,
	vm_map_copy_t *copy_result, /* OUT */
	vm_prot_t *cur_prot, /* OUT */
	vm_prot_t *max_prot, /* OUT */
	vm_inherit_t inheritance,
	vm_map_kernel_flags_t vmk_flags);

#define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_FORK               0x00000010
#define VM_MAP_COPYIN_ALL_FLAGS          0x0000001F

extern kern_return_t vm_map_copyin_internal(
	vm_map_t src_map,
	vm_map_address_ut src_addr_u,
	vm_map_size_ut len_u,
	int flags,
	vm_map_copy_t *copy_result); /* OUT */
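
/*
 * Illustrative sketch (the flag combination is chosen for the example;
 * construction of the _ut sanitized arguments is omitted): a "move"
 * style copyin destroys the source range as it is captured:
 *
 *     vm_map_copy_t copy;
 *     kern_return_t kr;
 *
 *     kr = vm_map_copyin_internal(src_map, src_addr_u, len_u,
 *         VM_MAP_COPYIN_SRC_DESTROY | VM_MAP_COPYIN_ENTRY_LIST, &copy);
 *     if (kr == KERN_SUCCESS) {
 *         // `copy` owns the captured entries; the source range is gone
 *     }
 */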

extern boolean_t vm_map_tpro_enforcement(
	vm_map_t map);

extern void vm_map_iokit_mapped_region(
	vm_map_t map,
	vm_size_t bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t map,
	vm_size_t bytes);

extern boolean_t first_free_is_valid(vm_map_t);

extern void vm_map_range_fork(
	vm_map_t new_map,
	vm_map_t old_map);

extern int vm_map_get_user_range(
	vm_map_t map,
	vm_map_range_id_t range_id,
	mach_vm_range_t range);


#ifdef MACH_KERNEL_PRIVATE

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}
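
/*
 * Illustrative sketch (the caller shape is an assumption): these policy
 * predicates let mapping code decide how to react to a request for a
 * writable-and-executable mapping:
 *
 *     if ((cur_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
 *         (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
 *         if (VM_MAP_POLICY_WX_FAIL(map)) {
 *             return KERN_PROTECTION_FAILURE;
 *         }
 *         if (VM_MAP_POLICY_WX_STRIP_X(map)) {
 *             cur_protection &= ~VM_PROT_EXECUTE;
 *         }
 *     }
 */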

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP: break;
	case MAP_MEM_IO: *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED: *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT: *wimg = VM_WIMG_RT; break;
	default: break;
	}
}
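
/*
 * Illustrative sketch (the named_entry variable is hypothetical; the
 * GET_MAP_MEM() accessor is assumed from memory_object_types.h): the
 * MAP_MEM_NOOP and default cases above deliberately leave *wimg
 * untouched, so callers seed it first:
 *
 *     unsigned int wimg_mode = VM_WIMG_USE_DEFAULT;
 *
 *     vm_prot_to_wimg(GET_MAP_MEM(named_entry->protection), &wimg_mode);
 *     // wimg_mode is now e.g. VM_WIMG_IO for MAP_MEM_IO entries
 */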

static inline boolean_t
vm_map_always_shadow(vm_map_t map)
{
	if (map->mapped_in_other_pmaps) {
		/*
		 * This is a submap, mapped in other maps.
		 * Even if a VM object is mapped only once in this submap,
		 * the submap itself could be mapped multiple times,
		 * so vm_object_shadow() should always create a shadow
		 * object, even if the object has only 1 reference.
		 */
		return TRUE;
	}
	return FALSE;
}

extern void vm_map_sizes(
	vm_map_t map,
	vm_map_size_t *psize,
	vm_map_size_t *pfree,
	vm_map_size_t *plargest_free);

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_MAP_INTERNAL_H_ */