1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #ifndef _VM_VM_MAP_INTERNAL_H_
58 #define _VM_VM_MAP_INTERNAL_H_
59
60 #include <vm/vm_map_xnu.h>
61 #include <vm/vm_kern_xnu.h>
62 #include <mach/vm_types_unsafe.h>
63 #include <vm/vm_sanitize_internal.h>
64 #include <kern/thread_test_context.h>
65 #ifdef MACH_KERNEL_PRIVATE
66 #include <vm/vm_object_internal.h>
67 #endif /* MACH_KERNEL_PRIVATE */
68
69 __BEGIN_DECLS
70
71 #ifdef XNU_KERNEL_PRIVATE
72
73 /* Check protection */
74 extern boolean_t vm_map_check_protection(
75 vm_map_t map,
76 vm_map_offset_ut start_u,
77 vm_map_offset_ut end_u,
78 vm_prot_ut protection_u,
79 vm_sanitize_caller_t vm_sanitize_caller);
80
81 extern kern_return_t vm_map_wire_impl(
82 vm_map_t map,
83 vm_map_offset_ut start_u,
84 vm_map_offset_ut end_u,
85 vm_prot_ut prot_u,
86 vm_tag_t tag,
87 boolean_t user_wire,
88 ppnum_t *physpage_p,
89 vm_sanitize_caller_t vm_sanitize_caller);
90
91 extern kern_return_t vm_map_unwire_impl(
92 vm_map_t map,
93 vm_map_offset_ut start_u,
94 vm_map_offset_ut end_u,
95 boolean_t user_wire,
96 vm_sanitize_caller_t vm_sanitize_caller);
97
98 #endif /* XNU_KERNEL_PRIVATE */
99 #ifdef MACH_KERNEL_PRIVATE
100 #pragma GCC visibility push(hidden)
101
102 /* definitions related to overriding the NX behavior */
103 #define VM_ABI_32 0x1
104 #define VM_ABI_64 0x2
105
106 /*
107 * This file contains interfaces that are private to the VM
108 */
109
110 #define KiB(x) (1024 * (x))
111 #define MeB(x) (1024 * 1024 * (x))
112
113 #if __LP64__
114 #define KMEM_SMALLMAP_THRESHOLD (MeB(1))
115 #else
116 #define KMEM_SMALLMAP_THRESHOLD (KiB(256))
117 #endif
118
119 struct kmem_page_meta;
120
121
122 /* We can't extern this from vm_kern.h because we can't include pmap.h */
123 extern void kernel_memory_populate_object_and_unlock(
124 vm_object_t object, /* must be locked */
125 vm_address_t addr,
126 vm_offset_t offset,
127 vm_size_t size,
128 struct vm_page *page_list,
129 kma_flags_t flags,
130 vm_tag_t tag,
131 vm_prot_t prot,
132 pmap_mapping_type_t mapping_type);
133
134 /* Initialize the module */
135 extern void vm_map_init(void);
136
137 /*!
138 * @function vm_map_locate_space_anywhere()
139 *
140 * @brief
141 * Locate (no reservation) a range in the specified VM map.
142 *
143 * @param map the map to scan for memory, must be locked.
144 * @param size the size of the allocation to make.
145 * @param mask an alignment mask the allocation must respect.
146 * (takes vmk_flags.vmkf_guard_before into account).
147 * @param vmk_flags the vm map kernel flags to influence this call.
148 * vmk_flags.vmf_anywhere must be set.
149 * @param start_inout in: an optional address to start scanning from, or 0
150 * @param entry_out the entry right before the hole.
151 *
152 * @returns
153 * - KERN_SUCCESS in case of success, in which case:
154 * o the address pointed at by @c start_inout is updated to the start
155 * of the range located
156 * o entry_out is set to the entry right before the hole in the map.
157 *
158 * - KERN_INVALID_ARGUMENT if some of the parameters aren't right
159 * (typically invalid vmk_flags).
160 *
161 * - KERN_NO_SPACE if no space was found with the specified constraints.
162 */
163 extern kern_return_t vm_map_locate_space_anywhere(
164 vm_map_t map,
165 vm_map_size_t size,
166 vm_map_offset_t mask,
167 vm_map_kernel_flags_t vmk_flags,
168 vm_map_offset_t *start_inout,
169 vm_map_entry_t *entry_out);
170
171 /* Allocate a range in the specified virtual address map and
172 * return the entry allocated for that range. */
173 extern kern_return_t vm_map_find_space(
174 vm_map_t map,
175 vm_map_address_t hint_addr,
176 vm_map_size_t size,
177 vm_map_offset_t mask,
178 vm_map_kernel_flags_t vmk_flags,
179 vm_map_entry_t *o_entry); /* OUT */
180
181 extern void vm_map_clip_start(
182 vm_map_t map,
183 vm_map_entry_t entry,
184 vm_map_offset_t endaddr);
185
186 extern void vm_map_clip_end(
187 vm_map_t map,
188 vm_map_entry_t entry,
189 vm_map_offset_t endaddr);
190
191 extern boolean_t vm_map_entry_should_cow_for_true_share(
192 vm_map_entry_t entry);
193
194 /*!
195 * @typedef vmr_flags_t
196 *
197 * @brief
198 * Flags for vm_map_remove() and vm_map_delete()
199 *
200 * @const VM_MAP_REMOVE_NO_FLAGS
201 * When no special flags is to be passed.
202 *
203 * @const VM_MAP_REMOVE_KUNWIRE
204 * Unwire memory as a side effect.
205 *
206 * @const VM_MAP_REMOVE_INTERRUPTIBLE
207 * Whether the call is interruptible if it needs to wait for a vm map
208 * entry to quiesce (interruption leads to KERN_ABORTED).
209 *
210 * @const VM_MAP_REMOVE_NOKUNWIRE_LAST
211 * Do not unwire the last page of this entry during remove.
212 * (Used by kmem_realloc()).
213 *
214 * @const VM_MAP_REMOVE_IMMUTABLE
215 * Allow permanent entries to be removed.
216 *
217 * @const VM_MAP_REMOVE_GAPS_FAIL
218 * Return KERN_INVALID_VALUE when a gap is being removed instead of panicking.
219 *
220 * @const VM_MAP_REMOVE_NO_YIELD.
221 * Try to avoid yielding during this call.
222 *
 * @const VM_MAP_REMOVE_GUESS_SIZE
 * The caller doesn't know the precise size of the entry,
 * but the address must match an atomic entry.
 *
 * @const VM_MAP_REMOVE_IMMUTABLE_CODE
 * Allow executable entries to be removed (for VM_PROT_COPY),
 * which is used by debuggers.
 *
 * @const VM_MAP_REMOVE_NO_MAP_ALIGN
 * Do not round the range to the map's page alignment
 * (NOTE(review): inferred from the flag name — confirm against
 * vm_map_delete()).
 *
 * @const VM_MAP_REMOVE_TO_OVERWRITE
 * The removal is performed on behalf of an overwrite operation
 * (NOTE(review): inferred from the flag name — confirm against callers).
 */
/* See the vmr_flags_t documentation comment above for per-flag semantics. */
__options_decl(vmr_flags_t, uint32_t, {
	VM_MAP_REMOVE_NO_FLAGS          = 0x000,
	VM_MAP_REMOVE_KUNWIRE           = 0x001,
	VM_MAP_REMOVE_INTERRUPTIBLE     = 0x002,
	VM_MAP_REMOVE_NOKUNWIRE_LAST    = 0x004,
	/* presumably: don't round the range to map alignment — confirm vs vm_map_delete() */
	VM_MAP_REMOVE_NO_MAP_ALIGN      = 0x008,
	VM_MAP_REMOVE_IMMUTABLE         = 0x010,
	VM_MAP_REMOVE_GAPS_FAIL         = 0x020,
	VM_MAP_REMOVE_NO_YIELD          = 0x040,
	VM_MAP_REMOVE_GUESS_SIZE        = 0x080,
	VM_MAP_REMOVE_IMMUTABLE_CODE    = 0x100,
	/* presumably: removal performed on behalf of an overwrite — confirm vs callers */
	VM_MAP_REMOVE_TO_OVERWRITE      = 0x200,
});
244
245 /* Deallocate a region */
246 extern kmem_return_t vm_map_remove_guard(
247 vm_map_t map,
248 vm_map_offset_t start,
249 vm_map_offset_t end,
250 vmr_flags_t flags,
251 kmem_guard_t guard) __result_use_check;
252
253 extern kmem_return_t vm_map_remove_and_unlock(
254 vm_map_t map,
255 vm_map_offset_t start,
256 vm_map_offset_t end,
257 vmr_flags_t flags,
258 kmem_guard_t guard) __result_use_check;
259
260 /* Deallocate a region */
261 static inline void
vm_map_remove(vm_map_t map,vm_map_offset_t start,vm_map_offset_t end)262 vm_map_remove(
263 vm_map_t map,
264 vm_map_offset_t start,
265 vm_map_offset_t end)
266 {
267 vmr_flags_t flags = VM_MAP_REMOVE_NO_FLAGS;
268 kmem_guard_t guard = KMEM_GUARD_NONE;
269
270 (void)vm_map_remove_guard(map, start, end, flags, guard);
271 }
272
273 extern bool kmem_is_ptr_range(vm_map_range_id_t range_id);
274
275 extern mach_vm_range_t kmem_validate_range_for_overwrite(
276 vm_map_offset_t addr,
277 vm_map_size_t size);
278
279 extern uint32_t kmem_addr_get_slot_idx(
280 vm_map_offset_t start,
281 vm_map_offset_t end,
282 vm_map_range_id_t range_id,
283 struct kmem_page_meta **meta,
284 uint32_t *size_idx,
285 mach_vm_range_t slot);
286
287 extern void kmem_validate_slot(
288 vm_map_offset_t addr,
289 struct kmem_page_meta *meta,
290 uint32_t size_idx,
291 uint32_t slot_idx);
292
293 /*
294 * Function used to allocate VA from kmem pointer ranges
295 */
296 extern kern_return_t kmem_locate_space(
297 vm_map_size_t size,
298 vm_map_range_id_t range_id,
299 bool direction,
300 vm_map_offset_t *start_inout,
301 vm_map_entry_t *entry_out);
302
303 /*
304 * Function used to free VA to kmem pointer ranges
305 */
306 extern void kmem_free_space(
307 vm_map_offset_t start,
308 vm_map_offset_t end,
309 vm_map_range_id_t range_id,
310 mach_vm_range_t slot);
311
312 ppnum_t vm_map_get_phys_page(
313 vm_map_t map,
314 vm_offset_t offset);
315
316 /* Change inheritance */
317 extern kern_return_t vm_map_inherit(
318 vm_map_t map,
319 vm_map_offset_ut start,
320 vm_map_offset_ut end,
321 vm_inherit_ut new_inheritance);
322
323 /* Change protection */
324 extern kern_return_t vm_map_protect(
325 vm_map_t map,
326 vm_map_offset_ut start_u,
327 vm_map_offset_ut end_u,
328 boolean_t set_max,
329 vm_prot_ut new_prot_u);
330
331 #pragma GCC visibility pop
332
/*
 * Set the VM object (or lack thereof) backing a VM map entry.
 *
 * @param entry    the map entry to update.
 * @param object   the backing object: NULL, the kernel object, or a
 *                 regular VM object are each handled differently below.
 * @param atomic   whether the entry is marked atomic.
 * @param context  context value, recorded only for atomic entries.
 *
 * Also clears the submap, resilient-codesign and JIT markers, and
 * refreshes the entry's "backed by the kernel object" flag.
 */
static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	/* vm_object pointers are assumed at least 4-byte aligned */
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		/* non-atomic entries carry no context */
		entry->vme_context = 0;
	}

	if (!object) {
		/* no backing object at all */
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
		/*
		 * For kernel-object entries, vme_object_or_delta may hold a
		 * btref; don't clobber it here — it is released separately
		 * (see vme_btref_consider_and_put()).
		 */
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		/* switching away from the kernel object: drop any held btref */
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}
372
373
/*
 * Set the backing-object offset of a VM map entry.
 *
 * The offset is stored right-shifted by VME_OFFSET_SHIFT, so the low
 * VME_OFFSET_SHIFT bits of @c offset must be zero; the assert checks
 * that the value round-trips unchanged through VME_OFFSET().
 */
static inline void
VME_OFFSET_SET(
	vm_map_entry_t     entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
382
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared". It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
/*
 * Set the alias (tag) of a VM map entry.
 * The assert checks that the value fits in the vme_alias field,
 * i.e. that no bits outside VME_ALIAS_MASK are set.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int   alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}
397
398 static inline void
VME_OBJECT_SHADOW(vm_map_entry_t entry,vm_object_size_t length,bool always)399 VME_OBJECT_SHADOW(
400 vm_map_entry_t entry,
401 vm_object_size_t length,
402 bool always)
403 {
404 vm_object_t object;
405 vm_object_offset_t offset;
406
407 object = VME_OBJECT(entry);
408 offset = VME_OFFSET(entry);
409 vm_object_shadow(&object, &offset, length, always);
410 if (object != VME_OBJECT(entry)) {
411 entry->vme_object_or_delta = VM_OBJECT_PACK(object);
412 entry->use_pmap = true;
413 }
414 if (offset != VME_OFFSET(entry)) {
415 VME_OFFSET_SET(entry, offset);
416 }
417 }
418
419 extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
420
421 static inline bool
vmtaglog_matches(vm_tag_t tag)422 vmtaglog_matches(vm_tag_t tag)
423 {
424 switch (vmtaglog_tag) {
425 case VM_KERN_MEMORY_NONE:
426 return false;
427 case VM_KERN_MEMORY_FIRST_DYNAMIC:
428 return tag >= VM_KERN_MEMORY_FIRST_DYNAMIC;
429 case VM_KERN_MEMORY_ANY:
430 return tag != VM_KERN_MEMORY_NONE;
431 default:
432 return tag == vmtaglog_tag;
433 }
434 }
435
/*
 * Record a backtrace reference on @c entry for wired-memory tag
 * logging when the entry's tag matches the vmtaglog tunable, the
 * entry is backed by the kernel object, and it is wired.
 * @c fp is passed through to btref_get().
 * No-op unless VM_BTLOG_TAGS is configured.
 */
static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
{
#if VM_BTLOG_TAGS
	if (vmtaglog_matches(VME_ALIAS(entry)) && entry->vme_kernel_object && entry->wired_count) {
		assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
	}
#endif /* VM_BTLOG_TAGS */
}
446
/*
 * Release the backtrace reference held by @c entry (if any) once the
 * entry is fully unwired (both kernel and user wire counts are zero)
 * and it is backed by the kernel object.
 * No-op unless VM_BTLOG_TAGS is configured.
 */
static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)
{
#if VM_BTLOG_TAGS
	if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
		btref_put(entry->vme_tag_btref);
		entry->vme_tag_btref = 0; /* avoid a dangling/double-put btref */
	}
#endif /* VM_BTLOG_TAGS */
}
457
458 extern kern_return_t
459 vm_map_copy_adjust_to_target(
460 vm_map_copy_t copy_map,
461 vm_map_offset_ut offset,
462 vm_map_size_ut size,
463 vm_map_t target_map,
464 boolean_t copy,
465 vm_map_copy_t *target_copy_map_p,
466 vm_map_offset_t *overmap_start_p,
467 vm_map_offset_t *overmap_end_p,
468 vm_map_offset_t *trimmed_start_p);
469
470
471 __attribute__((always_inline))
472 int vm_map_lock_read_to_write(vm_map_t map);
473
474 __attribute__((always_inline))
475 boolean_t vm_map_try_lock(vm_map_t map);
476
477 __attribute__((always_inline))
478 boolean_t vm_map_try_lock_read(vm_map_t map);
479
480 int vm_self_region_page_shift(vm_map_t target_map);
481 int vm_self_region_page_shift_safely(vm_map_t target_map);
482
/* Look up the map entry containing, or the next entry after, the specified address in the given map */
484 extern boolean_t vm_map_lookup_entry_or_next(
485 vm_map_t map,
486 vm_map_address_t address,
487 vm_map_entry_t *entry); /* OUT */
488
489 /* like vm_map_lookup_entry without the PGZ bear trap */
490 #if CONFIG_PROB_GZALLOC
491 extern boolean_t vm_map_lookup_entry_allow_pgz(
492 vm_map_t map,
493 vm_map_address_t address,
494 vm_map_entry_t *entry); /* OUT */
495 #else /* !CONFIG_PROB_GZALLOC */
496 #define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
497 #endif /* !CONFIG_PROB_GZALLOC */
498
499
500 extern void vm_map_copy_remap(
501 vm_map_t map,
502 vm_map_entry_t where,
503 vm_map_copy_t copy,
504 vm_map_offset_t adjustment,
505 vm_prot_t cur_prot,
506 vm_prot_t max_prot,
507 vm_inherit_t inheritance);
508
509 /* Find the VM object, offset, and protection for a given virtual address
510 * in the specified map, assuming a page fault of the type specified. */
511 extern kern_return_t vm_map_lookup_and_lock_object(
512 vm_map_t *var_map, /* IN/OUT */
513 vm_map_address_t vaddr,
514 vm_prot_t fault_type,
515 int object_lock_type,
516 vm_map_version_t *out_version, /* OUT */
517 vm_object_t *object, /* OUT */
518 vm_object_offset_t *offset, /* OUT */
519 vm_prot_t *out_prot, /* OUT */
520 boolean_t *wired, /* OUT */
521 vm_object_fault_info_t fault_info, /* OUT */
522 vm_map_t *real_map, /* OUT */
523 bool *contended); /* OUT */
524
525 /* Verifies that the map has not changed since the given version. */
526 extern boolean_t vm_map_verify(
527 vm_map_t map,
528 vm_map_version_t *version); /* REF */
529
530
531 /* simplify map entries */
532 extern void vm_map_simplify_entry(
533 vm_map_t map,
534 vm_map_entry_t this_entry);
535 extern void vm_map_simplify(
536 vm_map_t map,
537 vm_map_offset_t start);
538
539 #if __arm64__
540 extern kern_return_t vm_map_enter_fourk(
541 vm_map_t map,
542 vm_map_offset_t *address,
543 vm_map_size_t size,
544 vm_map_offset_t mask,
545 vm_map_kernel_flags_t vmk_flags,
546 vm_object_t object,
547 vm_object_offset_t offset,
548 boolean_t needs_copy,
549 vm_prot_t cur_protection,
550 vm_prot_t max_protection,
551 vm_inherit_t inheritance);
552 #endif /* __arm64__ */
553
554
555 /* Enter a mapping */
556 extern kern_return_t vm_map_enter(
557 vm_map_t map,
558 vm_map_offset_t *address,
559 vm_map_size_t size,
560 vm_map_offset_t mask,
561 vm_map_kernel_flags_t vmk_flags,
562 vm_object_t object,
563 vm_object_offset_t offset,
564 boolean_t needs_copy,
565 vm_prot_t cur_protection,
566 vm_prot_t max_protection,
567 vm_inherit_t inheritance);
568
569
570 /* Enter a mapping of a memory object */
571 extern kern_return_t vm_map_enter_mem_object(
572 vm_map_t map,
573 vm_map_offset_ut *address,
574 vm_map_size_ut size,
575 vm_map_offset_ut mask,
576 vm_map_kernel_flags_t vmk_flags,
577 ipc_port_t port,
578 vm_object_offset_ut offset,
579 boolean_t needs_copy,
580 vm_prot_ut cur_protection,
581 vm_prot_ut max_protection,
582 vm_inherit_ut inheritance,
583 upl_page_list_ptr_t page_list,
584 unsigned int page_list_count);
585
586 extern kern_return_t vm_map_remap(
587 vm_map_t target_map,
588 vm_map_offset_ut *address,
589 vm_map_size_ut size,
590 vm_map_offset_ut mask,
591 vm_map_kernel_flags_t vmk_flags,
592 vm_map_t src_map,
593 vm_map_offset_ut memory_address,
594 boolean_t copy,
595 vm_prot_ut *cur_protection,
596 vm_prot_ut *max_protection,
597 vm_inherit_ut inheritance);
598
599
600 /* Add or remove machine-dependent attributes from map regions */
601 extern kern_return_t vm_map_machine_attribute(
602 vm_map_t map,
603 vm_map_offset_ut start,
604 vm_map_offset_ut end,
605 vm_machine_attribute_t attribute,
606 vm_machine_attribute_val_t *value); /* IN/OUT */
607
608 extern kern_return_t vm_map_msync(
609 vm_map_t map,
610 vm_map_address_ut address,
611 vm_map_size_ut size,
612 vm_sync_t sync_flags);
613
614 /* Set paging behavior */
615 extern kern_return_t vm_map_behavior_set(
616 vm_map_t map,
617 vm_map_offset_t start,
618 vm_map_offset_t end,
619 vm_behavior_t new_behavior);
620
621 extern kern_return_t vm_map_region(
622 vm_map_t map,
623 vm_map_offset_ut *address,
624 vm_map_size_ut *size,
625 vm_region_flavor_t flavor,
626 vm_region_info_t info,
627 mach_msg_type_number_t *count,
628 mach_port_t *object_name);
629
630 extern kern_return_t vm_map_region_recurse_64(
631 vm_map_t map,
632 vm_map_offset_ut *address,
633 vm_map_size_ut *size,
634 natural_t *nesting_depth,
635 vm_region_submap_info_64_t info,
636 mach_msg_type_number_t *count);
637
638 /* definitions related to overriding the NX behavior */
639
640 extern int override_nx(vm_map_t map, uint32_t user_tag);
641
642 extern void vm_map_region_top_walk(
643 vm_map_entry_t entry,
644 vm_region_top_info_t top);
645 extern void vm_map_region_walk(
646 vm_map_t map,
647 vm_map_offset_t va,
648 vm_map_entry_t entry,
649 vm_object_offset_t offset,
650 vm_object_size_t range,
651 vm_region_extended_info_t extended,
652 boolean_t look_for_pages,
653 mach_msg_type_number_t count);
654
655 extern void vm_map_copy_ledger(
656 task_t old_task,
657 task_t new_task,
658 int ledger_entry);
659
660 #endif /* MACH_KERNEL_PRIVATE */
661
662 /* Get rid of a map */
663 extern void vm_map_destroy(
664 vm_map_t map);
665
666 extern void vm_map_require(
667 vm_map_t map);
668
669 extern void vm_map_copy_require(
670 vm_map_copy_t copy);
671
672
673 extern kern_return_t vm_map_copy_extract(
674 vm_map_t src_map,
675 vm_map_address_t src_addr,
676 vm_map_size_t len,
677 boolean_t copy,
678 vm_map_copy_t *copy_result, /* OUT */
679 vm_prot_t *cur_prot, /* OUT */
680 vm_prot_t *max_prot, /* OUT */
681 vm_inherit_t inheritance,
682 vm_map_kernel_flags_t vmk_flags);
683
684 #define VM_MAP_COPYIN_SRC_DESTROY 0x00000001
685 #define VM_MAP_COPYIN_USE_MAXPROT 0x00000002
686 #define VM_MAP_COPYIN_ENTRY_LIST 0x00000004
687 #define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
688 #define VM_MAP_COPYIN_FORK 0x00000010
689 #define VM_MAP_COPYIN_ALL_FLAGS 0x0000001F
690
691 extern kern_return_t vm_map_copyin_internal(
692 vm_map_t src_map,
693 vm_map_address_ut src_addr_u,
694 vm_map_size_ut len_u,
695 int flags,
696 vm_map_copy_t *copy_result); /* OUT */
697
698 extern boolean_t vm_map_tpro_enforcement(
699 vm_map_t map);
700
701 extern void vm_map_iokit_mapped_region(
702 vm_map_t map,
703 vm_size_t bytes);
704
705 extern void vm_map_iokit_unmapped_region(
706 vm_map_t map,
707 vm_size_t bytes);
708
709 extern boolean_t first_free_is_valid(vm_map_t);
710
711 extern void vm_map_range_fork(
712 vm_map_t new_map,
713 vm_map_t old_map);
714
715 extern int vm_map_get_user_range(
716 vm_map_t map,
717 vm_map_range_id_t range_id,
718 mach_vm_range_t range);
719
720
721 #ifdef MACH_KERNEL_PRIVATE
722
723 static inline bool
VM_MAP_IS_EXOTIC(vm_map_t map __unused)724 VM_MAP_IS_EXOTIC(
725 vm_map_t map __unused)
726 {
727 #if __arm64__
728 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
729 pmap_is_exotic(map->pmap)) {
730 return true;
731 }
732 #endif /* __arm64__ */
733 return false;
734 }
735
736 static inline bool
VM_MAP_IS_ALIEN(vm_map_t map __unused)737 VM_MAP_IS_ALIEN(
738 vm_map_t map __unused)
739 {
740 /*
741 * An "alien" process/task/map/pmap should mostly behave
742 * as it currently would on iOS.
743 */
744 #if XNU_TARGET_OS_OSX
745 if (map->is_alien) {
746 return true;
747 }
748 return false;
749 #else /* XNU_TARGET_OS_OSX */
750 return true;
751 #endif /* XNU_TARGET_OS_OSX */
752 }
753
754 static inline bool
VM_MAP_POLICY_WX_FAIL(vm_map_t map __unused)755 VM_MAP_POLICY_WX_FAIL(
756 vm_map_t map __unused)
757 {
758 if (VM_MAP_IS_ALIEN(map)) {
759 return false;
760 }
761 return true;
762 }
763
764 static inline bool
VM_MAP_POLICY_WX_STRIP_X(vm_map_t map __unused)765 VM_MAP_POLICY_WX_STRIP_X(
766 vm_map_t map __unused)
767 {
768 if (VM_MAP_IS_ALIEN(map)) {
769 return true;
770 }
771 return false;
772 }
773
774 static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(vm_map_t map __unused)775 VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
776 vm_map_t map __unused)
777 {
778 if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
779 return false;
780 }
781 return true;
782 }
783
/* Policy: should JIT allocations get a randomized address?  Only for "alien" maps. */
static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}
790
791 static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(vm_map_t map __unused)792 VM_MAP_POLICY_ALLOW_JIT_INHERIT(
793 vm_map_t map __unused)
794 {
795 if (VM_MAP_IS_ALIEN(map)) {
796 return false;
797 }
798 return true;
799 }
800
801 static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(vm_map_t map __unused)802 VM_MAP_POLICY_ALLOW_JIT_SHARING(
803 vm_map_t map __unused)
804 {
805 if (VM_MAP_IS_ALIEN(map)) {
806 return false;
807 }
808 return true;
809 }
810
811 static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(vm_map_t map __unused)812 VM_MAP_POLICY_ALLOW_JIT_COPY(
813 vm_map_t map __unused)
814 {
815 if (VM_MAP_IS_ALIEN(map)) {
816 return false;
817 }
818 return true;
819 }
820
821 static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(vm_map_t map __unused)822 VM_MAP_POLICY_WRITABLE_SHARED_REGION(
823 vm_map_t map __unused)
824 {
825 #if __x86_64__
826 return true;
827 #else /* __x86_64__ */
828 if (VM_MAP_IS_EXOTIC(map)) {
829 return true;
830 }
831 return false;
832 #endif /* __x86_64__ */
833 }
834
835 static inline void
vm_prot_to_wimg(unsigned int prot,unsigned int * wimg)836 vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
837 {
838 switch (prot) {
839 case MAP_MEM_NOOP: break;
840 case MAP_MEM_IO: *wimg = VM_WIMG_IO; break;
841 case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break;
842 case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break;
843 case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break;
844 case MAP_MEM_POSTED_REORDERED: *wimg = VM_WIMG_POSTED_REORDERED; break;
845 case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
846 case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break;
847 case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break;
848 case MAP_MEM_RT: *wimg = VM_WIMG_RT; break;
849 default: break;
850 }
851 }
852
853 static inline boolean_t
vm_map_always_shadow(vm_map_t map)854 vm_map_always_shadow(vm_map_t map)
855 {
856 if (map->mapped_in_other_pmaps) {
857 /*
858 * This is a submap, mapped in other maps.
859 * Even if a VM object is mapped only once in this submap,
860 * the submap itself could be mapped multiple times,
861 * so vm_object_shadow() should always create a shadow
862 * object, even if the object has only 1 reference.
863 */
864 return TRUE;
865 }
866 return FALSE;
867 }
868
869 extern void
870 vm_map_sizes(vm_map_t map,
871 vm_map_size_t * psize,
872 vm_map_size_t * pfree,
873 vm_map_size_t * plargest_free);
874
875 extern void vm_map_guard_exception(
876 vm_map_offset_t address,
877 unsigned reason);
878
879 #endif /* MACH_KERNEL_PRIVATE */
880
881 __END_DECLS
882
883 #endif /* _VM_VM_MAP_INTERNAL_H_ */
884