/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <sys/cdefs.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <vm/vm_map_store.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef KERNEL_PRIVATE

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
    vm_map_t new_map,
    task_t task,
    boolean_t is64bit,
    void *fsroot,
    cpu_type_t cpu,
    cpu_subtype_t cpu_subtype,
    boolean_t reslide,
    boolean_t is_driverkit,
    uint32_t rsr_version);

#ifdef MACH_KERNEL_PRIVATE

#define current_map_fast()  (current_thread()->map)
#define current_map()       (current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t            the high-level address map data structure.
 *	vm_map_entry_t      an entry in an address map.
 *	vm_map_version_t    a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t       represents memory copied from an address map,
 *	                    used for inter-map copy operations
 */
typedef struct vm_map_entry *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) NULL)


#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
    decl_lck_mtx_data(, Lock); /* Synchronization */
    union {
        vm_map_t map;       /* map backing submap */
        vm_map_copy_t copy; /* a VM map copy */
    } backing;
    vm_object_offset_t offset;      /* offset into object */
    vm_object_size_t size;          /* size of region */
    vm_object_offset_t data_offset; /* offset to first byte of data */
    unsigned int                    /* Is backing.xxx : */
    /* unsigned  */ access:8,        /* MAP_MEM_* */
    /* vm_prot_t */ protection:4,    /* access permissions */
    /* boolean_t */ is_object:1,     /* ... a VM object (wrapped in a VM map copy) */
    /* boolean_t */ internal:1,      /* ... an internal object */
    /* boolean_t */ is_sub_map:1,    /* ... a submap? */
    /* boolean_t */ is_copy:1,       /* ... a VM map copy */
    /* boolean_t */ is_fully_owned:1; /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
    uint32_t named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY)
#endif
/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgeable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
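/*
 * The same decision tree as a minimal C sketch (illustrative only;
 * "footprint_add()" is a hypothetical placeholder for the pmap-level
 * accounting, not a function declared in this header):
 *
 *	if (entry->is_sub_map) {
 *		// nothing in a submap counts towards the footprint
 *	} else if (entry->iokit_mapped) {
 *		footprint_add(entry->vme_end - entry->vme_start);
 *	} else if (!entry->use_pmap) {
 *		// "alternate accounting": pmap_enter() must not account
 *	} else {
 *		// pmap accounts for its resident pages as it sees fit
 *	}
 */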

#define VME_ALIAS_BITS 12
#define VME_ALIAS_MASK ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT VME_ALIAS_BITS
#define VME_OFFSET_BITS (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT 2
#define VME_SUBMAP_BITS (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

struct vm_map_entry {
    struct vm_map_links links; /* links to other entries */
#define vme_prev  links.prev
#define vme_next  links.next
#define vme_start links.start
#define vme_end   links.end

    struct vm_map_store store;

    union {
        vm_offset_t vme_object_value;
        struct {
            vm_offset_t vme_atomic:1;  /* entry cannot be split/coalesced */
            vm_offset_t is_sub_map:1;  /* Is "object" a submap? */
            vm_offset_t vme_submap:VME_SUBMAP_BITS;
        };
        struct {
            uint32_t vme_ctx_atomic : 1;
            uint32_t vme_ctx_is_sub_map : 1;
            uint32_t vme_context : 30;

            /**
             * If vme_kernel_object==1 && KASAN,
             * vme_object_or_delta holds the delta.
             *
             * If vme_kernel_object==1 && !KASAN,
             * vme_tag_btref holds a btref when vme_alias is equal to the "vmtaglog"
             * boot-arg.
             *
             * If vme_kernel_object==0,
             * vme_object_or_delta holds the packed vm object.
             */
            union {
                vm_page_object_t vme_object_or_delta;
                btref_t vme_tag_btref;
            };
        };
    };

    unsigned long long
    /* vm_tag_t           */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
    /* vm_object_offset_t */ vme_offset:VME_OFFSET_BITS, /* offset into object */

    /* boolean_t */ is_shared:1,     /* region is shared */
    /* boolean_t */ __unused1:1,
    /* boolean_t */ in_transition:1, /* Entry being changed */
    /* boolean_t */ needs_wakeup:1,  /* Waiters on in_transition */
    /* behavior is not defined for submap type */
    /* vm_behavior_t */ behavior:2,  /* user paging behavior hint */
    /* boolean_t */ needs_copy:1,    /* object need to be copied? */

    /* Only in task maps: */
#if defined(__arm64e__)
    /*
     * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
     * We reuse it here to keep track of mappings that have hardware support
     * for read-only/read-write trusted paths.
     */
    /* vm_prot_t-like */ protection:3, /* protection code */
    /* boolean_t */ used_for_tpro:1,
#else /* __arm64e__ */
    /* vm_prot_t-like */ protection:4, /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

    /* vm_prot_t-like */ max_protection:4, /* maximum protection, bit3=UEXEC */
    /* vm_inherit_t */ inheritance:2, /* inheritance */

    /*
     * use_pmap is overloaded:
     * if "is_sub_map":
     *	use a nested pmap?
     * else (i.e. if object):
     *	use pmap accounting
     *	for footprint?
     */
    /* boolean_t */ use_pmap:1,
    /* boolean_t */ no_cache:1,       /* should new pages be cached? */
    /* boolean_t */ vme_permanent:1,  /* mapping cannot be removed */
    /* boolean_t */ superpage_size:1, /* use superpages of a certain size */
    /* boolean_t */ map_aligned:1,    /* align to map's page size */
    /*
     * zero out the wired pages of this entry
     * if it is being deleted without unwiring them
     */
    /* boolean_t */ zero_wired_pages:1,
    /* boolean_t */ used_for_jit:1,
    /* boolean_t */ csm_associated:1, /* code signing monitor will validate */

    /* iokit accounting: use the virtual size rather than resident size: */
    /* boolean_t */ iokit_acct:1,
    /* boolean_t */ vme_resilient_codesign:1,
    /* boolean_t */ vme_resilient_media:1,
    /* boolean_t */ vme_xnu_user_debug:1,
    /* boolean_t */ vme_no_copy_on_read:1,
    /* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
    /* boolean_t */ vme_kernel_object:1; /* vme_object is kernel_object */

    unsigned short wired_count;      /* can be paged if = 0 */
    unsigned short user_wired_count; /* for vm_wire */

#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif /* DEBUG */
#if MAP_ENTRY_CREATION_DEBUG
    struct vm_map_header *vme_creation_maphdr;
    uint32_t vme_creation_bt; /* btref_t */
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if MAP_ENTRY_INSERTION_DEBUG
    uint32_t vme_insertion_bt; /* btref_t */
    vm_map_offset_t vme_start_original;
    vm_map_offset_t vme_end_original;
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};

#define VME_ALIAS(entry) \
    ((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
    vm_map_entry_t entry)
{
    __builtin_assume(entry->vme_submap);
    return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
    vm_map_entry_t entry,
    vm_map_t submap)
{
    __builtin_assume(((vm_offset_t)submap & 3) == 0);

    entry->is_sub_map = true;
    entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}
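/*
 * Packing sketch (illustrative): a vm_map_t is at least 4-byte aligned, so
 * its low VME_SUBMAP_SHIFT (2) bits are always zero and can be reclaimed
 * for the vme_atomic/is_sub_map flags. The round trip is exact for any
 * properly aligned submap pointer:
 *
 *	VME_SUBMAP_SET(entry, submap);
 *	assert(VME_SUBMAP(entry) == submap);
 */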

#if (DEBUG || DEVELOPMENT) && !KASAN
#define VM_BTLOG_TAGS 1
#else
#define VM_BTLOG_TAGS 0
#endif

static inline vm_object_t
_VME_OBJECT(
    vm_map_entry_t entry)
{
    vm_object_t object;

    if (!entry->vme_kernel_object) {
        object = VM_OBJECT_UNPACK(entry->vme_object_or_delta);
        __builtin_assume(!is_kernel_object(object));
    } else {
        object = kernel_object_default;
    }
    return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })

static inline void
VME_OBJECT_SET(
    vm_map_entry_t entry,
    vm_object_t object,
    bool atomic,
    uint32_t context)
{
    __builtin_assume(((vm_offset_t)object & 3) == 0);

    entry->vme_atomic = atomic;
    entry->is_sub_map = false;
    if (atomic) {
        entry->vme_context = context;
    } else {
        entry->vme_context = 0;
    }

    if (!object) {
        entry->vme_object_or_delta = 0;
    } else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
        if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
        {
            entry->vme_object_or_delta = 0;
        }
    } else {
#if VM_BTLOG_TAGS
        if (entry->vme_kernel_object && entry->vme_tag_btref) {
            btref_put(entry->vme_tag_btref);
        }
#endif /* VM_BTLOG_TAGS */
        entry->vme_object_or_delta = VM_OBJECT_PACK(object);
    }

    entry->vme_kernel_object = is_kernel_object(object);
    entry->vme_resilient_codesign = false;
    entry->used_for_jit = false;
}

static inline vm_object_offset_t
VME_OFFSET(
    vm_map_entry_t entry)
{
    return entry->vme_offset << VME_OFFSET_SHIFT;
}

static inline void
VME_OFFSET_SET(
    vm_map_entry_t entry,
    vm_object_offset_t offset)
{
    entry->vme_offset = offset >> VME_OFFSET_SHIFT;
    assert3u(VME_OFFSET(entry), ==, offset);
}
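/*
 * Because the low VME_OFFSET_SHIFT (12) bits of the offset are reclaimed
 * for vme_alias, only offsets with those bits clear can be stored; the
 * assert3u() above catches callers passing a misaligned offset.
 * Illustrative round trip:
 *
 *	VME_OFFSET_SET(entry, 0x5000);	// low 12 bits must be zero
 *	assert(VME_OFFSET(entry) == 0x5000);
 */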

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
    vm_map_entry_t entry,
    unsigned int alias)
{
    assert3u(alias & VME_ALIAS_MASK, ==, alias);
    entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
    vm_map_entry_t entry,
    vm_object_size_t length,
    bool always)
{
    vm_object_t object;
    vm_object_offset_t offset;

    object = VME_OBJECT(entry);
    offset = VME_OFFSET(entry);
    vm_object_shadow(&object, &offset, length, always);
    if (object != VME_OBJECT(entry)) {
        entry->vme_object_or_delta = VM_OBJECT_PACK(object);
        entry->use_pmap = true;
    }
    if (offset != VME_OFFSET(entry)) {
        VME_OFFSET_SET(entry, offset);
    }
}

extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
{
#if VM_BTLOG_TAGS
    if (vmtaglog_tag && (VME_ALIAS(entry) == vmtaglog_tag) &&
        entry->vme_kernel_object && entry->wired_count) {
        assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
        entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
    }
#endif /* VM_BTLOG_TAGS */
}

static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)
{
#if VM_BTLOG_TAGS
    if (entry->vme_tag_btref && entry->vme_kernel_object &&
        (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
        btref_put(entry->vme_tag_btref);
        entry->vme_tag_btref = 0;
    }
#endif /* VM_BTLOG_TAGS */
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE - 1) & SUPERPAGE_MASK)
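/*
 * Example of the rounding arithmetic (illustrative, assuming 4K base pages
 * and SUPERPAGE_NBASEPAGES == 512, i.e. 2MB superpages as on x86_64):
 * SUPERPAGE_ROUND_DOWN(0x250000) == 0x200000 and
 * SUPERPAGE_ROUND_UP(0x250000) == 0x400000.
 */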

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT 65535

typedef struct vm_map_user_range {
    vm_map_address_t vmur_min_address __kernel_data_semantics;

    vm_map_address_t vmur_max_address : 56 __kernel_data_semantics;
    vm_map_range_id_t vmur_range_id : 8;
} *vm_map_user_range_t;

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
    lck_rw_t lock;             /* map lock */
    struct vm_map_header hdr;  /* Map entry header */
#define min_offset hdr.links.start /* start of range */
#define max_offset hdr.links.end   /* end of range */
    pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
    vm_map_size_t size;        /* virtual size */
    uint64_t size_limit;       /* rlimit on address space size */
    uint64_t data_limit;       /* rlimit on data size */
    vm_map_size_t user_wire_limit; /* rlimit on user locked memory */
    vm_map_size_t user_wire_size;  /* current size of user locked memory in this map */
#if __x86_64__
    vm_map_offset_t vmmap_high_start;
#endif /* __x86_64__ */

    os_ref_atomic_t map_refcnt; /* Reference count */

#if CONFIG_MAP_RANGES
#define VM_MAP_EXTRA_RANGES_MAX 1024
    struct mach_vm_range default_range;
    struct mach_vm_range data_range;

    uint16_t extra_ranges_count;
    vm_map_user_range_t extra_ranges;
#endif /* CONFIG_MAP_RANGES */

    union {
        /*
         * If map->disable_vmentry_reuse == TRUE:
         * the end address of the highest allocated vm_map_entry_t.
         */
        vm_map_offset_t vmu1_highest_entry_end;
        /*
         * For a nested VM map:
         * the lowest address in this nested VM map that we would
         * expect to be unnested under normal operation (i.e. for
         * regular copy-on-write on DATA section).
         */
        vm_map_offset_t vmu1_lowest_unnestable_start;
    } vmu1;
#define highest_entry_end vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
    vm_map_entry_t hint; /* hint for quick lookups */
    union {
        struct vm_map_links *vmmap_hole_hint; /* hint for quick hole lookups */
        struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
    } vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
    union {
        vm_map_entry_t _first_free;  /* First free space hint */
        struct vm_map_links *_holes; /* links all holes between entries */
    } f_s; /* Union for free space data structures being used */

#define first_free f_s._first_free
#define holes_list f_s._holes

    unsigned int
    /* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
    /* boolean_t */ wiring_required:1,        /* All memory wired? */
    /* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
    /* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
    /* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
    /* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
    /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
    /* boolean_t */ holelistenabled:1,
    /* boolean_t */ is_nested_map:1,
    /* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
    /* boolean_t */ jit_entry_exists:1,
    /* boolean_t */ has_corpse_footprint:1,
    /* boolean_t */ terminated:1,
    /* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
    /* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
    /* boolean_t */ cs_debugged:1,            /* code-signed but debugged */
    /* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
    /* boolean_t */ single_jit:1,             /* only allow one JIT mapping */
    /* boolean_t */ never_faults:1,           /* this map should never cause faults */
    /* boolean_t */ uses_user_ranges:1,       /* has the map been configured to use user VM ranges */
    /* boolean_t */ tpro_enforcement:1,       /* enforce TPRO propagation */
    /* boolean_t */ corpse_source:1,          /* map is being used to create a corpse for diagnostics. */
    /* reserved  */ pad:10;
    unsigned int timestamp; /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map) ((map)->hdr.links.prev)

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
    unsigned int main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are two formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header "links" entry,
 *		onto which the other entries that represent the
 *		region are chained.
 *
 *		The second format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
#define VM_MAP_COPY_ENTRY_LIST 1
#define VM_MAP_COPY_KERNEL_BUFFER 2
    uint16_t type;
    bool is_kernel_range;
    bool is_user_range;
    vm_map_range_id_t orig_range;
    vm_object_offset_t offset;
    vm_map_size_t size;
    union {
        struct vm_map_header hdr; /* ENTRY_LIST */
        void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
    } c_u;
};


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr   c_u.hdr
#define cpy_kdata c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
    ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
    ((copy)->cpy_hdr.links.prev)
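/*
 * Typical traversal of an entry-list copy object (sketch; the caller is
 * assumed to own the copy, since a vm_map_copy_t may only be used by one
 * thread at a time):
 *
 *	vm_map_entry_t entry;
 *
 *	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
 *	for (entry = vm_map_copy_first_entry(copy);
 *	    entry != vm_map_copy_to_entry(copy);
 *	    entry = entry->vme_next) {
 *		// ... examine [entry->vme_start, entry->vme_end) ...
 *	}
 */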

extern kern_return_t
vm_map_copy_adjust_to_target(
    vm_map_copy_t copy_map,
    vm_map_offset_t offset,
    vm_map_size_t size,
    vm_map_t target_map,
    boolean_t copy,
    vm_map_copy_t *target_copy_map_p,
    vm_map_offset_t *overmap_start_p,
    vm_map_offset_t *overmap_end_p,
    vm_map_offset_t *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map) \
    ((map)->timestamp = 0, \
    lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_w); \
    lck_rw_lock_exclusive(&(map)->lock); \
    MACRO_END

#define vm_map_unlock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_w); \
    (map)->timestamp++; \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_r); \
    lck_rw_lock_shared(&(map)->lock); \
    MACRO_END

#define vm_map_unlock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_r); \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_write_to_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_downgrade); \
    (map)->timestamp++; \
    lck_rw_lock_exclusive_to_shared(&(map)->lock); \
    MACRO_END
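/*
 * Sketch of the "order by map address" rule above, for a caller that must
 * hold two map locks at once (illustrative only):
 *
 *	if ((uintptr_t)map1 < (uintptr_t)map2) {
 *		vm_map_lock(map1);
 *		vm_map_lock(map2);
 *	} else {
 *		vm_map_lock(map2);
 *		vm_map_lock(map1);
 *	}
 */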

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#define vm_map_lock_assert_held(map) \
    LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
    LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
    LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
    LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_NOTHELD)

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_entry_t *entry); /* OUT */
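/*
 * Typical usage (sketch): the map lock must be held across the lookup and
 * any use of the returned entry, which is only stable while the map stays
 * locked:
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// "entry" contains "addr"
 *	} else {
 *		// "entry" precedes the hole containing "addr"
 *	}
 *	vm_map_unlock_read(map);
 */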

/*
 * Look up the map entry containing the specified address, or the next
 * entry after it if no entry contains the address, in the given map.
 */
extern boolean_t vm_map_lookup_entry_or_next(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_entry_t *entry); /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t vm_map_lookup_entry_allow_pgz(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_entry_t *entry); /* OUT */
#else /* !CONFIG_PROB_GZALLOC */
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif /* !CONFIG_PROB_GZALLOC */

extern void vm_map_copy_remap(
    vm_map_t map,
    vm_map_entry_t where,
    vm_map_copy_t copy,
    vm_map_offset_t adjustment,
    vm_prot_t cur_prot,
    vm_prot_t max_prot,
    vm_inherit_t inheritance);

/*
 * Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified.
 */
extern kern_return_t vm_map_lookup_and_lock_object(
    vm_map_t *var_map, /* IN/OUT */
    vm_map_address_t vaddr,
    vm_prot_t fault_type,
    int object_lock_type,
    vm_map_version_t *out_version, /* OUT */
    vm_object_t *object, /* OUT */
    vm_object_offset_t *offset, /* OUT */
    vm_prot_t *out_prot, /* OUT */
    boolean_t *wired, /* OUT */
    vm_object_fault_info_t fault_info, /* OUT */
    vm_map_t *real_map, /* OUT */
    bool *contended); /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
    vm_map_t map,
    vm_map_version_t *version); /* REF */


/*
 *	Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
                                /* Lowest valid address in a map */

#define vm_map_max(map) ((map)->max_offset)
                                /* Highest valid address */

#define vm_map_pmap(map) ((map)->pmap)
                                /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void vm_map_reference(
    vm_map_t map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible) \
    ((map)->timestamp++, \
    lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
    (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map) \
    thread_wakeup((event_t)(&(map)->hdr))
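/*
 * Canonical wait pattern for "in_transition" entries (a sketch of the
 * pattern used in vm_map.c): the wait drops the map lock, so the entry
 * must be looked up again afterwards, as the map timestamp has moved on:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// lock was dropped: re-lookup "entry" before continuing
 *	}
 */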


/* simplify map entries */
extern void vm_map_simplify_entry(
    vm_map_t map,
    vm_map_entry_t this_entry);
extern void vm_map_simplify(
    vm_map_t map,
    vm_map_offset_t start);

#if XNU_PLATFORM_MacOSX

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
    vm_map_copy_t copy);

#endif /* XNU_PLATFORM_MacOSX */

/* Enter a mapping */
extern kern_return_t vm_map_enter(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    vm_map_kernel_flags_t vmk_flags,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    vm_map_kernel_flags_t vmk_flags,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
    vm_map_t map,
    vm_map_address_t *addr,
    vm_map_size_t size,
    vm_map_kernel_flags_t vmk_flags);

extern kern_return_t vm_map_remap(
    vm_map_t target_map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    vm_map_kernel_flags_t vmk_flags,
    vm_map_t src_map,
    vm_map_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
    vm_map_t map,
    void *src_p,
    vm_map_offset_t dst_addr,
    vm_size_t size);

extern kern_return_t vm_map_read_user(
    vm_map_t map,
    vm_map_offset_t src_addr,
    void *dst_p,
    vm_size_t size);

extern void vm_map_inherit_limits(
    vm_map_t new_map,
    const struct _vm_map *old_map);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
    ledger_t ledger,
    vm_map_t old_map,
    int options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE    0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT      0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_inherit_t new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_machine_attribute_t attribute,
    vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t vm_map_msync(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_size_t size,
    vm_sync_t sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_behavior_t new_behavior);

extern kern_return_t vm_map_region(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t *size,
    vm_region_flavor_t flavor,
    vm_region_info_t info,
    mach_msg_type_number_t *count,
    mach_port_t *object_name);

extern kern_return_t vm_map_region_recurse_64(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t *size,
    natural_t *nesting_depth,
    vm_region_submap_info_64_t info,
    mach_msg_type_number_t *count);

extern kern_return_t vm_map_page_query_internal(
    vm_map_t map,
    vm_map_offset_t offset,
    int *disposition,
    int *ref_count);

extern kern_return_t vm_map_query_volatile(
    vm_map_t map,
    mach_vm_size_t *volatile_virtual_size_p,
    mach_vm_size_t *volatile_resident_size_p,
    mach_vm_size_t *volatile_compressed_size_p,
    mach_vm_size_t *volatile_pmap_size_p,
    mach_vm_size_t *volatile_compressed_pmap_size_p);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
    ipc_port_t port);


extern kern_return_t vm_map_set_cache_attr(
    vm_map_t map,
    vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32 0x1
#define VM_ABI_64 0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

extern void vm_map_region_top_walk(
    vm_map_entry_t entry,
    vm_region_top_info_t top);
extern void vm_map_region_walk(
    vm_map_t map,
    vm_map_offset_t va,
    vm_map_entry_t entry,
    vm_object_offset_t offset,
    vm_object_size_t range,
    vm_region_extended_info_t extended,
    boolean_t look_for_pages,
    mach_msg_type_number_t count);


extern void vm_map_copy_footprint_ledgers(
    task_t old_task,
    task_t new_task);
extern void vm_map_copy_ledger(
    task_t old_task,
    task_t new_task,
    int ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
    const char *vmrr_name;
    vm_map_offset_t vmrr_addr;
    vm_map_size_t vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM.  This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
    bool vm_is64bit,
    const struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

/* Create an empty map */
extern vm_map_t vm_map_create(
    pmap_t pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    boolean_t pageable);

extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
    vm_map_t map);

/* Lose a reference */
extern void vm_map_deallocate(
    vm_map_t map);

/* Lose a reference */
extern void vm_map_inspect_deallocate(
    vm_map_inspect_t map);

/* Lose a reference */
extern void vm_map_read_deallocate(
    vm_map_read_t map);

extern vm_map_t vm_map_switch(
    vm_map_t map);

/* Change protection */
extern kern_return_t vm_map_protect(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t new_prot,
    boolean_t set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t protection);

extern boolean_t vm_map_cs_enforcement(
    vm_map_t map);
extern void vm_map_cs_enforcement_set(
    vm_map_t map,
    boolean_t val);

extern void vm_map_cs_debugged_set(
    vm_map_t map,
    boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
extern kern_return_t vm_map_csm_allow_jit(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern void vm_map_will_allocate_early_map(
    vm_map_t *map_owner);

extern void vm_map_relocate_early_maps(
    vm_offset_t delta);

extern void vm_map_relocate_early_elem(
    uint32_t zone_id,
    vm_offset_t new_addr,
    vm_offset_t delta);

/* never fails */
extern vm_map_t vm_map_create_options(
    pmap_t pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    vm_map_create_options_t options);

extern kern_return_t vm_map_wire_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    vm_tag_t tag,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    vm_tag_t tag,
    boolean_t user_wire,
    ppnum_t *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    boolean_t user_wire,
    ppnum_t *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    boolean_t user_wire,
    ppnum_t *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    vm_map_kernel_flags_t vmk_flags,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    vm_map_kernel_flags_t vmk_flags,
    ipc_port_t port,
    vm_object_offset_t offset,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    upl_page_list_ptr_t page_list,
    unsigned int page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    vm_map_kernel_flags_t vmk_flags,
    memory_object_control_t control,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_terminate(
    vm_map_t map);

extern void vm_map_require(
    vm_map_t map);

extern void vm_map_copy_require(
    vm_map_copy_t copy);

extern kern_return_t vm_map_copy_extract(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t copy,
    vm_map_copy_t *copy_result, /* OUT */
    vm_prot_t *cur_prot, /* OUT */
    vm_prot_t *max_prot, /* OUT */
    vm_inherit_t inheritance,
    vm_map_kernel_flags_t vmk_flags);

#endif /* XNU_KERNEL_PRIVATE */

/* Discard a copy without using it */
extern void vm_map_copy_discard(
    vm_map_copy_t copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
    vm_map_t dst_map,
    vm_map_address_t dst_addr,
    vm_map_copy_t copy,
    vm_map_size_t copy_size,
    boolean_t interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES (3)


/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
    vm_map_t dst_map,
    vm_map_copy_t copy,
    vm_map_size_t *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy);

extern kern_return_t vm_map_copyout_size(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy,
    vm_map_size_t copy_size);

extern kern_return_t vm_map_copyout_internal(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy,
    vm_map_size_t copy_size,
    boolean_t consume_on_success,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_copyin(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t src_destroy,
    vm_map_copy_t *copy_result); /* OUT */
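/*
 * Typical round trip (sketch): copy a region out of one map and place it
 * into another. On success vm_map_copyout() consumes the copy object; on
 * failure the caller still owns it and must discard it:
 *
 *	vm_map_copy_t copy;
 *	vm_map_address_t dst_addr;
 *	kern_return_t kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS) {
 *			vm_map_copy_discard(copy);
 *		}
 *	}
 */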

extern kern_return_t vm_map_copyin_common(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t src_destroy,
    boolean_t src_volatile,
    vm_map_copy_t *copy_result, /* OUT */
    boolean_t use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_FORK               0x00000010
#define VM_MAP_COPYIN_ALL_FLAGS          0x0000001F
extern kern_return_t vm_map_copyin_internal(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    int flags,
    vm_map_copy_t *copy_result); /* OUT */


extern void vm_map_disable_NX(
    vm_map_t map);

extern void vm_map_disallow_data_exec(
    vm_map_t map);

extern void vm_map_set_64bit(
    vm_map_t map);

extern void vm_map_set_32bit(
    vm_map_t map);

extern void vm_map_set_jumbo(
    vm_map_t map);

extern void vm_map_set_jit_entitled(
    vm_map_t map);

extern void vm_map_set_max_addr(
    vm_map_t map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
    vm_map_t map,
    vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

extern boolean_t vm_map_tpro(
    vm_map_t map);

extern void vm_map_set_tpro(
    vm_map_t map);

extern boolean_t vm_map_tpro_enforcement(
    vm_map_t map);

extern void vm_map_set_tpro_enforcement(
    vm_map_t map);

extern boolean_t vm_map_set_tpro_range(
    vm_map_t map,
    vm_map_address_t start,
    vm_map_address_t end);

extern boolean_t vm_map_is_64bit(
    vm_map_t map);

extern kern_return_t vm_map_raise_max_offset(
    vm_map_t map,
    vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
    vm_map_t map,
    vm_map_offset_t new_min_offset);

#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
    vm_map_t map,
    vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t vm_compute_max_offset(
    boolean_t is64);

extern void vm_map_get_max_aslr_slide_section(
    vm_map_t map,
    int64_t *max_sections,
    int64_t *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
    vm_map_t map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
    vm_map_t map);

extern kern_return_t vm_map_set_size_limit(
    vm_map_t map,
    uint64_t limit);

extern kern_return_t vm_map_set_data_limit(
    vm_map_t map,
    uint64_t limit);

extern void vm_map_set_user_wire_limit(
    vm_map_t map,
    vm_size_t limit);

extern void vm_map_switch_protect(
    vm_map_t map,
    boolean_t val);

extern void vm_map_iokit_mapped_region(
    vm_map_t map,
    vm_size_t bytes);

extern void vm_map_iokit_unmapped_region(
    vm_map_t map,
    vm_size_t bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
    vm_map_t map);

extern vm_map_offset_t vm_map_page_mask(
    vm_map_t map);

extern int vm_map_page_size(
    vm_map_t map);

extern vm_map_offset_t vm_map_round_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern bool vm_map_range_overflows(
    vm_map_t map,
    vm_map_offset_t addr,
    vm_map_size_t size);
#ifdef XNU_KERNEL_PRIVATE

/* Support for vm_map ranges */
extern kern_return_t vm_map_range_configure(
    vm_map_t map);

extern void vm_map_range_fork(
    vm_map_t new_map,
    vm_map_t old_map);

extern int vm_map_get_user_range(
    vm_map_t map,
    vm_map_range_id_t range_id,
    mach_vm_range_t range);

/*!
 * @function vm_map_kernel_flags_update_range_id()
 *
 * @brief
 * Updates the @c vmkf_range_id field with the adequate value
 * according to the policy for the specified map and the tag set in @c vmk_flags.
 *
 * @discussion
 * This function is meant to be called by Mach VM entry points,
 * which matters for the kernel: allocations with pointers _MUST_
 * be allocated with @c kmem_*() functions.
 *
 * If the range ID is already set, it is preserved.
 */
extern void vm_map_kernel_flags_update_range_id(
    vm_map_kernel_flags_t *flags,
    vm_map_t map);

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
    vm_map_t map,
    vm_map_offset_t offset,
    vm_page_info_flavor_t flavor,
    vm_page_info_t info,
    mach_msg_type_number_t *count);
extern kern_return_t vm_map_page_range_info_internal(
    vm_map_t map,
    vm_map_offset_t start_offset,
    vm_map_offset_t end_offset,
    int effective_page_shift,
    vm_page_info_flavor_t flavor,
    vm_page_info_t info,
    mach_msg_type_number_t *count);

#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
    vm_map_t map)
{
    int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
    /*
     * help ubsan and codegen in general,
     * cannot use PAGE_{MIN,MAX}_SHIFT
     * because of testing code which
     * tests 16k aligned maps on 4k only systems.
     */
    __builtin_assume(shift >= 12 && shift <= 14);
    return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
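/*
 * Worked example (illustrative): for a 16K map, VM_MAP_PAGE_SHIFT() is 14
 * and VM_MAP_PAGE_MASK() is 0x3fff, so
 * VM_MAP_TRUNC_PAGE(0x4001, 0x3fff) == 0x4000,
 * VM_MAP_ROUND_PAGE(0x4001, 0x3fff) == 0x8000, and
 * VM_MAP_PAGE_ALIGNED(0x8000, 0x3fff) is true.
 */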

static inline bool
VM_MAP_IS_EXOTIC(
    vm_map_t map __unused)
{
#if __arm64__
    if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
        pmap_is_exotic(map->pmap)) {
        return true;
    }
#endif /* __arm64__ */
    return false;
}

static inline bool
VM_MAP_IS_ALIEN(
    vm_map_t map __unused)
{
    /*
     * An "alien" process/task/map/pmap should mostly behave
     * as it currently would on iOS.
     */
#if XNU_TARGET_OS_OSX
    if (map->is_alien) {
        return true;
    }
    return false;
#else /* XNU_TARGET_OS_OSX */
    return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return true;
    }
    return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
    vm_map_t map)
{
    return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
    vm_map_t map __unused)
{
#if __x86_64__
    return true;
#else /* __x86_64__ */
    if (VM_MAP_IS_EXOTIC(map)) {
        return true;
    }
    return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
    switch (prot) {
    case MAP_MEM_NOOP:                      break;
    case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
    case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
    case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
    case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
    case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
    case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
    case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
    case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
    case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
    default:                                break;
    }
}

static inline boolean_t
vm_map_always_shadow(vm_map_t map)
{
    if (map->mapped_in_other_pmaps) {
        /*
         * This is a submap, mapped in other maps.
         * Even if a VM object is mapped only once in this submap,
         * the submap itself could be mapped multiple times,
         * so vm_object_shadow() should always create a shadow
         * object, even if the object has only 1 reference.
         */
        return TRUE;
    }
    return FALSE;
}

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);

extern bool vm_map_is_corpse_source(vm_map_t map);
extern void vm_map_set_corpse_source(vm_map_t map);
extern void vm_map_unset_corpse_source(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
    vm_map_t target_map,
    vm_map_offset_t map_offset,
    upl_size_t *size,
    upl_t *upl,
    upl_page_info_array_t page_info,
    unsigned int *page_infoCnt,
    upl_control_flags_t *flags,
    vm_tag_t tag,
    int force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING

extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);

#endif /* CONFIG_DYNAMIC_CODE_SIGNING */

extern kern_return_t vm_map_partial_reap(
    vm_map_t map,
    unsigned int *reclaimed_resident,
    unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
    vm_map_t map,
    boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
    task_t task,
    unsigned int *purgeable_count,
    unsigned int *wired_count,
    unsigned int *clean_count,
    unsigned int *dirty_count,
    unsigned int dirty_budget,
    unsigned int *shared_count,
    int *freezer_error_code,
    boolean_t eval_only);

__enum_decl(freezer_error_code_t, int, {
    FREEZER_ERROR_GENERIC = -1,
    FREEZER_ERROR_EXCESS_SHARED_MEMORY = -2,
    FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO = -3,
    FREEZER_ERROR_NO_COMPRESSOR_SPACE = -4,
    FREEZER_ERROR_NO_SWAP_SPACE = -5,
    FREEZER_ERROR_NO_SLOTS = -6,
});

#endif /* CONFIG_FREEZE */
#if XNU_KERNEL_PRIVATE

boolean_t kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);

#endif /* XNU_KERNEL_PRIVATE */

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int *)((map)->pmap->ledger) + (ledger_id)))

#endif /* KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_MAP_H_ */