/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
	vm_map_t        new_map,
	task_t          task,
	boolean_t       is64bit,
	void            *fsroot,
	cpu_type_t      cpu,
	cpu_subtype_t   cpu_subtype,
	boolean_t       reslide,
	boolean_t       is_driverkit,
	uint32_t        rsr_version);

__END_DECLS

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)


#define named_entry_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)         lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)       lck_mtx_unlock(&(object)->Lock)

/*
 * Type:	vm_named_entry_t [internal use only]
 *
 * Description:
 *	Description of a mapping to a memory cache object.
 *
 * Implementation:
 *	While the handle to this object is used as a means to map
 *	and pass around the right to map regions backed by pagers
 *	of all sorts, the named_entry itself is only manipulated
 *	by the kernel.  Named entries hold information on the
 *	right to map a region of a cached object.  Namely,
 *	the target cache object, the beginning and ending of the
 *	region to be mapped, and the permissions (read, write)
 *	with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t  offset;             /* offset into object */
	vm_object_size_t    size;               /* size of region */
	vm_object_offset_t  data_offset;        /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* vm_prot_t */     protection:4,       /* access permissions */
	/* boolean_t */     is_object:1,        /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */     internal:1,         /* ... an internal object */
	/* boolean_t */     is_sub_map:1,       /* ... a submap? */
	/* boolean_t */     is_copy:1,          /* ... a VM map copy */
	/* boolean_t */     is_fully_owned:1;   /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t            named_entry_bt;     /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};


/*
 * Type:	vm_map_entry_t [internal use only]
 *
 * Description:
 *	A single mapping within an address map.
 *
 * Implementation:
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sub map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Control information for virtual copy operations is also
 *	stored in the address map entry.
 *
 * Note:
 *	vm_map_relocate_early_elem() knows about this layout,
 *	and needs to be kept in sync.
 */

struct vm_map_links {
	struct vm_map_entry *prev;      /* previous entry */
	struct vm_map_entry *next;      /* next entry */
	vm_map_offset_t     start;      /* start address */
	vm_map_offset_t     end;        /* end address */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgeable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

struct vm_map_entry {
	struct vm_map_links links;      /* links to other entries */
#define vme_prev  links.prev
#define vme_next  links.next
#define vme_start links.start
#define vme_end   links.end

	struct vm_map_store store;

	union {
		vm_offset_t vme_object_value;
		struct {
			vm_offset_t vme_atomic:1;  /* entry cannot be split/coalesced */
			vm_offset_t is_sub_map:1;  /* Is "object" a submap? */
			vm_offset_t vme_submap:VME_SUBMAP_BITS;
		};
#if __LP64__
		struct {
			uint32_t    vme_ctx_atomic : 1;
			uint32_t    vme_ctx_is_sub_map : 1;
			uint32_t    vme_context : 30;
			vm_page_object_t vme_object;
		};
#endif
	};

	unsigned long long
	/* vm_tag_t           */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t */ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t */ is_shared:1,       /* region is shared */
	/* boolean_t */ __unused1:1,
	/* boolean_t */ in_transition:1,   /* Entry being changed */
	/* boolean_t */ needs_wakeup:1,    /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t */ behavior:2,    /* user paging behavior hint */
	/* boolean_t */ needs_copy:1,      /* object need to be copied? */

	/* Only in task maps: */
#if defined(__arm64e__)
	/*
	 * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
	 * We reuse it here to keep track of mappings that have hardware support
	 * for read-only/read-write trusted paths.
	 */
	/* vm_prot_t-like */ protection:3,    /* protection code */
	/* boolean_t      */ used_for_tpro:1,
#else /* __arm64e__ */
	/* vm_prot_t-like */ protection:4,    /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

	/* vm_prot_t-like */ max_protection:4, /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t   */ inheritance:2,    /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *	use a nested pmap?
	 * else (i.e. if object):
	 *	use pmap accounting
	 *	for footprint?
	 */
	/* boolean_t */ use_pmap:1,
	/* boolean_t */ no_cache:1,        /* should new pages be cached? */
	/* boolean_t */ vme_permanent:1,   /* mapping can not be removed */
	/* boolean_t */ superpage_size:1,  /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,     /* align to map's page size */
	/*
	 * zero out the wired pages of this entry
	 * if is being deleted without unwiring them
	 */
	/* boolean_t */ zero_wired_pages:1,
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ __unused2:1,
	/* boolean_t */ vme_no_copy_on_read:1,
	/* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t */ vme_kernel_object:1;  /* vme_object is kernel_object */

	unsigned short  wired_count;       /* can be paged if = 0 */
	unsigned short  user_wired_count;  /* for vm_wire */

#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header *vme_creation_maphdr;
	uint32_t        vme_creation_bt;   /* btref_t */
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	uint32_t        vme_insertion_bt;  /* btref_t */
	vm_map_offset_t vme_start_original;
	vm_map_offset_t vme_end_original;
#endif
};
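
/*
 * Illustrative sketch of the FOOTPRINT ACCOUNTING rules documented above.
 * This is not a real kernel routine: the function name and the string
 * return convention are hypothetical, and "iokit_mapped" in that comment
 * corresponds to the "iokit_acct" bit of the entry.  It only restates the
 * decision tree in code form.
 */
static inline const char *
vm_map_entry_footprint_rule(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return "none: submap contents never count";
	} else if (entry->iokit_acct) {
		return "virtual: whole entry size counted at entry/removal";
	} else if (!entry->use_pmap) {
		return "alternate: pmap told not to account";
	}
	return "pmap: accounted page by page at pmap_enter() time";
}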

#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
	vm_map_entry_t entry)
{
	__builtin_assume(entry->vme_submap);
	return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	__builtin_assume(((vm_offset_t)submap & 3) == 0);

	entry->is_sub_map = true;
	entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}

static inline vm_object_t
_VME_OBJECT(
	vm_map_entry_t entry)
{
	vm_object_t object = kernel_object;

	if (!entry->vme_kernel_object) {
#if __LP64__
		object = VM_OBJECT_UNPACK(entry->vme_object);
		__builtin_assume(object != kernel_object);
#else
		object = (vm_object_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
#endif
	}
	return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object,
	bool atomic,
	uint32_t context)
{
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
#if __LP64__
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}
#else
	(void)context;
#endif

	if (!object || object == kernel_object) {
#if __LP64__
		entry->vme_object = 0;
#else
		entry->vme_submap = 0;
#endif
	} else {
#if __LP64__
		entry->vme_object = VM_OBJECT_PACK(object);
#else
		entry->vme_submap = (vm_offset_t)object >> VME_SUBMAP_SHIFT;
#endif
	}

	entry->vme_kernel_object = (object == kernel_object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}
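
/*
 * Usage sketch (illustrative only, hypothetical function name): an entry
 * points at either a VM object or a submap through the same packed word,
 * so callers go through the setters above, which keep "is_sub_map" and
 * the packed pointer consistent.
 */
static inline void
vm_map_entry_example_set_backing(
	vm_map_entry_t entry,
	vm_object_t object,
	vm_map_t submap)
{
	if (submap != VM_MAP_NULL) {
		VME_SUBMAP_SET(entry, submap);  /* sets is_sub_map = true */
		assert(VME_SUBMAP(entry) == submap);
	} else {
		/* a plain, non-atomic mapping with no context */
		VME_OBJECT_SET(entry, object, false, 0);
		assert(VME_OBJECT(entry) == object);
	}
}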

static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
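
/*
 * Packing note (illustrative, hypothetical helper): the VM tag and the
 * object offset share one 64-bit word, so an offset only round-trips
 * through VME_OFFSET_SET()/VME_OFFSET() if it is a multiple of
 * (1 << VME_OFFSET_SHIFT), i.e. 4 KiB with VME_ALIAS_BITS == 12; the
 * assert3u() above catches offsets with low bits set.
 */
static inline bool
vme_offset_is_packable(vm_object_offset_t offset)
{
	return (offset & ((1ull << VME_OFFSET_SHIFT) - 1)) == 0;
}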

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length,
	bool always)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length, always);
	if (object != VME_OBJECT(entry)) {
#if __LP64__
		entry->vme_object = VM_OBJECT_PACK(object);
#else
		entry->vme_submap = (vm_offset_t)object >> VME_SUBMAP_SHIFT;
#endif
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE          (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK          (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a)   ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
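
/*
 * Worked example (illustrative): with a 4 KiB base page and
 * SUPERPAGE_NBASEPAGES == 512, SUPERPAGE_SIZE is 2 MiB (0x200000), so:
 *	SUPERPAGE_ROUND_DOWN(0x212345) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x212345)   == 0x400000
 * Note that the macros do not parenthesize 'a', so pass a simple variable
 * or an already-parenthesized expression.
 */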

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535



/*
 * Type:	struct vm_map_header
 *
 * Description:
 *	Header for a vm_map and a vm_map_copy.
 *
 * Note:
 *	vm_map_relocate_early_elem() knows about this layout,
 *	and needs to be kept in sync.
 */


struct vm_map_header {
	struct vm_map_links links;      /* first, last, min, max */
	int                 nentries;   /* Number of entries */
	uint16_t            page_shift; /* page shift */
	unsigned int
	/* boolean_t */ entries_pageable : 1, /* are map entries pageable? */
	/* reserved  */ __padding : 15;
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head rb_head_store;
#endif
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr)  (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr)  (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 * Type:	vm_map_t [exported; contents invisible]
 *
 * Description:
 *	An address map -- a directory relating valid
 *	regions of a task's address space to the corresponding
 *	virtual memory objects.
 *
 * Implementation:
 *	Maps are doubly-linked lists of map entries, sorted
 *	by address.  One hint is used to start
 *	searches again from the last successful search,
 *	insertion, or removal.  Another hint is used to
 *	quickly find free space.
 *
 * Note:
 *	vm_map_relocate_early_elem() knows about this layout,
 *	and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t             lock;      /* map lock */
	struct vm_map_header hdr;       /* Map entry header */
#define min_offset hdr.links.start      /* start of range */
#define max_offset hdr.links.end        /* end of range */
	pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
	vm_map_size_t   size;           /* virtual size */
	uint64_t        size_limit;     /* rlimit on address space size */
	uint64_t        data_limit;     /* rlimit on data size */
	vm_map_size_t   user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t   user_wire_size; /* current size of user locked memory in this map */
#if CONFIG_MAP_RANGES
	struct mach_vm_range user_range[UMEM_RANGE_COUNT]; /* user VM ranges */
#endif
#if XNU_TARGET_OS_OSX
	vm_map_offset_t vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t hint;            /* hint for quick lookups */
	union {
		struct vm_map_links *vmmap_hole_hint; /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint              vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t      _first_free; /* First free space hint */
		struct vm_map_links *_holes;     /* links all holes between entries */
	} f_s;                          /* Union for free space data structures being used */

#define first_free f_s._first_free
#define holes_list f_s._holes

	os_ref_atomic_t map_refcnt;     /* Reference count */

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,            /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,             /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,           /* this map should never cause faults */
	/* boolean_t */ uses_user_ranges:1,       /* has the map been configured to use user VM ranges */
	/* reserved  */ pad:12;
	unsigned int timestamp;         /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map)    CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

/*
 * Type:	vm_map_version_t [exported; contents invisible]
 *
 * Description:
 *	Map versions may be used to quickly validate a previous
 *	lookup operation.
 *
 * Usage note:
 *	Because they are bulky objects, map versions are usually
 *	passed by reference.
 *
 * Implementation:
 *	Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int main_timestamp;
} vm_map_version_t;

/*
 * Type:	vm_map_copy_t [exported; contents invisible]
 *
 * Description:
 *	A map copy object represents a region of virtual memory
 *	that has been copied from an address map but is still
 *	in transit.
 *
 *	A map copy object may only be used by a single thread
 *	at a time.
 *
 * Implementation:
 *	There are three formats for map copy objects.
 *	The first is very similar to the main
 *	address map in structure, and as a result, some
 *	of the internal maintenance functions/macros can
 *	be used with either address maps or map copy objects.
 *
 *	The map copy object contains a header links
 *	entry onto which the other entries that represent
 *	the region are chained.
 *
 *	The second format is a single vm object.  This was used
 *	primarily in the pageout path - but is not currently used
 *	except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *	The third format is a kernel buffer copy object - for data
 *	small enough that physical copies were the most efficient
 *	method.  This method uses a zero-sized array unioned with
 *	other format-specific data in the 'c_u' member.  This unsized
 *	array overlaps the other elements and allows us to use this
 *	extra structure space for physical memory copies.  On 64-bit
 *	systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
	int type;
#define VM_MAP_COPY_ENTRY_LIST    1
#define VM_MAP_COPY_OBJECT        2
#define VM_MAP_COPY_KERNEL_BUFFER 3
	vm_object_offset_t offset;
	vm_map_size_t      size;
	union {
		struct vm_map_header hdr;    /* ENTRY_LIST */
		vm_object_t          object; /* OBJECT */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
	} c_u;
};
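
/*
 * Illustrative sketch (hypothetical function, not part of this header's
 * interface): consumers of a vm_map_copy_t dispatch on the three formats
 * described above before touching the union.
 */
static inline const char *
vm_map_copy_format_name(vm_map_copy_t copy)
{
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		return "entry list";     /* chained entries in c_u.hdr */
	case VM_MAP_COPY_OBJECT:
		return "object";         /* single VM object in c_u.object */
	case VM_MAP_COPY_KERNEL_BUFFER:
		return "kernel buffer";  /* physical copy via c_u.kdata */
	default:
		return "unknown";
	}
}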


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr    c_u.hdr

#define cpy_object c_u.object
#define cpy_kdata  c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy)  (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy)  (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
	((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
	((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t   copy_map,
	vm_map_offset_t offset,
	vm_map_size_t   size,
	vm_map_t        target_map,
	boolean_t       copy,
	vm_map_copy_t   *target_copy_map_p,
	vm_map_offset_t *overmap_start_p,
	vm_map_offset_t *overmap_end_p,
	vm_map_offset_t *trimmed_start_p);

/*
 * Macros:	vm_map_lock, etc. [internal use only]
 * Description:
 *	Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map) \
	((map)->timestamp = 0 , \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map) \
	MACRO_BEGIN \
	DTRACE_VM(vm_map_lock_w); \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map) \
	MACRO_BEGIN \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++; \
	lck_rw_done(&(map)->lock); \
	MACRO_END

#define vm_map_lock_read(map) \
	MACRO_BEGIN \
	DTRACE_VM(vm_map_lock_r); \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map) \
	MACRO_BEGIN \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock); \
	MACRO_END

#define vm_map_lock_write_to_read(map) \
	MACRO_BEGIN \
	DTRACE_VM(vm_map_lock_downgrade); \
	(map)->timestamp++; \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 * Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing (or, failing that, the entry preceding)
 * the specified address in the given map; returns TRUE only if the address
 * falls within an existing entry. */
extern boolean_t vm_map_lookup_entry(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_entry_t  *entry); /* OUT */

/* Look up the map entry containing the specified address, or the next
 * entry after it, in the given map. */
extern boolean_t vm_map_lookup_entry_or_next(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_entry_t  *entry); /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t vm_map_lookup_entry_allow_pgz(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_entry_t  *entry); /* OUT */
#else
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif
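
/*
 * Usage sketch (illustrative, hypothetical function name): a typical
 * shared-lock section around a lookup.  Writers take vm_map_lock()
 * instead; only the exclusive paths bump the map timestamp on unlock.
 */
static inline bool
vm_map_example_entry_exists(vm_map_t map, vm_map_address_t addr)
{
	vm_map_entry_t entry;
	boolean_t found;

	vm_map_lock_read(map);
	found = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	/* "entry" must not be used here: it was only stable under the lock */
	return found == TRUE;
}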

extern void vm_map_copy_remap(
	vm_map_t        map,
	vm_map_entry_t  where,
	vm_map_copy_t   copy,
	vm_map_offset_t adjustment,
	vm_prot_t       cur_prot,
	vm_prot_t       max_prot,
	vm_inherit_t    inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_and_lock_object(
	vm_map_t        *var_map,          /* IN/OUT */
	vm_map_address_t vaddr,
	vm_prot_t       fault_type,
	int             object_lock_type,
	vm_map_version_t *out_version,     /* OUT */
	vm_object_t     *object,           /* OUT */
	vm_object_offset_t *offset,        /* OUT */
	vm_prot_t       *out_prot,         /* OUT */
	boolean_t       *wired,            /* OUT */
	vm_object_fault_info_t fault_info, /* OUT */
	vm_map_t        *real_map,         /* OUT */
	bool            *contended);       /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
	vm_map_t        map,
	vm_map_version_t *version); /* REF */
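
/*
 * Usage sketch (illustrative, hypothetical function name): the classic
 * lookup-then-revalidate pattern.  A lookup records the map version; after
 * dropping the map lock to do blocking work, the caller re-locks and calls
 * vm_map_verify() to detect whether the map changed in the meantime.
 */
static inline boolean_t
vm_map_example_relock_and_verify(vm_map_t map, vm_map_version_t *version)
{
	vm_map_lock_read(map);
	if (!vm_map_verify(map, version)) {
		/* map changed while unlocked: caller must redo its lookup */
		vm_map_unlock_read(map);
		return FALSE;
	}
	/* still consistent: return holding the shared lock */
	return TRUE;
}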


/*
 * Functions implemented as macros
 */
#define vm_map_min(map)  ((map)->min_offset)  /* Lowest valid address in a map */

#define vm_map_max(map)  ((map)->max_offset)  /* Highest valid address */

#define vm_map_pmap(map) ((map)->pmap)        /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void vm_map_reference(
	vm_map_t        map);

/*
 * Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible) \
	((map)->timestamp++ , \
	lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	    (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map) \
	thread_wakeup((event_t)(&(map)->hdr))

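/*
 * Usage sketch (illustrative, hypothetical function name): waiting out an
 * in-transition entry.  The waiter sets needs_wakeup so whoever clears
 * in_transition knows to call vm_map_entry_wakeup(); vm_map_entry_wait()
 * sleeps and re-acquires the exclusive map lock, and the entry may have
 * been clipped or freed meanwhile, hence the re-lookup each iteration.
 */
static inline void
vm_map_example_wait_in_transition(vm_map_t map, vm_map_address_t addr)
{
	vm_map_entry_t entry;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, addr, &entry) &&
	    entry->in_transition) {
		entry->needs_wakeup = TRUE;
		(void) vm_map_entry_wait(map, THREAD_UNINT);
	}
	vm_map_unlock(map);
}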

/* simplify map entries */
extern void vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void vm_map_simplify(
	vm_map_t        map,
	vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
	vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
	vm_object_t     object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_map_copy_t   *copy_result); /* OUT */

extern kern_return_t vm_map_random_address_for_size(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_kernel_flags_t vmk_flags);

/* Enter a mapping */
extern kern_return_t vm_map_enter(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_object_t     object,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_object_t     object,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
	vm_map_t        map,
	vm_map_address_t *addr,
	vm_map_size_t   size,
	int             flags,
	vm_map_kernel_flags_t vmk_flags);

extern kern_return_t vm_map_remap(
	vm_map_t        target_map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_map_t        src_map,
	vm_map_offset_t memory_address,
	boolean_t       copy,
	vm_prot_t       *cur_protection,
	vm_prot_t       *max_protection,
	vm_inherit_t    inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
	vm_map_t        map,
	void            *src_p,
	vm_map_offset_t dst_addr,
	vm_size_t       size);

extern kern_return_t vm_map_read_user(
	vm_map_t        map,
	vm_map_offset_t src_addr,
	void            *dst_p,
	vm_size_t       size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
	ledger_t        ledger,
	vm_map_t        old_map,
	int             options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE    0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT      0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_inherit_t    new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t vm_map_msync(
	vm_map_t        map,
	vm_map_address_t address,
	vm_map_size_t   size,
	vm_sync_t       sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_behavior_t   new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   *size,
	vm_region_flavor_t flavor,
	vm_region_info_t info,
	mach_msg_type_number_t *count,
	mach_port_t     *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   *size,
	natural_t       *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t        map,
	vm_map_offset_t offset,
	int             *disposition,
	int             *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32 0x1
#define VM_ABI_64 0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);


extern void vm_map_region_top_walk(
	vm_map_entry_t  entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t        map,
	vm_map_offset_t va,
	vm_map_entry_t  entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t       look_for_pages,
	mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
	task_t          old_task,
	task_t          new_task);
extern void vm_map_copy_ledger(
	task_t          old_task,
	task_t          new_task,
	int             ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	char            *vmrr_name;
	vm_map_offset_t vmrr_addr;
	vm_map_size_t   vmrr_size;
};
/**
 * Returns a machine-dependent array of address space regions that should be
 * reserved by the VM.  This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool            vm_is64bit,
	struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t vm_map_create(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	boolean_t       pageable);

extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
	vm_map_t        map);

/* Lose a reference */
extern void vm_map_deallocate(
	vm_map_t        map);

/* Lose a reference */
extern void vm_map_inspect_deallocate(
	vm_map_inspect_t map);

/* Lose a reference */
extern void vm_map_read_deallocate(
	vm_map_read_t   map);

extern vm_map_t vm_map_switch(
	vm_map_t        map);

/* Change protection */
extern kern_return_t vm_map_protect(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       new_prot,
	boolean_t       set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t        map);
extern void vm_map_cs_enforcement_set(
	vm_map_t        map,
	boolean_t       val);

extern void vm_map_cs_debugged_set(
	vm_map_t        map,
	boolean_t       val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern void vm_map_will_allocate_early_map(
	vm_map_t        *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t     delta);

extern void vm_map_relocate_early_elem(
	uint32_t        zone_id,
	vm_offset_t     new_addr,
	vm_offset_t     delta);

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t          pmap,
	vm_map_offset_t min_off,
	vm_map_offset_t max_off,
	vm_map_create_options_t options);

extern kern_return_t vm_map_wire_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	vm_tag_t        tag,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       access_type,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       access_type,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	ipc_port_t      port,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	ipc_port_t      port,
	vm_object_offset_t offset,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	upl_page_list_ptr_t page_list,
	unsigned int    page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	memory_object_control_t control,
	vm_object_offset_t offset,
	boolean_t       needs_copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

extern kern_return_t vm_map_terminate(
	vm_map_t        map);

extern void vm_map_require(
	vm_map_t        map);

extern void vm_map_copy_require(
	vm_map_copy_t   copy);

extern kern_return_t vm_map_copy_extract(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       copy,
	vm_map_copy_t   *copy_result, /* OUT */
	vm_prot_t       *cur_prot,    /* OUT */
	vm_prot_t       *max_prot,    /* OUT */
	vm_inherit_t    inheritance,
	vm_map_kernel_flags_t vmk_flags);

#endif /* XNU_KERNEL_PRIVATE */

/* Discard a copy without using it */
extern void vm_map_copy_discard(
	vm_map_copy_t   copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
	vm_map_t        dst_map,
	vm_map_address_t dst_addr,
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size,
	boolean_t       interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES (3)

/* Returns TRUE if the size of the vm_map_copy equals the given size parameter, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
	vm_map_t        dst_map,
	vm_map_copy_t   copy,
	vm_map_size_t   *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr, /* OUT */
	vm_map_copy_t   copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr, /* OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size);

extern kern_return_t vm_map_copyout_internal(
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr, /* OUT */
	vm_map_copy_t   copy,
	vm_map_size_t   copy_size,
	boolean_t       consume_on_success,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

extern kern_return_t vm_map_copyin(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	vm_map_copy_t   *copy_result); /* OUT */

extern kern_return_t vm_map_copyin_common(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	boolean_t       src_destroy,
	boolean_t       src_volatile,
	vm_map_copy_t   *copy_result, /* OUT */
	boolean_t       use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY        0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT        0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST         0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS          0x0000000F
extern kern_return_t vm_map_copyin_internal(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	int             flags,
	vm_map_copy_t   *copy_result); /* OUT */

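/*
 * Usage sketch (illustrative, hypothetical function name; error handling
 * reduced to the essentials): a copyin/copyout round trip that moves a
 * region from one map into freshly allocated space in another.
 */
static inline kern_return_t
vm_map_example_transfer(
	vm_map_t        src_map,
	vm_map_address_t src_addr,
	vm_map_size_t   len,
	vm_map_t        dst_map,
	vm_map_address_t *dst_addr)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	kr = vm_map_copyin(src_map, src_addr, len,
	    FALSE /* src_destroy */, &copy);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS) {
		/* copyout consumes the copy only on success */
		vm_map_copy_discard(copy);
	}
	return kr;
}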

extern void vm_map_disable_NX(
	vm_map_t        map);

extern void vm_map_disallow_data_exec(
	vm_map_t        map);

extern void vm_map_set_64bit(
	vm_map_t        map);

extern void vm_map_set_32bit(
	vm_map_t        map);

extern void vm_map_set_jumbo(
	vm_map_t        map);

extern void vm_map_set_jit_entitled(
	vm_map_t        map);

extern void vm_map_set_max_addr(
	vm_map_t        map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
	vm_map_t        map,
	vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

extern void vm_map_set_tpro(
	vm_map_t        map);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return 0;
}
#else
extern boolean_t vm_map_is_64bit(
	vm_map_t        map);
#endif


extern kern_return_t vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t vm_compute_max_offset(
	boolean_t       is64);

extern void vm_map_get_max_aslr_slide_section(
	vm_map_t        map,
	int64_t         *max_sections,
	int64_t         *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
	vm_map_t        map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t        map);

extern kern_return_t vm_map_set_size_limit(
	vm_map_t        map,
	uint64_t        limit);

extern kern_return_t vm_map_set_data_limit(
	vm_map_t        map,
	uint64_t        limit);

extern void vm_map_set_user_wire_limit(
	vm_map_t        map,
	vm_size_t       limit);

extern void vm_map_switch_protect(
	vm_map_t        map,
	boolean_t       val);

extern void vm_map_iokit_mapped_region(
	vm_map_t        map,
	vm_size_t       bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t        map,
	vm_size_t       bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
	vm_map_t        map);

extern vm_map_offset_t vm_map_page_mask(
	vm_map_t        map);

extern int vm_map_page_size(
	vm_map_t        map);

extern vm_map_offset_t vm_map_round_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
	vm_map_offset_t offset,
	vm_map_offset_t mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
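
/*
 * Usage sketch (illustrative, hypothetical function name): validate an
 * externally supplied range before rounding it, since addr + size can wrap
 * and the rounding itself can wrap near the top of the address space.
 */
static inline bool
vm_map_example_round_range(
	mach_vm_offset_t addr,
	mach_vm_size_t  size,
	vm_map_offset_t pgmask,
	vm_map_offset_t *start,
	vm_map_offset_t *end)
{
	if (mach_vm_range_overflows(addr, size)) {
		return false;   /* addr + size wraps: reject before rounding */
	}
	*start = vm_map_trunc_page_mask(addr, pgmask);
	*end = vm_map_round_page_mask(addr + size, pgmask);
	return *end >= *start;  /* catches rounding wrap at the very top */
}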

#ifdef XNU_KERNEL_PRIVATE

/* Support for vm_map ranges */
extern kern_return_t vm_map_range_configure(
	vm_map_t        map);

extern void vm_map_range_fork(
	vm_map_t        new_map,
	vm_map_t        old_map);

__attribute__((__overloadable__))
extern vm_map_range_id_t vm_map_get_user_range_id(
	vm_map_t        map,
	uint16_t        tag);

__attribute__((__overloadable__))
extern vm_map_range_id_t vm_map_get_user_range_id(
	vm_map_t        map,
	mach_vm_offset_t addr,
	mach_vm_size_t  size);

extern int vm_map_get_user_range(
	vm_map_t        map,
	vm_map_range_id_t range_id,
	mach_vm_range_t range);

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t        map,
	vm_map_offset_t offset,
	vm_page_info_flavor_t flavor,
	vm_page_info_t  info,
	mach_msg_type_number_t *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t        map,
	vm_map_offset_t start_offset,
	vm_map_offset_t end_offset,
	int             effective_page_shift,
	vm_page_info_flavor_t flavor,
	vm_page_info_t  info,
	mach_msg_type_number_t *count);

#endif /* XNU_KERNEL_PRIVATE */


#ifdef MACH_KERNEL_PRIVATE


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * help ubsan and codegen in general,
	 * cannot use PAGE_{MIN,MAX}_SHIFT
	 * because of testing code which
	 * tests 16k aligned maps on 4k only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map)           (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map)           (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask)  (((x) & (pgmask)) == 0)
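
/*
 * Worked example (illustrative): per-map page geometry.  For a map with
 * 16 KiB pages, VM_MAP_PAGE_SHIFT() is 14, VM_MAP_PAGE_SIZE() is 0x4000
 * and VM_MAP_PAGE_MASK() is 0x3fff, so:
 *	VM_MAP_ROUND_PAGE(0x4001, 0x3fff)  == 0x8000
 *	VM_MAP_TRUNC_PAGE(0x4001, 0x3fff)  == 0x4000
 *	VM_MAP_PAGE_ALIGNED(0x8000, 0x3fff) is true
 */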

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
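
/*
 * Usage sketch (illustrative, hypothetical function name): despite its
 * name, vm_prot_to_wimg() maps a MAP_MEM_* cache-mode request to a
 * VM_WIMG_* value; it leaves *wimg untouched for MAP_MEM_NOOP and
 * unrecognized values, so seed it with a default first.
 */
static inline unsigned int
vm_example_wimg_for(unsigned int memattr)
{
	unsigned int wimg = VM_WIMG_USE_DEFAULT;

	vm_prot_to_wimg(memattr, &wimg);
	return wimg;
}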

static inline boolean_t
vm_map_always_shadow(vm_map_t map)
{
	if (map->mapped_in_other_pmaps) {
		/*
		 * This is a submap, mapped in other maps.
		 * Even if a VM object is mapped only once in this submap,
		 * the submap itself could be mapped multiple times,
		 * so vm_object_shadow() should always create a shadow
		 * object, even if the object has only 1 reference.
		 */
		return TRUE;
	}
	return FALSE;
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t        target_map,
	vm_map_offset_t map_offset,
	upl_size_t      *size,
	upl_t           *upl,
	upl_page_info_array_t page_info,
	unsigned int    *page_infoCnt,
	upl_control_flags_t *flags,
	vm_tag_t        tag,
	int             force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
	vm_map_t        map,
	unsigned int    *reclaimed_resident,
	unsigned int    *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t        map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t          task,
	unsigned int    *purgeable_count,
	unsigned int    *wired_count,
	unsigned int    *clean_count,
	unsigned int    *dirty_count,
	unsigned int    dirty_budget,
	unsigned int    *shared_count,
	int             *freezer_error_code,
	boolean_t       eval_only);

#define FREEZER_ERROR_GENERIC                  (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY     (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE      (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE            (-5)

#endif

#if XNU_KERNEL_PRIVATE
boolean_t kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);
#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */