/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * File: vm/vm_map.h
 * Author: Avadis Tevanian, Jr., Michael Wayne Young
 * Date: 1985
 *
 * Virtual memory map module definitions.
 *
 * Contributors:
 * avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
    vm_map_t new_map,
    task_t task,
    boolean_t is64bit,
    void *fsroot,
    cpu_type_t cpu,
    cpu_subtype_t cpu_subtype,
    boolean_t reslide,
    boolean_t is_driverkit);

__END_DECLS

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast() (current_thread()->map)
#define current_map() (current_map_fast())

#include <vm/vm_map_store.h>


/*
 * Types defined:
 *
 * vm_map_t the high-level address map data structure.
 * vm_map_entry_t an entry in an address map.
 * vm_map_version_t a timestamp of a map, for use with vm_map_lookup
 * vm_map_copy_t represents memory copied from an address map,
 *  used for inter-map copy operations
 */
typedef struct vm_map_entry *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) NULL)


/*
 * Type: vm_map_object_t [internal use only]
 *
 * Description:
 * The target of an address mapping, either a virtual
 * memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
    vm_object_t vmo_object; /* the VM object */
    vm_map_t vmo_submap; /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock)
#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */

/*
 * Type: vm_named_entry_t [internal use only]
 *
 * Description:
 * Description of a mapping to a memory cache object.
 *
 * Implementation:
 * While the handle to this object is used as a means to map
 * and pass around the right to map regions backed by pagers
 * of all sorts, the named_entry itself is only manipulated
 * by the kernel. Named entries hold information on the
 * right to map a region of a cached object. Namely,
 * the target cache object, the beginning and ending of the
 * region to be mapped, and the permissions (read, write)
 * with which it can be mapped.
 *
 */

struct vm_named_entry {
    decl_lck_mtx_data(, Lock); /* Synchronization */
    union {
        vm_map_t map; /* map backing submap */
        vm_map_copy_t copy; /* a VM map copy */
    } backing;
    vm_object_offset_t offset; /* offset into object */
    vm_object_size_t size; /* size of region */
    vm_object_offset_t data_offset; /* offset to first byte of data */
    vm_prot_t protection; /* access permissions */
    int ref_count; /* Number of references */
    unsigned int /* Is backing.xxx : */
    /* boolean_t */ is_object:1, /* ... a VM object (wrapped in a VM map copy) */
    /* boolean_t */ internal:1, /* ... an internal object */
    /* boolean_t */ is_sub_map:1, /* ... a submap? */
    /* boolean_t */ is_copy:1; /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
    queue_chain_t named_entry_list;
    int named_entry_alias;
    mach_port_t named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
    void *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};

/*
 * Type: vm_map_entry_t [internal use only]
 *
 * Description:
 * A single mapping within an address map.
 *
 * Implementation:
 * Address map entries consist of start and end addresses,
 * a VM object (or sub map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Control information for virtual copy operations is also
 * stored in the address map entry.
 */

struct vm_map_links {
    struct vm_map_entry *prev; /* previous entry */
    struct vm_map_entry *next; /* next entry */
    vm_map_offset_t start; /* start address */
    vm_map_offset_t end; /* end address */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *  For an "iokit_mapped" entry, we add the size of the entry to the
 *  footprint when the entry is entered into the map and we subtract that
 *  size when the entry is removed. No other accounting should take place.
 *  "use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *  This indicates if we should ask the pmap layer to account for pages
 *  in this mapping. If FALSE, we expect that another form of accounting
 *  is being used (e.g. "iokit_mapped" or the explicit accounting of
 *  non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *  anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *  footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *  tell pmap NOT to account for pages being pmap_enter()'d from this
 *  mapping (i.e. use "alternate accounting")
 * else
 *  pmap will account for pages being pmap_enter()'d from this mapping
 *  as it sees fit (only if anonymous, etc...)
 */

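/*
 * Illustrative sketch only (not part of the kernel API): one way the
 * footprint rules above could be expressed in code. The enum and helper
 * name are hypothetical; the fields tested are the real vm_map_entry bits
 * (note that "iokit_mapped" in the comment above corresponds to the
 * entry's "iokit_acct" bit, defined below).
 */
#if 0 /* example only */
typedef enum {
    FOOTPRINT_NONE,      /* entry does not count toward the footprint */
    FOOTPRINT_VIRTUAL,   /* count the entire virtual size of the entry */
    FOOTPRINT_ALTERNATE, /* "alternate accounting": pmap must NOT account */
    FOOTPRINT_PMAP       /* pmap accounts pages as they are pmap_enter()'d */
} footprint_rule_t;

static inline footprint_rule_t
footprint_rule_for_entry(vm_map_entry_t entry)
{
    if (entry->is_sub_map) {
        return FOOTPRINT_NONE;      /* submap contents never count */
    }
    if (entry->iokit_acct) {
        return FOOTPRINT_VIRTUAL;   /* size added on entry, subtracted on removal */
    }
    if (!entry->use_pmap) {
        return FOOTPRINT_ALTERNATE; /* some other accounting is in effect */
    }
    return FOOTPRINT_PMAP;
}
#endif /* example only */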
struct vm_map_entry {
    struct vm_map_links links; /* links to other entries */
#define vme_prev links.prev
#define vme_next links.next
#define vme_start links.start
#define vme_end links.end

    struct vm_map_store store;
    union vm_map_object vme_object; /* object I point to */
    vm_object_offset_t vme_offset; /* offset into object */

    unsigned int
    /* boolean_t */ is_shared:1, /* region is shared */
    /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */
    /* boolean_t */ in_transition:1, /* Entry being changed */
    /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
    /* vm_behavior_t */ behavior:2, /* user paging behavior hint */
    /* behavior is not defined for submap type */
    /* boolean_t */ needs_copy:1, /* object need to be copied? */

    /* Only in task maps: */
    /* vm_prot_t-like */ protection:4, /* protection code, bit3=UEXEC */
    /* vm_prot_t-like */ max_protection:4, /* maximum protection, bit3=UEXEC */
    /* vm_inherit_t */ inheritance:2, /* inheritance */
    /* boolean_t */ use_pmap:1, /*
                                 * use_pmap is overloaded:
                                 * if "is_sub_map":
                                 *  use a nested pmap?
                                 * else (i.e. if object):
                                 *  use pmap accounting
                                 *  for footprint?
                                 */
    /* boolean_t */ no_cache:1, /* new pages should not be cached */
    /* boolean_t */ permanent:1, /* mapping can not be removed */
    /* boolean_t */ superpage_size:1, /* use superpages of a certain size */
    /* boolean_t */ map_aligned:1, /* align to map's page size */
    /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
                                         * this entry if it is being deleted
                                         * without unwiring them */
    /* boolean_t */ used_for_jit:1,
    /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */

    /* iokit accounting: use the virtual size rather than resident size: */
    /* boolean_t */ iokit_acct:1,
    /* boolean_t */ vme_resilient_codesign:1,
    /* boolean_t */ vme_resilient_media:1,
    /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
    /* boolean_t */ vme_no_copy_on_read:1,
    /* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
    /* boolean_t */ __padding:1;

    unsigned short wired_count; /* can be paged if = 0 */
    unsigned short user_wired_count; /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
    struct vm_map_header *vme_creation_maphdr;
    uintptr_t vme_creation_bt[16];
#endif
#if MAP_ENTRY_INSERTION_DEBUG
    vm_map_offset_t vme_start_original;
    vm_map_offset_t vme_end_original;
    uintptr_t vme_insertion_bt[16];
#endif
};

#define VME_SUBMAP_PTR(entry) \
    (&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry) \
    ((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry) \
    (&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry) \
    ((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry) \
    ((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK (FOURK_PAGE_MASK)
#define VME_ALIAS(entry) \
    ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))

static inline void
VME_OBJECT_SET(
    vm_map_entry_t entry,
    vm_object_t object)
{
    entry->vme_object.vmo_object = object;
    if (object != VM_OBJECT_NULL && !object->internal) {
        entry->vme_resilient_media = FALSE;
    }
    entry->vme_resilient_codesign = FALSE;
    entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
    vm_map_entry_t entry,
    vm_map_t submap)
{
    entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
    vm_map_entry_t entry,
    vm_object_offset_t offset)
{
    unsigned int alias;
    alias = VME_ALIAS(entry);
    assert((offset & FOURK_PAGE_MASK) == 0);
    entry->vme_offset = offset | alias;
}
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared". That is OK as long as it is the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
    vm_map_entry_t entry,
    int alias)
{
    vm_object_offset_t offset;
    offset = VME_OFFSET(entry);
    entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}

static inline void
VME_OBJECT_SHADOW(
    vm_map_entry_t entry,
    vm_object_size_t length)
{
    vm_object_t object;
    vm_object_offset_t offset;

    object = VME_OBJECT(entry);
    offset = VME_OFFSET(entry);
    vm_object_shadow(&object, &offset, length);
    if (object != VME_OBJECT(entry)) {
        VME_OBJECT_SET(entry, object);
        entry->use_pmap = TRUE;
    }
    if (offset != VME_OFFSET(entry)) {
        VME_OFFSET_SET(entry, offset);
    }
}

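/*
 * Illustrative note: the accessors above pack two values into the single
 * 64-bit vme_offset field. Because object offsets are at least 4K-aligned,
 * the low bits covered by FOURK_PAGE_MASK are free to hold the entry's
 * "alias" (user tag). A hypothetical walk-through:
 *
 *   VME_OFFSET_SET(entry, 0x20000);   // offset must be 4K-aligned
 *   VME_ALIAS_SET(entry, 5);          // alias stored in the low bits
 *   // entry->vme_offset == 0x20005
 *   // VME_OFFSET(entry) == 0x20000 and VME_ALIAS(entry) == 5
 *
 * This is why VME_OFFSET_SET() asserts (offset & FOURK_PAGE_MASK) == 0:
 * an unaligned offset would corrupt the stored alias, and vice versa.
 */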

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)

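/*
 * Worked example with hypothetical numbers (assuming 4K base pages and
 * SUPERPAGE_NBASEPAGES == 512, i.e. 2MB superpages):
 *
 *   SUPERPAGE_SIZE                 == 0x200000
 *   SUPERPAGE_MASK                 == ~0x1FFFFF (negation of the size)
 *   SUPERPAGE_ROUND_DOWN(0x345678) == 0x200000
 *   SUPERPAGE_ROUND_UP(0x345678)   == 0x400000
 *
 * As with all mask-based rounding, this relies on SUPERPAGE_SIZE being a
 * power of two so that -SUPERPAGE_SIZE is a valid alignment mask.
 */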
/*
 * wired_counts are unsigned short. This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT 65535



/*
 * Type: struct vm_map_header
 *
 * Description:
 * Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
    struct vm_map_links links; /* first, last, min, max */
    int nentries; /* Number of entries */
    boolean_t entries_pageable;
    /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
    struct rb_head rb_head_store;
#endif
    int page_shift; /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 * Type: vm_map_t [exported; contents invisible]
 *
 * Description:
 * An address map -- a directory relating valid
 * regions of a task's address space to the corresponding
 * virtual memory objects.
 *
 * Implementation:
 * Maps are doubly-linked lists of map entries, sorted
 * by address. One hint is used to start
 * searches again from the last successful search,
 * insertion, or removal. Another hint is used to
 * quickly find free space.
 */
struct _vm_map {
    lck_rw_t lock; /* map lock */
    struct vm_map_header hdr; /* Map entry header */
#define min_offset hdr.links.start /* start of range */
#define max_offset hdr.links.end /* end of range */
    pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
    vm_map_size_t size; /* virtual size */
    uint64_t size_limit; /* rlimit on address space size */
    uint64_t data_limit; /* rlimit on data size */
    vm_map_size_t user_wire_limit; /* rlimit on user locked memory */
    vm_map_size_t user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
    vm_map_offset_t vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

    union {
        /*
         * If map->disable_vmentry_reuse == TRUE:
         * the end address of the highest allocated vm_map_entry_t.
         */
        vm_map_offset_t vmu1_highest_entry_end;
        /*
         * For a nested VM map:
         * the lowest address in this nested VM map that we would
         * expect to be unnested under normal operation (i.e. for
         * regular copy-on-write on DATA section).
         */
        vm_map_offset_t vmu1_lowest_unnestable_start;
    } vmu1;
#define highest_entry_end vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
    decl_lck_mtx_data(, s_lock); /* Lock ref, res fields */
    lck_mtx_ext_t s_lock_ext;
    vm_map_entry_t hint; /* hint for quick lookups */
    union {
        struct vm_map_links* vmmap_hole_hint; /* hint for quick hole lookups */
        struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
    } vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
    union {
        vm_map_entry_t _first_free; /* First free space hint */
        struct vm_map_links* _holes; /* links all holes between entries */
    } f_s; /* Union for free space data structures being used */

#define first_free f_s._first_free
#define holes_list f_s._holes

    struct os_refcnt map_refcnt; /* Reference count */

    unsigned int
    /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */
    /* boolean_t */ wiring_required:1, /* All memory wired? */
    /* boolean_t */ no_zero_fill:1, /* No zero fill absent pages */
    /* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
    /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */
    /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
    /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
    /* boolean_t */ holelistenabled:1,
    /* boolean_t */ is_nested_map:1,
    /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
    /* boolean_t */ jit_entry_exists:1,
    /* boolean_t */ has_corpse_footprint:1,
    /* boolean_t */ terminated:1,
    /* boolean_t */ is_alien:1, /* for platform simulation, i.e. PLATFORM_IOS on OSX */
    /* boolean_t */ cs_enforcement:1, /* code-signing enforcement */
    /* boolean_t */ cs_debugged:1, /* code-signed but debugged */
    /* boolean_t */ reserved_regions:1, /* has reserved regions. The map size that userspace sees should ignore these. */
    /* boolean_t */ single_jit:1, /* only allow one JIT mapping */
    /* reserved */ pad:14;
    unsigned int timestamp; /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map) ((map)->hdr.links.prev)

/*
 * Type: vm_map_version_t [exported; contents invisible]
 *
 * Description:
 * Map versions may be used to quickly validate a previous
 * lookup operation.
 *
 * Usage note:
 * Because they are bulky objects, map versions are usually
 * passed by reference.
 *
 * Implementation:
 * Just a timestamp for the main map.
 */
typedef struct vm_map_version {
    unsigned int main_timestamp;
} vm_map_version_t;

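/*
 * Illustrative sketch only: the intended use of a map version is to
 * snapshot the timestamp under the map lock, drop the lock to do slow
 * work, then revalidate with vm_map_verify() (declared below) before
 * trusting the earlier lookup. "slow_work" is a hypothetical placeholder.
 *
 *   vm_map_version_t version;
 *
 *   vm_map_lock_read(map);
 *   version.main_timestamp = map->timestamp;
 *   ... lookup ...
 *   vm_map_unlock_read(map);
 *
 *   slow_work();
 *
 *   vm_map_lock_read(map);
 *   if (!vm_map_verify(map, &version)) {
 *       ... the map changed: redo the lookup ...
 *   }
 *   vm_map_unlock_read(map);
 */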
/*
 * Type: vm_map_copy_t [exported; contents invisible]
 *
 * Description:
 * A map copy object represents a region of virtual memory
 * that has been copied from an address map but is still
 * in transit.
 *
 * A map copy object may only be used by a single thread
 * at a time.
 *
 * Implementation:
 * There are three formats for map copy objects.
 * The first is very similar to the main
 * address map in structure, and as a result, some
 * of the internal maintenance functions/macros can
 * be used with either address maps or map copy objects.
 *
 * The map copy object contains a header "links"
 * entry onto which the other entries that represent
 * the region are chained.
 *
 * The second format is a single vm object. This was used
 * primarily in the pageout path - but is not currently used
 * except for placeholder copy objects (see vm_map_copy_copy()).
 *
 * The third format is a kernel buffer copy object - for data
 * small enough that physical copies were the most efficient
 * method. This method uses a zero-sized array unioned with
 * other format-specific data in the 'c_u' member. This unsized
 * array overlaps the other elements and allows us to use this
 * extra structure space for physical memory copies. On 64-bit
 * systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
    int type;
#define VM_MAP_COPY_ENTRY_LIST 1
#define VM_MAP_COPY_OBJECT 2
#define VM_MAP_COPY_KERNEL_BUFFER 3
    vm_object_offset_t offset;
    vm_map_size_t size;
    union {
        struct vm_map_header hdr; /* ENTRY_LIST */
        vm_object_t object; /* OBJECT */
        void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
    } c_u;
};


#define cpy_hdr c_u.hdr

#define cpy_object c_u.object
#define cpy_kdata c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
    ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
    ((copy)->cpy_hdr.links.prev)

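/*
 * Illustrative sketch only: walking the entries of an ENTRY_LIST-format
 * copy object with the macros above. The entry list is circular, with
 * vm_map_copy_to_entry() acting as the sentinel, so the loop below uses
 * the conventional termination test. The helper name is hypothetical.
 */
#if 0 /* example only */
static inline vm_map_size_t
vm_map_copy_entry_list_size(vm_map_copy_t copy)
{
    vm_map_entry_t entry;
    vm_map_size_t total = 0;

    assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
    for (entry = vm_map_copy_first_entry(copy);
        entry != vm_map_copy_to_entry(copy);
        entry = entry->vme_next) {
        total += entry->vme_end - entry->vme_start;
    }
    return total;
}
#endif /* example only */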
extern kern_return_t
vm_map_copy_adjust_to_target(
    vm_map_copy_t copy_map,
    vm_map_offset_t offset,
    vm_map_size_t size,
    vm_map_t target_map,
    boolean_t copy,
    vm_map_copy_t *target_copy_map_p,
    vm_map_offset_t *overmap_start_p,
    vm_map_offset_t *overmap_end_p,
    vm_map_offset_t *trimmed_start_p);

/*
 * Macros: vm_map_lock, etc. [internal use only]
 * Description:
 * Perform locking on the data portion of a map.
 * When multiple maps are to be locked, order by map address.
 * (See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map) \
    ((map)->timestamp = 0, \
    lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_w); \
    lck_rw_lock_exclusive(&(map)->lock); \
    MACRO_END

#define vm_map_unlock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_w); \
    (map)->timestamp++; \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_r); \
    lck_rw_lock_shared(&(map)->lock); \
    MACRO_END

#define vm_map_unlock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_r); \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_write_to_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_downgrade); \
    (map)->timestamp++; \
    lck_rw_lock_exclusive_to_shared(&(map)->lock); \
    MACRO_END

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

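/*
 * Illustrative sketch only: a common pattern with the locking macros
 * above. vm_map_lock_read_to_write() returns 0 on a successful upgrade;
 * on failure the read lock has already been dropped, so the caller must
 * retake the lock and revalidate (the timestamp only advances on
 * exclusive unlock/downgrade). The function name is hypothetical.
 */
#if 0 /* example only */
static void
example_upgrade_pattern(vm_map_t map)
{
    vm_map_lock_read(map);
    /* ... read-only inspection of the map ... */
    if (vm_map_lock_read_to_write(map) != 0) {
        /* upgrade failed: the lock was released, state may have changed */
        vm_map_lock(map);
        /* ... revalidate whatever was observed under the read lock ... */
    }
    /* exclusive here: safe to modify entries */
    vm_map_unlock(map);
}
#endif /* example only */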
/*
 * Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void vm_map_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
    vm_map_t map,
    vm_map_address_t *address, /* OUT */
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_map_entry_t *o_entry); /* OUT */

/* flags for vm_map_find_space */
#define VM_MAP_FIND_LAST_FREE 0x01

extern void vm_map_clip_start(
    vm_map_t map,
    vm_map_entry_t entry,
    vm_map_offset_t endaddr);
extern void vm_map_clip_end(
    vm_map_t map,
    vm_map_entry_t entry,
    vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
    vm_map_entry_t entry);
/* Look up the map entry containing, or immediately preceding, the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_entry_t *entry); /* OUT */

extern void vm_map_copy_remap(
    vm_map_t map,
    vm_map_entry_t where,
    vm_map_copy_t copy,
    vm_map_offset_t adjustment,
    vm_prot_t cur_prot,
    vm_prot_t max_prot,
    vm_inherit_t inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
    vm_map_t *var_map, /* IN/OUT */
    vm_map_address_t vaddr,
    vm_prot_t fault_type,
    int object_lock_type,
    vm_map_version_t *out_version, /* OUT */
    vm_object_t *object, /* OUT */
    vm_object_offset_t *offset, /* OUT */
    vm_prot_t *out_prot, /* OUT */
    boolean_t *wired, /* OUT */
    vm_object_fault_info_t fault_info, /* OUT */
    vm_map_t *real_map, /* OUT */
    bool *contended); /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
    vm_map_t map,
    vm_map_version_t *version); /* REF */

extern vm_map_entry_t vm_map_entry_insert(
    vm_map_t map,
    vm_map_entry_t insp_entry,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_t object,
    vm_object_offset_t offset,
    vm_map_kernel_flags_t vmk_flags,
    boolean_t needs_copy,
    boolean_t is_shared,
    boolean_t in_transition,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_behavior_t behavior,
    vm_inherit_t inheritance,
    unsigned short wired_count,
    boolean_t no_cache,
    boolean_t permanent,
    boolean_t no_copy_on_read,
    unsigned int superpage_size,
    boolean_t clear_map_aligned,
    boolean_t is_submap,
    boolean_t used_for_jit,
    int alias,
    boolean_t translated_allow_execute);


/*
 * Functions implemented as macros
 */
#define vm_map_min(map) ((map)->min_offset)
/* Lowest valid address in
 * a map */

#define vm_map_max(map) ((map)->max_offset)
/* Highest valid address */

#define vm_map_pmap(map) ((map)->pmap)
/* Physical map associated
 * with this address map */

/* Gain a reference to an existing map */
extern void vm_map_reference(
    vm_map_t map);

/*
 * Submap object. Must be used to create memory to be put
 * in a submap by vm_map_submap.
 */
extern vm_object_t vm_submap_object;

/*
 * Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible) \
    ((map)->timestamp++, \
    lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
    (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map) \
    thread_wakeup((event_t)(&(map)->hdr))

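/*
 * Illustrative sketch only: the conventional wait loop for an entry that
 * is "in_transition". The waiter marks the entry, sleeps on the map (the
 * lock is dropped and retaken by vm_map_entry_wait()), then must look the
 * entry up again because it may have been clipped, coalesced, or freed
 * while the lock was released. "addr" and the function are hypothetical.
 */
#if 0 /* example only */
static void
example_wait_for_entry(vm_map_t map, vm_map_offset_t addr)
{
    vm_map_entry_t entry;

    vm_map_lock(map);
    while (vm_map_lookup_entry(map, addr, &entry) &&
        entry->in_transition) {
        entry->needs_wakeup = TRUE;
        vm_map_entry_wait(map, THREAD_UNINT);
        /* map lock is held again here, but "entry" is stale */
    }
    /* ... operate on the re-looked-up entry ... */
    vm_map_unlock(map);
}
#endif /* example only */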

/* simplify map entries */
extern void vm_map_simplify_entry(
    vm_map_t map,
    vm_map_entry_t this_entry);
extern void vm_map_simplify(
    vm_map_t map,
    vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
    vm_map_copy_t copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_map_copy_t *copy_result); /* OUT */

extern kern_return_t vm_map_random_address_for_size(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size);

/* Enter a mapping */
extern kern_return_t vm_map_enter(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
    vm_map_t map,
    vm_map_address_t *addr,
    vm_map_size_t size,
    int flags);

extern kern_return_t vm_map_remap(
    vm_map_t target_map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    vm_map_t src_map,
    vm_map_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
    vm_map_t map,
    void *src_p,
    vm_map_offset_t dst_addr,
    vm_size_t size);

extern kern_return_t vm_map_read_user(
    vm_map_t map,
    vm_map_offset_t src_addr,
    void *dst_p,
    vm_size_t size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
    ledger_t ledger,
    vm_map_t old_map,
    int options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT 0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_inherit_t new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_machine_attribute_t attribute,
    vm_machine_attribute_val_t* value); /* IN/OUT */

extern kern_return_t vm_map_msync(
    vm_map_t map,
    vm_map_address_t address,
    vm_map_size_t size,
    vm_sync_t sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_behavior_t new_behavior);

extern kern_return_t vm_map_region(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t *size,
    vm_region_flavor_t flavor,
    vm_region_info_t info,
    mach_msg_type_number_t *count,
    mach_port_t *object_name);

extern kern_return_t vm_map_region_recurse_64(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t *size,
    natural_t *nesting_depth,
    vm_region_submap_info_64_t info,
    mach_msg_type_number_t *count);

extern kern_return_t vm_map_page_query_internal(
    vm_map_t map,
    vm_map_offset_t offset,
    int *disposition,
    int *ref_count);

extern kern_return_t vm_map_query_volatile(
    vm_map_t map,
    mach_vm_size_t *volatile_virtual_size_p,
    mach_vm_size_t *volatile_resident_size_p,
    mach_vm_size_t *volatile_compressed_size_p,
    mach_vm_size_t *volatile_pmap_size_p,
    mach_vm_size_t *volatile_compressed_pmap_size_p);

extern kern_return_t vm_map_submap(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_map_t submap,
    vm_map_offset_t offset,
    boolean_t use_pmap);

extern void vm_map_submap_pmap_clean(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_map_t sub_map,
    vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
    ipc_port_t port);


extern kern_return_t vm_map_set_cache_attr(
    vm_map_t map,
    vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32 0x1
#define VM_ABI_64 0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);


extern void vm_map_region_top_walk(
    vm_map_entry_t entry,
    vm_region_top_info_t top);
extern void vm_map_region_walk(
    vm_map_t map,
    vm_map_offset_t va,
    vm_map_entry_t entry,
    vm_object_offset_t offset,
    vm_object_size_t range,
    vm_region_extended_info_t extended,
    boolean_t look_for_pages,
    mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
    task_t old_task,
    task_t new_task);
extern void vm_map_copy_ledger(
    task_t old_task,
    task_t new_task,
    int ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
    char *vmrr_name;
    vm_map_offset_t vmrr_addr;
    vm_map_size_t vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
    bool vm_is64bit,
    struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t vm_map_create(
    pmap_t pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    boolean_t pageable);
extern vm_map_t vm_map_create_options(
    pmap_t pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    int options);
#define VM_MAP_CREATE_PAGEABLE 0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
    VM_MAP_CREATE_CORPSE_FOOTPRINT)

extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
    vm_map_t map,
    int flags);

/* Lose a reference */
extern void vm_map_deallocate(
    vm_map_t map);

/* Lose a reference */
extern void vm_map_inspect_deallocate(
    vm_map_inspect_t map);

/* Lose a reference */
extern void vm_map_read_deallocate(
    vm_map_read_t map);

extern vm_map_t vm_map_switch(
    vm_map_t map);

/* Change protection */
extern kern_return_t vm_map_protect(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t new_prot,
    boolean_t set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t protection);

extern boolean_t vm_map_cs_enforcement(
    vm_map_t map);
extern void vm_map_cs_enforcement_set(
    vm_map_t map,
    boolean_t val);

extern void vm_map_cs_debugged_set(
    vm_map_t map,
    boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_wire_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    vm_tag_t tag,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    vm_tag_t tag,
    boolean_t user_wire,
    ppnum_t *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    boolean_t user_wire,
    ppnum_t *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t access_type,
    boolean_t user_wire,
    ppnum_t *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_object_offset_t offset,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    upl_page_list_ptr_t page_list,
    unsigned int page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
    vm_map_t map,
    vm_map_offset_t *address,
    vm_map_size_t size,
    vm_map_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    memory_object_control_t control,
    vm_object_offset_t offset,
    boolean_t needs_copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_terminate(
    vm_map_t map);

extern void vm_map_require(
    vm_map_t map);

#endif /* XNU_KERNEL_PRIVATE */

/* Deallocate a region */
extern kern_return_t vm_map_remove(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t vm_map_remove_locked(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t flags);

/* Discard a copy without using it */
extern void vm_map_copy_discard(
    vm_map_copy_t copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
    vm_map_t dst_map,
    vm_map_address_t dst_addr,
    vm_map_copy_t copy,
    vm_map_size_t copy_size,
    boolean_t interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES (3)

/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
    vm_map_t dst_map,
    vm_map_copy_t copy,
    vm_map_size_t *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy);

extern kern_return_t vm_map_copyout_size(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy,
    vm_map_size_t copy_size);

extern kern_return_t vm_map_copyout_internal(
    vm_map_t dst_map,
    vm_map_address_t *dst_addr, /* OUT */
    vm_map_copy_t copy,
    vm_map_size_t copy_size,
    boolean_t consume_on_success,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_copyin(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t src_destroy,
    vm_map_copy_t *copy_result); /* OUT */

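/*
 * Illustrative sketch only: the classic inter-map copy sequence using the
 * declarations above. vm_map_copyin() captures a region of src_map into a
 * transient vm_map_copy_t; vm_map_copyout() consumes it into dst_map at a
 * kernel-chosen address. On copyout failure the copy is not consumed, so
 * the caller must discard it. All names here are hypothetical.
 */
#if 0 /* example only */
static kern_return_t
example_transfer(vm_map_t src_map, vm_map_address_t src_addr,
    vm_map_size_t len, vm_map_t dst_map, vm_map_address_t *dst_addr)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    kr = vm_map_copyin(src_map, src_addr, len,
        FALSE /* src_destroy */, &copy);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    kr = vm_map_copyout(dst_map, dst_addr, copy);
    if (kr != KERN_SUCCESS) {
        vm_map_copy_discard(copy);
    }
    return kr;
}
#endif /* example only */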
extern kern_return_t vm_map_copyin_common(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t src_destroy,
    boolean_t src_volatile,
    vm_map_copy_t *copy_result, /* OUT */
    boolean_t use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY 0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT 0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST 0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS 0x0000000F
extern kern_return_t vm_map_copyin_internal(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    int flags,
    vm_map_copy_t *copy_result); /* OUT */

extern kern_return_t vm_map_copy_extract(
    vm_map_t src_map,
    vm_map_address_t src_addr,
    vm_map_size_t len,
    boolean_t copy,
    vm_map_copy_t *copy_result, /* OUT */
    vm_prot_t *cur_prot, /* OUT */
    vm_prot_t *max_prot, /* OUT */
    vm_inherit_t inheritance,
    vm_map_kernel_flags_t vmk_flags);


extern void vm_map_disable_NX(
    vm_map_t map);

extern void vm_map_disallow_data_exec(
    vm_map_t map);

extern void vm_map_set_64bit(
    vm_map_t map);

extern void vm_map_set_32bit(
    vm_map_t map);

extern void vm_map_set_jumbo(
    vm_map_t map);

extern void vm_map_set_jit_entitled(
    vm_map_t map);

extern void vm_map_set_max_addr(
    vm_map_t map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
    vm_map_t map,
    vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
    return 0;
}
#else
extern boolean_t vm_map_is_64bit(
    vm_map_t map);
#endif


extern kern_return_t vm_map_raise_max_offset(
    vm_map_t map,
    vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
    vm_map_t map,
    vm_map_offset_t new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
    vm_map_t map,
    vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t vm_compute_max_offset(
    boolean_t is64);

extern void vm_map_get_max_aslr_slide_section(
    vm_map_t map,
    int64_t *max_sections,
    int64_t *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
    vm_map_t map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
    vm_map_t map);

extern kern_return_t vm_map_set_size_limit(
    vm_map_t map,
    uint64_t limit);

extern kern_return_t vm_map_set_data_limit(
    vm_map_t map,
    uint64_t limit);

extern void vm_map_set_user_wire_limit(
    vm_map_t map,
    vm_size_t limit);

extern void vm_map_switch_protect(
    vm_map_t map,
    boolean_t val);

extern void vm_map_iokit_mapped_region(
    vm_map_t map,
    vm_size_t bytes);

extern void vm_map_iokit_unmapped_region(
    vm_map_t map,
    vm_size_t bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
    vm_map_t map);

extern vm_map_offset_t vm_map_page_mask(
    vm_map_t map);

extern int vm_map_page_size(
    vm_map_t map);

extern vm_map_offset_t vm_map_round_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
    vm_map_offset_t sum;
    return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
    mach_vm_offset_t sum;
    return os_add_overflow(addr, size, &sum);
}

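/*
 * Illustrative sketch only: validating a user-supplied (address, size)
 * pair with the helpers above before doing any page rounding. Checking
 * for wrap-around first matters because rounding an almost-wrapping range
 * can silently produce a small (and wrong) end address. The function name
 * is hypothetical.
 */
#if 0 /* example only */
static inline kern_return_t
example_validate_range(vm_map_t map, vm_map_offset_t addr, vm_map_size_t size)
{
    vm_map_offset_t start, end;

    if (vm_map_range_overflows(addr, size)) {
        return KERN_INVALID_ARGUMENT; /* addr + size wraps around */
    }
    start = vm_map_trunc_page_mask(addr, vm_map_page_mask(map));
    end = vm_map_round_page_mask(addr + size, vm_map_page_mask(map));
    /* rounding the end address may itself wrap past the address space */
    return (end >= start) ? KERN_SUCCESS : KERN_INVALID_ARGUMENT;
}
#endif /* example only */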
#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
    vm_map_t map,
    vm_map_offset_t offset,
    vm_page_info_flavor_t flavor,
    vm_page_info_t info,
    mach_msg_type_number_t *count);
extern kern_return_t vm_map_page_range_info_internal(
    vm_map_t map,
    vm_map_offset_t start_offset,
    vm_map_offset_t end_offset,
    int effective_page_shift,
    vm_page_info_flavor_t flavor,
    vm_page_info_t info,
    mach_msg_type_number_t *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef MACH_KERNEL_PRIVATE

/*
 * Macros to invoke vm_map_copyin_common. vm_map_copyin is the
 * usual form; it performs the copyin based on the current protection,
 * so a region whose current protection is VM_PROT_NONE fails.
 * vm_map_copyin_maxprot handles a copyin based on maximum possible
 * access. The difference is that a region with no current access
 * BUT possible maximum access is rejected by vm_map_copyin(), but
 * returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
    FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
    src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
    FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)

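/*
 * Worked example (hypothetical values): for a map with 16K pages,
 * VM_MAP_PAGE_SHIFT(map) == 14, so:
 *
 *   VM_MAP_PAGE_SIZE(map)               == 0x4000
 *   VM_MAP_PAGE_MASK(map)               == 0x3FFF
 *   VM_MAP_ROUND_PAGE(0x5001, 0x3FFF)   == 0x8000
 *   VM_MAP_TRUNC_PAGE(0x5001, 0x3FFF)   == 0x4000
 *   VM_MAP_PAGE_ALIGNED(0x8000, 0x3FFF) is true
 *
 * Passing VM_MAP_NULL to VM_MAP_PAGE_SHIFT() falls back to the kernel's
 * PAGE_SHIFT, which is why the macro tolerates a NULL map.
 */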
static inline bool
VM_MAP_IS_EXOTIC(
    vm_map_t map __unused)
{
#if __arm64__
    if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
        pmap_is_exotic(map->pmap)) {
        return true;
    }
#endif /* __arm64__ */
    return false;
}

static inline bool
VM_MAP_IS_ALIEN(
    vm_map_t map __unused)
{
    /*
     * An "alien" process/task/map/pmap should mostly behave
     * as it currently would on iOS.
     */
#if XNU_TARGET_OS_OSX
    if (map->is_alien) {
        return true;
    }
    return false;
#else /* XNU_TARGET_OS_OSX */
    return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return true;
    }
    return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
    vm_map_t map)
{
    return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
    vm_map_t map __unused)
{
#if __x86_64__
    return true;
#else /* __x86_64__ */
    if (VM_MAP_IS_EXOTIC(map)) {
        return true;
    }
    return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
    switch (prot) {
    case MAP_MEM_NOOP: break;
    case MAP_MEM_IO: *wimg = VM_WIMG_IO; break;
    case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break;
    case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break;
    case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break;
    case MAP_MEM_POSTED_REORDERED: *wimg = VM_WIMG_POSTED_REORDERED; break;
    case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
    case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break;
    case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break;
    case MAP_MEM_RT: *wimg = VM_WIMG_RT; break;
    default: break;
    }
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS 0x0
#define VM_MAP_REMOVE_KUNWIRE 0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE 0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES 0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP 0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN 0x20
#define VM_MAP_REMOVE_NO_UNNESTING 0x40
#define VM_MAP_REMOVE_IMMUTABLE 0x80
#define VM_MAP_REMOVE_GAPS_OK 0x100

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
    vm_map_t target_map,
    vm_map_offset_t map_offset,
    upl_size_t *size,
    upl_t *upl,
    upl_page_info_array_t page_info,
    unsigned int *page_infoCnt,
    upl_control_flags_t *flags,
    vm_tag_t tag,
    int force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
    vm_map_t map,
    unsigned int *reclaimed_resident,
    unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
    vm_map_t map,
    boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
    task_t task,
    unsigned int *purgeable_count,
    unsigned int *wired_count,
    unsigned int *clean_count,
    unsigned int *dirty_count,
    unsigned int dirty_budget,
    unsigned int *shared_count,
    int *freezer_error_code,
    boolean_t eval_only);

#define FREEZER_ERROR_GENERIC (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE (-5)

#endif

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */