/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
    vm_map_t        new_map,
    task_t          task,
    boolean_t       is64bit,
    void            *fsroot,
    cpu_type_t      cpu,
    cpu_subtype_t   cpu_subtype,
    boolean_t       reslide,
    boolean_t       is_driverkit);

__END_DECLS

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()  (current_thread()->map)
#define current_map()       (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL   ((vm_map_entry_t) NULL)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
    vm_object_t vmo_object; /* a VM object */
    vm_map_t    vmo_submap; /* belongs to another map */
} vm_map_object_t;

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
    union {
        vm_map_t        map;    /* map backing submap */
        vm_map_copy_t   copy;   /* a VM map copy */
    } backing;
    vm_object_offset_t  offset;         /* offset into object */
    vm_object_size_t    size;           /* size of region */
    vm_object_offset_t  data_offset;    /* offset to first byte of data */
    unsigned int                        /* Is backing.xxx : */
    /* vm_prot_t */ protection:4,       /* access permissions */
    /* boolean_t */ is_object:1,        /* ... a VM object (wrapped in a VM map copy) */
    /* boolean_t */ internal:1,         /* ... an internal object */
    /* boolean_t */ is_sub_map:1,       /* ... a submap? */
    /* boolean_t */ is_copy:1;          /* ... a VM map copy */
#if VM_NAMED_ENTRY_DEBUG
    uint32_t            named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
    struct vm_map_entry *prev;  /* previous entry */
    struct vm_map_entry *next;  /* next entry */
    vm_map_offset_t     start;  /* start address */
    vm_map_offset_t     end;    /* end address */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS    (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS    (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
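
/*
 * Illustrative sketch (not compiled): the footprint decision logic above,
 * folded into a single hypothetical helper.  The helper name and the bool
 * return are inventions for exposition only; the real accounting is spread
 * across the VM and pmap layers.
 */
#if 0 /* exposition only */
static inline bool
vme_counts_for_footprint(vm_map_entry_t entry)
{
    if (entry->is_sub_map) {
        return false;   /* submap contents never count */
    }
    if (entry->iokit_mapped) {
        return true;    /* entire virtual size counts */
    }
    if (!entry->use_pmap) {
        return false;   /* "alternate accounting" is in effect */
    }
    return true;        /* pmap accounts for pmap_enter()'d pages */
}
#endif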

struct vm_map_entry {
    struct vm_map_links links;  /* links to other entries */
#define vme_prev    links.prev
#define vme_next    links.next
#define vme_start   links.start
#define vme_end     links.end

    struct vm_map_store store;
    union vm_map_object vme_object; /* object I point to */
    vm_object_offset_t  vme_offset; /* offset into object */

    unsigned int
    /* boolean_t */ is_shared:1,        /* region is shared */
    /* boolean_t */ is_sub_map:1,       /* Is "object" a submap? */
    /* boolean_t */ in_transition:1,    /* Entry being changed */
    /* boolean_t */ needs_wakeup:1,     /* Waiters on in_transition */
    /* vm_behavior_t */ behavior:2,     /* user paging behavior hint */
    /* behavior is not defined for submap type */
    /* boolean_t */ needs_copy:1,       /* object need to be copied? */

    /* Only in task maps: */
    /* vm_prot_t-like */ protection:4,      /* protection code, bit3=UEXEC */
    /* vm_prot_t-like */ max_protection:4,  /* maximum protection, bit3=UEXEC */
    /* vm_inherit_t */ inheritance:2,       /* inheritance */
    /* boolean_t */ use_pmap:1,         /*
                                         * use_pmap is overloaded:
                                         * if "is_sub_map":
                                         *	use a nested pmap?
                                         * else (i.e. if object):
                                         *	use pmap accounting
                                         *	for footprint?
                                         */
    /* boolean_t */ no_cache:1,         /* should new pages be cached? */
    /* boolean_t */ permanent:1,        /* mapping cannot be removed */
    /* boolean_t */ superpage_size:1,   /* use superpages of a certain size */
    /* boolean_t */ map_aligned:1,      /* align to map's page size */
    /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
                                         * this entry if it is being deleted
                                         * without unwiring them */
    /* boolean_t */ used_for_jit:1,
    /* boolean_t */ pmap_cs_associated:1,   /* pmap_cs will validate */

    /* iokit accounting: use the virtual size rather than resident size: */
    /* boolean_t */ iokit_acct:1,
    /* boolean_t */ vme_resilient_codesign:1,
    /* boolean_t */ vme_resilient_media:1,
    /* boolean_t */ vme_atomic:1,           /* entry cannot be split/coalesced */
    /* boolean_t */ vme_no_copy_on_read:1,
    /* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
    /* boolean_t */ vme_kernel_object:1;    /* vme_object is kernel_object */

    unsigned short  wired_count;        /* can be paged if = 0 */
    unsigned short  user_wired_count;   /* for vm_wire */
#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if MAP_ENTRY_CREATION_DEBUG
    struct vm_map_header    *vme_creation_maphdr;
    uint32_t                vme_creation_bt;    /* btref_t */
#endif
#if MAP_ENTRY_INSERTION_DEBUG
    uint32_t                vme_insertion_bt;   /* btref_t */
    vm_map_offset_t         vme_start_original;
    vm_map_offset_t         vme_end_original;
#endif
};

#define VME_SUBMAP_PTR(entry) \
    (&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry) \
    ((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT(entry) \
    ((entry)->vme_kernel_object ? \
    kernel_object : \
    ((entry)->vme_object.vmo_object))
#define VME_OFFSET(entry) \
    ((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK  (FOURK_PAGE_MASK)
#define VME_ALIAS(entry) \
    ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))

static inline void
VME_OBJECT_SET(
    vm_map_entry_t entry,
    vm_object_t object)
{
    if (object == kernel_object) {
        entry->vme_kernel_object = TRUE;
        entry->vme_object.vmo_object = VM_OBJECT_NULL;
    } else {
        entry->vme_kernel_object = FALSE;
        entry->vme_object.vmo_object = object;
    }
    if (object != VM_OBJECT_NULL && !object->internal) {
        entry->vme_resilient_media = FALSE;
    }
    entry->vme_resilient_codesign = FALSE;
    entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
    vm_map_entry_t entry,
    vm_map_t submap)
{
    entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
    vm_map_entry_t entry,
    vm_object_offset_t offset)
{
    unsigned int alias;
    alias = VME_ALIAS(entry);
    assert((offset & FOURK_PAGE_MASK) == 0);
    entry->vme_offset = offset | alias;
}
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
    vm_map_entry_t entry,
    int alias)
{
    vm_object_offset_t offset;
    offset = VME_OFFSET(entry);
    entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}
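
/*
 * Illustrative sketch (not compiled): VME_OFFSET() and VME_ALIAS() share the
 * single vme_offset field; the offset lives in the bits above FOURK_PAGE_MASK
 * and the alias (user tag) in the bits below, so each setter above preserves
 * the other value.
 */
#if 0 /* exposition only */
    VME_OFFSET_SET(entry, 0x10000);         /* must be FOURK-page aligned */
    VME_ALIAS_SET(entry, 5);
    assert(VME_OFFSET(entry) == 0x10000);   /* untouched by VME_ALIAS_SET */
    assert(VME_ALIAS(entry) == 5);          /* untouched by VME_OFFSET_SET */
#endif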

static inline void
VME_OBJECT_SHADOW(
    vm_map_entry_t entry,
    vm_object_size_t length)
{
    vm_object_t object;
    vm_object_offset_t offset;

    object = VME_OBJECT(entry);
    offset = VME_OFFSET(entry);
    vm_object_shadow(&object, &offset, length);
    if (object != VME_OBJECT(entry)) {
        VME_OBJECT_SET(entry, object);
        entry->use_pmap = TRUE;
    }
    if (offset != VME_OFFSET(entry)) {
        VME_OFFSET_SET(entry, offset);
    }
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
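
/*
 * Worked example (assuming 4K base pages and SUPERPAGE_NBASEPAGES == 512,
 * as on x86_64): SUPERPAGE_SIZE is 2MB (0x200000), so
 * SUPERPAGE_ROUND_DOWN(0x234567) yields 0x200000 and
 * SUPERPAGE_ROUND_UP(0x234567) yields 0x400000.
 */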

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT  65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
    struct vm_map_links links;          /* first, last, min, max */
    int                 nentries;       /* Number of entries */
    uint16_t            page_shift;     /* page shift */
    unsigned int
    /* boolean_t */ entries_pageable : 1,   /* are map entries pageable? */
    /* reserved */  __padding : 15;
#ifdef VM_MAP_STORE_USE_RB
    struct rb_head  rb_head_store;
#endif
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
    lck_rw_t                lock;   /* map lock */
    struct vm_map_header    hdr;    /* Map entry header */
#define min_offset  hdr.links.start /* start of range */
#define max_offset  hdr.links.end   /* end of range */
    pmap_t          XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
    vm_map_size_t   size;           /* virtual size */
    uint64_t        size_limit;     /* rlimit on address space size */
    uint64_t        data_limit;     /* rlimit on data size */
    vm_map_size_t   user_wire_limit;/* rlimit on user locked memory */
    vm_map_size_t   user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
    vm_map_offset_t vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

    union {
        /*
         * If map->disable_vmentry_reuse == TRUE:
         * the end address of the highest allocated vm_map_entry_t.
         */
        vm_map_offset_t vmu1_highest_entry_end;
        /*
         * For a nested VM map:
         * the lowest address in this nested VM map that we would
         * expect to be unnested under normal operation (i.e. for
         * regular copy-on-write on DATA section).
         */
        vm_map_offset_t vmu1_lowest_unnestable_start;
    } vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
    vm_map_entry_t  hint;           /* hint for quick lookups */
    union {
        struct vm_map_links *vmmap_hole_hint;   /* hint for quick hole lookups */
        struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
    } vmmap_u_1;
#define hole_hint               vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint  vmmap_u_1.vmmap_corpse_footprint
    union {
        vm_map_entry_t      _first_free;    /* First free space hint */
        struct vm_map_links *_holes;        /* links all holes between entries */
    } f_s;              /* Union for free space data structures being used */

#define first_free  f_s._first_free
#define holes_list  f_s._holes

    os_ref_atomic_t map_refcnt;     /* Reference count */

    unsigned int
    /* boolean_t */ wait_for_space:1,        /* Should callers wait for space? */
    /* boolean_t */ wiring_required:1,       /* All memory wired? */
    /* boolean_t */ no_zero_fill:1,          /* No zero fill absent pages */
    /* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
    /* boolean_t */ switch_protect:1,        /* Protect map from write faults while switched */
    /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
    /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
    /* boolean_t */ holelistenabled:1,
    /* boolean_t */ is_nested_map:1,
    /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
    /* boolean_t */ jit_entry_exists:1,
    /* boolean_t */ has_corpse_footprint:1,
    /* boolean_t */ terminated:1,
    /* boolean_t */ is_alien:1,              /* for platform simulation, i.e. PLATFORM_IOS on OSX */
    /* boolean_t */ cs_enforcement:1,        /* code-signing enforcement */
    /* boolean_t */ cs_debugged:1,           /* code-signed but debugged */
    /* boolean_t */ reserved_regions:1,      /* has reserved regions. The map size that userspace sees should ignore these. */
    /* boolean_t */ single_jit:1,            /* only allow one JIT mapping */
    /* boolean_t */ never_faults:1,          /* this map should never cause faults */
    /* reserved */  pad:13;
    unsigned int    timestamp;      /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
    unsigned int    main_timestamp;
} vm_map_version_t;
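
/*
 * Illustrative sketch (not compiled): the intended lookup/re-validate
 * pattern.  A version filled in by vm_map_lookup_locked() can later be
 * passed to vm_map_verify() to detect whether the map changed while it
 * was unlocked; fault-path details are omitted.
 */
#if 0 /* exposition only */
    vm_map_version_t version;
    /* ... vm_map_lookup_locked(&map, vaddr, ..., &version, ...) ... */
    /* ... map lock dropped, blocking work done ... */
    vm_map_lock_read(map);
    if (!vm_map_verify(map, &version)) {
        /* the map changed: the earlier lookup is stale, redo it */
    }
    vm_map_unlock_read(map);
#endif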

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header "links"
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.  This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member.  This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies.  On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
    int                 type;
#define VM_MAP_COPY_ENTRY_LIST      1
#define VM_MAP_COPY_OBJECT          2
#define VM_MAP_COPY_KERNEL_BUFFER   3
    vm_object_offset_t  offset;
    vm_map_size_t       size;
    union {
        struct vm_map_header    hdr;    /* ENTRY_LIST */
        vm_object_t             object; /* OBJECT */
        void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
    } c_u;
};


#define cpy_hdr     c_u.hdr

#define cpy_object  c_u.object
#define cpy_kdata   c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
    ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
    ((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
    vm_map_copy_t   copy_map,
    vm_map_offset_t offset,
    vm_map_size_t   size,
    vm_map_t        target_map,
    boolean_t       copy,
    vm_map_copy_t   *target_copy_map_p,
    vm_map_offset_t *overmap_start_p,
    vm_map_offset_t *overmap_end_p,
    vm_map_offset_t *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map) \
    ((map)->timestamp = 0 , \
    lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_w); \
    lck_rw_lock_exclusive(&(map)->lock); \
    MACRO_END

#define vm_map_unlock(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_w); \
    (map)->timestamp++; \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_r); \
    lck_rw_lock_shared(&(map)->lock); \
    MACRO_END

#define vm_map_unlock_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_unlock_r); \
    lck_rw_done(&(map)->lock); \
    MACRO_END

#define vm_map_lock_write_to_read(map) \
    MACRO_BEGIN \
    DTRACE_VM(vm_map_lock_downgrade); \
    (map)->timestamp++; \
    lck_rw_lock_exclusive_to_shared(&(map)->lock); \
    MACRO_END
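
/*
 * Illustrative sketch (not compiled): the address-ordering rule mentioned
 * above for taking two map locks, as done on vm_remap()-style paths.
 * Equal pointers (a map remapped into itself) need only one lock.
 */
#if 0 /* exposition only */
    if (src_map < dst_map) {
        vm_map_lock(src_map);
        vm_map_lock(dst_map);
    } else {
        vm_map_lock(dst_map);
        vm_map_lock(src_map);
    }
#endif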

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
    lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void vm_map_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
    vm_map_t                map,
    vm_map_address_t        *address,   /* OUT */
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    vm_map_entry_t          *o_entry);  /* OUT */

extern void vm_map_clip_start(
    vm_map_t        map,
    vm_map_entry_t  entry,
    vm_map_offset_t endaddr);
extern void vm_map_clip_end(
    vm_map_t        map,
    vm_map_entry_t  entry,
    vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
    vm_map_entry_t  entry);

/* Look up the map entry containing the specified address in the given map */
extern boolean_t vm_map_lookup_entry(
    vm_map_t            map,
    vm_map_address_t    address,
    vm_map_entry_t      *entry);    /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t vm_map_lookup_entry_allow_pgz(
    vm_map_t            map,
    vm_map_address_t    address,
    vm_map_entry_t      *entry);    /* OUT */
#else
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif

extern void vm_map_copy_remap(
    vm_map_t        map,
    vm_map_entry_t  where,
    vm_map_copy_t   copy,
    vm_map_offset_t adjustment,
    vm_prot_t       cur_prot,
    vm_prot_t       max_prot,
    vm_inherit_t    inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t vm_map_lookup_locked(
    vm_map_t                *var_map,       /* IN/OUT */
    vm_map_address_t        vaddr,
    vm_prot_t               fault_type,
    int                     object_lock_type,
    vm_map_version_t        *out_version,   /* OUT */
    vm_object_t             *object,        /* OUT */
    vm_object_offset_t      *offset,        /* OUT */
    vm_prot_t               *out_prot,      /* OUT */
    boolean_t               *wired,         /* OUT */
    vm_object_fault_info_t  fault_info,     /* OUT */
    vm_map_t                *real_map,      /* OUT */
    bool                    *contended);    /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t vm_map_verify(
    vm_map_t            map,
    vm_map_version_t    *version);  /* REF */

extern vm_map_entry_t vm_map_entry_insert(
    vm_map_t                map,
    vm_map_entry_t          insp_entry,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_map_kernel_flags_t   vmk_flags,
    boolean_t               needs_copy,
    boolean_t               is_shared,
    boolean_t               in_transition,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_behavior_t           behavior,
    vm_inherit_t            inheritance,
    unsigned short          wired_count,
    boolean_t               no_cache,
    boolean_t               permanent,
    boolean_t               no_copy_on_read,
    unsigned int            superpage_size,
    boolean_t               clear_map_aligned,
    boolean_t               is_submap,
    boolean_t               used_for_jit,
    int                     alias,
    boolean_t               translated_allow_execute);


/*
 *	Functions implemented as macros
 */
#define vm_map_min(map)     ((map)->min_offset)
                            /* Lowest valid address in
                             * a map */

#define vm_map_max(map)     ((map)->max_offset)
                            /* Highest valid address */

#define vm_map_pmap(map)    ((map)->pmap)
                            /* Physical map associated
                             * with this address map */

/* Gain a reference to an existing map */
extern void vm_map_reference(
    vm_map_t    map);

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible) \
    ((map)->timestamp++ , \
    lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
    (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map) \
    thread_wakeup((event_t)(&(map)->hdr))
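
/*
 * Illustrative sketch (not compiled): how the VM code typically waits for
 * an entry that is "in transition", using the macros above.  The assumed
 * behavior is that vm_map_entry_wait() releases the map lock while asleep
 * and re-takes it exclusive on wakeup, so the entry must be looked up again.
 */
#if 0 /* exposition only */
    while (entry->in_transition) {
        entry->needs_wakeup = TRUE;
        vm_map_entry_wait(map, THREAD_UNINT);
        if (!vm_map_lookup_entry(map, start, &entry)) {
            break;  /* the range went away while we slept */
        }
    }
#endif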


/* simplify map entries */
extern void vm_map_simplify_entry(
    vm_map_t        map,
    vm_map_entry_t  this_entry);
extern void vm_map_simplify(
    vm_map_t        map,
    vm_map_offset_t start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t vm_map_copy_copy(
    vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t vm_map_copyin_object(
    vm_object_t         object,
    vm_object_offset_t  offset,
    vm_object_size_t    size,
    vm_map_copy_t       *copy_result);  /* OUT */

extern kern_return_t vm_map_random_address_for_size(
    vm_map_t        map,
    vm_map_offset_t *address,
    vm_map_size_t   size);

/* Enter a mapping */
extern kern_return_t vm_map_enter(
    vm_map_t                map,
    vm_map_offset_t         *address,
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    vm_object_t             object,
    vm_object_offset_t      offset,
    boolean_t               needs_copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t vm_map_enter_fourk(
    vm_map_t                map,
    vm_map_offset_t         *address,
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    vm_object_t             object,
    vm_object_offset_t      offset,
    boolean_t               needs_copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern kern_return_t vm_map_enter_cpm(
    vm_map_t            map,
    vm_map_address_t    *addr,
    vm_map_size_t       size,
    int                 flags);

extern kern_return_t vm_map_remap(
    vm_map_t                target_map,
    vm_map_offset_t         *address,
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    vm_map_offset_t         memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
    vm_map_t        map,
    void            *src_p,
    vm_map_offset_t dst_addr,
    vm_size_t       size);

extern kern_return_t vm_map_read_user(
    vm_map_t        map,
    vm_map_offset_t src_addr,
    void            *dst_p,
    vm_size_t       size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
    ledger_t    ledger,
    vm_map_t    old_map,
    int         options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE   0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE      0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT        0x00000004

/* Change inheritance */
extern kern_return_t vm_map_inherit(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_inherit_t    new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t vm_map_machine_attribute(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_machine_attribute_t  attribute,
    vm_machine_attribute_val_t *value); /* IN/OUT */

extern kern_return_t vm_map_msync(
    vm_map_t            map,
    vm_map_address_t    address,
    vm_map_size_t       size,
    vm_sync_t           sync_flags);

/* Set paging behavior */
extern kern_return_t vm_map_behavior_set(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_behavior_t   new_behavior);

extern kern_return_t vm_map_region(
    vm_map_t                map,
    vm_map_offset_t         *address,
    vm_map_size_t           *size,
    vm_region_flavor_t      flavor,
    vm_region_info_t        info,
    mach_msg_type_number_t  *count,
    mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
    vm_map_t                    map,
    vm_map_offset_t             *address,
    vm_map_size_t               *size,
    natural_t                   *nesting_depth,
    vm_region_submap_info_64_t  info,
    mach_msg_type_number_t      *count);

extern kern_return_t vm_map_page_query_internal(
    vm_map_t        map,
    vm_map_offset_t offset,
    int             *disposition,
    int             *ref_count);

extern kern_return_t vm_map_query_volatile(
    vm_map_t        map,
    mach_vm_size_t  *volatile_virtual_size_p,
    mach_vm_size_t  *volatile_resident_size_p,
    mach_vm_size_t  *volatile_compressed_size_p,
    mach_vm_size_t  *volatile_pmap_size_p,
    mach_vm_size_t  *volatile_compressed_pmap_size_p);

extern kern_return_t vm_map_submap(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_map_t        submap,
    vm_map_offset_t offset,
    boolean_t       use_pmap);

extern void vm_map_submap_pmap_clean(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_map_t        sub_map,
    vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
    ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
    vm_map_t        map,
    vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32   0x1
#define VM_ABI_64   0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);


extern void vm_map_region_top_walk(
    vm_map_entry_t          entry,
    vm_region_top_info_t    top);
extern void vm_map_region_walk(
    vm_map_t                    map,
    vm_map_offset_t             va,
    vm_map_entry_t              entry,
    vm_object_offset_t          offset,
    vm_object_size_t            range,
    vm_region_extended_info_t   extended,
    boolean_t                   look_for_pages,
    mach_msg_type_number_t      count);



extern void vm_map_copy_footprint_ledgers(
    task_t  old_task,
    task_t  new_task);
extern void vm_map_copy_ledger(
    task_t  old_task,
    task_t  new_task,
    int     ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
    char            *vmrr_name;
    vm_map_offset_t vmrr_addr;
    vm_map_size_t   vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM.  This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
    bool                        vm_is64bit,
    struct vm_reserved_region   **regions);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t vm_map_create(
    pmap_t          pmap,
    vm_map_offset_t min_off,
    vm_map_offset_t max_off,
    boolean_t       pageable);

extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

extern void vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void vm_map_destroy(
    vm_map_t    map,
    int         flags);

/* Lose a reference */
extern void vm_map_deallocate(
    vm_map_t    map);

/* Lose a reference */
extern void vm_map_inspect_deallocate(
    vm_map_inspect_t    map);

/* Lose a reference */
extern void vm_map_read_deallocate(
    vm_map_read_t   map);

extern vm_map_t vm_map_switch(
    vm_map_t    map);

/* Change protection */
extern kern_return_t vm_map_protect(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t       new_prot,
    boolean_t       set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t       protection);

extern boolean_t vm_map_cs_enforcement(
    vm_map_t    map);
extern void vm_map_cs_enforcement_set(
    vm_map_t    map,
    boolean_t   val);

extern void vm_map_cs_debugged_set(
    vm_map_t    map,
    boolean_t   val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

#define VM_MAP_CREATE_ZAP_OPTIONS(map) \
    (VM_MAP_CREATE_DISABLE_HOLELIST | ((map)->hdr.entries_pageable \
    ? VM_MAP_CREATE_PAGEABLE : VM_MAP_CREATE_DEFAULT))

/* never fails */
extern vm_map_t vm_map_create_options(
    pmap_t                  pmap,
    vm_map_offset_t         min_off,
    vm_map_offset_t         max_off,
    vm_map_create_options_t options);

extern kern_return_t vm_map_wire_kernel(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t       access_type,
    vm_tag_t        tag,
    boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_prot_t       access_type,
    vm_tag_t        tag,
    boolean_t       user_wire,
    ppnum_t         *physpage_p);

/* kext exported versions */

extern kern_return_t vm_map_wire_external(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t       access_type,
    boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_external(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_prot_t       access_type,
    boolean_t       user_wire,
    ppnum_t         *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t vm_map_wire(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t       access_type,
    boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_prot_t       access_type,
    boolean_t       user_wire,
    ppnum_t         *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t vm_map_unwire(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t       user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object(
    vm_map_t                map,
    vm_map_offset_t         *address,
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               needs_copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_prefault(
    vm_map_t                map,
    vm_map_offset_t         *address,
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    upl_page_list_ptr_t     page_list,
    unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t vm_map_enter_mem_object_control(
    vm_map_t                map,
    vm_map_offset_t         *address,
    vm_map_size_t           size,
    vm_map_offset_t         mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    memory_object_control_t control,
    vm_object_offset_t      offset,
    boolean_t               needs_copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

extern kern_return_t vm_map_terminate(
    vm_map_t    map);

extern void vm_map_require(
    vm_map_t    map);

#endif /* XNU_KERNEL_PRIVATE */

/* Deallocate a region */
extern kern_return_t vm_map_remove(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t       flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t vm_map_remove_locked(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    boolean_t       flags);

/* Discard a copy without using it */
extern void vm_map_copy_discard(
    vm_map_copy_t   copy);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
    vm_map_t            dst_map,
    vm_map_address_t    dst_addr,
    vm_map_copy_t       copy,
    vm_map_size_t       copy_size,
    boolean_t           interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES  (3)


/* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
    vm_map_t        dst_map,
    vm_map_copy_t   copy,
    vm_map_size_t   *size);

/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
    vm_map_t            dst_map,
    vm_map_address_t    *dst_addr,  /* OUT */
    vm_map_copy_t       copy);

extern kern_return_t vm_map_copyout_size(
    vm_map_t            dst_map,
    vm_map_address_t    *dst_addr,  /* OUT */
    vm_map_copy_t       copy,
    vm_map_size_t       copy_size);

extern kern_return_t vm_map_copyout_internal(
    vm_map_t            dst_map,
    vm_map_address_t    *dst_addr,  /* OUT */
    vm_map_copy_t       copy,
    vm_map_size_t       copy_size,
    boolean_t           consume_on_success,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_inherit_t        inheritance);

extern kern_return_t vm_map_copyin(
    vm_map_t            src_map,
    vm_map_address_t    src_addr,
    vm_map_size_t       len,
    boolean_t           src_destroy,
    vm_map_copy_t       *copy_result);  /* OUT */
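
/*
 * Illustrative sketch (not compiled): moving a region between two maps with
 * the copyin/copyout pair declared here.  "src_map", "dst_map", "src_addr"
 * and "len" are placeholders; only the failed-copyout error path is shown.
 */
#if 0 /* exposition only */
    vm_map_copy_t copy;
    vm_map_address_t dst_addr;
    kern_return_t kr;

    kr = vm_map_copyin(src_map, src_addr, len,
        FALSE /* src_destroy */, &copy);
    if (kr == KERN_SUCCESS) {
        kr = vm_map_copyout(dst_map, &dst_addr, copy);
        if (kr != KERN_SUCCESS) {
            vm_map_copy_discard(copy);  /* copy not consumed */
        }
    }
#endif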

extern kern_return_t vm_map_copyin_common(
    vm_map_t            src_map,
    vm_map_address_t    src_addr,
    vm_map_size_t       len,
    boolean_t           src_destroy,
    boolean_t           src_volatile,
    vm_map_copy_t       *copy_result,   /* OUT */
    boolean_t           use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY           0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT           0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST            0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE    0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS             0x0000000F
extern kern_return_t vm_map_copyin_internal(
    vm_map_t            src_map,
    vm_map_address_t    src_addr,
    vm_map_size_t       len,
    int                 flags,
    vm_map_copy_t       *copy_result);  /* OUT */

extern kern_return_t vm_map_copy_extract(
    vm_map_t                src_map,
    vm_map_address_t        src_addr,
    vm_map_size_t           len,
    boolean_t               copy,
    vm_map_copy_t           *copy_result,   /* OUT */
    vm_prot_t               *cur_prot,      /* OUT */
    vm_prot_t               *max_prot,      /* OUT */
    vm_inherit_t            inheritance,
    vm_map_kernel_flags_t   vmk_flags);


extern void vm_map_disable_NX(
    vm_map_t    map);

extern void vm_map_disallow_data_exec(
    vm_map_t    map);

extern void vm_map_set_64bit(
    vm_map_t    map);

extern void vm_map_set_32bit(
    vm_map_t    map);

extern void vm_map_set_jumbo(
    vm_map_t    map);

extern void vm_map_set_jit_entitled(
    vm_map_t    map);

extern void vm_map_set_max_addr(
    vm_map_t    map, vm_map_offset_t new_max_offset);

extern boolean_t vm_map_has_hard_pagezero(
    vm_map_t        map,
    vm_map_offset_t pagezero_size);
extern void vm_commit_pagezero_status(vm_map_t tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
    return 0;
}
#else
extern boolean_t vm_map_is_64bit(
    vm_map_t    map);
#endif


extern kern_return_t vm_map_raise_max_offset(
    vm_map_t        map,
    vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
    vm_map_t        map,
    vm_map_offset_t new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
    vm_map_t        map,
    vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t vm_compute_max_offset(
    boolean_t   is64);

extern void vm_map_get_max_aslr_slide_section(
    vm_map_t    map,
    int64_t     *max_sections,
    int64_t     *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
    vm_map_t    map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
    vm_map_t    map);

extern kern_return_t vm_map_set_size_limit(
    vm_map_t    map,
    uint64_t    limit);

extern kern_return_t vm_map_set_data_limit(
    vm_map_t    map,
    uint64_t    limit);

extern void vm_map_set_user_wire_limit(
    vm_map_t    map,
    vm_size_t   limit);

extern void vm_map_switch_protect(
    vm_map_t    map,
    boolean_t   val);

extern void vm_map_iokit_mapped_region(
    vm_map_t    map,
    vm_size_t   bytes);

extern void vm_map_iokit_unmapped_region(
    vm_map_t    map,
    vm_size_t   bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int vm_map_page_shift(
    vm_map_t    map);

extern vm_map_offset_t vm_map_page_mask(
    vm_map_t    map);

extern int vm_map_page_size(
    vm_map_t    map);

extern vm_map_offset_t vm_map_round_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern vm_map_offset_t vm_map_trunc_page_mask(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

extern boolean_t vm_map_page_aligned(
    vm_map_offset_t offset,
    vm_map_offset_t mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
    vm_map_offset_t sum;
    return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
    mach_vm_offset_t sum;
    return os_add_overflow(addr, size, &sum);
}
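
/*
 * Illustrative sketch (not compiled): typical use of the overflow checks
 * above to reject a user-supplied (address, size) pair whose end address
 * wraps, before any rounding or lookup is attempted.
 */
#if 0 /* exposition only */
    if (vm_map_range_overflows(addr, size)) {
        return KERN_INVALID_ARGUMENT;
    }
#endif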

#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
    vm_map_t                map,
    vm_map_offset_t         offset,
    vm_page_info_flavor_t   flavor,
    vm_page_info_t          info,
    mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
    vm_map_t                map,
    vm_map_offset_t         start_offset,
    vm_map_offset_t         end_offset,
    int                     effective_page_shift,
    vm_page_info_flavor_t   flavor,
    vm_page_info_t          info,
    mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a region whose current protection is VM_PROT_NONE fails).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
        FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
        src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
        FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
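
/*
 * Worked example: with 16K pages (pgmask == 0x3FFF),
 * VM_MAP_ROUND_PAGE(0x5000, 0x3FFF) yields 0x8000 and
 * VM_MAP_TRUNC_PAGE(0x5000, 0x3FFF) yields 0x4000.
 */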

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
    vm_map_t map)
{
    if (map) {
        return map->hdr.page_shift;
    }
    return PAGE_SHIFT;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)

static inline bool
VM_MAP_IS_EXOTIC(
    vm_map_t map __unused)
{
#if __arm64__
    if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
        pmap_is_exotic(map->pmap)) {
        return true;
    }
#endif /* __arm64__ */
    return false;
}

static inline bool
VM_MAP_IS_ALIEN(
    vm_map_t map __unused)
{
    /*
     * An "alien" process/task/map/pmap should mostly behave
     * as it currently would on iOS.
     */
#if XNU_TARGET_OS_OSX
    if (map->is_alien) {
        return true;
    }
    return false;
#else /* XNU_TARGET_OS_OSX */
    return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return true;
    }
    return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
    vm_map_t map)
{
    return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
    vm_map_t map __unused)
{
    if (VM_MAP_IS_ALIEN(map)) {
        return false;
    }
    return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
    vm_map_t map __unused)
{
#if __x86_64__
    return true;
#else /* __x86_64__ */
    if (VM_MAP_IS_EXOTIC(map)) {
        return true;
    }
    return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
    switch (prot) {
    case MAP_MEM_NOOP:                      break;
    case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
    case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
    case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
    case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
    case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
    case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
    case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
    case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
    case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
    default:                                break;
    }
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
    vm_map_t                target_map,
    vm_map_offset_t         map_offset,
    upl_size_t              *size,
    upl_t                   *upl,
    upl_page_info_array_t   page_info,
    unsigned int            *page_infoCnt,
    upl_control_flags_t     *flags,
    vm_tag_t                tag,
    int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
    vm_map_t        map,
    unsigned int    *reclaimed_resident,
    unsigned int    *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
    vm_map_t    map,
    boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
    task_t          task,
    unsigned int    *purgeable_count,
    unsigned int    *wired_count,
    unsigned int    *clean_count,
    unsigned int    *dirty_count,
    unsigned int    dirty_budget,
    unsigned int    *shared_count,
    int             *freezer_error_code,
    boolean_t       eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif /* KERNEL_PRIVATE */

#endif /* _VM_VM_MAP_H_ */