/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef  KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit);

__END_DECLS

#ifdef  MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t             vmo_object;     /* a VM object */
	vm_map_t                vmo_submap;     /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)

struct vm_map_entry {
	struct vm_map_links     links;                      /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;                 /* object I point to */

	unsigned long long
	/* vm_tag_t          */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t         */ is_shared:1,                /* region is shared */
	/* boolean_t         */ is_sub_map:1,               /* Is "object" a submap? */
	/* boolean_t         */ in_transition:1,            /* Entry being changed */
	/* boolean_t         */ needs_wakeup:1,             /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t     */ behavior:2,                 /* user paging behavior hint */
	/* boolean_t         */ needs_copy:1,               /* object needs to be copied? */

	/* Only in task maps: */
	/* vm_prot_t-like    */ protection:4,               /* protection code, bit3=UEXEC */
	/* vm_prot_t-like    */ max_protection:4,           /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t      */ inheritance:2,              /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *      use a nested pmap?
	 * else (i.e. if object):
	 *      use pmap accounting
	 *      for footprint?
	 */
	/* boolean_t         */ use_pmap:1,
	/* boolean_t         */ no_cache:1,                 /* should new pages be cached? */
	/* boolean_t         */ permanent:1,                /* mapping cannot be removed */
	/* boolean_t         */ superpage_size:1,           /* use superpages of a certain size */
	/* boolean_t         */ map_aligned:1,              /* align to map's page size */
	/*
	 * zero out the wired pages of this entry
	 * if it is being deleted without unwiring them
	 */
	/* boolean_t         */ zero_wired_pages:1,
	/* boolean_t         */ used_for_jit:1,
	/* boolean_t         */ pmap_cs_associated:1,       /* pmap_cs will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t         */ iokit_acct:1,
	/* boolean_t         */ vme_resilient_codesign:1,
	/* boolean_t         */ vme_resilient_media:1,
	/* boolean_t         */ vme_atomic:1,               /* entry cannot be split/coalesced */
	/* boolean_t         */ vme_no_copy_on_read:1,
	/* boolean_t         */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t         */ vme_kernel_object:1;        /* vme_object is kernel_object */

	unsigned short          wired_count;                /* can be paged if = 0 */
	unsigned short          user_wired_count;           /* for vm_wire */

#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt;            /* btref_t */
#endif
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt;           /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif
};
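
/*
 * Illustrative sketch, not part of this interface: how the FOOTPRINT
 * ACCOUNTING rules above map onto the entry bits.  The helper name
 * "vme_uses_pmap_accounting" is hypothetical.  Note that the accounting
 * comment calls the iokit bit "iokit_mapped" while this version of the
 * structure spells it "iokit_acct".
 */
static inline boolean_t
vme_uses_pmap_accounting(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return FALSE;   /* submap contents never count for the footprint */
	}
	if (entry->iokit_acct) {
		return FALSE;   /* entire virtual size counted on entry/removal */
	}
	/* otherwise use_pmap says whether pmap accounts for entered pages */
	return (boolean_t)entry->use_pmap;
}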

#define VME_SUBMAP(entry) \
	((entry)->vme_object.vmo_submap)
#define VME_OBJECT(entry) \
	((entry)->vme_kernel_object ? kernel_object : \
	((entry)->vme_object.vmo_object))
#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object)
{
	if (object == kernel_object) {
		entry->vme_kernel_object = TRUE;
		entry->vme_object.vmo_object = VM_OBJECT_NULL;
	} else {
		entry->vme_kernel_object = FALSE;
		entry->vme_object.vmo_object = object;
	}
	if (object != VM_OBJECT_NULL && !object->internal) {
		entry->vme_resilient_media = FALSE;
	}
	entry->vme_resilient_codesign = FALSE;
	entry->used_for_jit = FALSE;
}

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	entry->is_sub_map = TRUE;
	entry->vme_object.vmo_submap = submap;
}

static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
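
/*
 * Example of the packing above (illustrative): vme_offset holds the object
 * offset shifted right by VME_OFFSET_SHIFT (12), so only page-aligned
 * offsets survive the round trip that assert3u() checks:
 *
 *	VME_OFFSET_SET(entry, 0x5000);	stored as 0x5, reads back as 0x5000
 *	VME_OFFSET_SET(entry, 0x5001);	asserts: the low 12 bits are lost
 *
 * Packing the offset this way frees VME_ALIAS_BITS of the same 64-bit
 * word for the entry's VM tag.
 */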

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
		VME_OBJECT_SET(entry, object);
		entry->use_pmap = TRUE;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE - 1) & SUPERPAGE_MASK)
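
/*
 * Worked example (illustrative, assuming 4KB base pages and
 * SUPERPAGE_NBASEPAGES == 512, i.e. 2MB superpages, as on x86_64):
 *
 *	SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x234567)   == 0x400000
 */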

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */


struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	uint16_t                page_shift;     /* page shift */
	unsigned int
	/* boolean_t */ entries_pageable : 1,   /* are map entries pageable? */
	/* reserved  */ __padding : 15;
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head  rb_head_store;
#endif
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
	vm_map_offset_t         vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                          /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	os_ref_atomic_t         map_refcnt;       /* Reference count */

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,              /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,        /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,           /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,      /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,            /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,          /* this map should never cause faults */
	/* reserved  */ pad:13;
	unsigned int            timestamp;       /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method. This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member. This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies. On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		vm_object_t                           object; /* OBJECT */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
	} c_u;
};
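
/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * dispatching on the three map copy formats described above.
 */
static inline const char *
vm_map_copy_example_format_name(vm_map_copy_t copy)
{
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		return "entry list";            /* chained vm_map_entry structures */
	case VM_MAP_COPY_OBJECT:
		return "single VM object";      /* placeholder copy objects */
	case VM_MAP_COPY_KERNEL_BUFFER:
		return "kernel buffer";         /* small physical copies */
	default:
		return "unknown";
	}
}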


#define cpy_hdr                 c_u.hdr

#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        ((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0,                                          \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
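
/*
 * Illustrative use of the locking macros with vm_map_lookup_entry()
 * (hypothetical helper, not part of this interface).  The entry pointer
 * produced by vm_map_lookup_entry() is only stable while the map lock is
 * held, so this sketch merely reports whether the address is mapped.
 */
static inline boolean_t
vm_map_example_is_mapped(
	vm_map_t                map,
	vm_map_address_t        address)
{
	vm_map_entry_t  entry;
	boolean_t       found;

	vm_map_lock_read(map);
	found = vm_map_lookup_entry(map, address, &entry);
	vm_map_unlock_read(map);
	return found;
}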

/* Look up the map entry containing the specified address, or the entry
 * that follows it, in the given map */
extern boolean_t        vm_map_lookup_entry_or_next(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t        vm_map_lookup_entry_allow_pgz(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
#else
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_locked(
	vm_map_t                *var_map,                               /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,                           /* OUT */
	vm_object_t             *object,                                /* OUT */
	vm_object_offset_t      *offset,                                /* OUT */
	vm_prot_t               *out_prot,                              /* OUT */
	boolean_t               *wired,                                 /* OUT */
	vm_object_fault_info_t  fault_info,                             /* OUT */
	vm_map_t                *real_map,                              /* OUT */
	bool                    *contended);                            /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);                              /* REF */
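
/*
 * Sketch of the lookup/verify pattern (illustrative): a version captured at
 * lookup time remains valid as long as the map's timestamp has not changed,
 * which vm_map_verify() checks once the lock is retaken:
 *
 *	vm_map_lock_read(map);
 *	... vm_map_lookup_locked(&map, vaddr, ..., &version, ...) ...
 *	vm_map_unlock_read(map);
 *	... work without the map lock ...
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		... the map changed underneath us: redo the lookup ...
 *	}
 */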


/*
 *	Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)     /* Lowest valid address in a map */

#define         vm_map_max(map) ((map)->max_offset)     /* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)   /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++,                            \
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	              (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
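
/*
 * Sketch of the in_transition protocol (illustrative): a thread that finds
 * an entry in transition asks for a wakeup and sleeps; the thread that
 * completes the transition calls vm_map_entry_wakeup().  The map is
 * unlocked while sleeping, so the entry must be looked up again:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		... re-lookup the entry ...
 *	}
 */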


/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t           copy);

/* Create a copy object from an object. */
extern kern_return_t    vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result);                         /* OUT */

extern kern_return_t    vm_map_random_address_for_size(
	vm_map_t                map,
	vm_map_offset_t        *address,
	vm_map_size_t           size,
	vm_map_kernel_flags_t   vmk_flags);

/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t   vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);


/*
 * Read from and write to a specified map via a kernel buffer.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);                         /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t       flavor,
	vm_region_info_t         info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);


extern void vm_map_region_top_walk(
	vm_map_entry_t entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t map,
	vm_map_offset_t va,
	vm_map_entry_t entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t look_for_pages,
	mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	char            *vmrr_name;
	vm_map_offset_t vmrr_addr;
	vm_map_size_t   vmrr_size;
};

/**
 * Returns a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool vm_is64bit,
	struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
	vm_map_read_t        map);

extern vm_map_t         vm_map_switch(
	vm_map_t                map);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern void vm_map_will_allocate_early_map(
	vm_map_t               *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t             delta);

extern void vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta);

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

extern void             vm_map_require(
	vm_map_t                map);

extern void             vm_map_copy_require(
	vm_map_copy_t           copy);

extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);

#endif /* XNU_KERNEL_PRIVATE */

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)


/* Returns TRUE if the size of the vm_map_copy equals the given size parameter, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);

/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t    vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);                          /* OUT */

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,                           /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);                         /* OUT */


extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t      tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return 0;
}
#else
extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);
#endif


extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int              vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map);

extern int              vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
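
/*
 * Illustrative use of the overflow checks above (hypothetical helper, not
 * part of this interface): reject a caller-supplied [addr, addr + size)
 * range whose end would wrap around.
 */
static inline kern_return_t
vm_map_example_check_range(vm_map_offset_t addr, vm_map_size_t size)
{
	if (vm_map_range_overflows(addr, size)) {
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}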

#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef  MACH_KERNEL_PRIVATE


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * help ubsan and codegen in general,
	 * cannot use PAGE_{MIN,MAX}_SHIFT
	 * because of testing code which
	 * tests 16k aligned maps on 4k only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
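
/*
 * Example (illustrative): for a 16KB-page map, VM_MAP_PAGE_SHIFT() is 14,
 * so VM_MAP_PAGE_SIZE() is 0x4000 and VM_MAP_PAGE_MASK() is 0x3fff;
 * VM_MAP_PAGE_ALIGNED(0x8000, 0x3fff) is true, while
 * VM_MAP_PAGE_ALIGNED(0x9100, 0x3fff) is false.
 */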

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
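
/*
 * Note (illustrative): vm_prot_to_wimg() only writes *wimg for cache modes
 * it recognizes; MAP_MEM_NOOP and unrecognized values leave the caller's
 * value untouched, so initialize it first:
 *
 *	unsigned int wimg = VM_WIMG_USE_DEFAULT;
 *	vm_prot_to_wimg(MAP_MEM_WCOMB, &wimg);	wimg is now VM_WIMG_WCOMB
 */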

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_VM_MAP_H_ */