/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef  KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit);

__END_DECLS

#ifdef  MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)


/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 *
 * (A condensed sketch of this decision appears after struct vm_map_entry
 * below.)
 */

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

struct vm_map_entry {
	struct vm_map_links     links;                      /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;

	union {
		vm_offset_t     vme_object_value;
		struct {
			vm_offset_t vme_atomic:1;           /* entry cannot be split/coalesced */
			vm_offset_t is_sub_map:1;           /* Is "object" a submap? */
			vm_offset_t vme_submap:VME_SUBMAP_BITS;
		};
#if __LP64__
		struct {
			uint32_t    vme_ctx_atomic : 1;
			uint32_t    vme_ctx_is_sub_map : 1;
			uint32_t    vme_context : 30;
			vm_page_object_t vme_object;
		};
#endif
	};

	unsigned long long
	/* vm_tag_t          */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t         */ is_shared:1,                /* region is shared */
	/* boolean_t         */ __unused1:1,
	/* boolean_t         */ in_transition:1,            /* Entry being changed */
	/* boolean_t         */ needs_wakeup:1,             /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t     */ behavior:2,                 /* user paging behavior hint */
	/* boolean_t         */ needs_copy:1,               /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t-like    */ protection:4,               /* protection code, bit3=UEXEC */
	/* vm_prot_t-like    */ max_protection:4,           /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t      */ inheritance:2,              /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *      use a nested pmap?
	 * else (i.e. if object):
	 *      use pmap accounting
	 *      for footprint?
	 */
	/* boolean_t         */ use_pmap:1,
	/* boolean_t         */ no_cache:1,                 /* should new pages be cached? */
	/* boolean_t         */ permanent:1,                /* mapping cannot be removed */
	/* boolean_t         */ superpage_size:1,           /* use superpages of a certain size */
	/* boolean_t         */ map_aligned:1,              /* align to map's page size */
	/*
	 * zero out the wired pages of this entry
	 * if it is being deleted without unwiring them
	 */
	/* boolean_t         */ zero_wired_pages:1,
	/* boolean_t         */ used_for_jit:1,
	/* boolean_t         */ pmap_cs_associated:1,       /* pmap_cs will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t         */ iokit_acct:1,
	/* boolean_t         */ vme_resilient_codesign:1,
	/* boolean_t         */ vme_resilient_media:1,
	/* boolean_t         */ __unused2:1,
	/* boolean_t         */ vme_no_copy_on_read:1,
	/* boolean_t         */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t         */ vme_kernel_object:1;        /* vme_object is kernel_object */

	unsigned short          wired_count;                /* can be paged if = 0 */
	unsigned short          user_wired_count;           /* for vm_wire */

#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt;            /* btref_t */
#endif
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt;           /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif
};
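
/*
 * Editorial sketch, not part of the original header: a condensed form of
 * the decision described in the FOOTPRINT ACCOUNTING comment above.  The
 * helper name is hypothetical; the comment's "iokit_mapped" case is the
 * iokit_acct bit here, and the real code in vm_map.c and the pmap layer
 * has more special cases.
 */
static inline bool
vme_footprints_via_pmap(vm_map_entry_t entry)
{
	if (entry->is_sub_map) {
		return false;           /* submap contents never count */
	}
	if (entry->iokit_acct) {
		return false;           /* whole virtual size charged at entry time */
	}
	return entry->use_pmap;         /* FALSE => "alternate accounting" */
}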

#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
	vm_map_entry_t entry)
{
	__builtin_assume(entry->vme_submap);
	return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	__builtin_assume(((vm_offset_t)submap & 3) == 0);

	entry->is_sub_map = true;
	entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}

static inline vm_object_t
_VME_OBJECT(
	vm_map_entry_t entry)
{
	vm_object_t object = kernel_object;

	if (!entry->vme_kernel_object) {
#if __LP64__
		object = VM_OBJECT_UNPACK(entry->vme_object);
		__builtin_assume(object != kernel_object);
#else
		object = (vm_object_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
#endif
	}
	return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
#if __LP64__
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}
#else
	(void)context;
#endif

	if (!object || object == kernel_object) {
#if __LP64__
		entry->vme_object = 0;
#else
		entry->vme_submap = 0;
#endif
	} else {
#if __LP64__
		entry->vme_object = VM_OBJECT_PACK(object);
#else
		entry->vme_submap = (vm_offset_t)object >> VME_SUBMAP_SHIFT;
#endif
	}

	entry->vme_kernel_object = (object == kernel_object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}

static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
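
/*
 * Worked example (editorial, not part of the original header): with
 * VME_ALIAS_BITS == 12, the low 12 bits of an object offset are dropped
 * by the shift above, so offsets stored in a map entry must be
 * 4096-byte aligned.  VME_OFFSET_SET(entry, 0x5000) stores 0x5 in
 * vme_offset and VME_OFFSET(entry) reconstructs 0x5000; an unaligned
 * offset such as 0x5800 would fail the assert3u() round-trip check.
 */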

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
#if __LP64__
		entry->vme_object = VM_OBJECT_PACK(object);
#else
		entry->vme_submap = (vm_offset_t)object >> VME_SUBMAP_SHIFT;
#endif
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
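
/*
 * Worked example (editorial): on x86_64, SUPERPAGE_NBASEPAGES is 512,
 * so SUPERPAGE_SIZE is 2 MB; SUPERPAGE_ROUND_DOWN(0x401000) yields
 * 0x400000 and SUPERPAGE_ROUND_UP(0x401000) yields 0x600000.
 */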

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */


struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	uint16_t                page_shift;     /* page shift */
	unsigned int
	/* boolean_t */ entries_pageable : 1,   /* are map entries pageable? */
	/* reserved  */ __padding : 15;
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head  rb_head_store;
#endif
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
	vm_map_offset_t         vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                          /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	os_ref_atomic_t         map_refcnt;       /* Reference count */

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,              /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,        /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,           /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,      /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,            /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,          /* this map should never cause faults */
	/* reserved  */ pad:13;
	unsigned int            timestamp;       /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method. This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member. This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies. On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		vm_object_t                           object; /* OBJECT */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
	} c_u;
};


#define cpy_hdr                 c_u.hdr

#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        ((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END
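
/*
 * Editorial sketch, not part of the original header, of the lock
 * ordering rule stated above: when two maps must be held at once,
 * always take the one at the lower address first so that concurrent
 * lockers cannot deadlock.  The helper name is hypothetical and it
 * assumes a != b.
 */
static inline void
vm_map_lock_pair_sketch(vm_map_t a, vm_map_t b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		vm_map_lock(a);
		vm_map_lock(b);
	} else {
		vm_map_lock(b);
		vm_map_lock(a);
	}
}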

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/*
 * Look up the map entry containing the specified address, or failing
 * that the next entry, in the given map
 */
extern boolean_t        vm_map_lookup_entry_or_next(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t        vm_map_lookup_entry_allow_pgz(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
#else
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/*
 * Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified.
 */
extern kern_return_t    vm_map_lookup_locked(
	vm_map_t                *var_map,                               /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,                           /* OUT */
	vm_object_t             *object,                                /* OUT */
	vm_object_offset_t      *offset,                                /* OUT */
	vm_prot_t               *out_prot,                              /* OUT */
	boolean_t               *wired,                                 /* OUT */
	vm_object_fault_info_t  fault_info,                             /* OUT */
	vm_map_t                *real_map,                              /* OUT */
	bool                    *contended);                            /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);                              /* REF */
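
/*
 * Editorial sketch, not part of the original header: vm_map_verify()
 * only compares timestamps, so the map lock must be held for the answer
 * to stay meaningful.  The hypothetical helper below shows the intended
 * pattern: record a version at lookup time, drop the lock to do slow
 * work, then relock and verify (retrying the lookup on failure).
 */
static inline boolean_t
vm_map_version_sketch_still_valid(vm_map_t map, vm_map_version_t *version)
{
	vm_map_lock_assert_held(map);   /* result is stale once unlocked */
	return vm_map_verify(map, version);
}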

/*
 *	Functions implemented as macros
 */

/* Lowest valid address in a map */
#define         vm_map_min(map) ((map)->min_offset)

/* Highest valid address */
#define         vm_map_max(map) ((map)->max_offset)

/* Physical map associated with this address map */
#define         vm_map_pmap(map)        ((map)->pmap)

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	              (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))


/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t           copy);

/* Create a copy object from an object. */
extern kern_return_t    vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result);                         /* OUT */

extern kern_return_t    vm_map_random_address_for_size(
	vm_map_t                map,
	vm_map_offset_t        *address,
	vm_map_size_t           size,
	vm_map_kernel_flags_t   vmk_flags);

/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t   vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);                         /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t       flavor,
	vm_region_info_t         info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);


extern void vm_map_region_top_walk(
	vm_map_entry_t entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t map,
	vm_map_offset_t va,
	vm_map_entry_t entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t look_for_pages,
	mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);
/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	char            *vmrr_name;
	vm_map_offset_t vmrr_addr;
	vm_map_size_t   vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool vm_is64bit,
	struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
	vm_map_read_t           map);

extern vm_map_t         vm_map_switch(
	vm_map_t                map);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern void vm_map_will_allocate_early_map(
	vm_map_t               *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t             delta);

extern void vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta);

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object and prefault the given page list */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object, named by its pager control */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

extern void             vm_map_require(
	vm_map_t                map);

extern void             vm_map_copy_require(
	vm_map_copy_t           copy);

extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);
#endif /* XNU_KERNEL_PRIVATE */

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)


/* returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);

/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t    vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);                          /* OUT */

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,                           /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);                         /* OUT */


extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t      tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return 0;
}
#else
extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);
#endif


extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int              vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map);

extern int              vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
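
/*
 * Usage sketch (editorial, not part of the original header): reject a
 * user-supplied [addr, addr + size) range whose end would wrap, before
 * doing any rounding or lookups with it.  The helper name is hypothetical.
 */
static inline kern_return_t
vm_map_range_check_sketch(vm_map_offset_t addr, vm_map_size_t size)
{
	if (vm_map_range_overflows(addr, size)) {
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}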

#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef  MACH_KERNEL_PRIVATE


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
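
/*
 * Worked example (informative only): with a 4k page, pgmask == 0xFFF, so
 * VM_MAP_ROUND_PAGE(0x1001, 0xFFF) == 0x2000 and
 * VM_MAP_TRUNC_PAGE(0x1FFF, 0xFFF) == 0x1000; inputs that are already
 * page-aligned are returned unchanged by both macros.
 */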

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * Help ubsan and codegen in general.
	 * We cannot use PAGE_{MIN,MAX}_SHIFT here
	 * because testing code exercises 16k-aligned
	 * maps on 4k-only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
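
/*
 * Informative sketch of how these compose: for a map with 16k pages,
 * VM_MAP_PAGE_SHIFT(map) == 14, VM_MAP_PAGE_SIZE(map) == 0x4000 and
 * VM_MAP_PAGE_MASK(map) == 0x3FFF, so a caller might align a range with:
 *
 *	vm_map_offset_t mask  = VM_MAP_PAGE_MASK(map);
 *	vm_map_offset_t start = VM_MAP_TRUNC_PAGE(addr, mask);
 *	vm_map_offset_t end   = VM_MAP_ROUND_PAGE(addr + size, mask);
 *	assert(VM_MAP_PAGE_ALIGNED(start, mask));
 */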

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}
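
/*
 * Illustrative sketch (not the actual enforcement code): a W^X check can
 * consult these two predicates to decide whether a request for
 * VM_PROT_WRITE | VM_PROT_EXECUTE should fail outright or be downgraded
 * by stripping the execute bit.
 *
 *	if ((prot & VM_PROT_WRITE) && (prot & VM_PROT_EXECUTE)) {
 *		if (VM_MAP_POLICY_WX_FAIL(map)) {
 *			return KERN_PROTECTION_FAILURE;
 *		}
 *		if (VM_MAP_POLICY_WX_STRIP_X(map)) {
 *			prot &= ~VM_PROT_EXECUTE;
 *		}
 *	}
 */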

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
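
/*
 * Note (informative): vm_prot_to_wimg() leaves *wimg untouched for
 * MAP_MEM_NOOP and for unrecognized values, so callers seed it with
 * their current caching policy first, e.g.:
 *
 *	unsigned int wimg_mode = VM_WIMG_USE_DEFAULT;
 *	vm_prot_to_wimg(access, &wimg_mode);
 */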

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);
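
/*
 * Hypothetical usage sketch (informative only, not a supported recipe;
 * the flag and tag choices below are assumptions):
 *
 *	upl_t               upl = NULL;
 *	upl_size_t          upl_size = (upl_size_t)len;
 *	upl_control_flags_t upl_flags = UPL_FILE_IO | UPL_SET_LITE;
 *	kern_return_t       kr;
 *
 *	kr = vm_map_get_upl(map, start, &upl_size, &upl,
 *	    NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0);
 *
 * On success, *size is updated to the length the UPL actually covers,
 * so callers should re-check it rather than assume the requested length.
 */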

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);
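
/*
 * Illustrative usage (a minimal sketch; the caller supplies all three
 * out-parameters):
 *
 *	vm_map_size_t size = 0, free = 0, largest_free = 0;
 *	vm_map_sizes(map, &size, &free, &largest_free);
 */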

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif /* CONFIG_DYNAMIC_CODE_SIGNING */

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)
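
/*
 * Illustrative sketch (hypothetical caller): when vm_map_freeze() fails,
 * freezer_error_code distinguishes why, so a policy layer can decide
 * whether retrying later makes sense:
 *
 *	int error = 0;
 *	kr = vm_map_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    budget, &shared, &error, FALSE);
 *	if (kr != KERN_SUCCESS &&
 *	    error == FREEZER_ERROR_NO_COMPRESSOR_SPACE) {
 *		// compressor pool exhausted; retry once space frees up
 *	}
 */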

#endif /* CONFIG_FREEZE */

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_VM_MAP_H_ */