/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <sys/cdefs.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <vm/vm_map_store.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef  KERNEL_PRIVATE

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit,
	uint32_t                rsr_version);

#ifdef  MACH_KERNEL_PRIVATE

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)


#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* unsigned  */ access:8,               /* MAP_MEM_* */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1,              /* ... a VM map copy */
	/* boolean_t */ is_fully_owned:1;       /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};
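/*
 * Illustrative sketch (not part of this header): the "backing" union is
 * discriminated by the bitfield flags above, so callers are expected to
 * test them before touching the union, along these lines:
 *
 *	if (named_entry->is_sub_map) {
 *		vm_map_t submap = named_entry->backing.map;
 *		...
 *	} else {
 *		// the is_copy and is_object cases both go through a VM map copy
 *		vm_map_copy_t copy = named_entry->backing.copy;
 *		...
 *	}
 */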


/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
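/*
 * The same decision tree, as a minimal C sketch (illustrative only; the
 * real checks live in vm_map.c and the pmap layer):
 *
 *	if (entry->is_sub_map) {
 *		// nothing in a submap counts toward the footprint
 *	} else if (entry->iokit_mapped) {
 *		footprint += entry->vme_end - entry->vme_start;
 *	} else if (!entry->use_pmap) {
 *		// "alternate accounting": pmap_enter() must not account these pages
 *	} else {
 *		// pmap accounts pages as they are entered, as it sees fit
 *	}
 */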

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

struct vm_map_entry {
	struct vm_map_links     links;                      /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;

	union {
		vm_offset_t     vme_object_value;
		struct {
			vm_offset_t vme_atomic:1;           /* entry cannot be split/coalesced */
			vm_offset_t is_sub_map:1;           /* Is "object" a submap? */
			vm_offset_t vme_submap:VME_SUBMAP_BITS;
		};
		struct {
			uint32_t    vme_ctx_atomic : 1;
			uint32_t    vme_ctx_is_sub_map : 1;
			uint32_t    vme_context : 30;
			vm_page_object_t vme_object_or_delta;
		};
	};

	unsigned long long
	/* vm_tag_t          */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t         */ is_shared:1,                /* region is shared */
	/* boolean_t         */ __unused1:1,
	/* boolean_t         */ in_transition:1,            /* Entry being changed */
	/* boolean_t         */ needs_wakeup:1,             /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t     */ behavior:2,                 /* user paging behavior hint */
	/* boolean_t         */ needs_copy:1,               /* does the object need to be copied? */

	/* Only in task maps: */
#if defined(__arm64e__)
	/*
	 * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
	 * We reuse it here to keep track of mappings that have hardware support
	 * for read-only/read-write trusted paths.
	 */
	/* vm_prot_t-like    */ protection:3,               /* protection code */
	/* boolean_t         */ used_for_tpro:1,
#else /* __arm64e__ */
	/* vm_prot_t-like    */ protection:4,               /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

	/* vm_prot_t-like    */ max_protection:4,           /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t      */ inheritance:2,              /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *      use a nested pmap?
	 * else (i.e. if object):
	 *      use pmap accounting
	 *      for footprint?
	 */
	/* boolean_t         */ use_pmap:1,
	/* boolean_t         */ no_cache:1,                 /* should new pages be cached? */
	/* boolean_t         */ vme_permanent:1,            /* mapping cannot be removed */
	/* boolean_t         */ superpage_size:1,           /* use superpages of a certain size */
	/* boolean_t         */ map_aligned:1,              /* align to map's page size */
	/*
	 * zero out the wired pages of this entry
	 * if it is being deleted without unwiring them
	 */
	/* boolean_t         */ zero_wired_pages:1,
	/* boolean_t         */ used_for_jit:1,
	/* boolean_t         */ csm_associated:1,       /* code signing monitor will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t         */ iokit_acct:1,
	/* boolean_t         */ vme_resilient_codesign:1,
	/* boolean_t         */ vme_resilient_media:1,
	/* boolean_t         */ vme_xnu_user_debug:1,
	/* boolean_t         */ vme_no_copy_on_read:1,
	/* boolean_t         */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t         */ vme_kernel_object:1;        /* vme_object is kernel_object */

	unsigned short          wired_count;                /* can be paged if = 0 */
	unsigned short          user_wired_count;           /* for vm_wire */

#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif /* DEBUG */
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt;            /* btref_t */
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt;           /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};

#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
	vm_map_entry_t entry)
{
	__builtin_assume(entry->vme_submap);
	return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	__builtin_assume(((vm_offset_t)submap & 3) == 0);

	entry->is_sub_map = true;
	entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}
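/*
 * Illustrative round trip for the pointer packing above (not part of this
 * header): vm_map_t pointers are at least 4-byte aligned, so the low
 * VME_SUBMAP_SHIFT (2) bits are always zero and can be reclaimed for the
 * vme_atomic/is_sub_map flags:
 *
 *	VME_SUBMAP_SET(entry, submap);        // stores (vm_offset_t)submap >> 2
 *	assert(VME_SUBMAP(entry) == submap);  // (submap >> 2) << 2 == submap
 */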

static inline vm_object_t
_VME_OBJECT(
	vm_map_entry_t entry)
{
	vm_object_t object = kernel_object;

	if (!entry->vme_kernel_object) {
		object = VM_OBJECT_UNPACK(entry->vme_object_or_delta);
		__builtin_assume(object != kernel_object);
	}
	return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object || object == kernel_object) {
		entry->vme_object_or_delta = 0;
	} else {
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = (object == kernel_object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}

static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
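/*
 * Note (illustrative): because the low VME_OFFSET_SHIFT (12) bits are
 * dropped by the packing above, offsets stored in a map entry must be
 * 4K aligned; the assert3u() in VME_OFFSET_SET() catches violations:
 *
 *	VME_OFFSET_SET(entry, 0x5000);        // vme_offset = 0x5
 *	assert(VME_OFFSET(entry) == 0x5000);  // round trip is exact
 */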

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length,
	bool always)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length, always);
	if (object != VME_OBJECT(entry)) {
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
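/*
 * Worked example (illustrative; assumes 4K base pages and
 * SUPERPAGE_NBASEPAGES == 512, i.e. 2MB superpages as on x86_64):
 *
 *	SUPERPAGE_SIZE                 == 0x200000
 *	SUPERPAGE_ROUND_DOWN(0x234567) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x234567)   == 0x400000
 */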

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535


/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if CONFIG_MAP_RANGES
	struct mach_vm_range    user_range[UMEM_RANGE_COUNT]; /* user VM ranges */
#endif /* CONFIG_MAP_RANGES */
#if XNU_TARGET_OS_OSX
	vm_map_offset_t         vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                          /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	os_ref_atomic_t         map_refcnt;       /* Reference count */

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,              /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,        /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,           /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,      /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,            /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,          /* this map should never cause faults */
	/* boolean_t */ uses_user_ranges:1,      /* has the map been configured to use user VM ranges */
	/* reserved  */ pad:12;
	unsigned int            timestamp;       /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;
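/*
 * Typical usage pattern (a sketch, assuming the caller redoes the lookup
 * on failure; see vm_map_lookup_and_lock_object() and vm_map_verify()
 * below):
 *
 *	vm_map_version_t version;
 *	// a lookup records "version" while the map lock is held ...
 *	// ... the lock is dropped, work happens, then, with the lock
 *	// retaken:
 *	if (!vm_map_verify(map, &version)) {
 *		// the map changed in the meantime; redo the lookup
 *	}
 */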

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are two formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method. This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member. This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies. On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_KERNEL_BUFFER       2
	uint16_t                type;
	bool                    is_kernel_range;
	bool                    is_user_range;
	vm_map_range_id_t       orig_range;
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
	} c_u;
};


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone       (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone       (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone             (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr                 c_u.hdr
#define cpy_kdata               c_u.kdata
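/*
 * Sketch of how the 'c_u' union is discriminated (illustrative only):
 *
 *	switch (copy->type) {
 *	case VM_MAP_COPY_ENTRY_LIST:
 *		// walk copy->cpy_hdr.links like a map's entry list
 *		break;
 *	case VM_MAP_COPY_KERNEL_BUFFER:
 *		// copy->size bytes of data live at copy->cpy_kdata
 *		break;
 *	}
 */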

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        ((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END
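/*
 * Usage sketch (illustrative): writers take the lock exclusive, readers
 * shared; the timestamp only advances when an exclusive hold ends
 * (vm_map_unlock() or the downgrade above), which is what
 * vm_map_version_t comparisons rely on.
 *
 *	vm_map_lock(map);            // exclusive; entries may be mutated
 *	...
 *	vm_map_unlock(map);          // bumps map->timestamp
 *
 *	vm_map_lock_read(map);       // shared; lookups only
 *	...
 *	vm_map_unlock_read(map);     // timestamp unchanged
 *
 * When locking multiple maps, order the acquisitions by map address
 * (see vm_map.c::vm_remap()).
 */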

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/*
 * Look up the map entry containing the specified address, or the next
 * entry after it, in the given map
 */
extern boolean_t        vm_map_lookup_entry_or_next(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t        vm_map_lookup_entry_allow_pgz(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
#else /* !CONFIG_PROB_GZALLOC */
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif /* !CONFIG_PROB_GZALLOC */

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/*
 * Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified.
 */
extern kern_return_t    vm_map_lookup_and_lock_object(
	vm_map_t                *var_map,                               /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,                           /* OUT */
	vm_object_t             *object,                                /* OUT */
	vm_object_offset_t      *offset,                                /* OUT */
	vm_prot_t               *out_prot,                              /* OUT */
	boolean_t               *wired,                                 /* OUT */
	vm_object_fault_info_t  fault_info,                             /* OUT */
	vm_map_t                *real_map,                              /* OUT */
	bool                    *contended);                            /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);                              /* REF */


/*
 *	Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)     /* Lowest valid address in a map */

#define         vm_map_max(map) ((map)->max_offset)     /* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)   /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	                          (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
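/*
 * Typical wait loop for an in_transition entry (a sketch of the pattern
 * used throughout vm_map.c; the map lock is held exclusive on entry):
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT); // sleeps, retakes the lock
 *		// re-lookup the entry: the map may have changed while asleep
 *	}
 *
 * The thread that clears in_transition checks needs_wakeup and calls
 * vm_map_entry_wakeup(map).
 */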


/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);

#if XNU_PLATFORM_MacOSX

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t           copy);

#endif /* XNU_PLATFORM_MacOSX */

/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t   vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	vm_map_kernel_flags_t   vmk_flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);

extern void             vm_map_inherit_limits(
	vm_map_t                new_map,
	const struct _vm_map   *old_map);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);                         /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t       flavor,
	vm_region_info_t         info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

extern void vm_map_region_top_walk(
	vm_map_entry_t entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t map,
	vm_map_offset_t va,
	vm_map_entry_t entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t look_for_pages,
	mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	const char             *vmrr_name;
	vm_map_offset_t         vmrr_addr;
	vm_map_size_t           vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool                    vm_is64bit,
	const struct vm_reserved_region **regions);
#endif /* MACH_KERNEL_PRIVATE */

/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
	vm_map_read_t        map);

extern vm_map_t         vm_map_switch(
	vm_map_t                map);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern void vm_map_will_allocate_early_map(
	vm_map_t               *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t             delta);

extern void vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta);

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

extern void             vm_map_require(
	vm_map_t                map);

extern void             vm_map_copy_require(
	vm_map_copy_t           copy);

extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);

#endif /* XNU_KERNEL_PRIVATE */

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)


/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);

/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t    vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);                          /* OUT */

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,                           /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);                         /* OUT */


extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t      tmap);

extern boolean_t        vm_map_tpro(
	vm_map_t                map);

extern void             vm_map_set_tpro(
	vm_map_t                map);

extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);

extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);

#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int              vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map);

extern int              vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
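/*
 * Usage sketch (illustrative): reject a caller-supplied (addr, size)
 * pair whose end would wrap around the address space before using it:
 *
 *	if (vm_map_range_overflows(addr, size)) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */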

#ifdef XNU_KERNEL_PRIVATE

/* Support for vm_map ranges */
extern kern_return_t    vm_map_range_configure(
	vm_map_t                map);

extern void             vm_map_range_fork(
	vm_map_t                new_map,
	vm_map_t                old_map);

extern int              vm_map_get_user_range(
	vm_map_t                map,
	vm_map_range_id_t       range_id,
	mach_vm_range_t         range);

/*!
 * @function vm_map_kernel_flags_update_range_id()
 *
 * @brief
 * Updates the @c vmkf_range_id field with the adequate value
 * according to the policy for the specified map and the tag set in
 * @c vmk_flags.
 *
 * @discussion
 * This function is meant to be called by Mach VM entry points,
 * which matters for the kernel: allocations with pointers _MUST_
 * be allocated with @c kmem_*() functions.
 *
 * If the range ID is already set, it is preserved.
 */
extern void             vm_map_kernel_flags_update_range_id(
	vm_map_kernel_flags_t  *flags,
	vm_map_t                map);

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);

#endif /* XNU_KERNEL_PRIVATE */
1498 #ifdef  MACH_KERNEL_PRIVATE
1499 
1500 
1501 /*
1502  * Internal macros for rounding and truncation of vm_map offsets and sizes
1503  */
1504 #define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
1505 #define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * Helps ubsan and codegen in general.
	 * We cannot use PAGE_{MIN,MAX}_SHIFT here because
	 * testing code exercises 16k-aligned maps
	 * on 4k-only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
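
/*
 * Illustrative values: for a 16K map, VM_MAP_PAGE_SHIFT() is 14, so
 * VM_MAP_PAGE_SIZE() is 0x4000 and VM_MAP_PAGE_MASK() is 0x3FFF;
 * VM_MAP_PAGE_ALIGNED(0x8000, 0x3FFF) is true, while
 * VM_MAP_PAGE_ALIGNED(0x5000, 0x3FFF) is false.
 */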

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}
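
/*
 * Summary (as the inlines above read): on non-macOS targets every map
 * is "alien", so W^X violations strip execute rather than fail, JIT
 * regions may be placed at random addresses, and JIT inherit/sharing/
 * copy as well as multiple JIT regions are disallowed; on macOS the
 * stricter behavior applies only to maps explicitly marked alien.
 */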

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
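
/*
 * Usage note (illustrative): vm_prot_to_wimg() leaves *wimg untouched
 * for MAP_MEM_NOOP and for unrecognized values, so callers should
 * initialize the output first, e.g.:
 *
 *	unsigned int wimg = VM_WIMG_USE_DEFAULT;
 *	vm_prot_to_wimg(MAP_MEM_WCOMB, &wimg);	// now VM_WIMG_WCOMB
 */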

static inline boolean_t
vm_map_always_shadow(vm_map_t map)
{
	if (map->mapped_in_other_pmaps) {
		/*
		 * This is a submap, mapped in other maps.
		 * Even if a VM object is mapped only once in this submap,
		 * the submap itself could be mapped multiple times,
		 * so vm_object_shadow() should always create a shadow
		 * object, even if the object has only 1 reference.
		 */
		return TRUE;
	}
	return FALSE;
}

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);

#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
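
/*
 * Note: these expand to the same arithmetic as the MACH_KERNEL_PRIVATE
 * VM_MAP_ROUND_PAGE()/VM_MAP_TRUNC_PAGE() macros above, but are
 * available to all consumers of this header.
 */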

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING

extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);

#endif /* CONFIG_DYNAMIC_CODE_SIGNING */

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif /* CONFIG_FREEZE */
#if XNU_KERNEL_PRIVATE

boolean_t        kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t        vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);

#endif /* XNU_KERNEL_PRIVATE */

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif  /* KERNEL_PRIVATE */

__END_DECLS

#endif  /* _VM_VM_MAP_H_ */