/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_MAP_XNU_H_
#define _VM_VM_MAP_XNU_H_

#ifdef XNU_KERNEL_PRIVATE

#include <sys/cdefs.h>
#include <vm/vm_map.h>


__BEGIN_DECLS

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit,
	uint32_t                rsr_version);



#ifdef  MACH_KERNEL_PRIVATE

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)

#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* unsigned  */ access:8,               /* MAP_MEM_* */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1,              /* ... a VM map copy */
	/* boolean_t */ is_fully_owned:1;       /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */


#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)
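
/*
 * Illustrative note (editor's sketch, not from the original source):
 * with VME_ALIAS_BITS == 12, the vme_alias/vme_offset bitfield pair in a
 * vm_map_entry packs the VM tag and object offset into one 64-bit word,
 * roughly:
 *
 *	63                                 12 11           0
 *	+------------------------------------+-------------+
 *	| vme_offset (VME_OFFSET_BITS == 52) |  vme_alias  |
 *	+------------------------------------+-------------+
 *
 * Since the low VME_OFFSET_SHIFT (12) bits of the byte offset are not
 * stored, offsets are expected to be at least 4K-aligned; VME_OFFSET()
 * below reconstructs the byte offset by shifting left again.
 */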

struct vm_map_entry {
	struct vm_map_links     links;                      /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;

	union {
		vm_offset_t     vme_object_value;
		struct {
			vm_offset_t vme_atomic:1;           /* entry cannot be split/coalesced */
			vm_offset_t is_sub_map:1;           /* Is "object" a submap? */
			vm_offset_t vme_submap:VME_SUBMAP_BITS;
		};
		struct {
			uint32_t    vme_ctx_atomic : 1;
			uint32_t    vme_ctx_is_sub_map : 1;
			uint32_t    vme_context : 30;

			/**
			 * If vme_kernel_object==1 && KASAN,
			 * vme_object_or_delta holds the delta.
			 *
			 * If vme_kernel_object==1 && !KASAN,
			 * vme_tag_btref holds a btref when vme_alias is equal to the "vmtaglog"
			 * boot-arg.
			 *
			 * If vme_kernel_object==0,
			 * vme_object_or_delta holds the packed vm object.
			 */
			union {
				vm_page_object_t vme_object_or_delta;
				btref_t vme_tag_btref;
			};
		};
	};

	unsigned long long
	/* vm_tag_t          */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t         */ is_shared:1,                /* region is shared */
	/* boolean_t         */ __unused1:1,
	/* boolean_t         */ in_transition:1,            /* Entry being changed */
	/* boolean_t         */ needs_wakeup:1,             /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t     */ behavior:2,                 /* user paging behavior hint */
	/* boolean_t         */ needs_copy:1,               /* object need to be copied? */

	/* Only in task maps: */
#if defined(__arm64e__)
	/*
	 * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
	 * We reuse it here to keep track of mappings that have hardware support
	 * for read-only/read-write trusted paths.
	 */
	/* vm_prot_t-like    */ protection:3,               /* protection code */
	/* boolean_t         */ used_for_tpro:1,
#else /* __arm64e__ */
	/* vm_prot_t-like    */ protection:4,               /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

	/* vm_prot_t-like    */ max_protection:4,           /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t      */ inheritance:2,              /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *      use a nested pmap?
	 * else (i.e. if object):
	 *      use pmap accounting
	 *      for footprint?
	 */
	/* boolean_t         */ use_pmap:1,
	/* boolean_t         */ no_cache:1,                 /* should new pages be cached? */
	/* boolean_t         */ vme_permanent:1,            /* mapping cannot be removed */
	/* boolean_t         */ superpage_size:1,           /* use superpages of a certain size */
	/* boolean_t         */ map_aligned:1,              /* align to map's page size */
	/*
	 * zero out the wired pages of this entry
	 * if it is being deleted without unwiring them
	 */
	/* boolean_t         */ zero_wired_pages:1,
	/* boolean_t         */ used_for_jit:1,
	/* boolean_t         */ csm_associated:1,           /* code signing monitor will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t         */ iokit_acct:1,
	/* boolean_t         */ vme_resilient_codesign:1,
	/* boolean_t         */ vme_resilient_media:1,
	/* boolean_t         */ vme_xnu_user_debug:1,
	/* boolean_t         */ vme_no_copy_on_read:1,
	/* boolean_t         */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t         */ vme_kernel_object:1;        /* vme_object is a kernel_object */

	unsigned short          wired_count;                /* can be paged if = 0 */
	unsigned short          user_wired_count;           /* for vm_wire */

#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif /* DEBUG */
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt;            /* btref_t */
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt;           /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};

#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
	vm_map_entry_t entry)
{
	__builtin_assume(entry->vme_submap);
	return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	__builtin_assume(((vm_offset_t)submap & 3) == 0);

	entry->is_sub_map = true;
	entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}

static inline vm_object_t
_VME_OBJECT(
	vm_map_entry_t entry)
{
	vm_object_t object;

	if (!entry->vme_kernel_object) {
		object = VM_OBJECT_UNPACK(entry->vme_object_or_delta);
		__builtin_assume(!is_kernel_object(object));
	} else {
		object = kernel_object_default;
	}
	return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })
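
/*
 * Usage sketch (editor's illustration, not from the original source):
 * an entry's backing pointer is either a submap or a VM object, and the
 * accessors assert which one the caller expects:
 *
 *	if (entry->is_sub_map) {
 *		vm_map_t submap = VME_SUBMAP(entry);
 *		... recurse into the submap ...
 *	} else {
 *		vm_object_t object = VME_OBJECT(entry);
 *		vm_object_offset_t offset = VME_OFFSET(entry);
 *		... operate on the object at that offset ...
 *	}
 */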


static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}


#if (DEBUG || DEVELOPMENT) && !KASAN
#define VM_BTLOG_TAGS 1
#else
#define VM_BTLOG_TAGS 0
#endif


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE - 1) & SUPERPAGE_MASK)
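
/*
 * Worked example (editor's illustration, not from the original source):
 * on a configuration where PAGE_SIZE == 4K and SUPERPAGE_NBASEPAGES == 512
 * (2MB superpages), SUPERPAGE_SIZE is 0x200000 and SUPERPAGE_MASK is
 * ~0x1FFFFF, so:
 *
 *	SUPERPAGE_ROUND_DOWN(0x2345678) == 0x2200000
 *	SUPERPAGE_ROUND_UP(0x2345678)   == 0x2400000
 */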

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535

typedef struct vm_map_user_range {
	vm_map_address_t        vmur_min_address __kernel_data_semantics;

	vm_map_address_t        vmur_max_address : 56 __kernel_data_semantics;
	vm_map_range_id_t       vmur_range_id : 8;
} *vm_map_user_range_t;

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if __x86_64__
	vm_map_offset_t         vmmap_high_start;
#endif /* __x86_64__ */

	os_ref_atomic_t         map_refcnt;       /* Reference count */

#if CONFIG_MAP_RANGES
#define VM_MAP_EXTRA_RANGES_MAX 1024
	struct mach_vm_range    default_range;
	struct mach_vm_range    data_range;
	struct mach_vm_range    large_file_range;

	uint16_t                extra_ranges_count;
	vm_map_user_range_t     extra_ranges;
#endif /* CONFIG_MAP_RANGES */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                      /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,            /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,             /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,           /* this map should never cause faults */
	/* boolean_t */ uses_user_ranges:1,       /* has the map been configured to use user VM ranges */
	/* boolean_t */ tpro_enforcement:1,       /* enforce TPRO propagation */
	/* boolean_t */ corpse_source:1,          /* map is being used to create a corpse for diagnostics. */
	/* reserved  */ res0:1,
	/* reserved  */ pad:9;
	unsigned int            timestamp;          /* Version number */
	/*
	 * Weak reference to the task that owns this map. This will be NULL if the
	 * map has terminated, so you must have a task reference to be able to safely
	 * access this. Under the map lock, you can safely acquire a task reference
	 * if owning_task is not NULL, since vm_map_terminate requires the map lock.
	 */
	task_t owning_task;

	/*
	 * A generation ID for maps that increments monotonically.
	 * This is a pointer type just so we get dPAC out-of-the-box, but
	 * conceptually it's just an ID.
	 * Note that this is not a unique object ID. In particular, fork()
	 * will produce a child map with the same ID as its parent.
	 */
	vm_map_serial_t serial_id;
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)
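
/*
 * Iteration sketch (editor's illustration, not from the original source):
 * the entry list is circular, with the map's own header links acting as
 * the sentinel, so a typical walk over all entries looks like:
 *
 *	vm_map_entry_t entry;
 *
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		... examine entry->vme_start / entry->vme_end ...
 *	}
 *
 * The map lock must be held (at least shared) across the walk.
 */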

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;
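
/*
 * Validation sketch (editor's illustration, not from the original source):
 * because the map's timestamp is bumped whenever the exclusive lock is
 * released (see vm_map_unlock() below), a saved version can confirm that
 * nothing changed in between:
 *
 *	vm_map_version_t version;
 *
 *	vm_map_lock_read(map);
 *	version.main_timestamp = map->timestamp;
 *	... lookup, drop the lock, do slow work, re-take the lock ...
 *	if (version.main_timestamp != map->timestamp) {
 *		... the map may have changed; redo the lookup ...
 *	}
 */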

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are two formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method. This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member. This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies. On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_KERNEL_BUFFER       2
	uint16_t                type;
	bool                    is_kernel_range;
	bool                    is_user_range;
	vm_map_range_id_t       orig_range;
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
	} c_u;
};


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone       (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone       (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone             (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr                 c_u.hdr
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
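
/*
 * Dispatch sketch (editor's illustration, not from the original source):
 * consumers of a vm_map_copy_t pick the union member based on "type":
 *
 *	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
 *		... walk the entries chained off copy->cpy_hdr.links ...
 *	} else {
 *		assert(copy->type == VM_MAP_COPY_KERNEL_BUFFER);
 *		... copy->size bytes live in the copy->cpy_kdata buffer ...
 *	}
 */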

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        ((copy)->cpy_hdr.links.prev)


/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END

#define vm_map_lock_assert_held(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
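
/*
 * Lock-ordering sketch (editor's illustration, not from the original
 * source): per the note above, when two maps must be held at once, take
 * the locks in ascending map address order to avoid deadlock:
 *
 *	vm_map_t first  = (src_map < dst_map) ? src_map : dst_map;
 *	vm_map_t second = (src_map < dst_map) ? dst_map : src_map;
 *
 *	vm_map_lock(first);
 *	if (second != first) {
 *		vm_map_lock(second);
 *	}
 *	... work on both maps ...
 */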

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */


/*
 *	Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)
                                        /* Lowest valid address in a map */

#define         vm_map_max(map) ((map)->max_offset)
                                        /* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)
                                        /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	                          (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
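
/*
 * Wait-loop sketch (editor's illustration, not from the original source):
 * a waiter records that it wants a wakeup, sleeps (the map lock is
 * dropped while asleep and re-taken exclusive on return), and then
 * revalidates its view of the map:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		... re-lookup the entry; the map may have changed ...
 *	}
 *
 * The thread that clears in_transition checks needs_wakeup and, if set,
 * calls vm_map_entry_wakeup(map).
 */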


extern void             vm_map_inherit_limits(
	vm_map_t                new_map,
	const struct _vm_map   *old_map);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);

#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
#define VM_MAP_FORK_SHARE_IF_OWNED              0x00000008


extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);


/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	const char             *vmrr_name;
	vm_map_offset_t         vmrr_addr;
	vm_map_size_t           vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool                    vm_is64bit,
	const struct vm_reserved_region **regions);

/**
 * Explicitly preallocates a floating point save area. This function is defined
 * in the machine-dependent machine_routines.c files.
 */
extern void ml_fp_save_area_prealloc(void);

#endif /* MACH_KERNEL_PRIVATE */

/*
 * Read and write from a kernel buffer to a specified map.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                   *src_p,
	vm_map_offset_ut        dst_addr_u,
	vm_size_ut              size_u);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_ut        src_addr_u,
	void                   *dst_p,
	vm_size_ut              size_u);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

typedef struct {
	vm_map_t map;
	task_t task;
} vm_map_switch_context_t;
extern vm_map_switch_context_t vm_map_switch_to(vm_map_t map);
extern void vm_map_switch_back(vm_map_switch_context_t ctx);
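
/*
 * Usage sketch (editor's illustration, not from the original source):
 * vm_map_switch_to() temporarily makes "map" the current thread's
 * addressable map and returns the previous context, which must be
 * restored with vm_map_switch_back():
 *
 *	vm_map_switch_context_t ctx = vm_map_switch_to(map);
 *	... e.g. user copies performed against the target map ...
 *	vm_map_switch_back(ctx);
 */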

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
extern kern_return_t vm_map_csm_allow_jit(vm_map_t map);


extern void vm_map_will_allocate_early_map(
	vm_map_t               *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t             delta);

extern void vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta);

/* wire down a region */

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern boolean_t        vm_kernel_map_is_kernel(vm_map_t map);

/*!
 * @function vm_map_enter_mem_object_control()
 *
 * @brief
 * Enters a mapping of @c initial_size bytes at @c *address (subject to
 * fixed/anywhere semantics, see @c VM_FLAGS_FIXED/VM_FLAGS_ANYWHERE ).
 * The pages will come from a memory object paged in by the @c control pager,
 * and the caller may specify an @c offset into the object.
 *
 * @param target_map     The map into which to enter the mapping.
 * @param address        [in]  Pointer to the address at which to enter the
 *                             mapping (or use as a hint for anywhere
 *                             mappings).
 *                             No alignment is required, the function will
 *                             round this down to a page boundary in the
 *                             @c target_map.
 *                       [out] On success, it will be filled with the address
 *                             at which the object data is made available, and
 *                             will have the same misalignment into
 *                             @c target_map as @c offset.
 *                             On failure, it remains unmodified.
 * @param initial_size   Size of the mapping to enter.
 *                       Must be non-zero.
 *                       No alignment is required.
 * @param mask           An alignment mask the mapping must respect.
 * @param vmk_flags      The vm map kernel flags to influence this call.
 * @param control        The pager-managed memory object which is the source
 *                       of the pages.
 * @param offset         The offset into the memory object to use when
 *                       paging.
 *                       @c vm_map_enter, which is called into by
 *                       @c vm_map_enter_mem_object_control, requires that
 *                       @c offset be page-aligned for either @c target_map
 *                       pages or kernel pages.
 * @param needs_copy     Boolean which can be set to request that the mapped
 *                       pages be a copy of the memory object's pages.
 * @param cur_protection Effective protection that should be set for the
 *                       mapping.
 * @param max_protection Max protection that should be allowed for the
 *                       mapping. Should at least cover @c cur_protection.
 * @param inheritance    Inheritance policy for the mapping.
 *
 * @returns @c KERN_SUCCESS if the mapping was successfully entered, an error
 *          code otherwise.
 */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                target_map,
	vm_map_offset_ut       *address,
	vm_map_size_ut          initial_size,
	vm_map_offset_ut        mask,
	vm_map_kernel_flags_t   vmk_flags,
	memory_object_control_t control,
	vm_object_offset_ut     offset,
	boolean_t               needs_copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance);

/* Must be executed on a new task's map before the task is enabled for IPC access */
extern void vm_map_setup(vm_map_t map, task_t task); /* always succeeds */

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_ut       dst_addr_u,
	vm_map_copy_t           copy,
	vm_map_size_ut          copy_size_u,
	boolean_t               interruptible);

/* returns TRUE if size of vm_map_copy == *size, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t          *size);

extern kern_return_t    vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t       *dst_addr, /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_ut          copy_size);

extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

#if XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT
extern void             vm_map_set_extra_jumbo(
	vm_map_t                map);
#endif /* XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT */

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map,
	vm_map_offset_t         new_max_offset,
	bool                    extra_jumbo);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);

extern void             vm_commit_pagezero_status(vm_map_t      tmap);

extern boolean_t        vm_map_tpro(
	vm_map_t                map);

extern void             vm_map_set_tpro(
	vm_map_t                map);



extern void             vm_map_set_tpro_enforcement(
	vm_map_t                map);

extern boolean_t        vm_map_set_tpro_range(
	vm_map_t                map,
	vm_map_address_t        start,
	vm_map_address_t        end);

extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);

extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);

#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */


extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern bool vm_map_range_overflows(
	vm_map_t                map,
	vm_map_offset_t         addr,
	vm_map_size_t           size);

/* Support for vm_map ranges */
extern kern_return_t    vm_map_range_configure(
	vm_map_t                map,
	bool                    needs_extra_jumbo_va);


/*!
 * @function vm_map_kernel_flags_update_range_id()
 *
 * @brief
 * Updates the @c vmkf_range_id field with the adequate value
 * according to the policy for the specified map and the tag set in
 * @c vmk_flags.
 *
 * @discussion
 * This function is meant to be called by Mach VM entry points,
 * which matters for the kernel: allocations with pointers _MUST_
 * be allocated with @c kmem_*() functions.
 *
 * If the range ID is already set, it is preserved.
 */
extern void             vm_map_kernel_flags_update_range_id(
	vm_map_kernel_flags_t  *flags,
	vm_map_t                map,
	vm_map_size_t           size);

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_ut        offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_ut        start_offset,
	vm_map_offset_ut        end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * help ubsan and codegen in general,
	 * cannot use PAGE_{MIN,MAX}_SHIFT
	 * because of testing code which
	 * tests 16k aligned maps on 4k only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
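
/*
 * Worked example (editor's illustration, not from the original source):
 * for a map with a 16K page size, VM_MAP_PAGE_SHIFT(map) == 14, so
 * VM_MAP_PAGE_SIZE(map) == 0x4000 and VM_MAP_PAGE_MASK(map) == 0x3FFF;
 * then:
 *
 *	VM_MAP_ROUND_PAGE(0x5001, 0x3FFF)   == 0x8000
 *	VM_MAP_TRUNC_PAGE(0x5001, 0x3FFF)   == 0x4000
 *	VM_MAP_PAGE_ALIGNED(0x8000, 0x3FFF) != 0
 */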

#endif /* MACH_KERNEL_PRIVATE */


extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);

extern void vm_map_guard_exception(vm_map_offset_t gap_start, unsigned reason);


extern bool vm_map_is_corpse_source(vm_map_t map);
extern void vm_map_set_corpse_source(vm_map_t map);
extern void vm_map_unset_corpse_source(vm_map_t map);

#if CONFIG_DYNAMIC_CODE_SIGNING

extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);

#endif /* CONFIG_DYNAMIC_CODE_SIGNING */
#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

__enum_decl(freezer_error_code_t, int, {
	FREEZER_ERROR_GENERIC = -1,
	FREEZER_ERROR_EXCESS_SHARED_MEMORY = -2,
	FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO = -3,
	FREEZER_ERROR_NO_COMPRESSOR_SPACE = -4,
	FREEZER_ERROR_NO_SWAP_SPACE = -5,
	FREEZER_ERROR_NO_SLOTS = -6,
});

#endif /* CONFIG_FREEZE */

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((int*)((map)->pmap->ledger)+(ledger_id)))

#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

extern kern_return_t vm_map_entries_foreach(vm_map_t map, kern_return_t (^count_handler)(int nentries),
    kern_return_t (^entry_handler)(void* entry));
extern kern_return_t vm_map_dump_entry_and_compressor_pager(void* entry, char *buf, size_t *count);

#endif /* DEVELOPMENT || DEBUG */

boolean_t        kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t        vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);


#ifdef VM_SCAN_FOR_SHADOW_CHAIN
int vm_map_shadow_max(vm_map_t map);
#endif

bool vm_map_is_map_size_valid(vm_map_t target_map, vm_size_t size, bool no_soft_limit);

/* Returns the map's ID or VM_MAP_SERIAL_NONE if the input map is NULL */
vm_map_serial_t vm_map_maybe_serial_id(vm_map_t maybe_vm_map);

__END_DECLS

#endif /* XNU_KERNEL_PRIVATE */
#endif  /* _VM_VM_MAP_XNU_H_ */