/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_MAP_XNU_H_
#define _VM_VM_MAP_XNU_H_

#ifdef XNU_KERNEL_PRIVATE

#include <mach/vm_types.h>
#include <sys/cdefs.h>
#include <vm/vm_map.h>


__BEGIN_DECLS

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit,
	uint32_t                rsr_version);



#ifdef  MACH_KERNEL_PRIVATE

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)

#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read and write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* unsigned  */ access:8,               /* MAP_MEM_* */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1,              /* ... a VM map copy */
	/* boolean_t */ is_fully_owned:1;       /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgeable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
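/*
 * Illustrative sketch only (not part of this header): the decision chain
 * above, written out as a hypothetical predicate.  The function name is
 * invented for illustration; the prose above says "iokit_mapped", which
 * corresponds to the iokit_acct bit in this version of vm_map_entry.
 *
 *	static bool
 *	vme_uses_pmap_footprint_accounting(vm_map_entry_t entry)
 *	{
 *		if (entry->is_sub_map) {
 *			return false;   // submap contents never count for the footprint
 *		}
 *		if (entry->iokit_acct) {
 *			return false;   // whole virtual size counted at entry/removal
 *		}
 *		return entry->use_pmap; // FALSE => "alternate accounting"
 *	}
 */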

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

struct vm_map_entry {
	struct vm_map_links     links;                      /* links to other entries */
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;

	union {
		vm_offset_t     vme_object_value;
		struct {
			vm_offset_t vme_atomic:1;           /* entry cannot be split/coalesced */
			vm_offset_t is_sub_map:1;           /* Is "object" a submap? */
			vm_offset_t vme_submap:VME_SUBMAP_BITS;
		};
		struct {
			uint32_t    vme_ctx_atomic : 1;
			uint32_t    vme_ctx_is_sub_map : 1;
			uint32_t    vme_context : 30;

			/**
			 * If vme_kernel_object==1 && KASAN,
			 * vme_object_or_delta holds the delta.
			 *
			 * If vme_kernel_object==1 && !KASAN,
			 * vme_tag_btref holds a btref when vme_alias is equal to the "vmtaglog"
			 * boot-arg.
			 *
			 * If vme_kernel_object==0,
			 * vme_object_or_delta holds the packed vm object.
			 */
			union {
				vm_page_object_t vme_object_or_delta;
				btref_t vme_tag_btref;
			};
		};
	};

	unsigned long long
	/* vm_tag_t          */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t         */ is_shared:1,                /* region is shared */
	/* boolean_t         */ __unused1:1,
	/* boolean_t         */ in_transition:1,             /* Entry being changed */
	/* boolean_t         */ needs_wakeup:1,             /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t     */ behavior:2,                 /* user paging behavior hint */
	/* boolean_t         */ needs_copy:1,               /* object need to be copied? */

	/* Only in task maps: */
#if defined(__arm64e__)
	/*
	 * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
	 * We reuse it here to keep track of mappings that have hardware support
	 * for read-only/read-write trusted paths.
	 */
	/* vm_prot_t-like    */ protection:3,               /* protection code */
	/* boolean_t         */ used_for_tpro:1,
#else /* __arm64e__ */
	/* vm_prot_t-like    */ protection:4,               /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

	/* vm_prot_t-like    */ max_protection:4,           /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t      */ inheritance:2,              /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *      use a nested pmap?
	 * else (i.e. if object):
	 *      use pmap accounting
	 *      for footprint?
	 */
	/* boolean_t         */ use_pmap:1,
	/* boolean_t         */ no_cache:1,                 /* should new pages be cached? */
	/* boolean_t         */ vme_permanent:1,            /* mapping cannot be removed */
	/* boolean_t         */ superpage_size:1,           /* use superpages of a certain size */
	/*
	 * zero out the wired pages of this entry
	 * if it is being deleted without unwiring them
	 */
	/* boolean_t         */ zero_wired_pages:1,
	/* boolean_t         */ used_for_jit:1,
	/* boolean_t         */ csm_associated:1,           /* code signing monitor will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t         */ iokit_acct:1,
	/* boolean_t         */ vme_resilient_codesign:1,
	/* boolean_t         */ vme_resilient_media:1,
	/* boolean_t         */ vme_xnu_user_debug:1,
	/* boolean_t         */ vme_no_copy_on_read:1,
	/* boolean_t         */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t         */ vme_kernel_object:1,        /* vme_object is a kernel_object */
	/* boolean_t         */ __unused:1;

	unsigned short          wired_count;                /* can be paged if = 0 */
	unsigned short          user_wired_count;           /* for vm_wire */

#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif /* DEBUG */
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt;            /* btref_t */
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt;           /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};

#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
	vm_map_entry_t entry)
{
	__builtin_assume(entry->vme_submap);
	return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	__builtin_assume(((vm_offset_t)submap & 3) == 0);

	entry->is_sub_map = true;
	entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}
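/*
 * Illustrative sketch only: a submap pointer is at least 4-byte aligned, so
 * its two always-zero low bits are reclaimed for vme_atomic and is_sub_map,
 * and the pointer itself is stored shifted right by VME_SUBMAP_SHIFT (2).
 * The round trip is:
 *
 *	VME_SUBMAP_SET(entry, submap);          // pack: submap >> 2
 *	assert(entry->is_sub_map);
 *	vm_map_t same = VME_SUBMAP(entry);      // unpack: vme_submap << 2
 *	assert(same == submap);
 */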

static inline vm_object_t
_VME_OBJECT(
	vm_map_entry_t entry)
{
	vm_object_t object;

	if (!entry->vme_kernel_object) {
		object = VM_OBJECT_UNPACK(entry->vme_object_or_delta);
		__builtin_assume(!is_kernel_object(object));
	} else {
		object = kernel_object_default;
	}
	return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })


static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}
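/*
 * Illustrative sketch only: vme_alias and vme_offset share a single 64-bit
 * word, so object offsets are stored pre-shifted down by VME_OFFSET_SHIFT
 * (12) and must have their low 12 bits clear.  A hypothetical setter
 * (invented name) would be the inverse of VME_OFFSET() above:
 *
 *	static inline void
 *	vme_offset_set_sketch(vm_map_entry_t entry, vm_object_offset_t offset)
 *	{
 *		assert((offset & VME_ALIAS_MASK) == 0); // page-aligned offset
 *		entry->vme_offset = offset >> VME_OFFSET_SHIFT;
 *	}
 */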


#if (DEBUG || DEVELOPMENT) && !KASAN
#define VM_BTLOG_TAGS 1
#else
#define VM_BTLOG_TAGS 0
#endif


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE - 1) & SUPERPAGE_MASK)
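/*
 * Worked example (values assume SUPERPAGE_NBASEPAGES == 512 with a 4K
 * PAGE_SIZE, i.e. 2MB superpages; both are architecture dependent):
 *
 *	SUPERPAGE_SIZE                  == 0x200000
 *	SUPERPAGE_MASK                  == ~0x1FFFFF
 *	SUPERPAGE_ROUND_DOWN(0x234567)  == 0x200000
 *	SUPERPAGE_ROUND_UP(0x234567)    == 0x400000
 */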

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535

typedef struct vm_map_user_range {
	vm_map_address_t        vmur_min_address __kernel_data_semantics;

	vm_map_address_t        vmur_max_address : 56 __kernel_data_semantics;
	vm_map_range_id_t       vmur_range_id : 8;
} *vm_map_user_range_t;

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if __x86_64__
	vm_map_offset_t         vmmap_high_start;
#endif /* __x86_64__ */

	os_ref_atomic_t         map_refcnt;       /* Reference count */

#if CONFIG_MAP_RANGES
#define VM_MAP_EXTRA_RANGES_MAX 1024
	struct mach_vm_range    default_range;
	struct mach_vm_range    data_range;
	struct mach_vm_range    large_file_range;

	uint16_t                extra_ranges_count;
	vm_map_user_range_t     extra_ranges;
#endif /* CONFIG_MAP_RANGES */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                      /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,            /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,             /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,           /* this map should never cause faults */
	/* boolean_t */ uses_user_ranges:1,       /* has the map been configured to use user VM ranges */
	/* boolean_t */ tpro_enforcement:1,       /* enforce TPRO propagation */
	/* boolean_t */ corpse_source:1,          /* map is being used to create a corpse for diagnostics. */
	/* boolean_t */ cs_platform_binary:1,     /* map belongs to a platform binary */

#define VM_MAP_NOT_SEALED 0                       /* map is not sealed and may be freely modified. */
#define VM_MAP_WILL_BE_SEALED 1                   /* map will be sealed and is subject to limited modification. */
#define VM_MAP_SEALED 2                           /* map is sealed and should not be modified. */
	/* unsigned int */ vmmap_sealed:2,        /* sealed state of map, see definitions above. */
	/* reserved    */ res0:1,
	/* reserved    */ pad:6;
	unsigned int            timestamp;          /* Version number */
	/*
	 * Weak reference to the task that owns this map. This will be NULL if the
	 * map has terminated, so you must have a task reference to be able to safely
	 * access this. Under the map lock, you can safely acquire a task reference
	 * if owning_task is not NULL, since vm_map_terminate requires the map lock.
	 */
	task_t owning_task;

	/*
	 * A generation ID for maps that increments monotonically.
	 * This is a pointer type just so we get dPAC out-of-the-box, but
	 * conceptually it's just an ID.
	 * Note that this is not a unique object ID. In particular, fork()
	 * will produce a child map with the same ID as its parent.
	 */
	vm_map_serial_t serial_id;
};

#define VME_PREV(entry) VM_PREV_UNPACK((entry)->links.prev)
#define VMH_PREV(hdr) (VM_PREV_UNPACK((hdr)->links.prev))
#define VML_PREV(links) (VM_PREV_UNPACK((links)->prev))

static inline
void
VME_PREV_SET(vm_map_entry_t entry, vm_map_entry_t prev)
{
	entry->links.prev = VM_PREV_PACK(prev);
}

static inline
void
VMH_PREV_SET(struct vm_map_header * hdr, vm_map_entry_t prev)
{
	hdr->links.prev = VM_PREV_PACK(prev);
}

static inline
void
VML_PREV_SET(struct vm_map_links * links, vm_map_entry_t prev)
{
	links->prev = VM_PREV_PACK(prev);
}

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  (VME_PREV(vm_map_to_entry(map)))

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are two formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header "links" entry
 *		onto which the other entries that represent the copied
 *		region are chained.
 *
 *		The second format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method. This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member. This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies. On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_KERNEL_BUFFER       2
	uint16_t                type;
	bool                    is_kernel_range;
	bool                    is_user_range;
	vm_map_range_id_t       orig_range;
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		struct {
			void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
		} buffer_data;
	} c_u;
};


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone       (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone       (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone             (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr                 c_u.hdr
#define cpy_kdata               c_u.buffer_data.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        (VM_PREV_UNPACK((copy)->cpy_hdr.links.prev))


/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#include <vm/vm_lock_perf.h>

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0,                                          \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	vmlp_lock_event_unlocked(VMLP_EVENT_LOCK_REQ_EXCL, map); \
	assert(!vm_map_is_sealed(map));      \
	lck_rw_lock_exclusive(&(map)->lock); \
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_GOT_EXCL, map); \
	MACRO_END

#define vm_map_lock_unseal(map)                  \
	MACRO_BEGIN                              \
	DTRACE_VM(vm_map_lock_w);                \
	assert(vm_map_is_sealed(map));           \
	lck_rw_lock_exclusive(&(map)->lock);     \
	(map)->vmmap_sealed = VM_MAP_NOT_SEALED; \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_UNLOCK_EXCL, map); \
	assert(!vm_map_is_sealed(map)); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	vmlp_lock_event_unlocked(VMLP_EVENT_LOCK_REQ_SH, map); \
	lck_rw_lock_shared(&(map)->lock); \
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_GOT_SH, map); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_UNLOCK_SH, map); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_DOWNGRADE, map); \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END

#define vm_map_lock_assert_held(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
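/*
 * Illustrative sketch only: the typical pattern is to take the lock in the
 * weakest sufficient mode and let vm_map_unlock() bump the timestamp so
 * stale vm_map_version_t lookups can be detected:
 *
 *	vm_map_lock(map);                       // exclusive: will modify
 *	vm_map_lock_assert_exclusive(map);
 *	// ... mutate entries ...
 *	vm_map_lock_write_to_read(map);         // downgrade, keep reading
 *	// ... read-only work ...
 *	vm_map_unlock_read(map);
 */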

/*
 *	Exported procedures that operate on vm_map_t.
 */

/*
 * Look up the map entry containing the specified address in the given map,
 * or, if no entry contains it, the entry immediately preceding the address.
 */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
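/*
 * Illustrative sketch only (the caller must hold the map lock):
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// TRUE: entry->vme_start <= addr < entry->vme_end
 *	} else {
 *		// FALSE: entry precedes addr (or is vm_map_to_entry(map))
 *	}
 *	vm_map_unlock_read(map);
 */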


/*
 *	Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)     /* Lowest valid address in a map */

#define         vm_map_max(map) ((map)->max_offset)     /* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)   /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
static inline wait_result_t
_vm_map_entry_wait_helper(vm_map_t map, wait_interrupt_t interruptible)
{
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_SLEEP_BEGIN, map);
	map->timestamp++;
	wait_result_t res = lck_rw_sleep(&map->lock, LCK_SLEEP_EXCLUSIVE | LCK_SLEEP_PROMOTED_PRI,
	    (event_t)&map->hdr, interruptible);
	vmlp_lock_event_locked(VMLP_EVENT_LOCK_SLEEP_END, map);
	return res;
}
#define vm_map_entry_wait(map, interruptible) _vm_map_entry_wait_helper((map), (interruptible))

#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))


extern void             vm_map_inherit_limits(
	vm_map_t                new_map,
	const struct _vm_map   *old_map);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);

#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
#define VM_MAP_FORK_SHARE_IF_OWNED              0x00000008


extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);


/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	const char             *vmrr_name;
	vm_map_offset_t         vmrr_addr;
	vm_map_size_t           vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool                    vm_is64bit,
	const struct vm_reserved_region **regions);

/**
 * Explicitly preallocates a floating point save area. This function is defined
 * in the machine-dependent machine_routines.c files.
 */
extern void ml_fp_save_area_prealloc(void);

extern bool vm_map_is_sealed(
	vm_map_t                 map);

#endif /* MACH_KERNEL_PRIVATE */

/*
 * Read and write from a kernel buffer to a specified map.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                   *src_p,
	vm_map_offset_ut        dst_addr_u,
	vm_size_ut              size_u);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_ut        src_addr_u,
	void                   *dst_p,
	vm_size_ut              size_u);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

typedef struct {
	vm_map_t map;
	task_t task;
	boolean_t sec_overridden;
} vm_map_switch_context_t;
extern vm_map_switch_context_t vm_map_switch_with_sec_override(vm_map_t, boolean_t sec_override);
static inline vm_map_switch_context_t
vm_map_switch_to(vm_map_t map)
{
	return vm_map_switch_with_sec_override(map, FALSE);
}
extern void vm_map_switch_back(vm_map_switch_context_t ctx);
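/*
 * Illustrative sketch only: switching the current thread onto another
 * task's map is always bracketed, with the returned context handed back to
 * vm_map_switch_back():
 *
 *	vm_map_switch_context_t ctx = vm_map_switch_to(other_map);
 *	// ... operate on other_map as the current map ...
 *	vm_map_switch_back(ctx);
 */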

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
extern kern_return_t vm_map_csm_allow_jit(vm_map_t map);


extern void vm_map_will_allocate_early_map(
	vm_map_t               *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t             delta);

extern void vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta);

/* wire down a region */

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern boolean_t        vm_kernel_map_is_kernel(vm_map_t map);

/*!
 * @function vm_map_enter_mem_object_control()
 *
 * @brief
 * Enters a mapping of @c initial_size bytes at @c *address (subject to
 * fixed/anywhere semantics, see @c VM_FLAGS_FIXED/VM_FLAGS_ANYWHERE).
 * The pages will come from a memory object paged in by the @c control pager,
 * and the caller may specify an @c offset into the object.
 *
 * @param target_map     The map into which to enter the mapping.
 * @param address        [in]  Pointer to the address at which to enter the
 *                             mapping (or use as a hint for anywhere
 *                             mappings).
 *                             No alignment is required; the function will
 *                             round this down to a page boundary in the
 *                             @c target_map.
 *                       [out] On success, it will be filled with the address
 *                             at which the object data is made available, and
 *                             will have the same misalignment into
 *                             @c target_map as @c offset.
 *                             On failure, it remains unmodified.
 * @param initial_size   Size of the mapping to enter.
 *                       Must be non-zero.
 *                       No alignment is required.
 * @param mask           An alignment mask the mapping must respect.
 * @param vmk_flags      The vm map kernel flags to influence this call.
 * @param control        The pager-managed memory object which is the source
 *                       of the pages.
 * @param offset         The offset into the memory object to use when
 *                       paging.
 *                       @c vm_map_enter, which is called into by
 *                       @c vm_map_enter_mem_object_control, requires that
 *                       @c offset be page-aligned for either @c target_map
 *                       pages or kernel pages.
 * @param needs_copy     Boolean which can be set to request that the mapped
 *                       pages be a copy of the memory object's pages.
 * @param cur_protection Effective protection that should be set for the
 *                       mapping.
 * @param max_protection Max protection that should be allowed for the
 *                       mapping. Should at least cover @c cur_protection.
 * @param inheritance    Inheritance policy for the mapping.
 *
 * @returns @c KERN_SUCCESS if the mapping was successfully entered, an error
 *          code otherwise.
 */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                target_map,
	vm_map_offset_ut       *address,
	vm_map_size_ut          initial_size,
	vm_map_offset_ut        mask,
	vm_map_kernel_flags_t   vmk_flags,
	memory_object_control_t control,
	vm_object_offset_ut     offset,
	boolean_t               needs_copy,
	vm_prot_ut              cur_protection,
	vm_prot_ut              max_protection,
	vm_inherit_ut           inheritance);
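/*
 * Illustrative sketch only: mapping one page of a pager-backed object
 * read-only at an anywhere address.  "control" is assumed to come from an
 * existing pager setup path, the anywhere-flags helper is an assumption,
 * and the unsafe-type wrappers around the scalar arguments are elided for
 * readability:
 *
 *	vm_map_offset_ut addr = 0;      // hint; filled in on success
 *	kern_return_t kr;
 *
 *	kr = vm_map_enter_mem_object_control(target_map, &addr, PAGE_SIZE,
 *	    0, VM_MAP_KERNEL_FLAGS_ANYWHERE(), control, 0, FALSE,
 *	    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
 *	if (kr != KERN_SUCCESS) {
 *		// *address is left unmodified on failure
 *	}
 */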

/* Must be executed on a new task's map before the task is enabled for IPC access */
extern void vm_map_setup(vm_map_t map, task_t task); /* always succeeds */

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_ut       dst_addr_u,
	vm_map_copy_t           copy,
	vm_map_size_ut          copy_size_u,
	boolean_t               interruptible);

/* returns TRUE if size of vm_map_copy == *size, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t          *size);

extern kern_return_t    vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t       *dst_addr, /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_ut          copy_size);

extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

#if XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT
extern void             vm_map_set_extra_jumbo(
	vm_map_t                map);
#endif /* XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT */

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map,
	vm_map_offset_t         new_max_offset,
	bool                    extra_jumbo);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);

extern void             vm_commit_pagezero_status(vm_map_t      tmap);

extern void             vm_map_set_platform_binary(
	vm_map_t                map,
	bool                    is_platform_binary);
extern bool             vm_map_is_platform_binary(
	vm_map_t                map);

extern boolean_t        vm_map_tpro(
	vm_map_t                map);

extern void             vm_map_set_tpro(
	vm_map_t                map);



extern void             vm_map_set_tpro_enforcement(
	vm_map_t                map);

extern boolean_t        vm_map_set_tpro_range(
	vm_map_t                map,
	vm_map_address_t        start,
	vm_map_address_t        end);

extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);

extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);

#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */


extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern bool vm_map_range_overflows(
	vm_map_t                map,
	vm_map_offset_t         addr,
	vm_map_size_t           size);

/* Support for vm_map ranges */
extern kern_return_t    vm_map_range_configure(
	vm_map_t                map,
	bool                    needs_extra_jumbo_va);


/*!
 * @function vm_map_kernel_flags_update_range_id()
 *
 * @brief
 * Updates the @c vmkf_range_id field with the adequate value
 * according to the policy for the specified map and the tag set in @c vmk_flags.
 *
 * @discussion
 * This function is meant to be called by Mach VM entry points.
 * This matters for the kernel: allocations that hold pointers _MUST_
 * be made with @c kmem_*() functions.
 *
 * If the range ID is already set, it is preserved.
 */
extern void             vm_map_kernel_flags_update_range_id(
	vm_map_kernel_flags_t  *flags,
	vm_map_t                map,
	vm_map_size_t           size);

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_ut        offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_ut        start_offset,
	vm_map_offset_ut        end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * Help ubsan and codegen in general.
	 * We cannot use PAGE_{MIN,MAX}_SHIFT here because of testing code
	 * that exercises 16k-aligned maps on 4k-only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
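/*
 * Worked example (assuming a 16K map, i.e. VM_MAP_PAGE_SHIFT(map) == 14):
 *
 *	VM_MAP_PAGE_SIZE(map)                   == 0x4000
 *	VM_MAP_PAGE_MASK(map)                   == 0x3FFF
 *	VM_MAP_ROUND_PAGE(0x4001, 0x3FFF)       == 0x8000
 *	VM_MAP_TRUNC_PAGE(0x7FFF, 0x3FFF)       == 0x4000
 *	VM_MAP_PAGE_ALIGNED(0x8000, 0x3FFF)     == true
 */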

#endif /* MACH_KERNEL_PRIVATE */


extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);

extern void vm_map_guard_exception(vm_map_offset_t gap_start, unsigned reason);

extern bool vm_map_is_corpse_source(vm_map_t map);
extern void vm_map_set_corpse_source(vm_map_t map);
extern void vm_map_unset_corpse_source(vm_map_t map);

#if CONFIG_DYNAMIC_CODE_SIGNING

extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);

#endif /* CONFIG_DYNAMIC_CODE_SIGNING */
#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

__enum_decl(freezer_error_code_t, int, {
	FREEZER_ERROR_GENERIC = -1,
	FREEZER_ERROR_EXCESS_SHARED_MEMORY = -2,
	FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO = -3,
	FREEZER_ERROR_NO_COMPRESSOR_SPACE = -4,
	FREEZER_ERROR_NO_SWAP_SPACE = -5,
	FREEZER_ERROR_NO_SLOTS = -6,
});


#endif /* CONFIG_FREEZE */

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((int*)((map)->pmap->ledger)+(ledger_id)))

#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

extern kern_return_t vm_map_entries_foreach(vm_map_t map, kern_return_t (^count_handler)(int nentries),
    kern_return_t (^entry_handler)(void* entry));
extern kern_return_t vm_map_dump_entry_and_compressor_pager(void* entry, char *buf, size_t *count);

extern void vm_map_testing_make_sealed_submap(
	vm_map_t            parent_map,
	mach_vm_address_t   start,
	mach_vm_address_t   end);

extern void vm_map_testing_remap_submap(
	vm_map_t            parent_map,
	mach_vm_address_t   submap_base_address,
	mach_vm_address_t   start,
	mach_vm_address_t   end,
	mach_vm_address_t   offset);

#endif /* DEVELOPMENT || DEBUG */

boolean_t        kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t        vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);


#ifdef VM_SCAN_FOR_SHADOW_CHAIN
int vm_map_shadow_max(vm_map_t map);
#endif /* VM_SCAN_FOR_SHADOW_CHAIN */

bool vm_map_is_map_size_valid(vm_map_t target_map, vm_size_t size, bool no_soft_limit);

/* Returns the map's ID, or VM_MAP_SERIAL_NONE if the input map is NULL */
vm_map_serial_t vm_map_maybe_serial_id(vm_map_t maybe_vm_map);

__END_DECLS

#endif /* XNU_KERNEL_PRIVATE */
#endif  /* _VM_VM_MAP_XNU_H_ */