/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <sys/cdefs.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <vm/vm_map_store.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>
#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef  KERNEL_PRIVATE

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit,
	uint32_t                rsr_version);

#ifdef  MACH_KERNEL_PRIVATE

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)


#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)
/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 */

struct vm_named_entry {
	decl_lck_mtx_data(, Lock);              /* Synchronization */
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* unsigned  */ access:8,               /* MAP_MEM_* */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1,              /* ... a VM map copy */
	/* boolean_t */ is_fully_owned:1;       /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgeable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */
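
/*
 * Illustrative sketch (not part of this interface): the rules above,
 * folded into one hypothetical predicate.  The helper name is made up;
 * only the fields are real.
 *
 *	static bool
 *	vme_uses_pmap_accounting(vm_map_entry_t entry)
 *	{
 *		if (entry->is_sub_map) {
 *			return false;	// submaps never count for the footprint
 *		}
 *		if (entry->iokit_mapped) {
 *			return false;	// whole virtual size already accounted
 *		}
 *		return entry->use_pmap;	// else the pmap layer decides per page
 *	}
 */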

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)

/*
 * Track backtrace references for tagged kernel-object entries
 * (see vme_tag_btref below); used by VME_OBJECT_SET() and the
 * vme_btref_consider_and_*() helpers.
 */
#if (DEBUG || DEVELOPMENT) && !KASAN
#define VM_BTLOG_TAGS 1
#else
#define VM_BTLOG_TAGS 0
#endif

struct vm_map_entry {
	struct vm_map_links     links;                      /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;

	union {
		vm_offset_t     vme_object_value;
		struct {
			vm_offset_t vme_atomic:1;           /* entry cannot be split/coalesced */
			vm_offset_t is_sub_map:1;           /* Is "object" a submap? */
			vm_offset_t vme_submap:VME_SUBMAP_BITS;
		};
		struct {
			uint32_t    vme_ctx_atomic : 1;
			uint32_t    vme_ctx_is_sub_map : 1;
			uint32_t    vme_context : 30;

			/**
			 * If vme_kernel_object==1 && KASAN,
			 * vme_object_or_delta holds the delta.
			 *
			 * If vme_kernel_object==1 && !KASAN,
			 * vme_tag_btref holds a btref when vme_alias is equal to the "vmtaglog"
			 * boot-arg.
			 *
			 * If vme_kernel_object==0,
			 * vme_object_or_delta holds the packed vm object.
			 */
			union {
				vm_page_object_t vme_object_or_delta;
				btref_t vme_tag_btref;
			};
		};
	};

	unsigned long long
	/* vm_tag_t          */ vme_alias:VME_ALIAS_BITS,   /* entry VM tag */
	/* vm_object_offset_t*/ vme_offset:VME_OFFSET_BITS, /* offset into object */

	/* boolean_t         */ is_shared:1,                /* region is shared */
	/* boolean_t         */ __unused1:1,
	/* boolean_t         */ in_transition:1,            /* Entry being changed */
	/* boolean_t         */ needs_wakeup:1,             /* Waiters on in_transition */
	/* behavior is not defined for submap type */
	/* vm_behavior_t     */ behavior:2,                 /* user paging behavior hint */
	/* boolean_t         */ needs_copy:1,               /* object need to be copied? */

	/* Only in task maps: */
#if defined(__arm64e__)
	/*
	 * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
	 * We reuse it here to keep track of mappings that have hardware support
	 * for read-only/read-write trusted paths.
	 */
	/* vm_prot_t-like    */ protection:3,               /* protection code */
	/* boolean_t         */ used_for_tpro:1,
#else /* __arm64e__ */
	/* vm_prot_t-like    */ protection:4,               /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

	/* vm_prot_t-like    */ max_protection:4,           /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t      */ inheritance:2,              /* inheritance */

	/*
	 * use_pmap is overloaded:
	 * if "is_sub_map":
	 *      use a nested pmap?
	 * else (i.e. if object):
	 *      use pmap accounting
	 *      for footprint?
	 */
	/* boolean_t         */ use_pmap:1,
	/* boolean_t         */ no_cache:1,                 /* should new pages be cached? */
	/* boolean_t         */ vme_permanent:1,            /* mapping can not be removed */
	/* boolean_t         */ superpage_size:1,           /* use superpages of a certain size */
	/* boolean_t         */ map_aligned:1,              /* align to map's page size */
	/*
	 * zero out the wired pages of this entry
	 * if is being deleted without unwiring them
	 */
	/* boolean_t         */ zero_wired_pages:1,
	/* boolean_t         */ used_for_jit:1,
	/* boolean_t         */ csm_associated:1,       /* code signing monitor will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t         */ iokit_acct:1,
	/* boolean_t         */ vme_resilient_codesign:1,
	/* boolean_t         */ vme_resilient_media:1,
	/* boolean_t         */ vme_xnu_user_debug:1,
	/* boolean_t         */ vme_no_copy_on_read:1,
	/* boolean_t         */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t         */ vme_kernel_object:1;        /* vme_object is kernel_object */

	unsigned short          wired_count;                /* can be paged if = 0 */
	unsigned short          user_wired_count;           /* for vm_wire */

#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif /* DEBUG */
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt;            /* btref_t */
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt;           /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};

#define VME_ALIAS(entry) \
	((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
	vm_map_entry_t entry)
{
	__builtin_assume(entry->vme_submap);
	return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	__builtin_assume(((vm_offset_t)submap & 3) == 0);

	entry->is_sub_map = true;
	entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}

static inline vm_object_t
_VME_OBJECT(
	vm_map_entry_t entry)
{
	vm_object_t object;

	if (!entry->vme_kernel_object) {
		object = VM_OBJECT_UNPACK(entry->vme_object_or_delta);
		__builtin_assume(!is_kernel_object(object));
	} else {
		object = kernel_object_default;
	}
	return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })
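
/*
 * Usage sketch (illustrative): a map entry points at either a submap or a
 * VM object, discriminated by is_sub_map, so callers branch before using
 * the accessors:
 *
 *	if (entry->is_sub_map) {
 *		vm_map_t submap = VME_SUBMAP(entry);
 *		// ... recurse into the submap ...
 *	} else {
 *		vm_object_t object = VME_OBJECT(entry);
 *		// ... operate on object at VME_OFFSET(entry) ...
 *	}
 */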

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t    object,
	bool           atomic,
	uint32_t       context)
{
	__builtin_assume(((vm_offset_t)object & 3) == 0);

	entry->vme_atomic = atomic;
	entry->is_sub_map = false;
	if (atomic) {
		entry->vme_context = context;
	} else {
		entry->vme_context = 0;
	}

	if (!object) {
		entry->vme_object_or_delta = 0;
	} else if (is_kernel_object(object)) {
#if VM_BTLOG_TAGS
		if (!(entry->vme_kernel_object && entry->vme_tag_btref))
#endif /* VM_BTLOG_TAGS */
		{
			entry->vme_object_or_delta = 0;
		}
	} else {
#if VM_BTLOG_TAGS
		if (entry->vme_kernel_object && entry->vme_tag_btref) {
			btref_put(entry->vme_tag_btref);
		}
#endif /* VM_BTLOG_TAGS */
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
	}

	entry->vme_kernel_object = is_kernel_object(object);
	entry->vme_resilient_codesign = false;
	entry->used_for_jit = false;
}

static inline vm_object_offset_t
VME_OFFSET(
	vm_map_entry_t entry)
{
	return entry->vme_offset << VME_OFFSET_SHIFT;
}

static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	entry->vme_offset = offset >> VME_OFFSET_SHIFT;
	assert3u(VME_OFFSET(entry), ==, offset);
}
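
/*
 * Worked example (illustrative): VME_OFFSET_SHIFT is 12, so offsets must
 * be multiples of 4KB to survive the pack/unpack round trip.  For
 * offset == 0x5000, VME_OFFSET_SET() stores 0x5 in the vme_offset
 * bitfield and VME_OFFSET() reconstructs 0x5000; a misaligned offset
 * such as 0x5100 would lose its low bits, which the assert3u() above
 * catches.
 */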

/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	unsigned int alias)
{
	assert3u(alias & VME_ALIAS_MASK, ==, alias);
	entry->vme_alias = alias;
}

static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length,
	bool always)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length, always);
	if (object != VME_OBJECT(entry)) {
		entry->vme_object_or_delta = VM_OBJECT_PACK(object);
		entry->use_pmap = true;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}

extern vm_tag_t vmtaglog_tag; /* Collected from a tunable in vm_resident.c */
static inline void
vme_btref_consider_and_set(__unused vm_map_entry_t entry, __unused void *fp)
{
#if VM_BTLOG_TAGS
	if (vmtaglog_tag && (VME_ALIAS(entry) == vmtaglog_tag) && entry->vme_kernel_object && entry->wired_count) {
		assert(!entry->vme_tag_btref); /* We should have already zeroed and freed the btref if we're here. */
		entry->vme_tag_btref = btref_get(fp, BTREF_GET_NOWAIT);
	}
#endif /* VM_BTLOG_TAGS */
}

static inline void
vme_btref_consider_and_put(__unused vm_map_entry_t entry)
{
#if VM_BTLOG_TAGS
	if (entry->vme_tag_btref && entry->vme_kernel_object && (entry->wired_count == 0) && (entry->user_wired_count == 0)) {
		btref_put(entry->vme_tag_btref);
		entry->vme_tag_btref = 0;
	}
#endif /* VM_BTLOG_TAGS */
}

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) (((a) + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
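
/*
 * Worked example (illustrative): if SUPERPAGE_NBASEPAGES made
 * SUPERPAGE_SIZE 2MB (0x200000), then
 * SUPERPAGE_ROUND_UP(0x251000) == 0x400000 and
 * SUPERPAGE_ROUND_DOWN(0x251000) == 0x200000.
 */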

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535

typedef struct vm_map_user_range {
	vm_map_address_t        vmur_min_address __kernel_data_semantics;

	vm_map_address_t        vmur_max_address : 56 __kernel_data_semantics;
	vm_map_range_id_t       vmur_range_id : 8;
} *vm_map_user_range_t;

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 *
 *	Note:
 *		vm_map_relocate_early_elem() knows about this layout,
 *		and needs to be kept in sync.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if __x86_64__
	vm_map_offset_t         vmmap_high_start;
#endif /* __x86_64__ */

	os_ref_atomic_t         map_refcnt;       /* Reference count */

#if CONFIG_MAP_RANGES
#define VM_MAP_EXTRA_RANGES_MAX 1024
	struct mach_vm_range    default_range;
	struct mach_vm_range    data_range;

	uint16_t                extra_ranges_count;
	vm_map_user_range_t     extra_ranges;
#endif /* CONFIG_MAP_RANGES */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                          /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,            /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,             /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,           /* this map should never cause faults */
	/* boolean_t */ uses_user_ranges:1,       /* has the map been configured to use user VM ranges */
	/* boolean_t */ tpro_enforcement:1,       /* enforce TPRO propagation */
	/* reserved  */ pad:11;
	unsigned int            timestamp;        /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are two formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method. This method uses a zero-sized array unioned with
 *		other format-specific data in the 'c_u' member. This unsized
 *		array overlaps the other elements and allows us to use this
 *		extra structure space for physical memory copies. On 64-bit
 *		systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_KERNEL_BUFFER       2
	uint16_t                type;
	bool                    is_kernel_range;
	bool                    is_user_range;
	vm_map_range_id_t       orig_range;
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
	} c_u;
};


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone       (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone       (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone             (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr                 c_u.hdr
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        ((copy)->cpy_hdr.links.prev)
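
/*
 * Usage sketch (illustrative): code consuming a vm_map_copy_t typically
 * dispatches on its format before touching the union:
 *
 *	switch (copy->type) {
 *	case VM_MAP_COPY_ENTRY_LIST:
 *		// walk entries starting at vm_map_copy_first_entry(copy)
 *		break;
 *	case VM_MAP_COPY_KERNEL_BUFFER:
 *		// data lives in the copy->cpy_kdata buffer
 *		break;
 *	}
 */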

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#define vm_map_lock_assert_held(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
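
/*
 * Usage sketch (illustrative): the map timestamp is bumped on every
 * unlock/downgrade from exclusive, so callers that drop and retake the
 * lock can detect intervening changes.  A common upgrade pattern:
 *
 *	vm_map_lock_read(map);
 *	// ... lookup ...
 *	if (vm_map_lock_read_to_write(map)) {
 *		// upgrade failed and the read lock was dropped:
 *		// retake the lock exclusively and redo the lookup.
 *		vm_map_lock(map);
 *		// ... revalidate ...
 *	}
 *	// ... modify the map ...
 *	vm_map_unlock(map);
 */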

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Look up the map entry containing the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* Look up the map entry containing the specified address, or the next
 * entry if the address is not contained, in the given map */
extern boolean_t        vm_map_lookup_entry_or_next(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t        vm_map_lookup_entry_allow_pgz(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
#else /* !CONFIG_PROB_GZALLOC */
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif /* !CONFIG_PROB_GZALLOC */

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_and_lock_object(
	vm_map_t                *var_map,                               /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,                           /* OUT */
	vm_object_t             *object,                                /* OUT */
	vm_object_offset_t      *offset,                                /* OUT */
	vm_prot_t               *out_prot,                              /* OUT */
	boolean_t               *wired,                                 /* OUT */
	vm_object_fault_info_t  fault_info,                             /* OUT */
	vm_map_t                *real_map,                              /* OUT */
	bool                    *contended);                            /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);                              /* REF */
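
/*
 * Usage sketch (illustrative): vm_map_version_t pairs with vm_map_verify()
 * to revalidate a lookup after the map lock has been dropped:
 *
 *	vm_map_version_t version;
 *	// ... vm_map_lookup_and_lock_object(&map, vaddr, ..., &version, ...);
 *	// ... drop the map lock, do slow work ...
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		// the map changed in the meantime: redo the lookup
 *	}
 */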


/*
 *	Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)
                                /* Lowest valid address in a map */

#define         vm_map_max(map) ((map)->max_offset)
                                /* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)
                                /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	                          (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
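
/*
 * Usage sketch (illustrative): waiting out an in_transition entry.
 * vm_map_entry_wait() bumps the timestamp and sleeps on the map lock,
 * so the caller must re-look-up the entry once it returns:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// ... re-lookup the entry; the map may have changed ...
 *	}
 */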


/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);

#if XNU_PLATFORM_MacOSX

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t           copy);

#endif /* XNU_PLATFORM_MacOSX */

/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t   vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	vm_map_kernel_flags_t   vmk_flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);

extern void             vm_map_inherit_limits(
	vm_map_t                new_map,
	const struct _vm_map   *old_map);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);                         /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t       flavor,
	vm_region_info_t         info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

extern void vm_map_region_top_walk(
	vm_map_entry_t entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t map,
	vm_map_offset_t va,
	vm_map_entry_t entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t look_for_pages,
	mach_msg_type_number_t count);


extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	const char             *vmrr_name;
	vm_map_offset_t         vmrr_addr;
	vm_map_size_t           vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool                    vm_is64bit,
	const struct vm_reserved_region **regions);

#endif /* MACH_KERNEL_PRIVATE */

/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
	vm_map_read_t           map);

extern vm_map_t         vm_map_switch(
	vm_map_t                map);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
extern kern_return_t vm_map_csm_allow_jit(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern void vm_map_will_allocate_early_map(
	vm_map_t               *map_owner);

extern void vm_map_relocate_early_maps(
	vm_offset_t             delta);

extern void vm_map_relocate_early_elem(
	uint32_t                zone_id,
	vm_offset_t             new_addr,
	vm_offset_t             delta);

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

extern void             vm_map_require(
	vm_map_t                map);

extern void             vm_map_copy_require(
	vm_map_copy_t           copy);

extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);

#endif /* XNU_KERNEL_PRIVATE */

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)


/* Returns TRUE if the size of the vm_map_copy equals the size parameter,
 * FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);

/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t    vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);                          /* OUT */
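
/*
 * Usage sketch (illustrative): a copyin/copyout pair moving a region
 * between maps; on success the copy object is consumed by the copyout,
 * on failure the caller still owns it and should discard it:
 *
 *	vm_map_copy_t copy;
 *	kern_return_t kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS) {
 *			vm_map_copy_discard(copy);
 *		}
 *	}
 */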

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,                           /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_FORK              0x00000010
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000001F
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);                         /* OUT */


extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t      tmap);

extern boolean_t        vm_map_tpro(
	vm_map_t                map);

extern void             vm_map_set_tpro(
	vm_map_t                map);

extern boolean_t        vm_map_tpro_enforcement(
	vm_map_t                map);

extern void             vm_map_set_tpro_enforcement(
	vm_map_t                map);

extern boolean_t        vm_map_set_tpro_range(
	vm_map_t                map,
	vm_map_address_t        start,
	vm_map_address_t        end);

extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);

extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);

#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int              vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map);

extern int              vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern bool vm_map_range_overflows(
	vm_map_t                map,
	vm_map_offset_t         addr,
	vm_map_size_t           size);
#ifdef XNU_KERNEL_PRIVATE

/* Support for vm_map ranges */
extern kern_return_t    vm_map_range_configure(
	vm_map_t                map);

extern void             vm_map_range_fork(
	vm_map_t                new_map,
	vm_map_t                old_map);

extern int              vm_map_get_user_range(
	vm_map_t                map,
	vm_map_range_id_t       range_id,
	mach_vm_range_t         range);
1525 /*!
1526  * @function vm_map_kernel_flags_update_range_id()
1527  *
1528  * @brief
1529  * Updates the @c vmkf_range_id field with the adequate value
1530  * according to the policy for specified map and tag set in @c vmk_flags.
1531  *
1532  * @discussion
1533  * This function is meant to be called by Mach VM entry points,
1534  * which matters for the kernel: allocations with pointers _MUST_
1535  * be allocated with @c kmem_*() functions.
1536  *
1537  * If the range ID is already set, it is preserved.
1538  */
1539 extern void             vm_map_kernel_flags_update_range_id(
1540 	vm_map_kernel_flags_t  *flags,
1541 	vm_map_t                map);
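
/*
 * Sketch of the intended calling pattern from a Mach VM entry point
 * (illustrative only; assumes a vmk_flags value built by the caller,
 * e.g. with VM_MAP_KERNEL_FLAGS_ANYWHERE()):
 *
 *	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
 *	vm_map_kernel_flags_update_range_id(&vmk_flags, map);
 *	// vmkf_range_id now reflects the map's range policy for this tag,
 *	// unless the caller had already set it.
 */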

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);

#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
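
/*
 * For example, with 16K pages (pgmask == 0x3FFF):
 *	VM_MAP_ROUND_PAGE(0x4001, 0x3FFF) == 0x8000
 *	VM_MAP_TRUNC_PAGE(0x4001, 0x3FFF) == 0x4000
 */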

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
	/*
	 * Help ubsan and codegen in general; we cannot use
	 * PAGE_{MIN,MAX}_SHIFT here because testing code exercises
	 * 16k-aligned maps on 4k-only systems.
	 */
	__builtin_assume(shift >= 12 && shift <= 14);
	return shift;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
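
/*
 * For instance, a 4K map has VM_MAP_PAGE_SHIFT() 12, VM_MAP_PAGE_SIZE()
 * 0x1000 and VM_MAP_PAGE_MASK() 0xFFF; a 16K map has 14, 0x4000 and 0x3FFF.
 */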

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}

static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}
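
/*
 * Typical use (illustrative sketch): translate the cache-mode bits of a
 * memory-entry permission word into a WIMG attribute, leaving the
 * default in place when no explicit mode was requested.
 *
 *	unsigned int wimg_mode = VM_WIMG_USE_DEFAULT;
 *	vm_prot_to_wimg(GET_MAP_MEM(permission), &wimg_mode);
 */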

static inline boolean_t
vm_map_always_shadow(vm_map_t map)
{
	if (map->mapped_in_other_pmaps) {
		/*
		 * This is a submap, mapped in other maps.
		 * Even if a VM object is mapped only once in this submap,
		 * the submap itself could be mapped multiple times,
		 * so vm_object_shadow() should always create a shadow
		 * object, even if the object has only 1 reference.
		 */
		return TRUE;
	}
	return FALSE;
}

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);

#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);
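
/*
 * Example (illustrative sketch): query a map's total virtual size, the
 * amount of free virtual address space, and the largest free contiguous
 * range, all in bytes.
 *
 *	vm_map_size_t size, free, largest_free;
 *	vm_map_sizes(map, &size, &free, &largest_free);
 */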

#if CONFIG_DYNAMIC_CODE_SIGNING

extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);

#endif /* CONFIG_DYNAMIC_CODE_SIGNING */

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

__enum_decl(freezer_error_code_t, int, {
	FREEZER_ERROR_GENERIC = -1,
	FREEZER_ERROR_EXCESS_SHARED_MEMORY = -2,
	FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO = -3,
	FREEZER_ERROR_NO_COMPRESSOR_SPACE = -4,
	FREEZER_ERROR_NO_SWAP_SPACE = -5,
	FREEZER_ERROR_NO_SLOTS = -6,
});

#endif /* CONFIG_FREEZE */
#if XNU_KERNEL_PRIVATE

boolean_t        kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t        vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);

#endif /* XNU_KERNEL_PRIVATE */

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif  /* KERNEL_PRIVATE */

__END_DECLS

#endif  /* _VM_VM_MAP_H_ */