/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_MAP_XNU_H_
#define _VM_VM_MAP_XNU_H_

#ifdef XNU_KERNEL_PRIVATE

#include <mach/vm_types.h>
#include <sys/cdefs.h>
#include <vm/vm_map.h>


__BEGIN_DECLS

extern void vm_map_reference(vm_map_t map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t vm_map_exec(
        vm_map_t        new_map,
        task_t          task,
        boolean_t       is64bit,
        void            *fsroot,
        cpu_type_t      cpu,
        cpu_subtype_t   cpu_subtype,
        boolean_t       reslide,
        boolean_t       is_driverkit,
        uint32_t        rsr_version);



#ifdef MACH_KERNEL_PRIVATE

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

/*
 * Types defined:
 *
 *      vm_map_t                the high-level address map data structure.
 *      vm_map_entry_t          an entry in an address map.
 *      vm_map_version_t        a timestamp of a map, for use with vm_map_lookup
 *      vm_map_copy_t           represents memory copied from an address map,
 *                              used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)

#define named_entry_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)         lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)       lck_mtx_unlock(&(object)->Lock)
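
/*
 * Illustrative sketch (not part of this interface): the macros above wrap the
 * usual lck_mtx lifecycle, so a hypothetical user of a named entry would
 * bracket access to its mutable fields like so:
 *
 *      named_entry_lock_init(named_entry);
 *      ...
 *      named_entry_lock(named_entry);
 *      named_entry->size = new_size;   // example field access
 *      named_entry_unlock(named_entry);
 *      ...
 *      named_entry_lock_destroy(named_entry);
 */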

/*
 * Type:        vm_named_entry_t [internal use only]
 *
 * Description:
 *      Description of a mapping to a memory cache object.
 *
 * Implementation:
 *      While the handle to this object is used as a means to map
 *      and pass around the right to map regions backed by pagers
 *      of all sorts, the named_entry itself is only manipulated
 *      by the kernel. Named entries hold information on the
 *      right to map a region of a cached object. Namely,
 *      the target cache object, the beginning and ending of the
 *      region to be mapped, and the permissions (read, write)
 *      with which it can be mapped.
 *
 */

struct vm_named_entry {
        decl_lck_mtx_data(, Lock);              /* Synchronization */
        union {
                vm_map_t        map;            /* map backing submap */
                vm_map_copy_t   copy;           /* a VM map copy */
        } backing;
        vm_object_offset_t      offset;         /* offset into object */
        vm_object_size_t        size;           /* size of region */
        vm_object_offset_t      data_offset;    /* offset to first byte of data */
        unsigned int                            /* Is backing.xxx : */
        /* unsigned */          access:8,       /* MAP_MEM_* */
        /* vm_prot_t */         protection:4,   /* access permissions */
        /* boolean_t */         is_object:1,    /* ... a VM object (wrapped in a VM map copy) */
        /* boolean_t */         internal:1,     /* ... an internal object */
        /* boolean_t */         is_sub_map:1,   /* ... a submap? */
        /* boolean_t */         is_copy:1,      /* ... a VM map copy */
        /* boolean_t */         is_fully_owned:1; /* ... all objects are owned */
#if VM_NAMED_ENTRY_DEBUG
        uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};
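
/*
 * Illustrative sketch (an assumption about intended use, not part of the
 * interface): the "backing" union is discriminated by the is_sub_map,
 * is_copy and is_object bits, so a reader dispatches roughly like this:
 *
 *      if (named_entry->is_sub_map) {
 *              vm_map_t submap = named_entry->backing.map;
 *              ...
 *      } else {
 *              // is_copy / is_object: a VM map copy, possibly wrapping
 *              // a single VM object
 *              vm_map_copy_t copy = named_entry->backing.copy;
 *              ...
 *      }
 */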

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *      For an "iokit_mapped" entry, we add the size of the entry to the
 *      footprint when the entry is entered into the map and we subtract that
 *      size when the entry is removed. No other accounting should take place.
 *      "use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *      This indicates if we should ask the pmap layer to account for pages
 *      in this mapping. If FALSE, we expect that another form of accounting
 *      is being used (e.g. "iokit_mapped" or the explicit accounting of
 *      non-volatile purgeable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *      anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *      footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *      tell pmap NOT to account for pages being pmap_enter()'d from this
 *      mapping (i.e. use "alternate accounting")
 * else
 *      pmap will account for pages being pmap_enter()'d from this mapping
 *      as it sees fit (only if anonymous, etc...)
 */
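
/*
 * A minimal sketch of the decision above as a hypothetical predicate (not a
 * real function; it also assumes the comment's "iokit_mapped" state is the
 * iokit_acct bit declared in struct vm_map_entry below):
 *
 *      static bool
 *      entry_counts_toward_footprint(vm_map_entry_t entry)
 *      {
 *              if (entry->is_sub_map) {
 *                      return false;           // submaps never count
 *              }
 *              if (entry->iokit_acct) {
 *                      return true;            // entire virtual size counts
 *              }
 *              // else defer to the pmap: accounted only if use_pmap is set,
 *              // otherwise "alternate accounting" is in effect
 *              return entry->use_pmap;
 *      }
 */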

#define VME_ALIAS_BITS          12
#define VME_ALIAS_MASK          ((1u << VME_ALIAS_BITS) - 1)
#define VME_OFFSET_SHIFT        VME_ALIAS_BITS
#define VME_OFFSET_BITS         (64 - VME_ALIAS_BITS)
#define VME_SUBMAP_SHIFT        2
#define VME_SUBMAP_BITS         (sizeof(vm_offset_t) * 8 - VME_SUBMAP_SHIFT)
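
/*
 * Layout note (worked example): vme_alias and vme_offset share one 64-bit
 * word below: the low VME_ALIAS_BITS (12) hold the VM tag and the remaining
 * 52 bits hold the object offset in 4 KiB units (hence VME_OFFSET_SHIFT).
 * An offset of 0x8000 is stored as 0x8000 >> 12 == 0x8 and recovered by
 * VME_OFFSET() as 0x8 << 12 == 0x8000; offsets must therefore be
 * 4 KiB-aligned.
 */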

struct vm_map_entry {
        struct vm_map_links     links;          /* links to other entries */
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

        struct vm_map_store     store;

        union {
                vm_offset_t     vme_object_value;
                struct {
                        vm_offset_t vme_atomic:1;  /* entry cannot be split/coalesced */
                        vm_offset_t is_sub_map:1;  /* Is "object" a submap? */
                        vm_offset_t vme_submap:VME_SUBMAP_BITS;
                };
                struct {
                        uint32_t vme_ctx_atomic : 1;
                        uint32_t vme_ctx_is_sub_map : 1;
                        uint32_t vme_context : 30;

                        /**
                         * If vme_kernel_object==1 && KASAN,
                         * vme_object_or_delta holds the delta.
                         *
                         * If vme_kernel_object==1 && !KASAN,
                         * vme_tag_btref holds a btref when vme_alias is equal
                         * to the "vmtaglog" boot-arg.
                         *
                         * If vme_kernel_object==0,
                         * vme_object_or_delta holds the packed vm object.
                         */
                        union {
                                vm_page_object_t vme_object_or_delta;
                                btref_t          vme_tag_btref;
                        };
                };
        };

        unsigned long long
        /* vm_tag_t */          vme_alias:VME_ALIAS_BITS,       /* entry VM tag */
        /* vm_object_offset_t */ vme_offset:VME_OFFSET_BITS,    /* offset into object */

        /* boolean_t */         is_shared:1,    /* region is shared */
#if HAS_MTE
        /* boolean_t */         vme_is_tagged:1, /* region is mapped with tags */
#else /* !HAS_MTE */
        /* boolean_t */         __unused1:1,
#endif /* HAS_MTE */
        /* boolean_t */         in_transition:1, /* Entry being changed */
        /* boolean_t */         needs_wakeup:1, /* Waiters on in_transition */
        /* behavior is not defined for submap type */
        /* vm_behavior_t */     behavior:2,     /* user paging behavior hint */
        /* boolean_t */         needs_copy:1,   /* object need to be copied? */

        /* Only in task maps: */
#if defined(__arm64e__)
        /*
         * On ARM, the fourth protection bit is unused (UEXEC is x86_64 only).
         * We reuse it here to keep track of mappings that have hardware support
         * for read-only/read-write trusted paths.
         */
        /* vm_prot_t-like */    protection:3,   /* protection code */
        /* boolean_t */         used_for_tpro:1,
#else /* __arm64e__ */
        /* vm_prot_t-like */    protection:4,   /* protection code, bit3=UEXEC */
#endif /* __arm64e__ */

        /* vm_prot_t-like */    max_protection:4, /* maximum protection, bit3=UEXEC */
        /* vm_inherit_t */      inheritance:2,  /* inheritance */

        /*
         * use_pmap is overloaded:
         * if "is_sub_map":
         *      use a nested pmap?
         * else (i.e. if object):
         *      use pmap accounting
         *      for footprint?
         */
        /* boolean_t */         use_pmap:1,
        /* boolean_t */         no_cache:1,     /* should new pages be cached? */
        /* boolean_t */         vme_permanent:1, /* mapping can not be removed */
        /* boolean_t */         superpage_size:1, /* use superpages of a certain size */
        /*
         * zero out the wired pages of this entry
         * if is being deleted without unwiring them
         */
        /* boolean_t */         zero_wired_pages:1,
        /* boolean_t */         used_for_jit:1,
        /* boolean_t */         csm_associated:1, /* code signing monitor will validate */

        /* iokit accounting: use the virtual size rather than resident size: */
        /* boolean_t */         iokit_acct:1,
        /* boolean_t */         vme_resilient_codesign:1,
        /* boolean_t */         vme_resilient_media:1,
        /* boolean_t */         vme_xnu_user_debug:1,
        /* boolean_t */         vme_no_copy_on_read:1,
        /* boolean_t */         translated_allow_execute:1, /* execute in translated processes */
        /* boolean_t */         vme_kernel_object:1, /* vme_object is a kernel_object */
        /* boolean_t */         __unused:1;

        unsigned short          wired_count;    /* can be paged if = 0 */
        unsigned short          user_wired_count; /* for vm_wire */

#if DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif /* DEBUG */
#if MAP_ENTRY_CREATION_DEBUG
        struct vm_map_header    *vme_creation_maphdr;
        uint32_t                vme_creation_bt; /* btref_t */
#endif /* MAP_ENTRY_CREATION_DEBUG */
#if MAP_ENTRY_INSERTION_DEBUG
        uint32_t                vme_insertion_bt; /* btref_t */
        vm_map_offset_t         vme_start_original;
        vm_map_offset_t         vme_end_original;
#endif /* MAP_ENTRY_INSERTION_DEBUG */
};

#define VME_ALIAS(entry) \
        ((entry)->vme_alias)

static inline vm_map_t
_VME_SUBMAP(
        vm_map_entry_t entry)
{
        __builtin_assume(entry->vme_submap);
        return (vm_map_t)(entry->vme_submap << VME_SUBMAP_SHIFT);
}
#define VME_SUBMAP(entry) ({ assert((entry)->is_sub_map); _VME_SUBMAP(entry); })

static inline void
VME_SUBMAP_SET(
        vm_map_entry_t entry,
        vm_map_t submap)
{
        __builtin_assume(((vm_offset_t)submap & 3) == 0);

        entry->is_sub_map = true;
        entry->vme_submap = (vm_offset_t)submap >> VME_SUBMAP_SHIFT;
}
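
/*
 * Worked example for the pointer packing above: vm_map_t pointers are at
 * least 4-byte aligned, so their low VME_SUBMAP_SHIFT (2) bits are always
 * zero (assumed via __builtin_assume above) and can be reclaimed for the
 * vme_atomic/is_sub_map bits. VME_SUBMAP_SET() stores (ptr >> 2) and
 * _VME_SUBMAP() recovers (stored << 2), a lossless round trip precisely
 * because of that alignment guarantee.
 */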

static inline vm_object_t
_VME_OBJECT(
        vm_map_entry_t entry)
{
        vm_object_t object;

        if (!entry->vme_kernel_object) {
                object = VM_OBJECT_UNPACK(entry->vme_object_or_delta);
                __builtin_assume(!is_kernel_object(object));
        } else {
#if HAS_MTE
                object = entry->vme_is_tagged ? kernel_object_tagged : kernel_object_default;
#else /* !HAS_MTE */
                object = kernel_object_default;
#endif /* HAS_MTE */
        }
        return object;
}
#define VME_OBJECT(entry) ({ assert(!(entry)->is_sub_map); _VME_OBJECT(entry); })


static inline vm_object_offset_t
VME_OFFSET(
        vm_map_entry_t entry)
{
        return entry->vme_offset << VME_OFFSET_SHIFT;
}
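
/*
 * A hedged sketch of the matching setter (the real one lives elsewhere in
 * the VM code; this only illustrates the packing contract implied by
 * VME_OFFSET() above):
 *
 *      static inline void
 *      VME_OFFSET_SET_sketch(vm_map_entry_t entry, vm_object_offset_t offset)
 *      {
 *              // offsets must be aligned to 1 << VME_OFFSET_SHIFT (4 KiB)
 *              assert((offset & ((1ull << VME_OFFSET_SHIFT) - 1)) == 0);
 *              entry->vme_offset = offset >> VME_OFFSET_SHIFT;
 *      }
 */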


#if (DEBUG || DEVELOPMENT) && !KASAN
#define VM_BTLOG_TAGS 1
#else
#define VM_BTLOG_TAGS 0
#endif


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE          (PAGE_SIZE * SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK          (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) ((a) & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a)   (((a) + SUPERPAGE_SIZE - 1) & SUPERPAGE_MASK)
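
/*
 * Worked example (assuming SUPERPAGE_NBASEPAGES == 512 with 4 KiB base
 * pages, i.e. 2 MiB superpages): SUPERPAGE_SIZE == 0x200000 and
 * SUPERPAGE_MASK == ~0x1FFFFF, so SUPERPAGE_ROUND_DOWN(0x345678) == 0x200000
 * and SUPERPAGE_ROUND_UP(0x345678) == 0x400000.
 */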

/*
 * wired_counts are unsigned short. This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535

typedef struct vm_map_user_range {
        vm_map_address_t        vmur_min_address __kernel_data_semantics;

        vm_map_address_t        vmur_max_address : 56 __kernel_data_semantics;
        vm_map_range_id_t       vmur_range_id : 8;
} *vm_map_user_range_t;

/*
 * Type:        vm_map_t [exported; contents invisible]
 *
 * Description:
 *      An address map -- a directory relating valid
 *      regions of a task's address space to the corresponding
 *      virtual memory objects.
 *
 * Implementation:
 *      Maps are doubly-linked lists of map entries, sorted
 *      by address. One hint is used to start
 *      searches again from the last successful search,
 *      insertion, or removal. Another hint is used to
 *      quickly find free space.
 *
 * Note:
 *      vm_map_relocate_early_elem() knows about this layout,
 *      and needs to be kept in sync.
 */
struct _vm_map {
        lck_rw_t                lock;           /* map lock */
        struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
        pmap_t XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */
        vm_map_size_t           size;           /* virtual size */
        uint64_t                size_limit;     /* rlimit on address space size */
        uint64_t                data_limit;     /* rlimit on data size */
        vm_map_size_t           user_wire_limit; /* rlimit on user locked memory */
        vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if __x86_64__
        vm_map_offset_t         vmmap_high_start;
#endif /* __x86_64__ */

        os_ref_atomic_t         map_refcnt;     /* Reference count */

#if CONFIG_MAP_RANGES
#define VM_MAP_EXTRA_RANGES_MAX 1024
        struct mach_vm_range    default_range;
        struct mach_vm_range    data_range;
        struct mach_vm_range    large_file_range;

        uint16_t                extra_ranges_count;
        vm_map_user_range_t     extra_ranges;
#endif /* CONFIG_MAP_RANGES */

        union {
                /*
                 * If map->disable_vmentry_reuse == TRUE:
                 * the end address of the highest allocated vm_map_entry_t.
                 */
                vm_map_offset_t vmu1_highest_entry_end;
                /*
                 * For a nested VM map:
                 * the lowest address in this nested VM map that we would
                 * expect to be unnested under normal operation (i.e. for
                 * regular copy-on-write on DATA section).
                 */
                vm_map_offset_t vmu1_lowest_unnestable_start;
        } vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
        vm_map_entry_t          hint;           /* hint for quick lookups */
        union {
                struct vm_map_links *vmmap_hole_hint; /* hint for quick hole lookups */
                struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
        } vmmap_u_1;
#define hole_hint               vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint  vmmap_u_1.vmmap_corpse_footprint
        union {
                vm_map_entry_t          _first_free;    /* First free space hint */
                struct vm_map_links     *_holes;        /* links all holes between entries */
        } f_s;                                  /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

        unsigned int
        /* boolean_t */ wait_for_space:1,       /* Should callers wait for space? */
        /* boolean_t */ wiring_required:1,      /* All memory wired? */
        /* boolean_t */ no_zero_fill:1,         /* No zero fill absent pages */
        /* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
        /* boolean_t */ switch_protect:1,       /* Protect map from write faults while switched */
        /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
        /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
        /* boolean_t */ holelistenabled:1,
        /* boolean_t */ is_nested_map:1,
        /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */
        /* boolean_t */ jit_entry_exists:1,
        /* boolean_t */ has_corpse_footprint:1,
        /* boolean_t */ terminated:1,
        /* boolean_t */ is_alien:1,             /* for platform simulation, i.e. PLATFORM_IOS on OSX */
        /* boolean_t */ cs_enforcement:1,       /* code-signing enforcement */
        /* boolean_t */ cs_debugged:1,          /* code-signed but debugged */
        /* boolean_t */ reserved_regions:1,     /* has reserved regions. The map size that userspace sees should ignore these. */
        /* boolean_t */ single_jit:1,           /* only allow one JIT mapping */
        /* boolean_t */ never_faults:1,         /* this map should never cause faults */
        /* boolean_t */ uses_user_ranges:1,     /* has the map been configured to use user VM ranges */
        /* boolean_t */ tpro_enforcement:1,     /* enforce TPRO propagation */
        /* boolean_t */ corpse_source:1,        /* map is being used to create a corpse for diagnostics. */
        /* boolean_t */ cs_platform_binary:1,   /* map belongs to a platform binary */

#define VM_MAP_NOT_SEALED       0 /* map is not sealed and may be freely modified. */
#define VM_MAP_WILL_BE_SEALED   1 /* map will be sealed and is subject to limited modification. */
#define VM_MAP_SEALED           2 /* map is sealed and should not be modified. */
        /* unsigned int */ vmmap_sealed:2,      /* sealed state of map, see definitions above. */
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
        /* boolean_t */ has_sec_access:1,       /* offsets into this map may contain embedded pointer tags, whether or not they're enabled */
#else
        /* reserved */  res0:1,
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
        /* reserved */  pad:6;
        uint64_t                timestamp;      /* Version number */
        /*
         * Weak reference to the task that owns this map. This will be NULL if the
         * map has terminated, so you must have a task reference to be able to safely
         * access this. Under the map lock, you can safely acquire a task reference
         * if owning_task is not NULL, since vm_map_terminate requires the map lock.
         */
        task_t                  owning_task;
#if HAS_MTE
        /*
         * This is used to asynchronously deliver tag check faults to the owner
         * of a user vm_map when we take a tag check fault in a kernel thread with
         * its map switched.
         *
         * This variable starts zero-initialized. On such a tag check fault, we
         * atomically set it to the address where the fault occurred.
         *
         * When we vm_map_switch_back, we set AST_MACH_EXCEPTION on all of
         * owning_task's threads.
         *
         * Whichever thread consumes the AST first will atomically set the address
         * to VM_ASYNC_TAG_FAULT_ALREADY_REPORTED (which prevents vm_map_switch_back
         * from spuriously setting ASTs on the map) and throw a guard exception,
         * potentially (based on policy) killing owning_task.
         *
         * This field is not protected by the map lock. Readers/writers should hold
         * a map reference and access this value atomically.
         */
        vm_map_offset_t         async_tag_fault_address;
#define VM_ASYNC_TAG_FAULT_ALREADY_REPORTED 0x1
#define VM_ASYNC_TAG_FAULT_MIN_VALID_ADDR (VM_ASYNC_TAG_FAULT_ALREADY_REPORTED + 1)
#endif

        /*
         * A generation ID for maps that increments monotonically.
         * This is a pointer type just so we get dPAC out-of-the-box, but
         * conceptually it's just an ID.
         * Note that this is not a unique object ID. In particular, fork()
         * will produce a child map with the same ID as its parent.
         */
        vm_map_serial_t         serial_id;
};

#define VME_PREV(entry)         VM_PREV_UNPACK((entry)->links.prev)
#define VMH_PREV(hdr)           (VM_PREV_UNPACK((hdr)->links.prev))
#define VML_PREV(links)         (VM_PREV_UNPACK((links)->prev))

static inline
void
VME_PREV_SET(vm_map_entry_t entry, vm_map_entry_t prev)
{
        entry->links.prev = VM_PREV_PACK(prev);
}

static inline
void
VMH_PREV_SET(struct vm_map_header *hdr, vm_map_entry_t prev)
{
        hdr->links.prev = VM_PREV_PACK(prev);
}

static inline
void
VML_PREV_SET(struct vm_map_links *links, vm_map_entry_t prev)
{
        links->prev = VM_PREV_PACK(prev);
}

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map)    CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  (VME_PREV(vm_map_to_entry(map)))

/*
 * Type:        vm_map_version_t [exported; contents invisible]
 *
 * Description:
 *      Map versions may be used to quickly validate a previous
 *      lookup operation.
 *
 * Usage note:
 *      Because they are bulky objects, map versions are usually
 *      passed by reference.
 *
 * Implementation:
 *      Just a timestamp for the main map.
 */
typedef struct vm_map_version {
        uint64_t        main_timestamp;
} vm_map_version_t;
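
/*
 * Illustrative sketch (hedged; the real consumers are the VM lookup/fault
 * paths): capture the timestamp while holding the map lock, drop the lock to
 * do slow work, then revalidate with the locking macros below.
 *
 *      vm_map_version_t version;
 *      version.main_timestamp = map->timestamp;        // under the map lock
 *      vm_map_unlock_read(map);
 *      ... slow work ...
 *      vm_map_lock_read(map);
 *      if (version.main_timestamp != map->timestamp) {
 *              // the map changed; redo the lookup
 *      }
 */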

/*
 * Type:        vm_map_copy_t [exported; contents invisible]
 *
 * Description:
 *      A map copy object represents a region of virtual memory
 *      that has been copied from an address map but is still
 *      in transit.
 *
 *      A map copy object may only be used by a single thread
 *      at a time.
 *
 * Implementation:
 *      There are two formats for map copy objects.
 *      The first is very similar to the main
 *      address map in structure, and as a result, some
 *      of the internal maintenance functions/macros can
 *      be used with either address maps or map copy objects.
 *
 *      The map copy object contains a header links
 *      entry onto which the other entries that represent
 *      the region are chained.
 *
 *      The second format is a kernel buffer copy object - for data
 *      small enough that physical copies were the most efficient
 *      method. This method uses a zero-sized array unioned with
 *      other format-specific data in the 'c_u' member. This unsized
 *      array overlaps the other elements and allows us to use this
 *      extra structure space for physical memory copies. On 64-bit
 *      systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_KERNEL_BUFFER       2
        uint16_t                type;
        bool                    is_kernel_range;
        bool                    is_user_range;
        vm_map_range_id_t       orig_range;
        vm_object_offset_t      offset;
        vm_map_size_t           size;
        union {
                struct vm_map_header    hdr;    /* ENTRY_LIST */
                struct {
                        void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
#if HAS_MTE
                        bool should_apply_mte_security_policy;
#endif /* HAS_MTE */
                } buffer_data;
        } c_u;
};


ZONE_DECLARE_ID(ZONE_ID_VM_MAP_ENTRY, struct vm_map_entry);
#define vm_map_entry_zone       (&zone_array[ZONE_ID_VM_MAP_ENTRY])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP_HOLES, struct vm_map_links);
#define vm_map_holes_zone       (&zone_array[ZONE_ID_VM_MAP_HOLES])

ZONE_DECLARE_ID(ZONE_ID_VM_MAP, struct _vm_map);
#define vm_map_zone             (&zone_array[ZONE_ID_VM_MAP])


#define cpy_hdr                 c_u.hdr
#define cpy_kdata               c_u.buffer_data.kdata
#if HAS_MTE
#define cpy_should_apply_mte_security_policy c_u.buffer_data.should_apply_mte_security_policy
#endif /* HAS_MTE */

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy)  (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy)  (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 * Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy) \
        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy) \
        (VM_PREV_UNPACK((copy)->cpy_hdr.links.prev))
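
/*
 * Illustrative sketch (not a KPI): iterating an ENTRY_LIST copy object with
 * the macros above mirrors iteration over a map's entry list:
 *
 *      vm_map_entry_t entry;
 *      assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
 *      for (entry = vm_map_copy_first_entry(copy);
 *          entry != vm_map_copy_to_entry(copy);
 *          entry = entry->vme_next) {
 *              ... inspect entry ...
 *      }
 */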


/*
 * Macros:      vm_map_lock, etc. [internal use only]
 * Description:
 *      Perform locking on the data portion of a map.
 *      When multiple maps are to be locked, order by map address.
 *      (See vm_map.c::vm_remap())
 */

#include <vm/vm_lock_perf.h>

#define vm_map_lock_init(map) \
        ((map)->timestamp = 0, \
        lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map) \
        MACRO_BEGIN \
        DTRACE_VM(vm_map_lock_w); \
        vmlp_lock_event_unlocked(VMLP_EVENT_LOCK_REQ_EXCL, map); \
        assert(!vm_map_is_sealed(map)); \
        lck_rw_lock_exclusive(&(map)->lock); \
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_GOT_EXCL, map); \
        MACRO_END

#define vm_map_lock_unseal(map) \
        MACRO_BEGIN \
        DTRACE_VM(vm_map_lock_w); \
        assert(vm_map_is_sealed(map)); \
        lck_rw_lock_exclusive(&(map)->lock); \
        (map)->vmmap_sealed = VM_MAP_NOT_SEALED; \
        MACRO_END

#define vm_map_unlock(map) \
        MACRO_BEGIN \
        DTRACE_VM(vm_map_unlock_w); \
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_UNLOCK_EXCL, map); \
        assert(!vm_map_is_sealed(map)); \
        (map)->timestamp++; \
        lck_rw_done(&(map)->lock); \
        MACRO_END

#define vm_map_lock_read(map) \
        MACRO_BEGIN \
        DTRACE_VM(vm_map_lock_r); \
        vmlp_lock_event_unlocked(VMLP_EVENT_LOCK_REQ_SH, map); \
        lck_rw_lock_shared(&(map)->lock); \
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_GOT_SH, map); \
        MACRO_END

#define vm_map_unlock_read(map) \
        MACRO_BEGIN \
        DTRACE_VM(vm_map_unlock_r); \
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_UNLOCK_SH, map); \
        lck_rw_done(&(map)->lock); \
        MACRO_END

#define vm_map_lock_write_to_read(map) \
        MACRO_BEGIN \
        DTRACE_VM(vm_map_lock_downgrade); \
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_DOWNGRADE, map); \
        (map)->timestamp++; \
        lck_rw_lock_exclusive_to_shared(&(map)->lock); \
        MACRO_END

#define vm_map_lock_assert_held(map) \
        LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map) \
        LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
        LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
        LCK_RW_ASSERT(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
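
/*
 * Illustrative sketch: a typical read-side critical section combining the
 * locking macros above with vm_map_lookup_entry() (declared below):
 *
 *      vm_map_entry_t entry;
 *      vm_map_lock_read(map);
 *      if (vm_map_lookup_entry(map, addr, &entry)) {
 *              ... "addr" falls inside "entry" ...
 *      }
 *      vm_map_unlock_read(map);
 */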

/*
 * Exported procedures that operate on vm_map_t.
 */

/*
 * Look up the map entry containing the specified address in the given map;
 * returns TRUE with that entry, or FALSE with the preceding entry in *entry.
 */
extern boolean_t vm_map_lookup_entry(
        vm_map_t        map,
        vm_map_address_t address,
        vm_map_entry_t  *entry);        /* OUT */


/*
 * Functions implemented as macros
 */
#define vm_map_min(map)         ((map)->min_offset)
                                        /* Lowest valid address in a map */

#define vm_map_max(map)         ((map)->max_offset)
                                        /* Highest valid address */

#define vm_map_pmap(map)        ((map)->pmap)
                                        /* Physical map associated with this address map */

/* Gain a reference to an existing map */
extern void vm_map_reference(
        vm_map_t        map);

/*
 * Wait and wakeup macros for in_transition map entries.
 */
static inline wait_result_t
_vm_map_entry_wait_helper(vm_map_t map, wait_interrupt_t interruptible)
{
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_SLEEP_BEGIN, map);
        map->timestamp++;
        wait_result_t res = lck_rw_sleep(&map->lock, LCK_SLEEP_EXCLUSIVE | LCK_SLEEP_PROMOTED_PRI,
            (event_t)&map->hdr, interruptible);
        vmlp_lock_event_locked(VMLP_EVENT_LOCK_SLEEP_END, map);
        return res;
}
#define vm_map_entry_wait(map, interruptible) _vm_map_entry_wait_helper((map), (interruptible))

#define vm_map_entry_wakeup(map) \
        thread_wakeup((event_t)(&(map)->hdr))
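
/*
 * Illustrative sketch of the canonical in_transition dance (hedged; see the
 * actual users in vm_map.c): the waiter holds the map lock exclusively, and
 * vm_map_entry_wait() drops it while sleeping (relocking exclusively via
 * LCK_SLEEP_EXCLUSIVE), so the entry must be looked up again afterwards:
 *
 *      while (entry->in_transition) {
 *              entry->needs_wakeup = TRUE;
 *              vm_map_entry_wait(map, THREAD_UNINT);
 *              if (!vm_map_lookup_entry(map, addr, &entry)) {
 *                      break;                  // the entry went away
 *              }
 *      }
 */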


extern void vm_map_inherit_limits(
        vm_map_t        new_map,
        const struct _vm_map *old_map);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t vm_map_fork(
        ledger_t        ledger,
        vm_map_t        old_map,
        int             options);

#if HAS_MTE
/*
 * WARNING: VM_MAP_FORK_SHARE_IF_INHERIT_NONE and VM_MAP_FORK_SHARE_IF_OWNED
 * allow fork() to create shared mappings of MTE-tagged memory, which is
 * generally forbidden.
 *
 * Currently, these flags are used only in the corpse-fork path, which is
 * safe because neither the process nor the corpse continues running, but future
 * callers should be careful.
 */
#endif /* HAS_MTE */
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
#define VM_MAP_FORK_SHARE_IF_OWNED              0x00000008


extern kern_return_t vm_map_query_volatile(
        vm_map_t        map,
        mach_vm_size_t  *volatile_virtual_size_p,
        mach_vm_size_t  *volatile_resident_size_p,
        mach_vm_size_t  *volatile_compressed_size_p,
        mach_vm_size_t  *volatile_pmap_size_p,
        mach_vm_size_t  *volatile_compressed_pmap_size_p);


extern kern_return_t vm_map_set_cache_attr(
        vm_map_t        map,
        vm_map_offset_t va);


extern void vm_map_copy_footprint_ledgers(
        task_t          old_task,
        task_t          new_task);


/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
        const char      *vmrr_name;
        vm_map_offset_t vmrr_addr;
        vm_map_size_t   vmrr_size;
};

/**
 * Returns a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
        bool            vm_is64bit,
        const struct vm_reserved_region **regions);

/**
 * Explicitly preallocates a floating point save area. This function is defined
 * in the machine-dependent machine_routines.c files.
 */
extern void ml_fp_save_area_prealloc(void);

extern bool vm_map_is_sealed(
        vm_map_t        map);

#endif /* MACH_KERNEL_PRIVATE */

/*
 * Read and write from a kernel buffer to a specified map.
 */
extern kern_return_t vm_map_write_user(
        vm_map_t        map,
        void            *src_p,
        vm_map_offset_ut dst_addr_u,
        vm_size_ut      size_u);

extern kern_return_t vm_map_read_user(
        vm_map_t        map,
        vm_map_offset_ut src_addr_u,
        void            *dst_p,
        vm_size_ut      size_u);

extern vm_map_size_t vm_map_adjusted_size(vm_map_t map);

typedef struct {
        vm_map_t        map;
        task_t          task;
        boolean_t       sec_overridden;
} vm_map_switch_context_t;
extern vm_map_switch_context_t vm_map_switch_with_sec_override(vm_map_t, boolean_t sec_override);
static inline vm_map_switch_context_t
vm_map_switch_to(vm_map_t map)
{
        return vm_map_switch_with_sec_override(map, FALSE);
}
extern void vm_map_switch_back(vm_map_switch_context_t ctx);
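
/*
 * Illustrative sketch: switching the current thread onto another map is
 * always bracketed, with the returned context handed back verbatim:
 *
 *      vm_map_switch_context_t ctx = vm_map_switch_to(target_map);
 *      ... e.g. copyin()/copyout() now target target_map ...
 *      vm_map_switch_back(ctx);
 */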

extern boolean_t vm_map_cs_enforcement(
        vm_map_t        map);
extern void vm_map_cs_enforcement_set(
        vm_map_t        map,
        boolean_t       val);

extern void vm_map_cs_debugged_set(
        vm_map_t        map,
        boolean_t       val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
extern kern_return_t vm_map_csm_allow_jit(vm_map_t map);


extern void vm_map_will_allocate_early_map(
        vm_map_t        *map_owner);

extern void vm_map_relocate_early_maps(
        vm_offset_t     delta);

extern void vm_map_relocate_early_elem(
        uint32_t        zone_id,
        vm_offset_t     new_addr,
        vm_offset_t     delta);

/* wire down a region */

/* never fails */
extern vm_map_t vm_map_create_options(
        pmap_t          pmap,
        vm_map_offset_t min_off,
        vm_map_offset_t max_off,
        vm_map_create_options_t options);

extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);

#if CONFIG_LARGE_SIZE_TELEMETRY
/* Cause a simulated crash. */
extern void vm_map_enter_large_telemetry_ast(void);
#endif /* CONFIG_LARGE_SIZE_TELEMETRY */

/*!
 * @function vm_map_enter_mem_object_control()
 *
 * @brief
 * Enters a mapping of @c initial_size bytes at @c *address (subject to
 * fixed/anywhere semantics, see @c VM_FLAGS_FIXED/VM_FLAGS_ANYWHERE).
 * The pages will come from a memory object paged in by the @c control pager,
 * and the caller may specify an @c offset into the object.
 *
 * @param target_map     The map into which to enter the mapping.
 * @param address        [in] Pointer to the address at which to enter the
 *                       mapping (or use as a hint for anywhere mappings).
 *                       No alignment is required, the function will
 *                       round this down to a page boundary in the
 *                       @c target_map.
 *                       [out] On success, it will be filled with the address
 *                       at which the object data is made available, and
 *                       will have the same misalignment into
 *                       @c target_map as @c offset.
 *                       On failure, it remains unmodified.
 * @param initial_size   Size of the mapping to enter.
 *                       Must be non-zero.
 *                       No alignment is required.
 * @param mask           An alignment mask the mapping must respect.
 * @param vmk_flags      The vm map kernel flags to influence this call.
 * @param control        The pager-managed memory object which is the source
 *                       of the pages.
 * @param offset         The offset into the memory object to use when
 *                       paging.
 *                       @c vm_map_enter, which is called into by
 *                       @c vm_map_enter_mem_object_control, requires that
 *                       @c offset be page-aligned for either @c target_map
 *                       pages or kernel pages.
 * @param needs_copy     Boolean which can be set to request that the mapped
 *                       pages be a copy of the memory object's pages.
 * @param cur_protection Effective protection that should be set for the
 *                       mapping.
 * @param max_protection Max protection that should be allowed for the
 *                       mapping. Should at least cover @c cur_protection.
 * @param inheritance    Inheritance policy for the mapping.
 *
 * @returns @c KERN_SUCCESS if the mapping was successfully entered, an error
 *          code otherwise.
 */
extern kern_return_t vm_map_enter_mem_object_control(
        vm_map_t        target_map,
        vm_map_offset_ut *address,
        vm_map_size_ut  initial_size,
        vm_map_offset_ut mask,
        vm_map_kernel_flags_t vmk_flags,
        memory_object_control_t control,
        vm_object_offset_ut offset,
        boolean_t       needs_copy,
        vm_prot_ut      cur_protection,
        vm_prot_ut      max_protection,
        vm_inherit_ut   inheritance);

/* Must be executed on a new task's map before the task is enabled for IPC access */
extern void vm_map_setup(vm_map_t map, task_t task); /* always succeeds */

extern kern_return_t vm_map_terminate(
        vm_map_t        map);

/* Overwrite existing memory with a copy */
extern kern_return_t vm_map_copy_overwrite(
        vm_map_t        dst_map,
        vm_map_address_ut dst_addr_u,
        vm_map_copy_t   copy,
        vm_map_size_ut  copy_size_u,
#if HAS_MTE
        boolean_t       sec_override,
#endif
        boolean_t       interruptible);

/* returns TRUE if size of vm_map_copy == *size, FALSE otherwise */
extern boolean_t vm_map_copy_validate_size(
        vm_map_t        dst_map,
        vm_map_copy_t   copy,
        vm_map_size_t   *size);

extern kern_return_t vm_map_copyout_size(
        vm_map_t        dst_map,
        vm_map_address_t *dst_addr,     /* OUT */
        vm_map_copy_t   copy,
        vm_map_size_ut  copy_size);

extern void vm_map_disable_NX(
        vm_map_t        map);

extern void vm_map_disallow_data_exec(
        vm_map_t        map);

extern void vm_map_set_64bit(
        vm_map_t        map);

extern void vm_map_set_32bit(
        vm_map_t        map);

extern void vm_map_set_jumbo(
        vm_map_t        map);

#if XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT
extern void vm_map_set_extra_jumbo(
        vm_map_t        map);
#endif /* XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT */

extern void vm_map_set_jit_entitled(
        vm_map_t        map);

extern void vm_map_set_max_addr(
        vm_map_t        map,
        vm_map_offset_t new_max_offset,
        bool            extra_jumbo);

extern boolean_t vm_map_has_hard_pagezero(
        vm_map_t        map,
        vm_map_offset_t pagezero_size);

extern void vm_commit_pagezero_status(vm_map_t tmap);

extern void vm_map_set_platform_binary(
        vm_map_t        map,
        bool            is_platform_binary);
extern bool vm_map_is_platform_binary(
        vm_map_t        map);

extern boolean_t vm_map_tpro(
        vm_map_t        map);

extern void vm_map_set_tpro(
        vm_map_t        map);

#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
extern void vm_map_set_sec_enabled(
        vm_map_t        map);

extern void vm_map_set_sec_disabled(
        vm_map_t        map);

extern vm_map_address_t vm_map_strip_addr(
        vm_map_t        map,
        vm_map_address_t ptr);
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

#if HAS_MTE
extern void vm_map_set_restrict_receiving_aliases_to_tagged_memory(
        vm_map_t        map, bool must_restrict);
#endif /* HAS_MTE */

extern void vm_map_set_tpro_enforcement(
        vm_map_t        map);

extern boolean_t vm_map_set_tpro_range(
        vm_map_t        map,
        vm_map_address_t start,
        vm_map_address_t end);

extern boolean_t vm_map_is_64bit(
        vm_map_t        map);

extern kern_return_t vm_map_raise_max_offset(
        vm_map_t        map,
        vm_map_offset_t new_max_offset);

extern kern_return_t vm_map_raise_min_offset(
        vm_map_t        map,
        vm_map_offset_t new_min_offset);

#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
        vm_map_t        map,
        vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */


extern vm_map_offset_t vm_compute_max_offset(
        boolean_t       is64);

extern void vm_map_get_max_aslr_slide_section(
        vm_map_t        map,
        int64_t         *max_sections,
        int64_t         *section_size);

extern uint64_t vm_map_get_max_aslr_slide_pages(
        vm_map_t        map);

extern uint64_t vm_map_get_max_loader_aslr_slide_pages(
        vm_map_t        map);

extern kern_return_t vm_map_set_size_limit(
        vm_map_t        map,
        uint64_t        limit);

extern kern_return_t vm_map_set_data_limit(
        vm_map_t        map,
        uint64_t        limit);

extern void vm_map_set_user_wire_limit(
        vm_map_t        map,
        vm_size_t       limit);

extern void vm_map_switch_protect(
        vm_map_t        map,
        boolean_t       val);

extern boolean_t vm_map_page_aligned(
        vm_map_offset_t offset,
        vm_map_offset_t mask);

extern bool vm_map_range_overflows(
        vm_map_t        map,
        vm_map_offset_t addr,
        vm_map_size_t   size);

/* Support for vm_map ranges */
extern kern_return_t vm_map_range_configure(
        vm_map_t        map,
        bool            needs_extra_jumbo_va);



/*!
 * @function vm_map_kernel_flags_update_range_id()
 *
 * @brief
 * Updates the @c vmkf_range_id field with the adequate value according to
 * the policy for the specified map and the tag set in @c vmk_flags.
 *
 * @discussion
 * This function is meant to be called by Mach VM entry points; this matters
 * for the kernel because allocations holding pointers _MUST_ be made with
 * the @c kmem_*() functions.
 *
 * If the range ID is already set, it is preserved.
 */
extern void vm_map_kernel_flags_update_range_id(
        vm_map_kernel_flags_t *flags,
        vm_map_t        map,
        vm_map_size_t   size);

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
        vm_map_t        map,
        vm_map_offset_ut offset,
        vm_page_info_flavor_t flavor,
        vm_page_info_t  info,
        mach_msg_type_number_t *count);

extern kern_return_t vm_map_page_range_info_internal(
        vm_map_t        map,
        vm_map_offset_ut start_offset,
        vm_map_offset_ut end_offset,
        int             effective_page_shift,
        vm_page_info_flavor_t flavor,
        vm_page_info_t  info,
        mach_msg_type_number_t *count);

#ifdef MACH_KERNEL_PRIVATE

/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask)    (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask)    ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
static inline int
VM_MAP_PAGE_SHIFT(
        vm_map_t map)
{
        int shift = map ? map->hdr.page_shift : PAGE_SHIFT;
        /*
         * help ubsan and codegen in general,
         * cannot use PAGE_{MIN,MAX}_SHIFT
         * because of testing code which
         * tests 16k aligned maps on 4k only systems.
         */
        __builtin_assume(shift >= 12 && shift <= 14);
        return shift;
}

#define VM_MAP_PAGE_SIZE(map)           (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map)           (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask)  (((x) & (pgmask)) == 0)
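
/*
 * Worked example: for a 16 KiB map (page_shift == 14), VM_MAP_PAGE_SIZE()
 * is 0x4000 and VM_MAP_PAGE_MASK() is 0x3FFF, so
 * VM_MAP_ROUND_PAGE(0x5000, 0x3FFF) == 0x8000,
 * VM_MAP_TRUNC_PAGE(0x5000, 0x3FFF) == 0x4000, and
 * VM_MAP_PAGE_ALIGNED(0x8000, 0x3FFF) is true.
 */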

#endif /* MACH_KERNEL_PRIVATE */


extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);

extern void vm_map_guard_exception(vm_map_offset_t gap_start, unsigned reason);

extern bool vm_map_is_corpse_source(vm_map_t map);
extern void vm_map_set_corpse_source(vm_map_t map);
extern void vm_map_unset_corpse_source(vm_map_t map);
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
extern bool vm_map_has_sec_access(vm_map_t map);
extern void vm_map_mark_has_sec_access_locked(vm_map_t map);
#if CONFIG_XNUPOST
extern void vm_map_mark_has_sec_access(vm_map_t map);
extern void vm_map_remove_sec_access(vm_map_t map);
#endif /* CONFIG_XNUPOST */
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

#if CONFIG_DYNAMIC_CODE_SIGNING

extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);

#endif /* CONFIG_DYNAMIC_CODE_SIGNING */
#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
        task_t          task,
        unsigned int    *purgeable_count,
        unsigned int    *wired_count,
        unsigned int    *clean_count,
        unsigned int    *dirty_count,
        unsigned int    dirty_budget,
        unsigned int    *shared_count,
        int             *freezer_error_code,
        boolean_t       eval_only);

__enum_decl(freezer_error_code_t, int, {
        FREEZER_ERROR_GENERIC = -1,
        FREEZER_ERROR_EXCESS_SHARED_MEMORY = -2,
        FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO = -3,
        FREEZER_ERROR_NO_COMPRESSOR_SPACE = -4,
        FREEZER_ERROR_NO_SWAP_SPACE = -5,
        FREEZER_ERROR_NO_SLOTS = -6,
});

#endif /* CONFIG_FREEZE */

extern kern_return_t vm_map_partial_reap(
        vm_map_t        map,
        unsigned int    *reclaimed_resident,
        unsigned int    *reclaimed_compressed);

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((int *)((map)->pmap->ledger) + (ledger_id)))

#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
        vm_map_t        map,
        boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

extern kern_return_t vm_map_entries_foreach(vm_map_t map, kern_return_t (^count_handler)(int nentries),
    kern_return_t (^entry_handler)(void *entry));
extern kern_return_t vm_map_dump_entry_and_compressor_pager(void *entry, char *buf, size_t *count);

extern void vm_map_testing_make_sealed_submap(
        vm_map_t        parent_map,
        mach_vm_address_t start,
        mach_vm_address_t end);

extern void vm_map_testing_remap_submap(
        vm_map_t        parent_map,
        mach_vm_address_t submap_base_address,
        mach_vm_address_t start,
        mach_vm_address_t end,
        mach_vm_address_t offset);

#endif /* DEVELOPMENT || DEBUG */

boolean_t kdp_vm_map_is_acquired_exclusive(vm_map_t map);

boolean_t vm_map_entry_has_device_pager(vm_map_t, vm_map_offset_t vaddr);


#ifdef VM_SCAN_FOR_SHADOW_CHAIN
int vm_map_shadow_max(vm_map_t map);
#endif

bool vm_map_is_map_size_valid(vm_map_t target_map, vm_size_t size, bool no_soft_limit);

/* Returns the map's ID or VM_MAP_SERIAL_NONE if the input map is NULL */
vm_map_serial_t vm_map_maybe_serial_id(vm_map_t maybe_vm_map);

__END_DECLS

#endif /* XNU_KERNEL_PRIVATE */
#endif /* _VM_VM_MAP_XNU_H_ */