1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: mach/vm_statistics.h
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61 *
62 * Virtual memory statistics structure.
63 *
64 */
65
66 #ifndef _MACH_VM_STATISTICS_H_
67 #define _MACH_VM_STATISTICS_H_
68
69
70 #include <Availability.h>
71 #include <os/base.h>
72 #include <stdbool.h>
73 #include <sys/cdefs.h>
74
75 #include <mach/machine/vm_types.h>
76 #include <mach/machine/kern_return.h>
77
78 __BEGIN_DECLS
79
80 #pragma mark VM Statistics
81
82 /*
83 * vm_statistics
84 *
85 * History:
86 * rev0 - original structure.
87 * rev1 - added purgable info (purgable_count and purges).
88 * rev2 - added speculative_count.
89 *
90 * Note: you cannot add any new fields to this structure. Add them below in
91 * vm_statistics64.
92 */
93
/*
 * Legacy 32-bit VM statistics, all counters are 32-bit natural_t.
 * This layout is frozen (see the history note above): new fields go
 * into vm_statistics64 instead.  host_statistics() pegs 64-bit kernel
 * counters into these fields via VM_STATISTICS_TRUNCATE_TO_32_BIT.
 */
struct vm_statistics {
	natural_t free_count;           /* # of pages free */
	natural_t active_count;         /* # of pages active */
	natural_t inactive_count;       /* # of pages inactive */
	natural_t wire_count;           /* # of pages wired down */
	natural_t zero_fill_count;      /* # of zero fill pages */
	natural_t reactivations;        /* # of pages reactivated */
	natural_t pageins;              /* # of pageins */
	natural_t pageouts;             /* # of pageouts */
	natural_t faults;               /* # of faults */
	natural_t cow_faults;           /* # of copy-on-writes */
	natural_t lookups;              /* object cache lookups */
	natural_t hits;                 /* object cache hits */

	/* added for rev1 */
	natural_t purgeable_count;      /* # of pages purgeable */
	natural_t purges;               /* # of pages purged */

	/* added for rev2 */
	/*
	 * NB: speculative pages are already accounted for in "free_count",
	 * so "speculative_count" is the number of "free" pages that are
	 * used to hold data that was read speculatively from disk but
	 * haven't actually been used by anyone so far.
	 */
	natural_t speculative_count;    /* # of pages speculative */
};

/* Used by all architectures */
typedef struct vm_statistics *vm_statistics_t;
typedef struct vm_statistics vm_statistics_data_t;
125
126 /*
127 * vm_statistics64
128 *
129 * History:
130 * rev0 - original structure.
131 * rev1 - added purgable info (purgable_count and purges).
132 * rev2 - added speculative_count.
133 * ----
134 * rev3 - changed name to vm_statistics64.
135 * changed some fields in structure to 64-bit on
136 * arm, i386 and x86_64 architectures.
137 * rev4 - require 64-bit alignment for efficient access
138 * in the kernel. No change to reported data.
139 *
140 */
141
/*
 * Current VM statistics.  Lifetime counters are widened to 64 bits and
 * the structure requires 8-byte alignment for efficient kernel access
 * (rev4 in the history above).
 */
struct vm_statistics64 {
	natural_t free_count;           /* # of pages free */
	natural_t active_count;         /* # of pages active */
	natural_t inactive_count;       /* # of pages inactive */
	natural_t wire_count;           /* # of pages wired down */
	uint64_t zero_fill_count;       /* # of zero fill pages */
	uint64_t reactivations;         /* # of pages reactivated */
	uint64_t pageins;               /* # of pageins (lifetime) */
	uint64_t pageouts;              /* # of pageouts */
	uint64_t faults;                /* # of faults */
	uint64_t cow_faults;            /* # of copy-on-writes */
	uint64_t lookups;               /* object cache lookups */
	uint64_t hits;                  /* object cache hits */
	uint64_t purges;                /* # of pages purged */
	natural_t purgeable_count;      /* # of pages purgeable */
	/*
	 * NB: speculative pages are already accounted for in "free_count",
	 * so "speculative_count" is the number of "free" pages that are
	 * used to hold data that was read speculatively from disk but
	 * haven't actually been used by anyone so far.
	 */
	natural_t speculative_count;    /* # of pages speculative */

	/*
	 * NOTE(review): the "rev1"/"rev2" labels below do not line up with
	 * the rev0-rev4 history above; they appear to restart counting for
	 * the 64-bit structure -- confirm against SCM history.
	 */
	/* added for rev1 */
	uint64_t decompressions;        /* # of pages decompressed (lifetime) */
	uint64_t compressions;          /* # of pages compressed (lifetime) */
	uint64_t swapins;               /* # of pages swapped in via compressor segments (lifetime) */
	uint64_t swapouts;              /* # of pages swapped out via compressor segments (lifetime) */
	natural_t compressor_page_count; /* # of pages used by the compressed pager to hold all the compressed data */
	natural_t throttled_count;      /* # of pages throttled */
	natural_t external_page_count;  /* # of pages that are file-backed (non-swap) */
	natural_t internal_page_count;  /* # of pages that are anonymous */
	uint64_t total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. */
	/* added for rev2 */
	uint64_t swapped_count;         /* # of compressor-stored pages currently stored in swap */
} __attribute__((aligned(8)));

typedef struct vm_statistics64 *vm_statistics64_t;
typedef struct vm_statistics64 vm_statistics64_data_t;
181
/*
 * vm_stats
 *
 * Fill "info" with VM statistics; "count" is the caller-supplied size
 * on input and the amount of data returned on output.
 * NOTE(review): whether "count" is in elements or integer words is not
 * visible from this header -- confirm against the implementation.
 */
kern_return_t vm_stats(void *info, unsigned int *count);

/*
 * VM_STATISTICS_TRUNCATE_TO_32_BIT
 *
 * This is used by host_statistics() to truncate and peg the 64-bit in-kernel values from
 * vm_statistics64 to the 32-bit values of the older structure above (vm_statistics).
 *
 * NOTE: "value" is evaluated more than once; do not pass an expression
 * with side effects.
 */
#define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) ((uint32_t)(((value) > UINT32_MAX ) ? UINT32_MAX : (value)))
191
192 /*
193 * vm_extmod_statistics
194 *
195 * Structure to record modifications to a task by an
196 * external agent.
197 *
198 * History:
199 * rev0 - original structure.
200 */
201
/*
 * Monotonic 64-bit event counters recording external modifications to
 * a task; 8-byte aligned like vm_statistics64.
 */
struct vm_extmod_statistics {
	int64_t task_for_pid_count;             /* # of times task port was looked up */
	int64_t task_for_pid_caller_count;      /* # of times this task called task_for_pid */
	int64_t thread_creation_count;          /* # of threads created in task */
	int64_t thread_creation_caller_count;   /* # of threads created by task */
	int64_t thread_set_state_count;         /* # of register state sets in task */
	int64_t thread_set_state_caller_count;  /* # of register state sets by task */
} __attribute__((aligned(8)));

typedef struct vm_extmod_statistics *vm_extmod_statistics_t;
typedef struct vm_extmod_statistics vm_extmod_statistics_data_t;
213
/*
 * vm_purgeable_stat
 *
 * Per-queue purgeable-memory statistics: the number of purgeable
 * objects on the queue and their total size.
 * NOTE(review): size units (presumably bytes) are not stated here --
 * confirm against the VM purgeable accounting code.
 */
typedef struct vm_purgeable_stat {
	uint64_t count;
	uint64_t size;
} vm_purgeable_stat_t;
218
/*
 * vm_purgeable_info
 *
 * Snapshot of purgeable-memory statistics broken down by queue:
 * 8 FIFO queues, one obsolete queue, and 8 LIFO queues.
 * NOTE(review): what distinguishes the 8 slots (presumably one per
 * volatile priority group) is not visible here -- confirm against the
 * purgeable queue implementation.
 */
struct vm_purgeable_info {
	vm_purgeable_stat_t fifo_data[8];
	vm_purgeable_stat_t obsolete_data;
	vm_purgeable_stat_t lifo_data[8];
};

typedef struct vm_purgeable_info *vm_purgeable_info_t;
226
/* included for the vm_map_page_query call */

#define VM_PAGE_QUERY_PAGE_PRESENT 0x1          /* page is resident */
#define VM_PAGE_QUERY_PAGE_FICTITIOUS 0x2       /* fictitious page */
#define VM_PAGE_QUERY_PAGE_REF 0x4              /* page has been referenced */
#define VM_PAGE_QUERY_PAGE_DIRTY 0x8            /* page has been modified */
#define VM_PAGE_QUERY_PAGE_PAGED_OUT 0x10       /* page has been paged out */
#define VM_PAGE_QUERY_PAGE_COPIED 0x20          /* page has been copied (COW) */
#define VM_PAGE_QUERY_PAGE_SPECULATIVE 0x40     /* page is on the speculative queue */
#define VM_PAGE_QUERY_PAGE_EXTERNAL 0x80        /* page is file-backed */
#define VM_PAGE_QUERY_PAGE_CS_VALIDATED 0x100   /* code signing: page validated */
#define VM_PAGE_QUERY_PAGE_CS_TAINTED 0x200     /* code signing: page tainted */
#define VM_PAGE_QUERY_PAGE_CS_NX 0x400          /* code signing: page non-executable */
#define VM_PAGE_QUERY_PAGE_REUSABLE 0x800       /* page is marked reusable */
241
242 #pragma mark User Flags
243
244 /*
245 * VM allocation flags:
246 *
247 * VM_FLAGS_FIXED
248 * (really the absence of VM_FLAGS_ANYWHERE)
249 * Allocate new VM region at the specified virtual address, if possible.
250 *
251 * VM_FLAGS_ANYWHERE
252 * Allocate new VM region anywhere it would fit in the address space.
253 *
254 * VM_FLAGS_PURGABLE
255 * Create a purgable VM object for that new VM region.
256 *
257 * VM_FLAGS_4GB_CHUNK
258 * The new VM region will be chunked up into 4GB sized pieces.
259 *
260 * VM_FLAGS_NO_PMAP_CHECK
261 * (for DEBUG kernel config only, ignored for other configs)
262 * Do not check that there is no stale pmap mapping for the new VM region.
263 * This is useful for kernel memory allocations at bootstrap when building
264 * the initial kernel address space while some memory is already in use.
265 *
266 * VM_FLAGS_OVERWRITE
267 * The new VM region can replace existing VM regions if necessary
268 * (to be used in combination with VM_FLAGS_FIXED).
269 *
270 * VM_FLAGS_NO_CACHE
271 * Pages brought in to this VM region are placed on the speculative
272 * queue instead of the active queue. In other words, they are not
273 * cached so that they will be stolen first if memory runs low.
274 */
275
#define VM_FLAGS_FIXED 0x00000000               /* absence of VM_FLAGS_ANYWHERE (see above) */
#define VM_FLAGS_ANYWHERE 0x00000001
#define VM_FLAGS_PURGABLE 0x00000002
#define VM_FLAGS_4GB_CHUNK 0x00000004
#define VM_FLAGS_RANDOM_ADDR 0x00000008         /* place the region at a randomized address */
#define VM_FLAGS_NO_CACHE 0x00000010
#define VM_FLAGS_RESILIENT_CODESIGN 0x00000020  /* NOTE(review): resilient to code-signing faults -- confirm semantics */
#define VM_FLAGS_RESILIENT_MEDIA 0x00000040     /* NOTE(review): resilient to media failures -- confirm semantics */
#define VM_FLAGS_PERMANENT 0x00000080           /* NOTE(review): mapping may not be removed later -- confirm */
#define VM_FLAGS_TPRO 0x00001000                /* hardware memory feature (grouped in VM_FLAGS_HW below) */
#define VM_FLAGS_MTE 0x00002000                 /* hardware memory tagging (grouped in VM_FLAGS_HW below) */
#define VM_FLAGS_OVERWRITE 0x00004000 /* delete any existing mappings first */
/*
 * VM_FLAGS_SUPERPAGE_MASK
 * 3 bits that specify whether large pages should be used instead of
 * base pages (!=0), as well as the requested page size.
 */
#define VM_FLAGS_SUPERPAGE_MASK 0x00070000 /* bits 0x10000, 0x20000, 0x40000 */
#define VM_FLAGS_RETURN_DATA_ADDR 0x00100000 /* Return address of target data, rather than base of page */
#define VM_FLAGS_RETURN_4K_DATA_ADDR 0x00800000 /* Return 4K aligned address of target data */
#define VM_FLAGS_ALIAS_MASK 0xFF000000
/* Extract the user tag ("alias") from the top 8 bits of flags. */
#define VM_GET_FLAGS_ALIAS(flags, alias) \
	(alias) = (((flags) >> 24) & 0xff)
#if !XNU_KERNEL_PRIVATE
/*
 * Store "alias" in the top 8 bits of "flags".
 * NOTE: "alias" is masked with ~VM_FLAGS_ALIAS_MASK (low 24 bits kept)
 * before the shift, so effectively only its low 8 bits survive.
 */
#define VM_SET_FLAGS_ALIAS(flags, alias) \
	(flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) | \
	(((alias) & ~VM_FLAGS_ALIAS_MASK) << 24))
#endif /* !XNU_KERNEL_PRIVATE */
304
305 #if XNU_KERNEL_PRIVATE
306 /*
307 * When making a new VM_FLAG_*:
308 * - add it to this mask
309 * - add a vmf_* field to vm_map_kernel_flags_t in the right spot
310 * - add a check in vm_map_kernel_flags_check_vmflags()
311 * - update tests vm_parameter_validation_[user|kern] and their expected
312 * results; they deliberately call VM functions with invalid flag values
313 * and you may be turning one of those invalid flags valid.
314 */
315 #define VM_FLAGS_ANY_MASK (VM_FLAGS_FIXED | \
316 VM_FLAGS_ANYWHERE | \
317 VM_FLAGS_PURGABLE | \
318 VM_FLAGS_4GB_CHUNK | \
319 VM_FLAGS_RANDOM_ADDR | \
320 VM_FLAGS_NO_CACHE | \
321 VM_FLAGS_RESILIENT_CODESIGN | \
322 VM_FLAGS_RESILIENT_MEDIA | \
323 VM_FLAGS_PERMANENT | \
324 VM_FLAGS_TPRO | \
325 VM_FLAGS_MTE | \
326 VM_FLAGS_OVERWRITE | \
327 VM_FLAGS_SUPERPAGE_MASK | \
328 VM_FLAGS_RETURN_DATA_ADDR | \
329 VM_FLAGS_RETURN_4K_DATA_ADDR | \
330 VM_FLAGS_ALIAS_MASK)
331 #endif /* XNU_KERNEL_PRIVATE */
332 #define VM_FLAGS_HW (VM_FLAGS_TPRO | VM_FLAGS_MTE)
333
334 /* These are the flags that we accept from user-space */
335 #define VM_FLAGS_USER_ALLOCATE (VM_FLAGS_FIXED | \
336 VM_FLAGS_ANYWHERE | \
337 VM_FLAGS_PURGABLE | \
338 VM_FLAGS_4GB_CHUNK | \
339 VM_FLAGS_RANDOM_ADDR | \
340 VM_FLAGS_NO_CACHE | \
341 VM_FLAGS_PERMANENT | \
342 VM_FLAGS_OVERWRITE | \
343 VM_FLAGS_SUPERPAGE_MASK | \
344 VM_FLAGS_HW | \
345 VM_FLAGS_ALIAS_MASK)
346
347 #define VM_FLAGS_USER_MAP (VM_FLAGS_USER_ALLOCATE | \
348 VM_FLAGS_RETURN_4K_DATA_ADDR | \
349 VM_FLAGS_RETURN_DATA_ADDR)
350
351 #define VM_FLAGS_USER_REMAP (VM_FLAGS_FIXED | \
352 VM_FLAGS_ANYWHERE | \
353 VM_FLAGS_RANDOM_ADDR | \
354 VM_FLAGS_OVERWRITE| \
355 VM_FLAGS_RETURN_DATA_ADDR | \
356 VM_FLAGS_RESILIENT_CODESIGN | \
357 VM_FLAGS_RESILIENT_MEDIA)
358
/* Superpage size requests are encoded in VM_FLAGS_SUPERPAGE_MASK (bits 16-18). */
#define VM_FLAGS_SUPERPAGE_SHIFT 16
#define SUPERPAGE_NONE 0 /* no superpages, if all bits are 0 */
#define SUPERPAGE_SIZE_ANY 1 /* superpages of any supported size */
#define VM_FLAGS_SUPERPAGE_NONE (SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT)
#define VM_FLAGS_SUPERPAGE_SIZE_ANY (SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT)
/* 2MB superpages: x86_64 kernel builds, or any non-kernel build */
#if defined(__x86_64__) || !defined(KERNEL)
#define SUPERPAGE_SIZE_2MB 2
#define VM_FLAGS_SUPERPAGE_SIZE_2MB (SUPERPAGE_SIZE_2MB<<VM_FLAGS_SUPERPAGE_SHIFT)
#endif
368
369 /*
370 * EXC_GUARD definitions for virtual memory.
371 */
372 #define GUARD_TYPE_VIRT_MEMORY 0x5
373
374 /* Reasons for exception for virtual memory */
/* Reasons for exception for virtual memory */
__enum_decl(virtual_memory_guard_exception_code_t, uint32_t, {
	kGUARD_EXC_DEALLOC_GAP = 1,
	kGUARD_EXC_RECLAIM_COPYIO_FAILURE = 2,
	kGUARD_EXC_RECLAIM_INDEX_FAILURE = 4,
	kGUARD_EXC_RECLAIM_DEALLOCATE_FAILURE = 8,
	kGUARD_EXC_RECLAIM_ACCOUNTING_FAILURE = 9,
	kGUARD_EXC_SEC_IOPL_ON_EXEC_PAGE = 10,
	kGUARD_EXC_SEC_EXEC_ON_IOPL_PAGE = 11,
	kGUARD_EXC_SEC_UPL_WRITE_ON_EXEC_REGION = 12,
	/*
	 * rdar://151450801 (Remove spurious kGUARD_EXC_SEC_ACCESS_FAULT and kGUARD_EXC_SEC_ASYNC_ACCESS_FAULT once CrashReporter is aligned)
	 */
	kGUARD_EXC_SEC_ACCESS_FAULT = 98,
	kGUARD_EXC_SEC_ASYNC_ACCESS_FAULT = 99,
	/* VM policy decisions (see vm_guard_is_mte_policy below) */
	kGUARD_EXC_SEC_COPY_DENIED = 100,
	kGUARD_EXC_SEC_SHARING_DENIED = 101,

	/* Fault-related exceptions (see vm_guard_is_mte_fault below). */
	kGUARD_EXC_MTE_SYNC_FAULT = 200,
	kGUARD_EXC_MTE_ASYNC_USER_FAULT = 201,
	kGUARD_EXC_MTE_ASYNC_KERN_FAULT = 202
});

/*
 * NOTE(review): 0x100000 is outside the enum's value range above and
 * appears to be a flag bit OR'd into a guard code -- confirm usage.
 */
#define kGUARD_EXC_MTE_SOFT_MODE 0x100000
400
401 #ifdef XNU_KERNEL_PRIVATE
402
403 #if HAS_MTE
404 static inline bool
vm_guard_is_mte_policy(uint32_t flavor)405 vm_guard_is_mte_policy(uint32_t flavor)
406 {
407 return flavor == kGUARD_EXC_SEC_COPY_DENIED || flavor == kGUARD_EXC_SEC_SHARING_DENIED;
408 }
409
410 static inline bool
vm_guard_is_mte_fault(uint32_t flavor)411 vm_guard_is_mte_fault(uint32_t flavor)
412 {
413 return flavor == kGUARD_EXC_MTE_SYNC_FAULT ||
414 flavor == kGUARD_EXC_MTE_ASYNC_USER_FAULT ||
415 flavor == kGUARD_EXC_MTE_ASYNC_KERN_FAULT;
416 }
417 #endif /* HAS_MTE */
418
419 #pragma mark Map Ranges
420
421 /*!
422 * @enum vm_map_range_id_t
423 *
424 * @brief
425 * Enumerate a particular vm_map range.
426 *
427 * @discussion
428 * The kernel_map VA has been split into the following ranges. Userspace
429 * VA for any given process can also optionally be split by the following user
430 * ranges.
431 *
432 * @const KMEM_RANGE_ID_NONE
433 * This range is only used for early initialization.
434 *
435 * @const KMEM_RANGE_ID_PTR_*
436 * Range containing general purpose allocations from kalloc, etc that
437 * contain pointers.
438 *
439 * @const KMEM_RANGE_ID_SPRAYQTN
440 * The spray quarantine range contains allocations that have the following
441 * properties:
442 * - An attacker could control the size, lifetime and number of allocations
443 * of this type (or from this callsite).
444 * - The pointer to the allocation is zeroed to ensure that it isn't left
445 * dangling limiting the use of UaFs.
446 * - OOBs on the allocation is carefully considered and sufficiently
447 * addressed.
448 *
449 * @const KMEM_RANGE_ID_DATA
450 * Range containing allocations that are bags of bytes and contain no
451 * pointers.
452 *
453 * @const KMEM_RANGE_ID_DATA_SHARED
454 * Range containing allocations that are bags of bytes and contain no
455 * pointers and meant to be shared with external domains.
456 */
__enum_decl(vm_map_range_id_t, uint8_t, {
	KMEM_RANGE_ID_NONE,
	KMEM_RANGE_ID_PTR_0,
	KMEM_RANGE_ID_PTR_1,
	KMEM_RANGE_ID_PTR_2,
	KMEM_RANGE_ID_SPRAYQTN,
	KMEM_RANGE_ID_DATA,
	KMEM_RANGE_ID_DATA_SHARED,

	KMEM_RANGE_ID_FIRST = KMEM_RANGE_ID_PTR_0,
	KMEM_RANGE_ID_NUM_PTR = KMEM_RANGE_ID_PTR_2,    /* # of PTR ranges (== 3) */
	KMEM_RANGE_ID_MAX = KMEM_RANGE_ID_DATA_SHARED,

	/* these UMEM_* correspond to the MACH_VM_RANGE_* tags and are ABI */
	UMEM_RANGE_ID_DEFAULT = 0, /* same as MACH_VM_RANGE_DEFAULT */
	UMEM_RANGE_ID_HEAP, /* same as MACH_VM_RANGE_DATA */
	UMEM_RANGE_ID_FIXED, /* same as MACH_VM_RANGE_FIXED */
	UMEM_RANGE_ID_LARGE_FILE,

	/* these UMEM_* are XNU internal only range IDs, and aren't ABI */
	UMEM_RANGE_ID_MAX = UMEM_RANGE_ID_LARGE_FILE,

#define KMEM_RANGE_COUNT (KMEM_RANGE_ID_MAX + 1)
});

typedef vm_map_range_id_t kmem_range_id_t;

/* floor(log2(mask)); NOTE: __builtin_clz(0) is undefined, so mask must be != 0 */
#define kmem_log2down(mask) (31 - __builtin_clz(mask))
/* Larger of the kernel and user maximum range IDs. */
#define KMEM_RANGE_MAX (UMEM_RANGE_ID_MAX < KMEM_RANGE_ID_MAX \
	? KMEM_RANGE_ID_MAX : UMEM_RANGE_ID_MAX)
/* # of bits needed to store any range ID up to KMEM_RANGE_MAX. */
#define KMEM_RANGE_BITS kmem_log2down(2 * KMEM_RANGE_MAX - 1)
488
489 #pragma mark Kernel Flags
490
/*
 * vm_map_kernel_flags_t
 *
 * Kernel-internal expansion of the user VM_FLAGS_* plus kernel-only
 * vmkf_* behavior flags and the VM tag.  Bits 1-23 of the first word
 * line up with the corresponding VM_FLAGS_* bit positions above;
 * bit 0 (vmf_fixed) encodes the *absence* of VM_FLAGS_ANYWHERE (0x1).
 */
typedef union {
	struct {
		unsigned long long
		/*
		 * VM_FLAG_* flags
		 */
		    vmf_fixed:1,
		    vmf_purgeable:1,
		    vmf_4gb_chunk:1,
		    vmf_random_addr:1,
		    vmf_no_cache:1,
		    vmf_resilient_codesign:1,
		    vmf_resilient_media:1,
		    vmf_permanent:1,

		    __unused_bit_8:1,
		    __unused_bit_9:1,
		    __unused_bit_10:1,
		    __unused_bit_11:1,
		    vmf_tpro:1,
		    vmf_mte:1,
		    vmf_overwrite:1,
		    __unused_bit_15:1,

		    vmf_superpage_size:3,
		    __unused_bit_19:1,
		    vmf_return_data_addr:1,
		    __unused_bit_21:1,
		    __unused_bit_22:1,
		    vmf_return_4k_data_addr:1,

		/*
		 * VM tag (user or kernel)
		 *
		 * User tags are limited to 8 bits,
		 * kernel tags can use up to 12 bits
		 * with -zt or similar features.
		 */
		    vm_tag : 12, /* same as VME_ALIAS_BITS */

		/*
		 * General kernel flags
		 */
		    vmkf_already:1, /* OK if same mapping already exists */
		    vmkf_beyond_max:1, /* map beyond the map's max offset */
		    vmkf_no_pmap_check:1, /* do not check that pmap is empty */
		    vmkf_map_jit:1, /* mark entry as JIT region */
		    vmkf_iokit_acct:1, /* IOKit accounting */
		    vmkf_keep_map_locked:1, /* keep map locked when returning from vm_map_enter() */
		    vmkf_overwrite_immutable:1, /* can overwrite immutable mappings */
		    vmkf_remap_prot_copy:1, /* vm_remap for VM_PROT_COPY */
		    vmkf_remap_legacy_mode:1, /* vm_remap, not vm_remap_new */
		    vmkf_cs_enforcement_override:1, /* override CS_ENFORCEMENT */
		    vmkf_cs_enforcement:1, /* new value for CS_ENFORCEMENT */
		    vmkf_nested_pmap:1, /* use a nested pmap */
		    vmkf_no_copy_on_read:1, /* do not use copy_on_read */
		    vmkf_copy_single_object:1, /* vm_map_copy only 1 VM object */
		    vmkf_copy_pageable:1, /* vm_map_copy with pageable entries */
		    vmkf_copy_same_map:1, /* vm_map_copy to remap in original map */
		    vmkf_translated_allow_execute:1, /* allow execute in translated processes */
		    vmkf_tpro_enforcement_override:1, /* override TPRO propagation */
		    vmkf_no_soft_limit:1, /* override soft allocation size limit */

		/*
		 * Submap creation, altering vm_map_enter() only
		 */
		    vmkf_submap:1, /* mapping a VM submap */
		    vmkf_submap_atomic:1, /* keep entry atomic (no splitting/coalescing) */
		    vmkf_submap_adjust:1, /* the submap needs to be adjusted */

		/*
		 * Flags altering the behavior of vm_map_locate_space_anywhere()
		 */
		    vmkf_32bit_map_va:1, /* allocate in low 32-bits range */
		    vmkf_guard_before:1, /* guard page before the mapping */
		    vmkf_last_free:1, /* find space from the end */
		    vmkf_range_id:KMEM_RANGE_BITS; /* kmem range to allocate in */

		unsigned long long
		/*
		 * Flags used to enforce security policy for copying of tagged memory
		 */
		    vmkf_copy_dest:2, /* See VM_COPY_DESTINATION_* */
		    vmkf_is_iokit:1, /* creating a memory entry to back an IOMD */
		    __vmkf_unused2:61;
	};

	/*
	 * Aliases the first 24 bits of the struct above (the vmf_* bits).
	 * do not access these directly,
	 * use vm_map_kernel_flags_check_vmflags*()
	 */
	uint32_t __vm_flags : 24;
} vm_map_kernel_flags_t;
584
585 /*
586 * using this means that vmf_* flags can't be used
587 * until vm_map_kernel_flags_set_vmflags() is set,
588 * or some manual careful init is done.
589 *
590 * Prefer VM_MAP_KERNEL_FLAGS_(FIXED,ANYWHERE) instead.
591 */
592 #define VM_MAP_KERNEL_FLAGS_NONE \
593 (vm_map_kernel_flags_t){ }
594
595 #define VM_MAP_KERNEL_FLAGS_FIXED(...) \
596 (vm_map_kernel_flags_t){ .vmf_fixed = true, __VA_ARGS__ }
597
598 #define VM_MAP_KERNEL_FLAGS_ANYWHERE(...) \
599 (vm_map_kernel_flags_t){ .vmf_fixed = false, __VA_ARGS__ }
600
601 #define VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(...) \
602 VM_MAP_KERNEL_FLAGS_FIXED(.vmf_permanent = true, __VA_ARGS__)
603
604 #define VM_MAP_KERNEL_FLAGS_ANYWHERE_PERMANENT(...) \
605 VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmf_permanent = true, __VA_ARGS__)
606
607 #define VM_MAP_KERNEL_FLAGS_DATA_BUFFERS_ANYWHERE(...) \
608 VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = KMEM_RANGE_ID_DATA, __VA_ARGS__)
609
610 #define VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(...) \
611 VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = kmem_needs_data_share_range() ? \
612 KMEM_RANGE_ID_DATA_SHARED : KMEM_RANGE_ID_DATA, __VA_ARGS__)
613
/*
 * Kernel-private flags for named-entry creation.
 */
typedef struct {
	unsigned int
	    vmnekf_ledger_tag:3,            /* a VM_LEDGER_TAG_* value (fits in 3 bits) */
	    vmnekf_ledger_no_footprint:1,   /* see VM_LEDGER_FLAG_NO_FOOTPRINT */
	    vmnekf_is_iokit:1,              /* NOTE(review): presumably mirrors vmkf_is_iokit (entry backs an IOMD) -- confirm */
	    __vmnekf_unused:27;
} vm_named_entry_kernel_flags_t;
/* All-zero default value. */
#define VM_NAMED_ENTRY_KERNEL_FLAGS_NONE (vm_named_entry_kernel_flags_t) { \
	.vmnekf_ledger_tag = 0, \
	.vmnekf_ledger_no_footprint = 0, \
	.vmnekf_is_iokit = 0, \
	.__vmnekf_unused = 0 \
}
627
628 #endif /* XNU_KERNEL_PRIVATE */
629
630 #pragma mark Ledger Tags
631
632 /* current accounting postmark */
633 #define __VM_LEDGER_ACCOUNTING_POSTMARK 2019032600
634
635 /*
636 * When making a new VM_LEDGER_TAG_* or VM_LEDGER_FLAG_*, update tests
637 * vm_parameter_validation_[user|kern] and their expected results; they
638 * deliberately call VM functions with invalid ledger values and you may
639 * be turning one of those invalid tags/flags valid.
640 */
641 /* discrete values: */
642 #define VM_LEDGER_TAG_NONE 0x00000000
643 #define VM_LEDGER_TAG_DEFAULT 0x00000001
644 #define VM_LEDGER_TAG_NETWORK 0x00000002
645 #define VM_LEDGER_TAG_MEDIA 0x00000003
646 #define VM_LEDGER_TAG_GRAPHICS 0x00000004
647 #define VM_LEDGER_TAG_NEURAL 0x00000005
648 #define VM_LEDGER_TAG_MAX 0x00000005
649 #define VM_LEDGER_TAG_UNCHANGED ((int)-1)
650
651 /* individual bits: */
652 #define VM_LEDGER_FLAG_NO_FOOTPRINT (1 << 0)
653 #define VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG (1 << 1)
654 #define VM_LEDGER_FLAG_FROM_KERNEL (1 << 2)
655
656 #define VM_LEDGER_FLAGS_USER (VM_LEDGER_FLAG_NO_FOOTPRINT | VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG)
657 #define VM_LEDGER_FLAGS_ALL (VM_LEDGER_FLAGS_USER | VM_LEDGER_FLAG_FROM_KERNEL)
658
659 #pragma mark User Memory Tags
660
661 /*
662 * These tags may be used to identify memory regions created with
663 * `mach_vm_map()` or `mach_vm_allocate()` via the top 8 bits of the `flags`
664 * parameter. Users should pass `VM_MAKE_TAG(tag) | flags` (see section
665 * "User Flags").
666 */
/* malloc zone tags */
#define VM_MEMORY_MALLOC 1
#define VM_MEMORY_MALLOC_SMALL 2
#define VM_MEMORY_MALLOC_LARGE 3
#define VM_MEMORY_MALLOC_HUGE 4
#define VM_MEMORY_SBRK 5 /* uninteresting -- no one should call */
#define VM_MEMORY_REALLOC 6
#define VM_MEMORY_MALLOC_TINY 7
#define VM_MEMORY_MALLOC_LARGE_REUSABLE 8
#define VM_MEMORY_MALLOC_LARGE_REUSED 9

#define VM_MEMORY_ANALYSIS_TOOL 10

#define VM_MEMORY_MALLOC_NANO 11
#define VM_MEMORY_MALLOC_MEDIUM 12
#define VM_MEMORY_MALLOC_PROB_GUARD 13
682
683 #define VM_MEMORY_MACH_MSG 20
684 #define VM_MEMORY_IOKIT 21
685 #define VM_MEMORY_STACK 30
686 #define VM_MEMORY_GUARD 31
687 #define VM_MEMORY_SHARED_PMAP 32
688 /* memory containing a dylib */
689 #define VM_MEMORY_DYLIB 33
690 #define VM_MEMORY_OBJC_DISPATCHERS 34
691
692 /* Was a nested pmap (VM_MEMORY_SHARED_PMAP) which has now been unnested */
693 #define VM_MEMORY_UNSHARED_PMAP 35
694
695 /* for libchannel memory, mostly used on visionOS for communication with realtime threads */
696 #define VM_MEMORY_LIBCHANNEL 36
697
698 // Placeholders for now -- as we analyze the libraries and find how they
699 // use memory, we can make these labels more specific.
700 #define VM_MEMORY_APPKIT 40
701 #define VM_MEMORY_FOUNDATION 41
702 #define VM_MEMORY_COREGRAPHICS 42
703 #define VM_MEMORY_CORESERVICES 43
704 #define VM_MEMORY_CARBON VM_MEMORY_CORESERVICES
705 #define VM_MEMORY_JAVA 44
706 #define VM_MEMORY_COREDATA 45
707 #define VM_MEMORY_COREDATA_OBJECTIDS 46
708
709 #define VM_MEMORY_ATS 50
710 #define VM_MEMORY_LAYERKIT 51
711 #define VM_MEMORY_CGIMAGE 52
712 #define VM_MEMORY_TCMALLOC 53
713
714 /* private raster data (i.e. layers, some images, QGL allocator) */
715 #define VM_MEMORY_COREGRAPHICS_DATA 54
716
717 /* shared image and font caches */
718 #define VM_MEMORY_COREGRAPHICS_SHARED 55
719
720 /* Memory used for virtual framebuffers, shadowing buffers, etc... */
721 #define VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS 56
722
723 /* Window backing stores, custom shadow data, and compressed backing stores */
724 #define VM_MEMORY_COREGRAPHICS_BACKINGSTORES 57
725
726 /* x-alloc'd memory */
727 #define VM_MEMORY_COREGRAPHICS_XALLOC 58
728
729 /* catch-all for other uses, such as the read-only shared data page */
730 #define VM_MEMORY_COREGRAPHICS_MISC VM_MEMORY_COREGRAPHICS
731
732 /* memory allocated by the dynamic loader for itself */
733 #define VM_MEMORY_DYLD 60
734 /* malloc'd memory created by dyld */
735 #define VM_MEMORY_DYLD_MALLOC 61
736
737 /* Used for sqlite page cache */
738 #define VM_MEMORY_SQLITE 62
739
740 /* JavaScriptCore heaps */
741 #define VM_MEMORY_JAVASCRIPT_CORE 63
742 #define VM_MEMORY_WEBASSEMBLY VM_MEMORY_JAVASCRIPT_CORE
743 /* memory allocated for the JIT */
744 #define VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR 64
745 #define VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE 65
746
747 /* memory allocated for GLSL */
748 #define VM_MEMORY_GLSL 66
749
750 /* memory allocated for OpenCL.framework */
751 #define VM_MEMORY_OPENCL 67
752
753 /* memory allocated for QuartzCore.framework */
754 #define VM_MEMORY_COREIMAGE 68
755
756 /* memory allocated for WebCore Purgeable Buffers */
757 #define VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS 69
758
759 /* ImageIO memory */
760 #define VM_MEMORY_IMAGEIO 70
761
762 /* CoreProfile memory */
763 #define VM_MEMORY_COREPROFILE 71
764
765 /* assetsd / MobileSlideShow memory */
766 #define VM_MEMORY_ASSETSD 72
767
768 /* libsystem_kernel os_once_alloc */
769 #define VM_MEMORY_OS_ALLOC_ONCE 73
770
771 /* libdispatch internal allocator */
772 #define VM_MEMORY_LIBDISPATCH 74
773
774 /* Accelerate.framework image backing stores */
775 #define VM_MEMORY_ACCELERATE 75
776
777 /* CoreUI image block data */
778 #define VM_MEMORY_COREUI 76
779
780 /* CoreUI image file */
781 #define VM_MEMORY_COREUIFILE 77
782
783 /* Genealogy buffers */
784 #define VM_MEMORY_GENEALOGY 78
785
786 /* RawCamera VM allocated memory */
787 #define VM_MEMORY_RAWCAMERA 79
788
789 /* corpse info for dead process */
790 #define VM_MEMORY_CORPSEINFO 80
791
792 /* Apple System Logger (ASL) messages */
793 #define VM_MEMORY_ASL 81
794
795 /* Swift runtime */
796 #define VM_MEMORY_SWIFT_RUNTIME 82
797
798 /* Swift metadata */
799 #define VM_MEMORY_SWIFT_METADATA 83
800
801 /* DHMM data */
802 #define VM_MEMORY_DHMM 84
803
804 /* memory needed for DFR related actions */
805 #define VM_MEMORY_DFR 85
806
807 /* memory allocated by SceneKit.framework */
808 #define VM_MEMORY_SCENEKIT 86
809
810 /* memory allocated by skywalk networking */
811 #define VM_MEMORY_SKYWALK 87
812
813 #define VM_MEMORY_IOSURFACE 88
814
815 #define VM_MEMORY_LIBNETWORK 89
816
817 #define VM_MEMORY_AUDIO 90
818
819 #define VM_MEMORY_VIDEOBITSTREAM 91
820
821 /* memory allocated by CoreMedia */
822 #define VM_MEMORY_CM_XPC 92
823
824 #define VM_MEMORY_CM_RPC 93
825
826 #define VM_MEMORY_CM_MEMORYPOOL 94
827
828 #define VM_MEMORY_CM_READCACHE 95
829
830 #define VM_MEMORY_CM_CRABS 96
831
832 /* memory allocated for QuickLookThumbnailing */
833 #define VM_MEMORY_QUICKLOOK_THUMBNAILS 97
834
835 /* memory allocated by Accounts framework */
836 #define VM_MEMORY_ACCOUNTS 98
837
838 /* memory allocated by Sanitizer runtime libraries */
839 #define VM_MEMORY_SANITIZER 99
840
841 /* Differentiate memory needed by GPU drivers and frameworks from generic IOKit allocations */
842 #define VM_MEMORY_IOACCELERATOR 100
843
844 /* memory allocated by CoreMedia for global image registration of frames */
845 #define VM_MEMORY_CM_REGWARP 101
846
847 /* memory allocated by EmbeddedAcousticRecognition for speech decoder */
848 #define VM_MEMORY_EAR_DECODER 102
849
850 /* CoreUI cached image data */
851 #define VM_MEMORY_COREUI_CACHED_IMAGE_DATA 103
852
853 /* ColorSync is using mmap for read-only copies of ICC profile data */
854 #define VM_MEMORY_COLORSYNC 104
855
856 /* backtrace info for simulated crashes */
857 #define VM_MEMORY_BTINFO 105
858
859 /* memory allocated by CoreMedia */
860 #define VM_MEMORY_CM_HLS 106
861
862 /* memory allocated for CompositorServices */
863 #define VM_MEMORY_COMPOSITOR_SERVICES 107
864
865 /* Reserve 230-239 for Rosetta */
866 #define VM_MEMORY_ROSETTA 230
867 #define VM_MEMORY_ROSETTA_THREAD_CONTEXT 231
868 #define VM_MEMORY_ROSETTA_INDIRECT_BRANCH_MAP 232
869 #define VM_MEMORY_ROSETTA_RETURN_STACK 233
870 #define VM_MEMORY_ROSETTA_EXECUTABLE_HEAP 234
871 #define VM_MEMORY_ROSETTA_USER_LDT 235
872 #define VM_MEMORY_ROSETTA_ARENA 236
873 #define VM_MEMORY_ROSETTA_10 239
874
875 /* Reserve 240-255 for application */
876 #define VM_MEMORY_APPLICATION_SPECIFIC_1 240
877 #define VM_MEMORY_APPLICATION_SPECIFIC_2 241
878 #define VM_MEMORY_APPLICATION_SPECIFIC_3 242
879 #define VM_MEMORY_APPLICATION_SPECIFIC_4 243
880 #define VM_MEMORY_APPLICATION_SPECIFIC_5 244
881 #define VM_MEMORY_APPLICATION_SPECIFIC_6 245
882 #define VM_MEMORY_APPLICATION_SPECIFIC_7 246
883 #define VM_MEMORY_APPLICATION_SPECIFIC_8 247
884 #define VM_MEMORY_APPLICATION_SPECIFIC_9 248
885 #define VM_MEMORY_APPLICATION_SPECIFIC_10 249
886 #define VM_MEMORY_APPLICATION_SPECIFIC_11 250
887 #define VM_MEMORY_APPLICATION_SPECIFIC_12 251
888 #define VM_MEMORY_APPLICATION_SPECIFIC_13 252
889 #define VM_MEMORY_APPLICATION_SPECIFIC_14 253
890 #define VM_MEMORY_APPLICATION_SPECIFIC_15 254
891 #define VM_MEMORY_APPLICATION_SPECIFIC_16 255
892
893 #define VM_MEMORY_COUNT 256
894
#if !XNU_KERNEL_PRIVATE
/*
 * Place a user tag in the top 8 bits of a flags word (see "User Memory
 * Tags" above).
 * NOTE(review): (tag) << 24 is a signed-int shift, so tags >= 0x80
 * shift into the sign bit (UB in ISO C); in practice the compilers
 * used here define this behavior -- confirm before relying on it.
 */
#define VM_MAKE_TAG(tag) ((tag) << 24)
#endif /* !XNU_KERNEL_PRIVATE */
898
#if PRIVATE && !KERNEL
///
/// Return a human-readable description for a given VM user tag.
///
/// - Parameters:
/// - tag: A VM tag between `[0,VM_MEMORY_COUNT)`
///
/// - Returns: A string literal description of the tag
///
/// - Note: Behavior for tags outside `[0,VM_MEMORY_COUNT)` is not
///   specified here; confirm against the implementation before relying
///   on it.
///
__SPI_AVAILABLE(macos(16.0), ios(19.0), watchos(12.0), tvos(19.0), visionos(3.0), bridgeos(10.0))
OS_EXPORT
const char *mach_vm_tag_describe(unsigned int tag);
#endif /* PRIVATE && !KERNEL */
912
#if KERNEL_PRIVATE

#pragma mark Kernel Tags

#if XNU_KERNEL_PRIVATE
/*
 * When making a new VM_KERN_MEMORY_*, update:
 * - tests vm_parameter_validation_[user|kern]
 * and their expected results; they deliberately call VM functions with invalid
 * kernel tag values and you may be turning one of those invalid tags valid.
 * - vm_kern_memory_names, which is used to map tags to their string name
 */
#endif /* XNU_KERNEL_PRIVATE */

/* Statically assigned kernel tags */
#define VM_KERN_MEMORY_NONE 0

#define VM_KERN_MEMORY_OSFMK 1
#define VM_KERN_MEMORY_BSD 2
#define VM_KERN_MEMORY_IOKIT 3
#define VM_KERN_MEMORY_LIBKERN 4
#define VM_KERN_MEMORY_OSKEXT 5
#define VM_KERN_MEMORY_KEXT 6
#define VM_KERN_MEMORY_IPC 7
#define VM_KERN_MEMORY_STACK 8
#define VM_KERN_MEMORY_CPU 9
#define VM_KERN_MEMORY_PMAP 10
#define VM_KERN_MEMORY_PTE 11
#define VM_KERN_MEMORY_ZONE 12
#define VM_KERN_MEMORY_KALLOC 13
#define VM_KERN_MEMORY_COMPRESSOR 14
#define VM_KERN_MEMORY_COMPRESSED_DATA 15
#define VM_KERN_MEMORY_PHANTOM_CACHE 16
#define VM_KERN_MEMORY_WAITQ 17
#define VM_KERN_MEMORY_DIAG 18
#define VM_KERN_MEMORY_LOG 19
#define VM_KERN_MEMORY_FILE 20
#define VM_KERN_MEMORY_MBUF 21
#define VM_KERN_MEMORY_UBC 22
#define VM_KERN_MEMORY_SECURITY 23
#define VM_KERN_MEMORY_MLOCK 24
#define VM_KERN_MEMORY_REASON 25
#define VM_KERN_MEMORY_SKYWALK 26
#define VM_KERN_MEMORY_LTABLE 27
#define VM_KERN_MEMORY_HV 28
#define VM_KERN_MEMORY_KALLOC_DATA 29
#define VM_KERN_MEMORY_RETIRED 30
#define VM_KERN_MEMORY_KALLOC_TYPE 31
#define VM_KERN_MEMORY_TRIAGE 32
#define VM_KERN_MEMORY_RECOUNT 33
#define VM_KERN_MEMORY_MTAG 34
#define VM_KERN_MEMORY_EXCLAVES 35
#define VM_KERN_MEMORY_EXCLAVES_SHARED 36
#define VM_KERN_MEMORY_KALLOC_SHARED 37
#define VM_KERN_MEMORY_CPUTRACE 38
/* add new tags here and adjust first-dynamic value */
/*
 * NOTE(review): tags from VM_KERN_MEMORY_FIRST_DYNAMIC up to (but not
 * including) VM_KERN_MEMORY_ANY are presumably assigned at runtime —
 * confirm against the kernel's tag-allocation code.
 */
#define VM_KERN_MEMORY_FIRST_DYNAMIC 39

/* out of tags: */
#define VM_KERN_MEMORY_ANY 255
#define VM_KERN_MEMORY_COUNT 256
973
#pragma mark Kernel Wired Counts

// mach_memory_info.flags
// The low byte (masked by VM_KERN_SITE_TYPE) is a type field holding one
// of the VM_KERN_SITE_{TAG,KMOD,KERNEL,COUNTER} values (0-3); bits at
// 0x100 and above are independent flag bits.
#define VM_KERN_SITE_TYPE 0x000000FF
#define VM_KERN_SITE_TAG 0x00000000
#define VM_KERN_SITE_KMOD 0x00000001
#define VM_KERN_SITE_KERNEL 0x00000002
#define VM_KERN_SITE_COUNTER 0x00000003
#define VM_KERN_SITE_WIRED 0x00000100 /* add to wired count */
#define VM_KERN_SITE_HIDE 0x00000200 /* no zprint */
#define VM_KERN_SITE_NAMED 0x00000400
#define VM_KERN_SITE_ZONE 0x00000800
#define VM_KERN_SITE_ZONE_VIEW 0x00001000
#define VM_KERN_SITE_KALLOC 0x00002000 /* zone field is size class */
988
/* Kernel Memory Counters */
#if XNU_KERNEL_PRIVATE
/*
 * When making a new VM_KERN_COUNT_*, also update vm_kern_count_names
 */
#endif /* XNU_KERNEL_PRIVATE */

#define VM_KERN_COUNT_MANAGED 0
#define VM_KERN_COUNT_RESERVED 1
#define VM_KERN_COUNT_WIRED 2
#define VM_KERN_COUNT_WIRED_MANAGED 3
#define VM_KERN_COUNT_STOLEN 4
#define VM_KERN_COUNT_LOPAGE 5
#define VM_KERN_COUNT_MAP_KERNEL 6
#define VM_KERN_COUNT_MAP_ZONE 7
#define VM_KERN_COUNT_MAP_KALLOC 8

#define VM_KERN_COUNT_WIRED_BOOT 9

#define VM_KERN_COUNT_BOOT_STOLEN 10

/* The number of bytes from the kernel cache that are wired in memory */
#define VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE 11

/* alias sharing slot 8 with MAP_KALLOC — presumably kept for source compatibility */
#define VM_KERN_COUNT_MAP_KALLOC_LARGE VM_KERN_COUNT_MAP_KALLOC
#define VM_KERN_COUNT_MAP_KALLOC_LARGE_DATA 12
#define VM_KERN_COUNT_MAP_KERNEL_DATA 13

/* The size of the exclaves iboot carveout (exclaves memory not from XNU) in bytes. */
#define VM_KERN_COUNT_EXCLAVES_CARVEOUT 14

/* The number of VM_KERN_COUNT_ stats. New VM_KERN_COUNT_ entries should be less than this. */
#define VM_KERN_COUNTER_COUNT 15
1022
/*
 * Destination classification for VM copy operations.
 * NOTE(review): USER/KERNEL semantics inferred from the macro names;
 * confirm against the vm_map copy paths that consume these values.
 */
#define VM_COPY_DESTINATION_USER 0
#define VM_COPY_DESTINATION_KERNEL 1
#define VM_COPY_DESTINATION_UNKNOWN 2 /* memory entry */
#define VM_COPY_DESTINATION_INTERNAL 3 /* creating a copy map for internal use which is soon discarded */
#endif /* KERNEL_PRIVATE */
1028
1029 __END_DECLS
1030
1031 #endif /* _MACH_VM_STATISTICS_H_ */
1032