1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: mach/vm_statistics.h
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61 *
62 * Virtual memory statistics structure.
63 *
64 */
65
66 #ifndef _MACH_VM_STATISTICS_H_
67 #define _MACH_VM_STATISTICS_H_
68
69 #include <stdbool.h>
70 #include <sys/cdefs.h>
71
72 #include <mach/machine/vm_types.h>
73 #include <mach/machine/kern_return.h>
74
75 __BEGIN_DECLS
76
77 /*
78 * vm_statistics
79 *
80 * History:
81 * rev0 - original structure.
82 * rev1 - added purgable info (purgable_count and purges).
83 * rev2 - added speculative_count.
84 *
85 * Note: you cannot add any new fields to this structure. Add them below in
86 * vm_statistics64.
87 */
88
/*
 * Legacy 32-bit VM statistics (frozen at rev2 -- see history above).
 * All counters are page counts.  This layout is ABI: do not add, remove
 * or reorder fields; extend struct vm_statistics64 below instead.
 */
struct vm_statistics {
	natural_t	free_count;		/* # of pages free */
	natural_t	active_count;		/* # of pages active */
	natural_t	inactive_count;		/* # of pages inactive */
	natural_t	wire_count;		/* # of pages wired down */
	natural_t	zero_fill_count;	/* # of zero fill pages */
	natural_t	reactivations;		/* # of pages reactivated */
	natural_t	pageins;		/* # of pageins */
	natural_t	pageouts;		/* # of pageouts */
	natural_t	faults;			/* # of faults */
	natural_t	cow_faults;		/* # of copy-on-writes */
	natural_t	lookups;		/* object cache lookups */
	natural_t	hits;			/* object cache hits */

	/* added for rev1 */
	natural_t	purgeable_count;	/* # of pages purgeable */
	natural_t	purges;			/* # of pages purged */

	/* added for rev2 */
	/*
	 * NB: speculative pages are already accounted for in "free_count",
	 * so "speculative_count" is the number of "free" pages that are
	 * used to hold data that was read speculatively from disk but
	 * haven't actually been used by anyone so far.
	 */
	natural_t	speculative_count;	/* # of pages speculative */
};

/* Used by all architectures */
typedef struct vm_statistics	*vm_statistics_t;
typedef struct vm_statistics	vm_statistics_data_t;
120
121 /*
122 * vm_statistics64
123 *
124 * History:
125 * rev0 - original structure.
126 * rev1 - added purgable info (purgable_count and purges).
127 * rev2 - added speculative_count.
128 * ----
129 * rev3 - changed name to vm_statistics64.
130 * changed some fields in structure to 64-bit on
131 * arm, i386 and x86_64 architectures.
132 * rev4 - require 64-bit alignment for efficient access
133 * in the kernel. No change to reported data.
134 *
135 */
136
/*
 * 64-bit revision of the VM statistics structure (rev3/rev4 -- see the
 * history above).  Cumulative event counters are 64-bit; instantaneous
 * page counts remain natural_t.  8-byte aligned per rev4.
 */
struct vm_statistics64 {
	natural_t	free_count;		/* # of pages free */
	natural_t	active_count;		/* # of pages active */
	natural_t	inactive_count;		/* # of pages inactive */
	natural_t	wire_count;		/* # of pages wired down */
	uint64_t	zero_fill_count;	/* # of zero fill pages */
	uint64_t	reactivations;		/* # of pages reactivated */
	uint64_t	pageins;		/* # of pageins */
	uint64_t	pageouts;		/* # of pageouts */
	uint64_t	faults;			/* # of faults */
	uint64_t	cow_faults;		/* # of copy-on-writes */
	uint64_t	lookups;		/* object cache lookups */
	uint64_t	hits;			/* object cache hits */
	uint64_t	purges;			/* # of pages purged */
	natural_t	purgeable_count;	/* # of pages purgeable */
	/*
	 * NB: speculative pages are already accounted for in "free_count",
	 * so "speculative_count" is the number of "free" pages that are
	 * used to hold data that was read speculatively from disk but
	 * haven't actually been used by anyone so far.
	 */
	natural_t	speculative_count;	/* # of pages speculative */

	/*
	 * NOTE(review): the "rev1" label below looks like a copy/paste
	 * from struct vm_statistics above; per the history comment these
	 * compressor fields postdate rev4.  Confirm before relying on it.
	 */
	/* added for rev1 */
	uint64_t	decompressions;		/* # of pages decompressed */
	uint64_t	compressions;		/* # of pages compressed */
	uint64_t	swapins;		/* # of pages swapped in (via compression segments) */
	uint64_t	swapouts;		/* # of pages swapped out (via compression segments) */
	natural_t	compressor_page_count;	/* # of pages used by the compressed pager to hold all the compressed data */
	natural_t	throttled_count;	/* # of pages throttled */
	natural_t	external_page_count;	/* # of pages that are file-backed (non-swap) */
	natural_t	internal_page_count;	/* # of pages that are anonymous */
	uint64_t	total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. */
} __attribute__((aligned(8)));

typedef struct vm_statistics64	*vm_statistics64_t;
typedef struct vm_statistics64	vm_statistics64_data_t;
174
/*
 * vm_stats
 *
 * Fill "info" with VM statistics.  NOTE(review): presumably follows the
 * usual Mach in/out convention ("*count" is the buffer capacity on entry
 * and the amount written on return) -- confirm against the implementation.
 */
kern_return_t vm_stats(void *info, unsigned int *count);

/*
 * VM_STATISTICS_TRUNCATE_TO_32_BIT
 *
 * This is used by host_statistics() to truncate and peg the 64-bit in-kernel values from
 * vm_statistics64 to the 32-bit values of the older structure above (vm_statistics).
 * Values larger than UINT32_MAX are clamped (pegged) to UINT32_MAX rather than wrapped.
 */
#define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) ((uint32_t)(((value) > UINT32_MAX ) ? UINT32_MAX : (value)))
184
185 /*
186 * vm_extmod_statistics
187 *
188 * Structure to record modifications to a task by an
189 * external agent.
190 *
191 * History:
192 * rev0 - original structure.
193 */
194
/*
 * Counters for externally-driven task modifications; "_caller_" variants
 * count operations this task performed on others, the rest count
 * operations performed on this task.  8-byte aligned for 32/64-bit
 * layout parity.
 */
struct vm_extmod_statistics {
	int64_t	task_for_pid_count;		/* # of times task port was looked up */
	int64_t	task_for_pid_caller_count;	/* # of times this task called task_for_pid */
	int64_t	thread_creation_count;		/* # of threads created in task */
	int64_t	thread_creation_caller_count;	/* # of threads created by task */
	int64_t	thread_set_state_count;		/* # of register state sets in task */
	int64_t	thread_set_state_caller_count;	/* # of register state sets by task */
} __attribute__((aligned(8)));

typedef struct vm_extmod_statistics *vm_extmod_statistics_t;
typedef struct vm_extmod_statistics vm_extmod_statistics_data_t;
206
/* Per-queue purgeable accounting: object count and aggregate size. */
typedef struct vm_purgeable_stat {
	uint64_t	count;
	uint64_t	size;
}vm_purgeable_stat_t;

struct vm_purgeable_info {
	/*
	 * NOTE(review): 8 entries presumably correspond to the purgeable
	 * volatile groups -- confirm against the VM purgeable queue
	 * definitions before changing.
	 */
	vm_purgeable_stat_t fifo_data[8];
	vm_purgeable_stat_t obsolete_data;
	vm_purgeable_stat_t lifo_data[8];
};

typedef struct vm_purgeable_info *vm_purgeable_info_t;
219
220 /* included for the vm_map_page_query call */
221
/*
 * Bit flags reported for a page by the vm_map_page_query call.
 * Multiple bits may be set for a single page.
 */
#define VM_PAGE_QUERY_PAGE_PRESENT      0x1
#define VM_PAGE_QUERY_PAGE_FICTITIOUS   0x2
#define VM_PAGE_QUERY_PAGE_REF          0x4	/* referenced */
#define VM_PAGE_QUERY_PAGE_DIRTY        0x8
#define VM_PAGE_QUERY_PAGE_PAGED_OUT    0x10
#define VM_PAGE_QUERY_PAGE_COPIED       0x20
#define VM_PAGE_QUERY_PAGE_SPECULATIVE  0x40
#define VM_PAGE_QUERY_PAGE_EXTERNAL     0x80	/* file-backed */
#define VM_PAGE_QUERY_PAGE_CS_VALIDATED 0x100	/* code-signing validated */
#define VM_PAGE_QUERY_PAGE_CS_TAINTED   0x200	/* code-signing tainted */
#define VM_PAGE_QUERY_PAGE_CS_NX        0x400	/* code-signing: no-execute */
#define VM_PAGE_QUERY_PAGE_REUSABLE     0x800
234
235 /*
236 * VM allocation flags:
237 *
238 * VM_FLAGS_FIXED
239 * (really the absence of VM_FLAGS_ANYWHERE)
240 * Allocate new VM region at the specified virtual address, if possible.
241 *
242 * VM_FLAGS_ANYWHERE
243 * Allocate new VM region anywhere it would fit in the address space.
244 *
245 * VM_FLAGS_PURGABLE
246 * Create a purgable VM object for that new VM region.
247 *
248 * VM_FLAGS_4GB_CHUNK
249 * The new VM region will be chunked up into 4GB sized pieces.
250 *
251 * VM_FLAGS_NO_PMAP_CHECK
252 * (for DEBUG kernel config only, ignored for other configs)
253 * Do not check that there is no stale pmap mapping for the new VM region.
254 * This is useful for kernel memory allocations at bootstrap when building
255 * the initial kernel address space while some memory is already in use.
256 *
257 * VM_FLAGS_OVERWRITE
258 * The new VM region can replace existing VM regions if necessary
259 * (to be used in combination with VM_FLAGS_FIXED).
260 *
261 * VM_FLAGS_NO_CACHE
262 * Pages brought in to this VM region are placed on the speculative
263 * queue instead of the active queue. In other words, they are not
264 * cached so that they will be stolen first if memory runs low.
265 */
266
#define VM_FLAGS_FIXED          0x00000000
#define VM_FLAGS_ANYWHERE       0x00000001
#define VM_FLAGS_PURGABLE       0x00000002
#define VM_FLAGS_4GB_CHUNK      0x00000004
#define VM_FLAGS_RANDOM_ADDR    0x00000008	/* place the region at a randomized address */
#define VM_FLAGS_NO_CACHE       0x00000010
#define VM_FLAGS_RESILIENT_CODESIGN 0x00000020	/* tolerate code-signing failures in this region */
#define VM_FLAGS_RESILIENT_MEDIA    0x00000040	/* tolerate backing-media failures in this region */
#define VM_FLAGS_PERMANENT      0x00000080	/* mapping cannot be removed */
#define VM_FLAGS_TPRO           0x00001000
#define VM_FLAGS_OVERWRITE      0x00004000	/* delete any existing mappings first */
/*
 * VM_FLAGS_SUPERPAGE_MASK
 *	3 bits that specify whether large pages should be used instead of
 *	base pages (!=0), as well as the requested page size.
 */
#define VM_FLAGS_SUPERPAGE_MASK 0x00070000	/* bits 0x10000, 0x20000, 0x40000 */
#define VM_FLAGS_RETURN_DATA_ADDR       0x00100000	/* Return address of target data, rather than base of page */
#define VM_FLAGS_RETURN_4K_DATA_ADDR    0x00800000	/* Return 4K aligned address of target data */
/* The tag ("alias") lives in the top byte of the flags word. */
#define VM_FLAGS_ALIAS_MASK     0xFF000000
/* Extract the 8-bit tag from a flags word. */
#define VM_GET_FLAGS_ALIAS(flags, alias)        \
	        (alias) = (((flags) >> 24) & 0xff)
#if !XNU_KERNEL_PRIVATE
/* Store an 8-bit tag into the top byte of a flags word. */
#define VM_SET_FLAGS_ALIAS(flags, alias)        \
	        (flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) |   \
	        (((alias) & ~VM_FLAGS_ALIAS_MASK) << 24))
#endif /* !XNU_KERNEL_PRIVATE */
294
295 #if XNU_KERNEL_PRIVATE
296 /*
297 * When making a new VM_FLAG_*:
298 * - add it to this mask
299 * - add a vmf_* field to vm_map_kernel_flags_t in the right spot
300 * - add a check in vm_map_kernel_flags_check_vmflags()
301 * - update tests vm_parameter_validation_[user|kern] and their expected
302 * results; they deliberately call VM functions with invalid flag values
303 * and you may be turning one of those invalid flags valid.
304 */
/* Every currently-defined VM_FLAG_* bit (kernel-internal validation mask). */
#define VM_FLAGS_ANY_MASK       (VM_FLAGS_FIXED |               \
	                        VM_FLAGS_ANYWHERE |             \
	                        VM_FLAGS_PURGABLE |             \
	                        VM_FLAGS_4GB_CHUNK |            \
	                        VM_FLAGS_RANDOM_ADDR |          \
	                        VM_FLAGS_NO_CACHE |             \
	                        VM_FLAGS_RESILIENT_CODESIGN |   \
	                        VM_FLAGS_RESILIENT_MEDIA |      \
	                        VM_FLAGS_PERMANENT |            \
	                        VM_FLAGS_TPRO |                 \
	                        VM_FLAGS_OVERWRITE |            \
	                        VM_FLAGS_SUPERPAGE_MASK |       \
	                        VM_FLAGS_RETURN_DATA_ADDR |     \
	                        VM_FLAGS_RETURN_4K_DATA_ADDR |  \
	                        VM_FLAGS_ALIAS_MASK)
#endif /* XNU_KERNEL_PRIVATE */
/* Flags that map to hardware-backed protection features. */
#define VM_FLAGS_HW (VM_FLAGS_TPRO)

/* These are the flags that we accept from user-space */
#define VM_FLAGS_USER_ALLOCATE  (VM_FLAGS_FIXED |               \
	                        VM_FLAGS_ANYWHERE |             \
	                        VM_FLAGS_PURGABLE |             \
	                        VM_FLAGS_4GB_CHUNK |            \
	                        VM_FLAGS_RANDOM_ADDR |          \
	                        VM_FLAGS_NO_CACHE |             \
	                        VM_FLAGS_PERMANENT |            \
	                        VM_FLAGS_OVERWRITE |            \
	                        VM_FLAGS_SUPERPAGE_MASK |       \
	                        VM_FLAGS_HW |                   \
	                        VM_FLAGS_ALIAS_MASK)

/* Flags accepted from user-space for mapping (allocate flags + data-address returns). */
#define VM_FLAGS_USER_MAP       (VM_FLAGS_USER_ALLOCATE |       \
	                        VM_FLAGS_RETURN_4K_DATA_ADDR |  \
	                        VM_FLAGS_RETURN_DATA_ADDR)

/* Flags accepted from user-space for remapping. */
#define VM_FLAGS_USER_REMAP     (VM_FLAGS_FIXED |               \
	                        VM_FLAGS_ANYWHERE |             \
	                        VM_FLAGS_RANDOM_ADDR |          \
	                        VM_FLAGS_OVERWRITE|             \
	                        VM_FLAGS_RETURN_DATA_ADDR |     \
	                        VM_FLAGS_RESILIENT_CODESIGN |   \
	                        VM_FLAGS_RESILIENT_MEDIA)

/* The superpage size request occupies the 3 bits starting at this shift. */
#define VM_FLAGS_SUPERPAGE_SHIFT 16
#define SUPERPAGE_NONE          0       /* no superpages, if all bits are 0 */
#define SUPERPAGE_SIZE_ANY      1
#define VM_FLAGS_SUPERPAGE_NONE     (SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT)
#define VM_FLAGS_SUPERPAGE_SIZE_ANY (SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT)
#if defined(__x86_64__) || !defined(KERNEL)
#define SUPERPAGE_SIZE_2MB      2
#define VM_FLAGS_SUPERPAGE_SIZE_2MB (SUPERPAGE_SIZE_2MB<<VM_FLAGS_SUPERPAGE_SHIFT)
#endif
357
358 /*
359 * EXC_GUARD definitions for virtual memory.
360 */
/* EXC_GUARD guard type identifying virtual-memory guard exceptions. */
#define GUARD_TYPE_VIRT_MEMORY  0x5

/* Reasons for exception for virtual memory */
/*
 * NOTE(review): values are non-contiguous (5 is unused; 16/32/64 look
 * like bit values while 1..8 do not) -- confirm whether callers treat
 * these as a bitmask or as discrete codes before adding new entries.
 */
__enum_decl(virtual_memory_guard_exception_code_t, uint32_t, {
	kGUARD_EXC_DEALLOC_GAP = 1,
	kGUARD_EXC_RECLAIM_COPYIO_FAILURE = 2,
	kGUARD_EXC_SEC_LOOKUP_DENIED = 3,
	kGUARD_EXC_RECLAIM_INDEX_FAILURE = 4,
	kGUARD_EXC_SEC_RANGE_DENIED = 6,
	kGUARD_EXC_SEC_ACCESS_FAULT = 7,
	kGUARD_EXC_RECLAIM_DEALLOCATE_FAILURE = 8,
	kGUARD_EXC_SEC_COPY_DENIED = 16,
	kGUARD_EXC_SEC_SHARING_DENIED = 32,
	kGUARD_EXC_SEC_ASYNC_ACCESS_FAULT = 64,
});
376
377 #ifdef XNU_KERNEL_PRIVATE
378
379 static inline bool
vm_guard_is_sec_access(uint32_t flavor)380 vm_guard_is_sec_access(uint32_t flavor)
381 {
382 return flavor == kGUARD_EXC_SEC_ACCESS_FAULT ||
383 flavor == kGUARD_EXC_SEC_ASYNC_ACCESS_FAULT;
384 }
385
386 static inline bool
vm_guard_is_sec_policy(uint32_t flavor)387 vm_guard_is_sec_policy(uint32_t flavor)
388 {
389 return flavor == kGUARD_EXC_SEC_LOOKUP_DENIED ||
390 flavor == kGUARD_EXC_SEC_RANGE_DENIED ||
391 flavor == kGUARD_EXC_SEC_COPY_DENIED ||
392 flavor == kGUARD_EXC_SEC_SHARING_DENIED;
393 }
394
395 /*!
396 * @enum vm_map_range_id_t
397 *
398 * @brief
399 * Enumerate a particular vm_map range.
400 *
401 * @discussion
402 * The kernel_map VA has been split into the following ranges. Userspace
403 * VA for any given process can also optionally be split by the following user
404 * ranges.
405 *
406 * @const KMEM_RANGE_ID_NONE
407 * This range is only used for early initialization.
408 *
409 * @const KMEM_RANGE_ID_PTR_*
410 * Range containing general purpose allocations from kalloc, etc that
411 * contain pointers.
412 *
413 * @const KMEM_RANGE_ID_SPRAYQTN
414 * The spray quarantine range contains allocations that have the following
415 * properties:
416 * - An attacker could control the size, lifetime and number of allocations
417 * of this type (or from this callsite).
418 * - The pointer to the allocation is zeroed to ensure that it isn't left
419 * dangling limiting the use of UaFs.
420 * - OOBs on the allocation is carefully considered and sufficiently
421 * addressed.
422 *
423 * @const KMEM_RANGE_ID_DATA
424 * Range containing allocations that are bags of bytes and contain no
425 * pointers.
426 */
__enum_decl(vm_map_range_id_t, uint8_t, {
	KMEM_RANGE_ID_NONE,
	KMEM_RANGE_ID_PTR_0,
	KMEM_RANGE_ID_PTR_1,
	KMEM_RANGE_ID_PTR_2,
	KMEM_RANGE_ID_SPRAYQTN,
	KMEM_RANGE_ID_DATA,

	KMEM_RANGE_ID_FIRST = KMEM_RANGE_ID_PTR_0,
	KMEM_RANGE_ID_NUM_PTR = KMEM_RANGE_ID_PTR_2,	/* count of PTR ranges (aliases PTR_2 == 3) */
	KMEM_RANGE_ID_MAX = KMEM_RANGE_ID_DATA,

	/* these UMEM_* correspond to the MACH_VM_RANGE_* tags and are ABI */
	UMEM_RANGE_ID_DEFAULT = 0, /* same as MACH_VM_RANGE_DEFAULT */
	UMEM_RANGE_ID_HEAP, /* same as MACH_VM_RANGE_DATA */
	UMEM_RANGE_ID_FIXED, /* same as MACH_VM_RANGE_FIXED */
	UMEM_RANGE_ID_LARGE_FILE,

	/* these UMEM_* are XNU internal only range IDs, and aren't ABI */
	UMEM_RANGE_ID_MAX = UMEM_RANGE_ID_LARGE_FILE,

/* defined inside the enum on purpose so it tracks KMEM_RANGE_ID_MAX */
#define KMEM_RANGE_COUNT        (KMEM_RANGE_ID_MAX + 1)
});

typedef vm_map_range_id_t       kmem_range_id_t;

/* floor(log2(mask)) -- undefined for mask == 0 (__builtin_clz(0) is UB) */
#define kmem_log2down(mask)     (31 - __builtin_clz(mask))
/* Larger of the kernel and user max range IDs. */
#define KMEM_RANGE_MAX          (UMEM_RANGE_ID_MAX < KMEM_RANGE_ID_MAX \
	    ? KMEM_RANGE_ID_MAX : UMEM_RANGE_ID_MAX)
/* Number of bits needed to store any range ID. */
#define KMEM_RANGE_BITS         kmem_log2down(2 * KMEM_RANGE_MAX - 1)
457
/*
 * Kernel-internal VM map flags.
 *
 * The first 24 bits of the bit-field struct mirror the user-visible
 * VM_FLAG_* bit positions (so __vm_flags below can alias them); the
 * remaining bits carry the VM tag and kernel-only (vmkf_*) options.
 * Bit positions are load-bearing -- do not reorder fields.
 */
typedef union {
	struct {
		unsigned long long
		/*
		 * VM_FLAG_* flags
		 */
		    vmf_fixed:1,
		    vmf_purgeable:1,
		    vmf_4gb_chunk:1,
		    vmf_random_addr:1,
		    vmf_no_cache:1,
		    vmf_resilient_codesign:1,
		    vmf_resilient_media:1,
		    vmf_permanent:1,

		    __unused_bit_8:1,
		    __unused_bit_9:1,
		    __unused_bit_10:1,
		    __unused_bit_11:1,
		    vmf_tpro:1,
		    __unused_bit_13:1,
		    vmf_overwrite:1,
		    __unused_bit_15:1,

		    vmf_superpage_size:3,
		    __unused_bit_19:1,
		    vmf_return_data_addr:1,
		    __unused_bit_21:1,
		    __unused_bit_22:1,
		    vmf_return_4k_data_addr:1,

		/*
		 * VM tag (user or kernel)
		 *
		 * User tags are limited to 8 bits,
		 * kernel tags can use up to 12 bits
		 * with -zt or similar features.
		 */
		    vm_tag : 12, /* same as VME_ALIAS_BITS */

		/*
		 * General kernel flags
		 */
		    vmkf_already:1,                   /* OK if same mapping already exists */
		    vmkf_beyond_max:1,                /* map beyond the map's max offset */
		    vmkf_no_pmap_check:1,             /* do not check that pmap is empty */
		    vmkf_map_jit:1,                   /* mark entry as JIT region */
		    vmkf_iokit_acct:1,                /* IOKit accounting */
		    vmkf_keep_map_locked:1,           /* keep map locked when returning from vm_map_enter() */
		    vmkf_overwrite_immutable:1,       /* can overwrite immutable mappings */
		    vmkf_remap_prot_copy:1,           /* vm_remap for VM_PROT_COPY */
		    vmkf_remap_legacy_mode:1,         /* vm_remap, not vm_remap_new */
		    vmkf_cs_enforcement_override:1,   /* override CS_ENFORCEMENT */
		    vmkf_cs_enforcement:1,            /* new value for CS_ENFORCEMENT */
		    vmkf_nested_pmap:1,               /* use a nested pmap */
		    vmkf_no_copy_on_read:1,           /* do not use copy_on_read */
		    vmkf_copy_single_object:1,        /* vm_map_copy only 1 VM object */
		    vmkf_copy_pageable:1,             /* vm_map_copy with pageable entries */
		    vmkf_copy_same_map:1,             /* vm_map_copy to remap in original map */
		    vmkf_translated_allow_execute:1,  /* allow execute in translated processes */
		    vmkf_tpro_enforcement_override:1, /* override TPRO propagation */
		    vmkf_no_soft_limit:1,             /* override soft allocation size limit */

		/*
		 * Submap creation, altering vm_map_enter() only
		 */
		    vmkf_submap:1,                    /* mapping a VM submap */
		    vmkf_submap_atomic:1,             /* keep entry atomic (no splitting/coalescing) */
		    vmkf_submap_adjust:1,             /* the submap needs to be adjusted */

		/*
		 * Flags altering the behavior of vm_map_locate_space_anywhere()
		 */
		    vmkf_32bit_map_va:1,              /* allocate in low 32-bits range */
		    vmkf_guard_before:1,              /* guard page before the mapping */
		    vmkf_last_free:1,                 /* find space from the end */
		    vmkf_range_id:KMEM_RANGE_BITS;    /* kmem range to allocate in */

		unsigned long long
		    __vmkf_unused2:64;
	};

	/*
	 * do not access these directly,
	 * use vm_map_kernel_flags_check_vmflags*()
	 */
	uint32_t __vm_flags : 24;
} vm_map_kernel_flags_t;
546
547 /*
548 * using this means that vmf_* flags can't be used
549 * until vm_map_kernel_flags_set_vmflags() is set,
550 * or some manual careful init is done.
551 *
552 * Prefer VM_MAP_KERNEL_FLAGS_(FIXED,ANYWHERE) instead.
553 */
/* All-zero flags: vmf_fixed is unset, so placement mode is ambiguous. */
#define VM_MAP_KERNEL_FLAGS_NONE \
	(vm_map_kernel_flags_t){ }

/* Fixed-address placement, with optional designated-initializer overrides. */
#define VM_MAP_KERNEL_FLAGS_FIXED(...) \
	(vm_map_kernel_flags_t){ .vmf_fixed = true, __VA_ARGS__ }

/* Anywhere placement, with optional designated-initializer overrides. */
#define VM_MAP_KERNEL_FLAGS_ANYWHERE(...) \
	(vm_map_kernel_flags_t){ .vmf_fixed = false, __VA_ARGS__ }

#define VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(...) \
	VM_MAP_KERNEL_FLAGS_FIXED(.vmf_permanent = true, __VA_ARGS__)

#define VM_MAP_KERNEL_FLAGS_ANYWHERE_PERMANENT(...) \
	VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmf_permanent = true, __VA_ARGS__)

/* Anywhere placement within the bag-of-bytes (DATA) kmem range. */
#define VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(...) \
	VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = KMEM_RANGE_ID_DATA, __VA_ARGS__)

/* Kernel-internal flags for named entry creation (ledger tagging). */
typedef struct {
	unsigned int
	    vmnekf_ledger_tag:3,
	    vmnekf_ledger_no_footprint:1,
	    __vmnekf_unused:28;
} vm_named_entry_kernel_flags_t;
#define VM_NAMED_ENTRY_KERNEL_FLAGS_NONE (vm_named_entry_kernel_flags_t) { \
	.vmnekf_ledger_tag = 0,                 \
	.vmnekf_ledger_no_footprint = 0,        \
	.__vmnekf_unused = 0                    \
}
583
584 #endif /* XNU_KERNEL_PRIVATE */
585
586 /* current accounting postmark */
/* current accounting postmark (date-encoded: YYYYMMDDnn) */
#define __VM_LEDGER_ACCOUNTING_POSTMARK 2019032600

/*
 * When making a new VM_LEDGER_TAG_* or VM_LEDGER_FLAG_*, update tests
 * vm_parameter_validation_[user|kern] and their expected results; they
 * deliberately call VM functions with invalid ledger values and you may
 * be turning one of those invalid tags/flags valid.
 */
/* discrete values: */
#define VM_LEDGER_TAG_NONE      0x00000000
#define VM_LEDGER_TAG_DEFAULT   0x00000001
#define VM_LEDGER_TAG_NETWORK   0x00000002
#define VM_LEDGER_TAG_MEDIA     0x00000003
#define VM_LEDGER_TAG_GRAPHICS  0x00000004
#define VM_LEDGER_TAG_NEURAL    0x00000005
#define VM_LEDGER_TAG_MAX       0x00000005	/* keep in sync with the highest tag above */
#define VM_LEDGER_TAG_UNCHANGED ((int)-1)	/* sentinel: leave the current tag as-is */

/* individual bits: */
#define VM_LEDGER_FLAG_NO_FOOTPRINT             (1 << 0)
#define VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG   (1 << 1)
#define VM_LEDGER_FLAG_FROM_KERNEL              (1 << 2)

/* Flag subsets accepted from user-space vs. anywhere. */
#define VM_LEDGER_FLAGS_USER (VM_LEDGER_FLAG_NO_FOOTPRINT | VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG)
#define VM_LEDGER_FLAGS_ALL (VM_LEDGER_FLAGS_USER | VM_LEDGER_FLAG_FROM_KERNEL)
612
/*
 * User-space VM tags (passed via VM_MAKE_TAG in the top byte of the
 * allocation flags).  Tags 1-13: malloc family and analysis tools.
 */
#define VM_MEMORY_MALLOC 1
#define VM_MEMORY_MALLOC_SMALL 2
#define VM_MEMORY_MALLOC_LARGE 3
#define VM_MEMORY_MALLOC_HUGE 4
#define VM_MEMORY_SBRK 5 // uninteresting -- no one should call
#define VM_MEMORY_REALLOC 6
#define VM_MEMORY_MALLOC_TINY 7
#define VM_MEMORY_MALLOC_LARGE_REUSABLE 8
#define VM_MEMORY_MALLOC_LARGE_REUSED 9

#define VM_MEMORY_ANALYSIS_TOOL 10

#define VM_MEMORY_MALLOC_NANO 11
#define VM_MEMORY_MALLOC_MEDIUM 12
#define VM_MEMORY_MALLOC_PROB_GUARD 13
628
629 #define VM_MEMORY_MACH_MSG 20
630 #define VM_MEMORY_IOKIT 21
631 #define VM_MEMORY_STACK 30
632 #define VM_MEMORY_GUARD 31
633 #define VM_MEMORY_SHARED_PMAP 32
634 /* memory containing a dylib */
635 #define VM_MEMORY_DYLIB 33
636 #define VM_MEMORY_OBJC_DISPATCHERS 34
637
638 /* Was a nested pmap (VM_MEMORY_SHARED_PMAP) which has now been unnested */
639 #define VM_MEMORY_UNSHARED_PMAP 35
640
641
642 // Placeholders for now -- as we analyze the libraries and find how they
643 // use memory, we can make these labels more specific.
644 #define VM_MEMORY_APPKIT 40
645 #define VM_MEMORY_FOUNDATION 41
646 #define VM_MEMORY_COREGRAPHICS 42
647 #define VM_MEMORY_CORESERVICES 43
648 #define VM_MEMORY_CARBON VM_MEMORY_CORESERVICES
649 #define VM_MEMORY_JAVA 44
650 #define VM_MEMORY_COREDATA 45
651 #define VM_MEMORY_COREDATA_OBJECTIDS 46
652 #define VM_MEMORY_ATS 50
653 #define VM_MEMORY_LAYERKIT 51
654 #define VM_MEMORY_CGIMAGE 52
655 #define VM_MEMORY_TCMALLOC 53
656
657 /* private raster data (i.e. layers, some images, QGL allocator) */
658 #define VM_MEMORY_COREGRAPHICS_DATA 54
659
660 /* shared image and font caches */
661 #define VM_MEMORY_COREGRAPHICS_SHARED 55
662
663 /* Memory used for virtual framebuffers, shadowing buffers, etc... */
664 #define VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS 56
665
666 /* Window backing stores, custom shadow data, and compressed backing stores */
667 #define VM_MEMORY_COREGRAPHICS_BACKINGSTORES 57
668
669 /* x-alloc'd memory */
670 #define VM_MEMORY_COREGRAPHICS_XALLOC 58
671
672 /* catch-all for other uses, such as the read-only shared data page */
673 #define VM_MEMORY_COREGRAPHICS_MISC VM_MEMORY_COREGRAPHICS
674
675 /* memory allocated by the dynamic loader for itself */
676 #define VM_MEMORY_DYLD 60
677 /* malloc'd memory created by dyld */
678 #define VM_MEMORY_DYLD_MALLOC 61
679
680 /* Used for sqlite page cache */
681 #define VM_MEMORY_SQLITE 62
682
683 /* JavaScriptCore heaps */
684 #define VM_MEMORY_JAVASCRIPT_CORE 63
685 #define VM_MEMORY_WEBASSEMBLY VM_MEMORY_JAVASCRIPT_CORE
686 /* memory allocated for the JIT */
687 #define VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR 64
688 #define VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE 65
689
690 /* memory allocated for GLSL */
691 #define VM_MEMORY_GLSL 66
692
693 /* memory allocated for OpenCL.framework */
694 #define VM_MEMORY_OPENCL 67
695
696 /* memory allocated for QuartzCore.framework */
697 #define VM_MEMORY_COREIMAGE 68
698
699 /* memory allocated for WebCore Purgeable Buffers */
700 #define VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS 69
701
702 /* ImageIO memory */
703 #define VM_MEMORY_IMAGEIO 70
704
705 /* CoreProfile memory */
706 #define VM_MEMORY_COREPROFILE 71
707
708 /* assetsd / MobileSlideShow memory */
709 #define VM_MEMORY_ASSETSD 72
710
711 /* libsystem_kernel os_once_alloc */
712 #define VM_MEMORY_OS_ALLOC_ONCE 73
713
714 /* libdispatch internal allocator */
715 #define VM_MEMORY_LIBDISPATCH 74
716
717 /* Accelerate.framework image backing stores */
718 #define VM_MEMORY_ACCELERATE 75
719
720 /* CoreUI image block data */
721 #define VM_MEMORY_COREUI 76
722
723 /* CoreUI image file */
724 #define VM_MEMORY_COREUIFILE 77
725
726 /* Genealogy buffers */
727 #define VM_MEMORY_GENEALOGY 78
728
729 /* RawCamera VM allocated memory */
730 #define VM_MEMORY_RAWCAMERA 79
731
732 /* corpse info for dead process */
733 #define VM_MEMORY_CORPSEINFO 80
734
735 /* Apple System Logger (ASL) messages */
736 #define VM_MEMORY_ASL 81
737
738 /* Swift runtime */
739 #define VM_MEMORY_SWIFT_RUNTIME 82
740
741 /* Swift metadata */
742 #define VM_MEMORY_SWIFT_METADATA 83
743
744 /* DHMM data */
745 #define VM_MEMORY_DHMM 84
746
747
748 /* memory allocated by SceneKit.framework */
749 #define VM_MEMORY_SCENEKIT 86
750
751 /* memory allocated by skywalk networking */
752 #define VM_MEMORY_SKYWALK 87
753
754 #define VM_MEMORY_IOSURFACE 88
755
756 #define VM_MEMORY_LIBNETWORK 89
757
758 #define VM_MEMORY_AUDIO 90
759
760 #define VM_MEMORY_VIDEOBITSTREAM 91
761
762 /* memory allocated by CoreMedia */
763 #define VM_MEMORY_CM_XPC 92
764
765 #define VM_MEMORY_CM_RPC 93
766
767 #define VM_MEMORY_CM_MEMORYPOOL 94
768
769 #define VM_MEMORY_CM_READCACHE 95
770
771 #define VM_MEMORY_CM_CRABS 96
772
773 /* memory allocated for QuickLookThumbnailing */
774 #define VM_MEMORY_QUICKLOOK_THUMBNAILS 97
775
776 /* memory allocated by Accounts framework */
777 #define VM_MEMORY_ACCOUNTS 98
778
779 /* memory allocated by Sanitizer runtime libraries */
780 #define VM_MEMORY_SANITIZER 99
781
782 /* Differentiate memory needed by GPU drivers and frameworks from generic IOKit allocations */
783 #define VM_MEMORY_IOACCELERATOR 100
784
785 /* memory allocated by CoreMedia for global image registration of frames */
786 #define VM_MEMORY_CM_REGWARP 101
787
788 /* memory allocated by EmbeddedAcousticRecognition for speech decoder */
789 #define VM_MEMORY_EAR_DECODER 102
790
791 /* CoreUI cached image data */
792 #define VM_MEMORY_COREUI_CACHED_IMAGE_DATA 103
793
794 /* ColorSync is using mmap for read-only copies of ICC profile data */
795 #define VM_MEMORY_COLORSYNC 104
796
797 /* backtrace info for simulated crashes */
798 #define VM_MEMORY_BTINFO 105
799
800 /* memory allocated by CoreMedia */
801 #define VM_MEMORY_CM_HLS 106
802
803 /* Reserve 230-239 for Rosetta */
804 #define VM_MEMORY_ROSETTA 230
805 #define VM_MEMORY_ROSETTA_THREAD_CONTEXT 231
806 #define VM_MEMORY_ROSETTA_INDIRECT_BRANCH_MAP 232
807 #define VM_MEMORY_ROSETTA_RETURN_STACK 233
808 #define VM_MEMORY_ROSETTA_EXECUTABLE_HEAP 234
809 #define VM_MEMORY_ROSETTA_USER_LDT 235
810 #define VM_MEMORY_ROSETTA_ARENA 236
811 #define VM_MEMORY_ROSETTA_10 239
812
813 /* Reserve 240-255 for application */
814 #define VM_MEMORY_APPLICATION_SPECIFIC_1 240
815 #define VM_MEMORY_APPLICATION_SPECIFIC_16 255
816
817 #define VM_MEMORY_COUNT 256
818
#if !XNU_KERNEL_PRIVATE
/* Place a VM_MEMORY_* tag in the top byte of the allocation flags word. */
#define VM_MAKE_TAG(tag) ((tag) << 24)
#endif /* !XNU_KERNEL_PRIVATE */
822
823
824 #if KERNEL_PRIVATE
825
826 /* kernel map tags */
827 /* please add new definition strings to zprint */
828 /*
829 * When making a new VM_KERN_MEMORY_*, update tests vm_parameter_validation_[user|kern]
830 * and their expected results; they deliberately call VM functions with invalid
831 * kernel tag values and you may be turning one of those invalid tags valid.
832 */
833
834 #define VM_KERN_MEMORY_NONE 0
835
836 #define VM_KERN_MEMORY_OSFMK 1
837 #define VM_KERN_MEMORY_BSD 2
838 #define VM_KERN_MEMORY_IOKIT 3
839 #define VM_KERN_MEMORY_LIBKERN 4
840 #define VM_KERN_MEMORY_OSKEXT 5
841 #define VM_KERN_MEMORY_KEXT 6
842 #define VM_KERN_MEMORY_IPC 7
843 #define VM_KERN_MEMORY_STACK 8
844 #define VM_KERN_MEMORY_CPU 9
845 #define VM_KERN_MEMORY_PMAP 10
846 #define VM_KERN_MEMORY_PTE 11
847 #define VM_KERN_MEMORY_ZONE 12
848 #define VM_KERN_MEMORY_KALLOC 13
849 #define VM_KERN_MEMORY_COMPRESSOR 14
850 #define VM_KERN_MEMORY_COMPRESSED_DATA 15
851 #define VM_KERN_MEMORY_PHANTOM_CACHE 16
852 #define VM_KERN_MEMORY_WAITQ 17
853 #define VM_KERN_MEMORY_DIAG 18
854 #define VM_KERN_MEMORY_LOG 19
855 #define VM_KERN_MEMORY_FILE 20
856 #define VM_KERN_MEMORY_MBUF 21
857 #define VM_KERN_MEMORY_UBC 22
858 #define VM_KERN_MEMORY_SECURITY 23
859 #define VM_KERN_MEMORY_MLOCK 24
860 #define VM_KERN_MEMORY_REASON 25
861 #define VM_KERN_MEMORY_SKYWALK 26
862 #define VM_KERN_MEMORY_LTABLE 27
863 #define VM_KERN_MEMORY_HV 28
864 #define VM_KERN_MEMORY_KALLOC_DATA 29
865 #define VM_KERN_MEMORY_RETIRED 30
866 #define VM_KERN_MEMORY_KALLOC_TYPE 31
867 #define VM_KERN_MEMORY_TRIAGE 32
868 #define VM_KERN_MEMORY_RECOUNT 33
869 #define VM_KERN_MEMORY_EXCLAVES 35
870 #define VM_KERN_MEMORY_EXCLAVES_SHARED 36
871 #define VM_KERN_MEMORY_KALLOC_SHARED 37
/* add new tags here and adjust first-dynamic value */
/*
 * NOTE(review): VM_KERN_MEMORY_CPUTRACE (39) is defined at/after
 * FIRST_DYNAMIC (38), which the comment above suggests should be the
 * first non-static tag -- confirm the intended boundary.
 */
#define VM_KERN_MEMORY_FIRST_DYNAMIC    38
#define VM_KERN_MEMORY_CPUTRACE         39

/* out of tags: */
#define VM_KERN_MEMORY_ANY              255
#define VM_KERN_MEMORY_COUNT            256
879
880 /* end kernel map tags */
881
882 // mach_memory_info.flags
883 #define VM_KERN_SITE_TYPE 0x000000FF
884 #define VM_KERN_SITE_TAG 0x00000000
885 #define VM_KERN_SITE_KMOD 0x00000001
886 #define VM_KERN_SITE_KERNEL 0x00000002
887 #define VM_KERN_SITE_COUNTER 0x00000003
888 #define VM_KERN_SITE_WIRED 0x00000100 /* add to wired count */
889 #define VM_KERN_SITE_HIDE 0x00000200 /* no zprint */
890 #define VM_KERN_SITE_NAMED 0x00000400
891 #define VM_KERN_SITE_ZONE 0x00000800
892 #define VM_KERN_SITE_ZONE_VIEW 0x00001000
893 #define VM_KERN_SITE_KALLOC 0x00002000 /* zone field is size class */
894
895 #define VM_KERN_COUNT_MANAGED 0
896 #define VM_KERN_COUNT_RESERVED 1
897 #define VM_KERN_COUNT_WIRED 2
898 #define VM_KERN_COUNT_WIRED_MANAGED 3
899 #define VM_KERN_COUNT_STOLEN 4
900 #define VM_KERN_COUNT_LOPAGE 5
901 #define VM_KERN_COUNT_MAP_KERNEL 6
902 #define VM_KERN_COUNT_MAP_ZONE 7
903 #define VM_KERN_COUNT_MAP_KALLOC 8
904
905 #define VM_KERN_COUNT_WIRED_BOOT 9
906
907 #define VM_KERN_COUNT_BOOT_STOLEN 10
908
909 /* The number of bytes from the kernel cache that are wired in memory */
910 #define VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE 11
911
912 #define VM_KERN_COUNT_MAP_KALLOC_LARGE VM_KERN_COUNT_MAP_KALLOC
913 #define VM_KERN_COUNT_MAP_KALLOC_LARGE_DATA 12
914 #define VM_KERN_COUNT_MAP_KERNEL_DATA 13
915
916 /* The size of the exclaves iboot carveout (exclaves memory not from XNU) in bytes. */
917 #define VM_KERN_COUNT_EXCLAVES_CARVEOUT 14
918
919 /* The number of VM_KERN_COUNT_ stats. New VM_KERN_COUNT_ entries should be less than this. */
920 #define VM_KERN_COUNTER_COUNT 15
921
922
923 #endif /* KERNEL_PRIVATE */
924
925 __END_DECLS
926
927 #endif /* _MACH_VM_STATISTICS_H_ */
928