1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: mach/vm_statistics.h
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61 *
62 * Virtual memory statistics structure.
63 *
64 */
65
66 #ifndef _MACH_VM_STATISTICS_H_
67 #define _MACH_VM_STATISTICS_H_
68
69
70 #include <Availability.h>
71 #include <os/base.h>
72 #include <stdbool.h>
73 #include <sys/cdefs.h>
74
75 #include <mach/machine/vm_types.h>
76 #include <mach/machine/kern_return.h>
77
78 __BEGIN_DECLS
79
80 #pragma mark VM Statistics
81
82 /*
83 * vm_statistics
84 *
85 * History:
86 * rev0 - original structure.
 *	rev1 - added purgable info (purgeable_count and purges).
88 * rev2 - added speculative_count.
89 *
90 * Note: you cannot add any new fields to this structure. Add them below in
91 * vm_statistics64.
92 */
93
/*
 * Legacy 32-bit statistics structure (revision history above).  All
 * counters are in units of pages.  This layout is frozen for ABI
 * compatibility: new fields go in vm_statistics64 below, and 64-bit
 * in-kernel counters are pegged into these 32-bit fields via
 * VM_STATISTICS_TRUNCATE_TO_32_BIT (defined later in this header).
 */
struct vm_statistics {
	natural_t       free_count;             /* # of pages free */
	natural_t       active_count;           /* # of pages active */
	natural_t       inactive_count;         /* # of pages inactive */
	natural_t       wire_count;             /* # of pages wired down */
	natural_t       zero_fill_count;        /* # of zero fill pages */
	natural_t       reactivations;          /* # of pages reactivated */
	natural_t       pageins;                /* # of pageins */
	natural_t       pageouts;               /* # of pageouts */
	natural_t       faults;                 /* # of faults */
	natural_t       cow_faults;             /* # of copy-on-writes */
	natural_t       lookups;                /* object cache lookups */
	natural_t       hits;                   /* object cache hits */

	/* added for rev1 */
	natural_t       purgeable_count;        /* # of pages purgeable */
	natural_t       purges;                 /* # of pages purged */

	/* added for rev2 */
	/*
	 * NB: speculative pages are already accounted for in "free_count",
	 * so "speculative_count" is the number of "free" pages that are
	 * used to hold data that was read speculatively from disk but
	 * haven't actually been used by anyone so far.
	 */
	natural_t       speculative_count;      /* # of pages speculative */
};

/* Used by all architectures */
typedef struct vm_statistics    *vm_statistics_t;
typedef struct vm_statistics    vm_statistics_data_t;
125
126 /*
127 * vm_statistics64
128 *
129 * History:
130 * rev0 - original structure.
 *	rev1 - added purgable info (purgeable_count and purges).
132 * rev2 - added speculative_count.
133 * ----
134 * rev3 - changed name to vm_statistics64.
135 * changed some fields in structure to 64-bit on
136 * arm, i386 and x86_64 architectures.
137 * rev4 - require 64-bit alignment for efficient access
138 * in the kernel. No change to reported data.
139 *
140 */
141
/*
 * 64-bit statistics structure (revision history above).  Lifetime
 * counters that could plausibly overflow 32 bits are uint64_t;
 * instantaneous page counts remain natural_t.  The structure is
 * 8-byte aligned (rev4) for efficient access in the kernel.
 */
struct vm_statistics64 {
	natural_t       free_count;             /* # of pages free */
	natural_t       active_count;           /* # of pages active */
	natural_t       inactive_count;         /* # of pages inactive */
	natural_t       wire_count;             /* # of pages wired down */
	uint64_t        zero_fill_count;        /* # of zero fill pages */
	uint64_t        reactivations;          /* # of pages reactivated */
	uint64_t        pageins;                /* # of pageins (lifetime) */
	uint64_t        pageouts;               /* # of pageouts */
	uint64_t        faults;                 /* # of faults */
	uint64_t        cow_faults;             /* # of copy-on-writes */
	uint64_t        lookups;                /* object cache lookups */
	uint64_t        hits;                   /* object cache hits */
	uint64_t        purges;                 /* # of pages purged */
	natural_t       purgeable_count;        /* # of pages purgeable */
	/*
	 * NB: speculative pages are already accounted for in "free_count",
	 * so "speculative_count" is the number of "free" pages that are
	 * used to hold data that was read speculatively from disk but
	 * haven't actually been used by anyone so far.
	 */
	natural_t       speculative_count;      /* # of pages speculative */

	/*
	 * added for rev1
	 * NOTE(review): this "rev1" (and "rev2" below) conflicts with the
	 * revision history comment above, where rev1/rev2 predate the
	 * fields that follow; these markers presumably refer to later
	 * revisions of this 64-bit structure -- confirm before relying.
	 */
	uint64_t        decompressions;         /* # of pages decompressed (lifetime) */
	uint64_t        compressions;           /* # of pages compressed (lifetime) */
	uint64_t        swapins;                /* # of pages swapped in via compressor segments (lifetime) */
	uint64_t        swapouts;               /* # of pages swapped out via compressor segments (lifetime) */
	natural_t       compressor_page_count;  /* # of pages used by the compressed pager to hold all the compressed data */
	natural_t       throttled_count;        /* # of pages throttled */
	natural_t       external_page_count;    /* # of pages that are file-backed (non-swap) */
	natural_t       internal_page_count;    /* # of pages that are anonymous */
	uint64_t        total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. */
	/* added for rev2 */
	uint64_t        swapped_count;          /* # of compressor-stored pages currently stored in swap */
} __attribute__((aligned(8)));

typedef struct vm_statistics64  *vm_statistics64_t;
typedef struct vm_statistics64  vm_statistics64_data_t;
181
/*
 * Copy VM statistics into "info".  NOTE(review): the exact in/out
 * semantics of "count" (presumably the buffer capacity on entry and
 * the number of fields returned on exit) are not visible in this
 * header -- confirm against the implementation.
 */
kern_return_t   vm_stats(void *info, unsigned int *count);
183
/*
 * VM_STATISTICS_TRUNCATE_TO_32_BIT
 *
 * This is used by host_statistics() to truncate and peg the 64-bit in-kernel values from
 * vm_statistics64 to the 32-bit values of the older structure above (vm_statistics).
 *
 * NOTE: "value" is evaluated twice by this macro -- do not pass an
 * expression with side effects.
 */
#define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) ((uint32_t)(((value) > UINT32_MAX ) ? UINT32_MAX : (value)))
191
192 /*
193 * vm_extmod_statistics
194 *
195 * Structure to record modifications to a task by an
196 * external agent.
197 *
198 * History:
199 * rev0 - original structure.
200 */
201
/*
 * Per-task counters of modifications performed on (or by) a task by an
 * external agent, as described above.  8-byte aligned for efficient
 * in-kernel access.
 */
struct vm_extmod_statistics {
	int64_t task_for_pid_count;             /* # of times task port was looked up */
	int64_t task_for_pid_caller_count;      /* # of times this task called task_for_pid */
	int64_t thread_creation_count;          /* # of threads created in task */
	int64_t thread_creation_caller_count;   /* # of threads created by task */
	int64_t thread_set_state_count;         /* # of register state sets in task */
	int64_t thread_set_state_caller_count;  /* # of register state sets by task */
} __attribute__((aligned(8)));

typedef struct vm_extmod_statistics *vm_extmod_statistics_t;
typedef struct vm_extmod_statistics vm_extmod_statistics_data_t;
213
/* One purgeable-queue statistic: number of objects and their total size. */
typedef struct vm_purgeable_stat {
	uint64_t count;
	uint64_t size;
} vm_purgeable_stat_t;

/* Snapshot of purgeable memory, bucketed by queue discipline. */
struct vm_purgeable_info {
	vm_purgeable_stat_t fifo_data[8];       /* FIFO volatile queues */
	vm_purgeable_stat_t obsolete_data;      /* obsolete queue */
	vm_purgeable_stat_t lifo_data[8];       /* LIFO volatile queues */
};

typedef struct vm_purgeable_info *vm_purgeable_info_t;
226
227 /* included for the vm_map_page_query call */
228
229 #define VM_PAGE_QUERY_PAGE_PRESENT 0x1
230 #define VM_PAGE_QUERY_PAGE_FICTITIOUS 0x2
231 #define VM_PAGE_QUERY_PAGE_REF 0x4
232 #define VM_PAGE_QUERY_PAGE_DIRTY 0x8
233 #define VM_PAGE_QUERY_PAGE_PAGED_OUT 0x10
234 #define VM_PAGE_QUERY_PAGE_COPIED 0x20
235 #define VM_PAGE_QUERY_PAGE_SPECULATIVE 0x40
236 #define VM_PAGE_QUERY_PAGE_EXTERNAL 0x80
237 #define VM_PAGE_QUERY_PAGE_CS_VALIDATED 0x100
238 #define VM_PAGE_QUERY_PAGE_CS_TAINTED 0x200
239 #define VM_PAGE_QUERY_PAGE_CS_NX 0x400
240 #define VM_PAGE_QUERY_PAGE_REUSABLE 0x800
241
242 #pragma mark User Flags
243
244 /*
245 * VM allocation flags:
246 *
247 * VM_FLAGS_FIXED
248 * (really the absence of VM_FLAGS_ANYWHERE)
249 * Allocate new VM region at the specified virtual address, if possible.
250 *
251 * VM_FLAGS_ANYWHERE
252 * Allocate new VM region anywhere it would fit in the address space.
253 *
254 * VM_FLAGS_PURGABLE
255 * Create a purgable VM object for that new VM region.
256 *
257 * VM_FLAGS_4GB_CHUNK
258 * The new VM region will be chunked up into 4GB sized pieces.
259 *
260 * VM_FLAGS_NO_PMAP_CHECK
261 * (for DEBUG kernel config only, ignored for other configs)
262 * Do not check that there is no stale pmap mapping for the new VM region.
263 * This is useful for kernel memory allocations at bootstrap when building
264 * the initial kernel address space while some memory is already in use.
265 *
266 * VM_FLAGS_OVERWRITE
267 * The new VM region can replace existing VM regions if necessary
268 * (to be used in combination with VM_FLAGS_FIXED).
269 *
270 * VM_FLAGS_NO_CACHE
271 * Pages brought in to this VM region are placed on the speculative
272 * queue instead of the active queue. In other words, they are not
273 * cached so that they will be stolen first if memory runs low.
274 */
275
276 #define VM_FLAGS_FIXED 0x00000000
277 #define VM_FLAGS_ANYWHERE 0x00000001
278 #define VM_FLAGS_PURGABLE 0x00000002
279 #define VM_FLAGS_4GB_CHUNK 0x00000004
280 #define VM_FLAGS_RANDOM_ADDR 0x00000008
281 #define VM_FLAGS_NO_CACHE 0x00000010
282 #define VM_FLAGS_RESILIENT_CODESIGN 0x00000020
283 #define VM_FLAGS_RESILIENT_MEDIA 0x00000040
284 #define VM_FLAGS_PERMANENT 0x00000080
285 #define VM_FLAGS_TPRO 0x00001000
286 #define VM_FLAGS_MTE 0x00002000
287 #define VM_FLAGS_OVERWRITE 0x00004000 /* delete any existing mappings first */
288 /*
289 * VM_FLAGS_SUPERPAGE_MASK
290 * 3 bits that specify whether large pages should be used instead of
291 * base pages (!=0), as well as the requested page size.
292 */
293 #define VM_FLAGS_SUPERPAGE_MASK 0x00070000 /* bits 0x10000, 0x20000, 0x40000 */
294 #define VM_FLAGS_RETURN_DATA_ADDR 0x00100000 /* Return address of target data, rather than base of page */
295 #define VM_FLAGS_RETURN_4K_DATA_ADDR 0x00800000 /* Return 4K aligned address of target data */
296 #define VM_FLAGS_ALIAS_MASK 0xFF000000
297 #define VM_GET_FLAGS_ALIAS(flags, alias) \
298 (alias) = (((flags) >> 24) & 0xff)
299 #if !XNU_KERNEL_PRIVATE
300 #define VM_SET_FLAGS_ALIAS(flags, alias) \
301 (flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) | \
302 (((alias) & ~VM_FLAGS_ALIAS_MASK) << 24))
303 #endif /* !XNU_KERNEL_PRIVATE */
304
305 #if XNU_KERNEL_PRIVATE
306 /*
307 * When making a new VM_FLAG_*:
308 * - add it to this mask
309 * - add a vmf_* field to vm_map_kernel_flags_t in the right spot
310 * - add a check in vm_map_kernel_flags_check_vmflags()
311 * - update tests vm_parameter_validation_[user|kern] and their expected
312 * results; they deliberately call VM functions with invalid flag values
313 * and you may be turning one of those invalid flags valid.
314 */
315 #define VM_FLAGS_ANY_MASK (VM_FLAGS_FIXED | \
316 VM_FLAGS_ANYWHERE | \
317 VM_FLAGS_PURGABLE | \
318 VM_FLAGS_4GB_CHUNK | \
319 VM_FLAGS_RANDOM_ADDR | \
320 VM_FLAGS_NO_CACHE | \
321 VM_FLAGS_RESILIENT_CODESIGN | \
322 VM_FLAGS_RESILIENT_MEDIA | \
323 VM_FLAGS_PERMANENT | \
324 VM_FLAGS_TPRO | \
325 VM_FLAGS_MTE | \
326 VM_FLAGS_OVERWRITE | \
327 VM_FLAGS_SUPERPAGE_MASK | \
328 VM_FLAGS_RETURN_DATA_ADDR | \
329 VM_FLAGS_RETURN_4K_DATA_ADDR | \
330 VM_FLAGS_ALIAS_MASK)
331 #endif /* XNU_KERNEL_PRIVATE */
332 #define VM_FLAGS_HW (VM_FLAGS_TPRO | VM_FLAGS_MTE)
333
334 /* These are the flags that we accept from user-space */
335 #define VM_FLAGS_USER_ALLOCATE (VM_FLAGS_FIXED | \
336 VM_FLAGS_ANYWHERE | \
337 VM_FLAGS_PURGABLE | \
338 VM_FLAGS_4GB_CHUNK | \
339 VM_FLAGS_RANDOM_ADDR | \
340 VM_FLAGS_NO_CACHE | \
341 VM_FLAGS_PERMANENT | \
342 VM_FLAGS_OVERWRITE | \
343 VM_FLAGS_SUPERPAGE_MASK | \
344 VM_FLAGS_HW | \
345 VM_FLAGS_ALIAS_MASK)
346
347 #define VM_FLAGS_USER_MAP (VM_FLAGS_USER_ALLOCATE | \
348 VM_FLAGS_RETURN_4K_DATA_ADDR | \
349 VM_FLAGS_RETURN_DATA_ADDR)
350
351 #define VM_FLAGS_USER_REMAP (VM_FLAGS_FIXED | \
352 VM_FLAGS_ANYWHERE | \
353 VM_FLAGS_RANDOM_ADDR | \
354 VM_FLAGS_OVERWRITE| \
355 VM_FLAGS_RETURN_DATA_ADDR | \
356 VM_FLAGS_RESILIENT_CODESIGN | \
357 VM_FLAGS_RESILIENT_MEDIA)
358
359 #define VM_FLAGS_SUPERPAGE_SHIFT 16
360 #define SUPERPAGE_NONE 0 /* no superpages, if all bits are 0 */
361 #define SUPERPAGE_SIZE_ANY 1
362 #define VM_FLAGS_SUPERPAGE_NONE (SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT)
363 #define VM_FLAGS_SUPERPAGE_SIZE_ANY (SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT)
364 #if defined(__x86_64__) || !defined(KERNEL)
365 #define SUPERPAGE_SIZE_2MB 2
366 #define VM_FLAGS_SUPERPAGE_SIZE_2MB (SUPERPAGE_SIZE_2MB<<VM_FLAGS_SUPERPAGE_SHIFT)
367 #endif
368
369 /*
370 * EXC_GUARD definitions for virtual memory.
371 */
372 #define GUARD_TYPE_VIRT_MEMORY 0x5
373
374 /* Reasons for exception for virtual memory */
__enum_decl(virtual_memory_guard_exception_code_t, uint32_t, {
	/*
	 * NOTE(review): the first few codes (1, 2, 4, 8) look like
	 * historical bit values, but later codes (9, 10, ...) are
	 * sequential -- treat these as discrete codes, not a bitmask.
	 */
	kGUARD_EXC_DEALLOC_GAP = 1,
	kGUARD_EXC_RECLAIM_COPYIO_FAILURE = 2,
	kGUARD_EXC_RECLAIM_INDEX_FAILURE = 4,
	kGUARD_EXC_RECLAIM_DEALLOCATE_FAILURE = 8,
	kGUARD_EXC_RECLAIM_ACCOUNTING_FAILURE = 9,
	kGUARD_EXC_SEC_IOPL_ON_EXEC_PAGE = 10,
	kGUARD_EXC_SEC_EXEC_ON_IOPL_PAGE = 11,
	kGUARD_EXC_SEC_UPL_WRITE_ON_EXEC_REGION = 12,
	kGUARD_EXC_LARGE_ALLOCATION_TELEMETRY = 13,
	/*
	 * rdar://151450801 (Remove spurious kGUARD_EXC_SEC_ACCESS_FAULT and kGUARD_EXC_SEC_ASYNC_ACCESS_FAULT once CrashReporter is aligned)
	 */
	kGUARD_EXC_SEC_ACCESS_FAULT = 98,
	kGUARD_EXC_SEC_ASYNC_ACCESS_FAULT = 99,
	/* VM policy decisions */
	kGUARD_EXC_SEC_COPY_DENIED = 100,
	kGUARD_EXC_SEC_SHARING_DENIED = 101,

	/* Fault-related exceptions. */
	kGUARD_EXC_MTE_SYNC_FAULT = 200,
	kGUARD_EXC_MTE_ASYNC_USER_FAULT = 201,
	kGUARD_EXC_MTE_ASYNC_KERN_FAULT = 202
});
399
400 #define kGUARD_EXC_MTE_SOFT_MODE 0x100000
401
402 #ifdef XNU_KERNEL_PRIVATE
403
404 #if HAS_MTE
405 static inline bool
vm_guard_is_mte_policy(uint32_t flavor)406 vm_guard_is_mte_policy(uint32_t flavor)
407 {
408 return flavor == kGUARD_EXC_SEC_COPY_DENIED || flavor == kGUARD_EXC_SEC_SHARING_DENIED;
409 }
410
411 static inline bool
vm_guard_is_mte_fault(uint32_t flavor)412 vm_guard_is_mte_fault(uint32_t flavor)
413 {
414 return flavor == kGUARD_EXC_MTE_SYNC_FAULT ||
415 flavor == kGUARD_EXC_MTE_ASYNC_USER_FAULT ||
416 flavor == kGUARD_EXC_MTE_ASYNC_KERN_FAULT;
417 }
418 #endif /* HAS_MTE */
419
420 #pragma mark Map Ranges
421
422 /*!
423 * @enum vm_map_range_id_t
424 *
425 * @brief
426 * Enumerate a particular vm_map range.
427 *
428 * @discussion
429 * The kernel_map VA has been split into the following ranges. Userspace
430 * VA for any given process can also optionally be split by the following user
431 * ranges.
432 *
433 * @const KMEM_RANGE_ID_NONE
434 * This range is only used for early initialization.
435 *
436 * @const KMEM_RANGE_ID_PTR_*
437 * Range containing general purpose allocations from kalloc, etc that
438 * contain pointers.
439 *
440 * @const KMEM_RANGE_ID_SPRAYQTN
441 * The spray quarantine range contains allocations that have the following
442 * properties:
443 * - An attacker could control the size, lifetime and number of allocations
444 * of this type (or from this callsite).
445 * - The pointer to the allocation is zeroed to ensure that it isn't left
446 * dangling limiting the use of UaFs.
447 * - OOBs on the allocation is carefully considered and sufficiently
448 * addressed.
449 *
450 * @const KMEM_RANGE_ID_DATA
451 * Range containing allocations that are bags of bytes and contain no
452 * pointers.
453 *
454 * @const KMEM_RANGE_ID_DATA_SHARED
455 * Range containing allocations that are bags of bytes and contain no
456 * pointers and meant to be shared with external domains.
457 */
__enum_decl(vm_map_range_id_t, uint8_t, {
	KMEM_RANGE_ID_NONE,
	KMEM_RANGE_ID_PTR_0,
	KMEM_RANGE_ID_PTR_1,
	KMEM_RANGE_ID_PTR_2,
	KMEM_RANGE_ID_SPRAYQTN,
	KMEM_RANGE_ID_DATA,
	KMEM_RANGE_ID_DATA_SHARED,

	KMEM_RANGE_ID_FIRST = KMEM_RANGE_ID_PTR_0,      /* first real kernel range */
	KMEM_RANGE_ID_NUM_PTR = KMEM_RANGE_ID_PTR_2,    /* # of PTR ranges (== 3) */
	KMEM_RANGE_ID_MAX = KMEM_RANGE_ID_DATA_SHARED,

	/* these UMEM_* correspond to the MACH_VM_RANGE_* tags and are ABI */
	UMEM_RANGE_ID_DEFAULT = 0, /* same as MACH_VM_RANGE_DEFAULT */
	UMEM_RANGE_ID_HEAP, /* same as MACH_VM_RANGE_DATA */
	UMEM_RANGE_ID_FIXED, /* same as MACH_VM_RANGE_FIXED */
	UMEM_RANGE_ID_LARGE_FILE,

	/* these UMEM_* are XNU internal only range IDs, and aren't ABI */
	UMEM_RANGE_ID_MAX = UMEM_RANGE_ID_LARGE_FILE,

#define KMEM_RANGE_COUNT (KMEM_RANGE_ID_MAX + 1)
});

/* kernel-side alias for the shared range-id type */
typedef vm_map_range_id_t kmem_range_id_t;
484
/* floor(log2(mask)) for a non-zero 32-bit value */
#define kmem_log2down(mask)     (31 - __builtin_clz(mask))
/* the larger of the kernel and user maximum range ids */
#define KMEM_RANGE_MAX  (UMEM_RANGE_ID_MAX < KMEM_RANGE_ID_MAX \
	? KMEM_RANGE_ID_MAX : UMEM_RANGE_ID_MAX)
/* # of bits needed to hold any range id (sizes vmkf_range_id below) */
#define KMEM_RANGE_BITS kmem_log2down(2 * KMEM_RANGE_MAX - 1)
489
490 #pragma mark Kernel Flags
491
/*
 * Kernel-internal representation of VM mapping flags: the user-visible
 * VM_FLAGS_* bits expanded into named bitfields, plus the VM tag and
 * kernel-only (vmkf_*) flags.  The anonymous struct and the __vm_flags
 * member overlay the same storage.
 * NOTE(review): bit-field layout is implementation-defined in C; this
 * relies on the layout produced by the toolchains XNU supports.
 */
typedef union {
	struct {
		unsigned long long
		/*
		 * VM_FLAG_* flags
		 *
		 * Bits 1..23 mirror the user-visible VM_FLAGS_* values
		 * defined above (e.g. vmf_purgeable is bit 1, matching
		 * VM_FLAGS_PURGABLE == 0x2).  Bit 0 is vmf_fixed, the
		 * inverse sense of VM_FLAGS_ANYWHERE (0x1).
		 */
		vmf_fixed:1,
		vmf_purgeable:1,
		vmf_4gb_chunk:1,
		vmf_random_addr:1,
		vmf_no_cache:1,
		vmf_resilient_codesign:1,
		vmf_resilient_media:1,
		vmf_permanent:1,

		__unused_bit_8:1,
		__unused_bit_9:1,
		__unused_bit_10:1,
		__unused_bit_11:1,
		vmf_tpro:1,
		vmf_mte:1,
		vmf_overwrite:1,
		__unused_bit_15:1,

		vmf_superpage_size:3,           /* matches VM_FLAGS_SUPERPAGE_MASK (0x70000) */
		__unused_bit_19:1,
		vmf_return_data_addr:1,
		__unused_bit_21:1,
		__unused_bit_22:1,
		vmf_return_4k_data_addr:1,

		/*
		 * VM tag (user or kernel)
		 *
		 * User tags are limited to 8 bits,
		 * kernel tags can use up to 12 bits
		 * with -zt or similar features.
		 */
		vm_tag : 12, /* same as VME_ALIAS_BITS */

		/*
		 * General kernel flags
		 */
		vmkf_already:1,                 /* OK if same mapping already exists */
		vmkf_beyond_max:1,              /* map beyond the map's max offset */
		vmkf_no_pmap_check:1,           /* do not check that pmap is empty */
		vmkf_map_jit:1,                 /* mark entry as JIT region */
		vmkf_iokit_acct:1,              /* IOKit accounting */
		vmkf_keep_map_locked:1,         /* keep map locked when returning from vm_map_enter() */
		vmkf_overwrite_immutable:1,     /* can overwrite immutable mappings */
		vmkf_remap_prot_copy:1,         /* vm_remap for VM_PROT_COPY */
		vmkf_remap_legacy_mode:1,       /* vm_remap, not vm_remap_new */
		vmkf_cs_enforcement_override:1, /* override CS_ENFORCEMENT */
		vmkf_cs_enforcement:1,          /* new value for CS_ENFORCEMENT */
		vmkf_nested_pmap:1,             /* use a nested pmap */
		vmkf_no_copy_on_read:1,         /* do not use copy_on_read */
		vmkf_copy_single_object:1,      /* vm_map_copy only 1 VM object */
		vmkf_copy_pageable:1,           /* vm_map_copy with pageable entries */
		vmkf_copy_same_map:1,           /* vm_map_copy to remap in original map */
		vmkf_translated_allow_execute:1, /* allow execute in translated processes */
		vmkf_tpro_enforcement_override:1, /* override TPRO propagation */
		vmkf_no_soft_limit:1,           /* override soft allocation size limit */

		/*
		 * Submap creation, altering vm_map_enter() only
		 */
		vmkf_submap:1,                  /* mapping a VM submap */
		vmkf_submap_atomic:1,           /* keep entry atomic (no splitting/coalescing) */
		vmkf_submap_adjust:1,           /* the submap needs to be adjusted */

		/*
		 * Flags altering the behavior of vm_map_locate_space_anywhere()
		 */
		vmkf_32bit_map_va:1,            /* allocate in low 32-bits range */
		vmkf_guard_before:1,            /* guard page before the mapping */
		vmkf_last_free:1,               /* find space from the end */
		vmkf_range_id:KMEM_RANGE_BITS;  /* kmem range to allocate in */

		unsigned long long
		/*
		 * Flags used to enforce security policy for copying of tagged memory
		 */
		vmkf_copy_dest:2,               /* See VM_COPY_DESTINATION_* */
		vmkf_is_iokit:1,                /* creating a memory entry to back an IOMD */
		__vmkf_unused2:61;
	};

	/*
	 * do not access these directly,
	 * use vm_map_kernel_flags_check_vmflags*()
	 *
	 * Overlays the low 24 bits of the struct above (the VM_FLAGS_*
	 * portion, before vm_tag).
	 */
	uint32_t __vm_flags : 24;
} vm_map_kernel_flags_t;
585
/*
 * using this means that vmf_* flags can't be used
 * until vm_map_kernel_flags_set_vmflags() is set,
 * or some manual careful init is done.
 *
 * Prefer VM_MAP_KERNEL_FLAGS_(FIXED,ANYWHERE) instead.
 */
#define VM_MAP_KERNEL_FLAGS_NONE \
	(vm_map_kernel_flags_t){ }

/*
 * Map at a caller-chosen fixed address.  Additional designated
 * initializers may be appended, e.g.
 * VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true).
 */
#define VM_MAP_KERNEL_FLAGS_FIXED(...) \
	(vm_map_kernel_flags_t){ .vmf_fixed = true, __VA_ARGS__ }

/* Let the VM pick the address; extra initializers may be appended. */
#define VM_MAP_KERNEL_FLAGS_ANYWHERE(...) \
	(vm_map_kernel_flags_t){ .vmf_fixed = false, __VA_ARGS__ }

/* Fixed-address mapping that cannot later be deallocated/overwritten. */
#define VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(...) \
	VM_MAP_KERNEL_FLAGS_FIXED(.vmf_permanent = true, __VA_ARGS__)

/* Anywhere mapping that cannot later be deallocated/overwritten. */
#define VM_MAP_KERNEL_FLAGS_ANYWHERE_PERMANENT(...) \
	VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmf_permanent = true, __VA_ARGS__)

/* Anywhere mapping placed in the pointer-free KMEM_RANGE_ID_DATA range. */
#define VM_MAP_KERNEL_FLAGS_DATA_BUFFERS_ANYWHERE(...) \
	VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = KMEM_RANGE_ID_DATA, __VA_ARGS__)

/*
 * Anywhere mapping for data shared with external domains; falls back to
 * the plain DATA range when no separate shared range is configured.
 */
#define VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(...) \
	VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = kmem_needs_data_share_range() ? \
    KMEM_RANGE_ID_DATA_SHARED : KMEM_RANGE_ID_DATA, __VA_ARGS__)
614
/*
 * Kernel flags for named-entry creation.  The 3-bit ledger tag holds
 * one of the VM_LEDGER_TAG_* values defined below (max value 5 fits in
 * 3 bits).
 */
typedef struct {
	unsigned int
	vmnekf_ledger_tag:3,            /* VM_LEDGER_TAG_* value */
	vmnekf_ledger_no_footprint:1,   /* exclude from task footprint accounting */
	vmnekf_is_iokit:1,              /* named entry backs an IOKit memory descriptor */
	__vmnekf_unused:27;
} vm_named_entry_kernel_flags_t;
/* All-zero default: no ledger tag, footprint accounting on, not IOKit. */
#define VM_NAMED_ENTRY_KERNEL_FLAGS_NONE (vm_named_entry_kernel_flags_t) { \
	.vmnekf_ledger_tag = 0, \
	.vmnekf_ledger_no_footprint = 0, \
	.vmnekf_is_iokit = 0, \
	.__vmnekf_unused = 0 \
}
628
629 #endif /* XNU_KERNEL_PRIVATE */
630
631 #pragma mark Ledger Tags
632
633 /* current accounting postmark */
634 #define __VM_LEDGER_ACCOUNTING_POSTMARK 2019032600
635
636 /*
637 * When making a new VM_LEDGER_TAG_* or VM_LEDGER_FLAG_*, update tests
638 * vm_parameter_validation_[user|kern] and their expected results; they
639 * deliberately call VM functions with invalid ledger values and you may
640 * be turning one of those invalid tags/flags valid.
641 */
642 /* discrete values: */
643 #define VM_LEDGER_TAG_NONE 0x00000000
644 #define VM_LEDGER_TAG_DEFAULT 0x00000001
645 #define VM_LEDGER_TAG_NETWORK 0x00000002
646 #define VM_LEDGER_TAG_MEDIA 0x00000003
647 #define VM_LEDGER_TAG_GRAPHICS 0x00000004
648 #define VM_LEDGER_TAG_NEURAL 0x00000005
649 #define VM_LEDGER_TAG_MAX 0x00000005
650 #define VM_LEDGER_TAG_UNCHANGED ((int)-1)
651
652 /* individual bits: */
653 #define VM_LEDGER_FLAG_NO_FOOTPRINT (1 << 0)
654 #define VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG (1 << 1)
655 #define VM_LEDGER_FLAG_FROM_KERNEL (1 << 2)
656
657 #define VM_LEDGER_FLAGS_USER (VM_LEDGER_FLAG_NO_FOOTPRINT | VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG)
658 #define VM_LEDGER_FLAGS_ALL (VM_LEDGER_FLAGS_USER | VM_LEDGER_FLAG_FROM_KERNEL)
659
660 #pragma mark User Memory Tags
661
662 /*
663 * These tags may be used to identify memory regions created with
664 * `mach_vm_map()` or `mach_vm_allocate()` via the top 8 bits of the `flags`
665 * parameter. Users should pass `VM_MAKE_TAG(tag) | flags` (see section
666 * "User Flags").
667 */
668 #define VM_MEMORY_MALLOC 1
669 #define VM_MEMORY_MALLOC_SMALL 2
670 #define VM_MEMORY_MALLOC_LARGE 3
671 #define VM_MEMORY_MALLOC_HUGE 4
#define VM_MEMORY_SBRK                  5 // uninteresting -- no one should call
673 #define VM_MEMORY_REALLOC 6
674 #define VM_MEMORY_MALLOC_TINY 7
675 #define VM_MEMORY_MALLOC_LARGE_REUSABLE 8
676 #define VM_MEMORY_MALLOC_LARGE_REUSED 9
677
678 #define VM_MEMORY_ANALYSIS_TOOL 10
679
680 #define VM_MEMORY_MALLOC_NANO 11
681 #define VM_MEMORY_MALLOC_MEDIUM 12
682 #define VM_MEMORY_MALLOC_PROB_GUARD 13
683
684 #define VM_MEMORY_MACH_MSG 20
685 #define VM_MEMORY_IOKIT 21
686 #define VM_MEMORY_STACK 30
687 #define VM_MEMORY_GUARD 31
688 #define VM_MEMORY_SHARED_PMAP 32
689 /* memory containing a dylib */
690 #define VM_MEMORY_DYLIB 33
691 #define VM_MEMORY_OBJC_DISPATCHERS 34
692
693 /* Was a nested pmap (VM_MEMORY_SHARED_PMAP) which has now been unnested */
694 #define VM_MEMORY_UNSHARED_PMAP 35
695
696 /* for libchannel memory, mostly used on visionOS for communication with realtime threads */
697 #define VM_MEMORY_LIBCHANNEL 36
698
699 // Placeholders for now -- as we analyze the libraries and find how they
700 // use memory, we can make these labels more specific.
701 #define VM_MEMORY_APPKIT 40
702 #define VM_MEMORY_FOUNDATION 41
703 #define VM_MEMORY_COREGRAPHICS 42
704 #define VM_MEMORY_CORESERVICES 43
705 #define VM_MEMORY_CARBON VM_MEMORY_CORESERVICES
706 #define VM_MEMORY_JAVA 44
707 #define VM_MEMORY_COREDATA 45
708 #define VM_MEMORY_COREDATA_OBJECTIDS 46
709
710 #define VM_MEMORY_ATS 50
711 #define VM_MEMORY_LAYERKIT 51
712 #define VM_MEMORY_CGIMAGE 52
713 #define VM_MEMORY_TCMALLOC 53
714
715 /* private raster data (i.e. layers, some images, QGL allocator) */
716 #define VM_MEMORY_COREGRAPHICS_DATA 54
717
718 /* shared image and font caches */
719 #define VM_MEMORY_COREGRAPHICS_SHARED 55
720
721 /* Memory used for virtual framebuffers, shadowing buffers, etc... */
722 #define VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS 56
723
724 /* Window backing stores, custom shadow data, and compressed backing stores */
725 #define VM_MEMORY_COREGRAPHICS_BACKINGSTORES 57
726
727 /* x-alloc'd memory */
728 #define VM_MEMORY_COREGRAPHICS_XALLOC 58
729
730 /* catch-all for other uses, such as the read-only shared data page */
731 #define VM_MEMORY_COREGRAPHICS_MISC VM_MEMORY_COREGRAPHICS
732
733 /* memory allocated by the dynamic loader for itself */
734 #define VM_MEMORY_DYLD 60
735 /* malloc'd memory created by dyld */
736 #define VM_MEMORY_DYLD_MALLOC 61
737
738 /* Used for sqlite page cache */
739 #define VM_MEMORY_SQLITE 62
740
741 /* JavaScriptCore heaps */
742 #define VM_MEMORY_JAVASCRIPT_CORE 63
743 #define VM_MEMORY_WEBASSEMBLY VM_MEMORY_JAVASCRIPT_CORE
744 /* memory allocated for the JIT */
745 #define VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR 64
746 #define VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE 65
747
748 /* memory allocated for GLSL */
749 #define VM_MEMORY_GLSL 66
750
751 /* memory allocated for OpenCL.framework */
752 #define VM_MEMORY_OPENCL 67
753
754 /* memory allocated for QuartzCore.framework */
755 #define VM_MEMORY_COREIMAGE 68
756
757 /* memory allocated for WebCore Purgeable Buffers */
758 #define VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS 69
759
760 /* ImageIO memory */
761 #define VM_MEMORY_IMAGEIO 70
762
763 /* CoreProfile memory */
764 #define VM_MEMORY_COREPROFILE 71
765
766 /* assetsd / MobileSlideShow memory */
767 #define VM_MEMORY_ASSETSD 72
768
769 /* libsystem_kernel os_once_alloc */
770 #define VM_MEMORY_OS_ALLOC_ONCE 73
771
772 /* libdispatch internal allocator */
773 #define VM_MEMORY_LIBDISPATCH 74
774
775 /* Accelerate.framework image backing stores */
776 #define VM_MEMORY_ACCELERATE 75
777
778 /* CoreUI image block data */
779 #define VM_MEMORY_COREUI 76
780
781 /* CoreUI image file */
782 #define VM_MEMORY_COREUIFILE 77
783
784 /* Genealogy buffers */
785 #define VM_MEMORY_GENEALOGY 78
786
787 /* RawCamera VM allocated memory */
788 #define VM_MEMORY_RAWCAMERA 79
789
790 /* corpse info for dead process */
791 #define VM_MEMORY_CORPSEINFO 80
792
793 /* Apple System Logger (ASL) messages */
794 #define VM_MEMORY_ASL 81
795
796 /* Swift runtime */
797 #define VM_MEMORY_SWIFT_RUNTIME 82
798
799 /* Swift metadata */
800 #define VM_MEMORY_SWIFT_METADATA 83
801
802 /* DHMM data */
803 #define VM_MEMORY_DHMM 84
804
805 /* memory needed for DFR related actions */
806 #define VM_MEMORY_DFR 85
807
808 /* memory allocated by SceneKit.framework */
809 #define VM_MEMORY_SCENEKIT 86
810
811 /* memory allocated by skywalk networking */
812 #define VM_MEMORY_SKYWALK 87
813
814 #define VM_MEMORY_IOSURFACE 88
815
816 #define VM_MEMORY_LIBNETWORK 89
817
818 #define VM_MEMORY_AUDIO 90
819
820 #define VM_MEMORY_VIDEOBITSTREAM 91
821
822 /* memory allocated by CoreMedia */
823 #define VM_MEMORY_CM_XPC 92
824
825 #define VM_MEMORY_CM_RPC 93
826
827 #define VM_MEMORY_CM_MEMORYPOOL 94
828
829 #define VM_MEMORY_CM_READCACHE 95
830
831 #define VM_MEMORY_CM_CRABS 96
832
833 /* memory allocated for QuickLookThumbnailing */
834 #define VM_MEMORY_QUICKLOOK_THUMBNAILS 97
835
836 /* memory allocated by Accounts framework */
837 #define VM_MEMORY_ACCOUNTS 98
838
839 /* memory allocated by Sanitizer runtime libraries */
840 #define VM_MEMORY_SANITIZER 99
841
842 /* Differentiate memory needed by GPU drivers and frameworks from generic IOKit allocations */
843 #define VM_MEMORY_IOACCELERATOR 100
844
845 /* memory allocated by CoreMedia for global image registration of frames */
846 #define VM_MEMORY_CM_REGWARP 101
847
848 /* memory allocated by EmbeddedAcousticRecognition for speech decoder */
849 #define VM_MEMORY_EAR_DECODER 102
850
851 /* CoreUI cached image data */
852 #define VM_MEMORY_COREUI_CACHED_IMAGE_DATA 103
853
854 /* ColorSync is using mmap for read-only copies of ICC profile data */
855 #define VM_MEMORY_COLORSYNC 104
856
857 /* backtrace info for simulated crashes */
858 #define VM_MEMORY_BTINFO 105
859
860 /* memory allocated by CoreMedia */
861 #define VM_MEMORY_CM_HLS 106
862
863 /* memory allocated for CompositorServices */
864 #define VM_MEMORY_COMPOSITOR_SERVICES 107
865
866 /* Reserve 230-239 for Rosetta */
867 #define VM_MEMORY_ROSETTA 230
868 #define VM_MEMORY_ROSETTA_THREAD_CONTEXT 231
869 #define VM_MEMORY_ROSETTA_INDIRECT_BRANCH_MAP 232
870 #define VM_MEMORY_ROSETTA_RETURN_STACK 233
871 #define VM_MEMORY_ROSETTA_EXECUTABLE_HEAP 234
872 #define VM_MEMORY_ROSETTA_USER_LDT 235
873 #define VM_MEMORY_ROSETTA_ARENA 236
874 #define VM_MEMORY_ROSETTA_10 239
875
876 /* Reserve 240-255 for application */
877 #define VM_MEMORY_APPLICATION_SPECIFIC_1 240
878 #define VM_MEMORY_APPLICATION_SPECIFIC_2 241
879 #define VM_MEMORY_APPLICATION_SPECIFIC_3 242
880 #define VM_MEMORY_APPLICATION_SPECIFIC_4 243
881 #define VM_MEMORY_APPLICATION_SPECIFIC_5 244
882 #define VM_MEMORY_APPLICATION_SPECIFIC_6 245
883 #define VM_MEMORY_APPLICATION_SPECIFIC_7 246
884 #define VM_MEMORY_APPLICATION_SPECIFIC_8 247
885 #define VM_MEMORY_APPLICATION_SPECIFIC_9 248
886 #define VM_MEMORY_APPLICATION_SPECIFIC_10 249
887 #define VM_MEMORY_APPLICATION_SPECIFIC_11 250
888 #define VM_MEMORY_APPLICATION_SPECIFIC_12 251
889 #define VM_MEMORY_APPLICATION_SPECIFIC_13 252
890 #define VM_MEMORY_APPLICATION_SPECIFIC_14 253
891 #define VM_MEMORY_APPLICATION_SPECIFIC_15 254
892 #define VM_MEMORY_APPLICATION_SPECIFIC_16 255
893
894 #define VM_MEMORY_COUNT 256
895
#if !XNU_KERNEL_PRIVATE
/*
 * Encode a VM user tag (0..255, see VM_MEMORY_* above) into the top byte
 * of the 32-bit flags argument of the vm_allocate()/mach_vm_map() family.
 *
 * The cast to unsigned int avoids undefined behavior: for tags >= 0x80
 * (e.g. VM_MEMORY_ROSETTA or any VM_MEMORY_APPLICATION_SPECIFIC_*),
 * left-shifting a signed int by 24 would shift into the sign bit
 * (C11 6.5.7p4). The resulting bit pattern is unchanged.
 */
#define VM_MAKE_TAG(tag) ((unsigned int)(tag) << 24)
#endif /* !XNU_KERNEL_PRIVATE */
899
#if PRIVATE && !KERNEL
///
/// Return a human-readable description for a given VM user tag.
///
/// - Parameters:
///   - tag: A VM tag between `[0,VM_MEMORY_COUNT)`
///
/// - Returns: A string literal description of the tag; the caller must
///   not free it. Behavior for tags outside the valid range is not
///   specified here — NOTE(review): confirm against the implementation.
///
__SPI_AVAILABLE(macos(16.0), ios(19.0), watchos(12.0), tvos(19.0), visionos(3.0), bridgeos(10.0))
OS_EXPORT
const char *mach_vm_tag_describe(unsigned int tag);
#endif /* PRIVATE && !KERNEL */
913
#if KERNEL_PRIVATE

#pragma mark Kernel Tags

#if XNU_KERNEL_PRIVATE
/*
 * When making a new VM_KERN_MEMORY_*, update:
 * - tests vm_parameter_validation_[user|kern]
 *   and their expected results; they deliberately call VM functions with invalid
 *   kernel tag values and you may be turning one of those invalid tags valid.
 * - vm_kern_memory_names, which is used to map tags to their string name
 */
#endif /* XNU_KERNEL_PRIVATE */

/* Statically assigned kernel allocation tags, one per subsystem. */
#define VM_KERN_MEMORY_NONE 0

#define VM_KERN_MEMORY_OSFMK 1
#define VM_KERN_MEMORY_BSD 2
#define VM_KERN_MEMORY_IOKIT 3
#define VM_KERN_MEMORY_LIBKERN 4
#define VM_KERN_MEMORY_OSKEXT 5
#define VM_KERN_MEMORY_KEXT 6
#define VM_KERN_MEMORY_IPC 7
#define VM_KERN_MEMORY_STACK 8
#define VM_KERN_MEMORY_CPU 9
#define VM_KERN_MEMORY_PMAP 10
#define VM_KERN_MEMORY_PTE 11
#define VM_KERN_MEMORY_ZONE 12
#define VM_KERN_MEMORY_KALLOC 13
#define VM_KERN_MEMORY_COMPRESSOR 14
#define VM_KERN_MEMORY_COMPRESSED_DATA 15
#define VM_KERN_MEMORY_PHANTOM_CACHE 16
#define VM_KERN_MEMORY_WAITQ 17
#define VM_KERN_MEMORY_DIAG 18
#define VM_KERN_MEMORY_LOG 19
#define VM_KERN_MEMORY_FILE 20
#define VM_KERN_MEMORY_MBUF 21
#define VM_KERN_MEMORY_UBC 22
#define VM_KERN_MEMORY_SECURITY 23
#define VM_KERN_MEMORY_MLOCK 24
#define VM_KERN_MEMORY_REASON 25
#define VM_KERN_MEMORY_SKYWALK 26
#define VM_KERN_MEMORY_LTABLE 27
#define VM_KERN_MEMORY_HV 28
#define VM_KERN_MEMORY_KALLOC_DATA 29
#define VM_KERN_MEMORY_RETIRED 30
#define VM_KERN_MEMORY_KALLOC_TYPE 31
#define VM_KERN_MEMORY_TRIAGE 32
#define VM_KERN_MEMORY_RECOUNT 33
#define VM_KERN_MEMORY_MTAG 34
#define VM_KERN_MEMORY_EXCLAVES 35
#define VM_KERN_MEMORY_EXCLAVES_SHARED 36
#define VM_KERN_MEMORY_KALLOC_SHARED 37
/* add new tags here and adjust first-dynamic value */
#define VM_KERN_MEMORY_CPUTRACE 38
/*
 * Values from here up to (but excluding) VM_KERN_MEMORY_ANY are handed
 * out at runtime rather than assigned statically above — presumably to
 * kexts and other dynamic clients; verify against vm_tag allocation code.
 */
#define VM_KERN_MEMORY_FIRST_DYNAMIC 39

/* out of tags: */
#define VM_KERN_MEMORY_ANY 255
/* Number of kernel tags; valid tags lie in [0, VM_KERN_MEMORY_COUNT). */
#define VM_KERN_MEMORY_COUNT 256
974
#pragma mark Kernel Wired Counts

// mach_memory_info.flags
// The low byte (masked by VM_KERN_SITE_TYPE) holds one of the mutually
// exclusive type values TAG/KMOD/KERNEL/COUNTER identifying what the
// site refers to; the higher bits are independent attribute flags.
#define VM_KERN_SITE_TYPE 0x000000FF
#define VM_KERN_SITE_TAG 0x00000000
#define VM_KERN_SITE_KMOD 0x00000001
#define VM_KERN_SITE_KERNEL 0x00000002
#define VM_KERN_SITE_COUNTER 0x00000003
#define VM_KERN_SITE_WIRED 0x00000100 /* add to wired count */
#define VM_KERN_SITE_HIDE 0x00000200 /* no zprint */
#define VM_KERN_SITE_NAMED 0x00000400
#define VM_KERN_SITE_ZONE 0x00000800
#define VM_KERN_SITE_ZONE_VIEW 0x00001000
#define VM_KERN_SITE_KALLOC 0x00002000 /* zone field is size class */
989
/* Kernel Memory Counters */
#if XNU_KERNEL_PRIVATE
/*
 * When making a new VM_KERN_COUNT_*, also update vm_kern_count_names
 */
#endif /* XNU_KERNEL_PRIVATE */

/* Indices of the per-counter entries reported via mach_memory_info
 * (sites of type VM_KERN_SITE_COUNTER). */
#define VM_KERN_COUNT_MANAGED 0
#define VM_KERN_COUNT_RESERVED 1
#define VM_KERN_COUNT_WIRED 2
#define VM_KERN_COUNT_WIRED_MANAGED 3
#define VM_KERN_COUNT_STOLEN 4
#define VM_KERN_COUNT_LOPAGE 5
#define VM_KERN_COUNT_MAP_KERNEL 6
#define VM_KERN_COUNT_MAP_ZONE 7
#define VM_KERN_COUNT_MAP_KALLOC 8

#define VM_KERN_COUNT_WIRED_BOOT 9

#define VM_KERN_COUNT_BOOT_STOLEN 10

/* The number of bytes from the kernel cache that are wired in memory */
#define VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE 11

/* Compatibility alias: the large-kalloc map counter shares index 8. */
#define VM_KERN_COUNT_MAP_KALLOC_LARGE VM_KERN_COUNT_MAP_KALLOC
#define VM_KERN_COUNT_MAP_KALLOC_LARGE_DATA 12
#define VM_KERN_COUNT_MAP_KERNEL_DATA 13

/* The size of the exclaves iboot carveout (exclaves memory not from XNU) in bytes. */
#define VM_KERN_COUNT_EXCLAVES_CARVEOUT 14

/* The number of VM_KERN_COUNT_ stats. New VM_KERN_COUNT_ entries should be less than this. */
#define VM_KERN_COUNTER_COUNT 15
1023
/*
 * Classification of the destination of a VM copy operation.
 * NOTE(review): semantics beyond the trailing comments are inferred from
 * the names — confirm against the vm_map copy implementation.
 */
#define VM_COPY_DESTINATION_USER 0
#define VM_COPY_DESTINATION_KERNEL 1
#define VM_COPY_DESTINATION_UNKNOWN 2 /* memory entry */
#define VM_COPY_DESTINATION_INTERNAL 3 /* creating a copy map for internal use which is soon discarded */
#endif /* KERNEL_PRIVATE */
1029
1030 __END_DECLS
1031
1032 #endif /* _MACH_VM_STATISTICS_H_ */
1033