xref: /xnu-12377.61.12/osfmk/vm/vm_page.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.h
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *	Date:	1985
62  *
63  *	Resident memory system definitions.
64  */
65 
66 #ifndef _VM_VM_PAGE_H_
67 #define _VM_VM_PAGE_H_
68 
69 #include <debug.h>
70 #include <stdbool.h>
71 #include <vm/vm_options.h>
72 #include <vm/vm_protos.h>
73 #include <vm/vm_far.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_param.h>
77 #include <mach/memory_object_types.h> /* for VMP_CS_BITS... */
78 #include <kern/thread.h>
79 #include <kern/queue.h>
80 #include <kern/locks.h>
81 #include <sys/kern_memorystatus_xnu.h>
82 
/*
 * Per-architecture VM feature selection:
 *  - XNU_VM_HAS_DELAYED_PAGES:      x86_64 only
 *  - XNU_VM_HAS_LOPAGE:             pool of low physical pages for hardware
 *                                   limited to 32-bit DMA addresses
 *                                   (x86_64 only; see VM_MEMORY_CLASS_LOPAGE)
 *  - XNU_VM_HAS_LINEAR_PAGES_ARRAY: vm_pages[] indices map linearly to
 *                                   physical page numbers (non-x86_64;
 *                                   see VM_PAGE_GET_PHYS_PAGE)
 */
#if __x86_64__
#define XNU_VM_HAS_DELAYED_PAGES        1
#define XNU_VM_HAS_LOPAGE               1
#define XNU_VM_HAS_LINEAR_PAGES_ARRAY   0
#else
#define XNU_VM_HAS_DELAYED_PAGES        0
#define XNU_VM_HAS_LOPAGE               0
#define XNU_VM_HAS_LINEAR_PAGES_ARRAY   1
#endif
92 
93 
94 #if HAS_MTE
95 static_assert(!XNU_VM_HAS_DELAYED_PAGES, "MTE and delayed pages aren't compatible");
96 static_assert(XNU_VM_HAS_LINEAR_PAGES_ARRAY, "MTE requires linear vm_pages[]");
97 #endif /* HAS_MTE */
98 
/*
 * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe.   There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
/* 32-bit packed representation of a vm_page pointer (see vm_page_pack_ptr) */
typedef uint32_t        vm_page_packed_t;

/*
 * A queue linkage made of two packed page pointers; serves both as a
 * queue head and as the per-page chaining element.
 */
struct vm_page_packed_queue_entry {
	vm_page_packed_t        next;          /* next element */
	vm_page_packed_t        prev;          /* previous element */
};

typedef struct vm_page_packed_queue_entry       *vm_page_queue_t;
typedef struct vm_page_packed_queue_entry       vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry       vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry       *vm_page_queue_entry_t;

/* a VM object pointer, stored packed (see VM_OBJECT_PACK / VM_OBJECT_UNPACK) */
typedef vm_page_packed_t                        vm_page_object_t;
119 
120 
121 /*
122  * vm_relocate_reason_t:
123  * A type to describe why a page relocation is being attempted.  Depending on
124  * the reason, certain pages may or may not be relocatable.
125  *
126  * VM_RELOCATE_REASON_CONTIGUOUS:
127  * The relocation is on behalf of the contiguous allocator; it is likely to be
128  * wired, so do not consider pages that cannot be wired for any reason.
129  */
130 #if HAS_MTE
131 /*
132  * VM_RELOCATE_REASON_TAG_STORAGE_RECLAIM:
133  * The relocation is to free up a tag storage range page, so that it can be
134  * used for tag storage.
135  *
136  * VM_RELOCATE_REASON_TAG_STORAGE_WIRE:
137  * The relocation is because a codepath is trying or is about to try to wire a
138  * tag storage page.  The relocation code will relax requirements around the
139  * page state needed to be relocatable.
140  *
141  * NOTE: For now, tag storage pages will be considered wireable... but in the
142  * future, tag storage pages will not be considered wireable.
143  * VM_RELOCATE_REASON_TAG_STORAGE_WIRE exists in anticipation of this.
144  */
145 #endif /* HAS_MTE */
__enum_closed_decl(vm_relocate_reason_t, unsigned int, {
	VM_RELOCATE_REASON_CONTIGUOUS,          /* on behalf of the contiguous allocator (see block comment above) */
#if HAS_MTE
	VM_RELOCATE_REASON_TAG_STORAGE_RECLAIM, /* freeing a tag storage range page so it can hold tags */
	VM_RELOCATE_REASON_TAG_STORAGE_WIRE,    /* a caller is trying / about to try to wire a tag storage page */
#endif /* HAS_MTE */

	VM_RELOCATE_REASON_COUNT,               /* number of reasons; not itself a valid reason */
});
155 
156 /*!
157  * @typedef vm_memory_class_t
158  *
159  * @abstract
160  * A type to describe what kind of memory a page represents.
161  *
162  * @const VM_MEMORY_CLASS_REGULAR
163  * Normal memory, which should participate in the normal page lifecycle.
164  *
165  * @const VM_MEMORY_CLASS_LOPAGE
166  * this exists to support hardware controllers
167  * incapable of generating DMAs with more than 32 bits
168  * of address on platforms with physical memory > 4G...
169  *
170  * @const VM_MEMORY_CLASS_SECLUDED
171  * Denotes memory must be put on the secluded queue,
172  * this is not returned by @c vm_page_get_memory_class().
173  */
174 #if HAS_MTE
175 /*
176  * @const VM_MEMORY_CLASS_TAGGED
177  * MTE tagged memory, which should participate in the lifecycle for tagged
178  * pages.  Pages may move between this and the VM_MEMORY_CLASS_REGULAR classes,
179  * dynamically.
180  *
181  * @const VM_MEMORY_CLASS_TAG_STORAGE
182  * MTE tag storage memory, which should participate in the lifecycle for tag
183  * storage pages.  Note that this is NOT the same as being in the tag storage
184  * region; some pages in the tag storage region will be classified as regular
185  * pages, because the system will never use them for tag storage.
186  *
187  * @const VM_MEMORY_CLASS_DEAD_TAG_STORAGE
188  * MTE tag storage that is either recursive or for unmanaged memory
189  * that must always be used as regular memory and can never be taggable.
190  */
191 #endif /* HAS_MTE */
__enum_closed_decl(vm_memory_class_t, uint8_t, {
	VM_MEMORY_CLASS_REGULAR,                /* normal memory, normal page lifecycle */
#if HAS_MTE
	VM_MEMORY_CLASS_TAGGED,                 /* MTE tagged memory; may move to/from REGULAR dynamically */
	VM_MEMORY_CLASS_TAG_STORAGE,            /* MTE tag storage memory (not the same as "in the tag storage region") */
	VM_MEMORY_CLASS_DEAD_TAG_STORAGE,       /* tag storage that must always be used as regular memory */
#endif /* HAS_MTE */
#if XNU_VM_HAS_LOPAGE
	VM_MEMORY_CLASS_LOPAGE,                 /* low physical pages for 32-bit-DMA hardware */
#endif /* XNU_VM_HAS_LOPAGE */
#if CONFIG_SECLUDED_MEMORY
	VM_MEMORY_CLASS_SECLUDED,               /* must go on the secluded queue; never returned by vm_page_get_memory_class() */
#endif
});
206 
207 /* pages of compressed data */
208 #define VM_PAGE_COMPRESSOR_COUNT os_atomic_load(&compressor_object->resident_page_count, relaxed)
209 
210 /*
211  *	Management of resident (logical) pages.
212  *
213  *	A small structure is kept for each resident
214  *	page, indexed by page number.  Each structure
215  *	is an element of several lists:
216  *
217  *		A hash table bucket used to quickly
218  *		perform object/offset lookups
219  *
220  *		A list of all pages for a given object,
221  *		so they can be quickly deactivated at
222  *		time of deallocation.
223  *
224  *		An ordered list of pages due for pageout.
225  *
226  *	In addition, the structure contains the object
227  *	and offset to which this page belongs (for pageout),
228  *	and sundry status bits.
229  *
230  *	Fields in this structure are locked either by the lock on the
231  *	object that the page belongs to (O) or by the lock on the page
232  *	queues (P).  [Some fields require that both locks be held to
233  *	change that field; holding either lock is sufficient to read.]
234  */
235 
236 #define VM_PAGE_NULL            ((vm_page_t) 0)
237 
__enum_closed_decl(vm_page_q_state_t, uint8_t, {
	VM_PAGE_NOT_ON_Q                = 0,    /* page is not present on any queue, nor is it wired... mainly a transient state */
	VM_PAGE_IS_WIRED                = 1,    /* page is currently wired */
	VM_PAGE_USED_BY_COMPRESSOR      = 2,    /* page is in use by the compressor to hold compressed data */
	VM_PAGE_ON_FREE_Q               = 3,    /* page is on the main free queue */
	VM_PAGE_ON_FREE_LOCAL_Q         = 4,    /* page is on one of the per-CPU free queues */
	/*
	 * NOTE(review): the two configs below share value 5 -- presumably
	 * XNU_VM_HAS_LOPAGE and CONFIG_SECLUDED_MEMORY are never both
	 * enabled on the same platform; confirm before enabling both.
	 */
#if XNU_VM_HAS_LOPAGE
	VM_PAGE_ON_FREE_LOPAGE_Q        = 5,    /* page is on the lopage pool free list */
#endif /* XNU_VM_HAS_LOPAGE */
#if CONFIG_SECLUDED_MEMORY
	VM_PAGE_ON_SECLUDED_Q           = 5,    /* page is on secluded queue */
#endif /* CONFIG_SECLUDED_MEMORY */
	VM_PAGE_ON_THROTTLED_Q          = 6,    /* page is on the throttled queue... we stash anonymous pages here when not paging */
	VM_PAGE_ON_PAGEOUT_Q            = 7,    /* page is on one of the pageout queues (internal/external) awaiting processing */
	VM_PAGE_ON_SPECULATIVE_Q        = 8,    /* page is on one of the speculative queues */
	VM_PAGE_ON_ACTIVE_LOCAL_Q       = 9,    /* page has recently been created and is being held in one of the per-CPU local queues */
	VM_PAGE_ON_ACTIVE_Q             = 10,   /* page is in global active queue */
	VM_PAGE_ON_INACTIVE_INTERNAL_Q  = 11,   /* page is on the inactive internal queue a.k.a.  anonymous queue */
	VM_PAGE_ON_INACTIVE_EXTERNAL_Q  = 12,   /* page is on the inactive external queue a.k.a.  file backed queue */
	VM_PAGE_ON_INACTIVE_CLEANED_Q   = 13,   /* page has been cleaned to a backing file and is ready to be stolen */
});
/* must track the last enumerator above */
#define VM_PAGE_Q_STATE_LAST_VALID_VALUE  13    /* we currently use 4 bits for the state... don't let this go beyond 15 */
260 
/*
 * Which "special" queue (if any) an anonymous page is on;
 * stored in the 2-bit vmp_on_specialq field of struct vm_page.
 */
__enum_closed_decl(vm_page_specialq_t, uint8_t, {
	VM_PAGE_SPECIAL_Q_EMPTY         = 0,    /* not on any special queue */
	VM_PAGE_SPECIAL_Q_BG            = 1,    /* presumably the background queue -- confirm against queue code */
	VM_PAGE_SPECIAL_Q_DONATE        = 2,    /* presumably the donation queue -- confirm against queue code */
	VM_PAGE_SPECIAL_Q_FG            = 3,    /* presumably the foreground queue -- confirm against queue code */
});
267 
/*
 * Fast classification of a page's queue state: each bitmask below has
 * bit N set iff vm_page_q_state_t value N belongs to that class.
 */
#define VM_PAGE_INACTIVE(m)                     bit_test(vm_page_inactive_states, (m)->vmp_q_state)
#define VM_PAGE_ACTIVE_OR_INACTIVE(m)           bit_test(vm_page_active_or_inactive_states, (m)->vmp_q_state)
#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)     bit_test(vm_page_non_speculative_pageable_states, (m)->vmp_q_state)
#define VM_PAGE_PAGEABLE(m)                     bit_test(vm_page_pageable_states, (m)->vmp_q_state)

/* bitmasks indexed by vm_page_q_state_t (16 bits covers states 0..15) */
extern const uint16_t vm_page_inactive_states;
extern const uint16_t vm_page_active_or_inactive_states;
extern const uint16_t vm_page_non_speculative_pageable_states;
extern const uint16_t vm_page_pageable_states;
277 
278 
279 /*
280  * The structure itself. See the block comment above for what (O) and (P) mean.
281  */
struct vm_page {
	union {
		vm_page_queue_chain_t   vmp_pageq;      /* queue info for FIFO queue or free list (P) */
		struct vm_page         *vmp_snext;      /* singly-linked chaining; aliases vmp_pageq in this union */
	};
	vm_page_queue_chain_t           vmp_specialq;   /* anonymous pages in the special queues (P) */

	vm_page_queue_chain_t           vmp_listq;      /* all pages in same object (O) */
	vm_page_packed_t                vmp_next_m;     /* VP bucket link (O) */

	vm_page_object_t                vmp_object;     /* which object am I in (O&P) */
	vm_object_offset_t              vmp_offset;     /* offset into that object (O,P) */


	/*
	 * Either the current page wire count,
	 * or the local queue id (if local queues are enabled).
	 *
	 * See the comments at 'vm_page_queues_remove'
	 * as to why this is safe to do.
	 */
	union {
		uint16_t                vmp_wire_count;
		uint16_t                vmp_local_id;
	};

	/*
	 * The following word of flags used to be protected by the "page queues" lock.
	 * That's no longer true and what lock, if any, is needed may depend on the
	 * value of vmp_q_state.
	 *
	 * This bitfield is kept in its own struct to prevent coalescing
	 * with the next one (which C allows the compiler to do) as they
	 * are under different locking domains
	 */
	struct {
		vm_page_q_state_t       vmp_q_state:4;      /* which q is the page on (P) */
		vm_page_specialq_t      vmp_on_specialq:2;  /* which special queue, if any (see vm_page_specialq_t) */
		uint8_t                 vmp_lopage:1;       /* page belongs to the lopage pool -- see VM_MEMORY_CLASS_LOPAGE */
		uint8_t                 vmp_canonical:1;    /* this page is a canonical kernel page (immutable) */
	};
	struct {
		uint8_t                 vmp_gobbled:1;      /* page used internally (P) */
		uint8_t                 vmp_laundry:1;      /* page is being cleaned now (P)*/
		uint8_t                 vmp_no_cache:1;     /* page is not to be cached and should */
		                                            /* be reused ahead of other pages (P) */
		uint8_t                 vmp_reference:1;    /* page has been used (P) */
		uint8_t                 vmp_realtime:1;     /* page used by realtime thread (P) */
		uint8_t                 vmp_iopl_wired:1;   /* page has been wired for I/O UPL (O&P) */
#if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
		uint8_t                 vmp_unmodified_ro:1;/* Tracks if an anonymous page is modified after a decompression (O&P).*/
#else
		uint8_t                 __vmp_reserved1:1;
#endif
#if HAS_MTE
		uint8_t                 vmp_ts_wanted:1;    /* This tag storage page is wanted for reclaim (O&P) */
#else
		uint8_t                 __vmp_reserved2:1;
#endif
	};

	/*
	 * The following word of flags is protected by the "VM object" lock.
	 *
	 * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
	 * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
	 * It's also ok to modify them behind just the VM object "exclusive" lock.
	 */
	unsigned int    vmp_busy:1,           /* page is in transit (O) */
	    vmp_wanted:1,                     /* someone is waiting for page (O) */
	    vmp_tabled:1,                     /* page is in VP table (O) */
	    vmp_hashed:1,                     /* page is in vm_page_buckets[] (O) + the bucket lock */
#if HAS_MTE
	/*
	 * Whether the page is tagged (O)
	 *
	 * This bit is modified in 3 cases:
	 *
	 * - while the page is on the free queue (vmp_q_state ==
	 *   VM_PAGE_ON_FREE_Q) with the free queue lock held;
	 *
	 * - when the page is in limbo on its way to the free queue
	 *   (vmp_q_state == VM_PAGE_NOT_ON_Q, vmp_busy == true)
	 *   by the thread owning exclusive access to this page;
	 *
	 * - when the page is on a free local queue (vmp_q_state ==
	 *   VM_PAGE_ON_FREE_LOCAL_Q), by the CPU owning that queue under
	 *   preemption disabled.
	 *
	 * Observing this bit as a result is always stable
	 * under the object lock (O).
	 */
	    vmp_using_mte : 1,
#else
	    __vmp_unused : 1,
#endif /* HAS_MTE */
	    vmp_clustered:1,                  /* page is not the faulted page (O) or (O-shared AND pmap_page) */
	    vmp_pmapped:1,                    /* page has at some time been entered into a pmap (O) or */
	                                      /* (O-shared AND pmap_page) */
	    vmp_xpmapped:1,                   /* page has been entered with execute permission (O) or */
	                                      /* (O-shared AND pmap_page) */
	    vmp_wpmapped:1,                   /* page has been entered at some point into a pmap for write (O) */
	    vmp_free_when_done:1,             /* page is to be freed once cleaning is completed (O) */
	    vmp_absent:1,                     /* Data has been requested, but is not yet available (O) */
	    vmp_error:1,                      /* Data manager was unable to provide data due to error (O) */
	    vmp_dirty:1,                      /* Page must be cleaned (O) */
	    vmp_cleaning:1,                   /* Page clean has begun (O) */
	    vmp_precious:1,                   /* Page is precious; data must be returned even if clean (O) */
	    vmp_overwriting:1,                /* Request to unlock has been made without having data. (O) */
	                                      /* [See vm_fault_page_overwrite] */
	    vmp_restart:1,                    /* Page was pushed higher in shadow chain by copy_call-related pagers */
	                                      /* start again at top of chain */
	    vmp_unusual:1,                    /* Page is absent, error, restart or page locked */
	    vmp_cs_validated:VMP_CS_BITS,     /* code-signing: page was checked */
	    vmp_cs_tainted:VMP_CS_BITS,       /* code-signing: page is tainted */
	    vmp_cs_nx:VMP_CS_BITS,            /* code-signing: page is nx */
	    vmp_reusable:1,                   /* page is part of a "reusable" range (O) -- TODO confirm */
	    vmp_written_by_kernel:1;          /* page was written by kernel (i.e. decompressed) */

#if !XNU_VM_HAS_LINEAR_PAGES_ARRAY
	/*
	 * Physical number of the page
	 *
	 * Setting this value to or away from vm_page_fictitious_addr
	 * must be done with (P) held
	 */
	ppnum_t                         vmp_phys_page;
#endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
};
412 
413 /*!
414  * @var vm_pages
415  * The so called VM pages array
416  *
417  * @var vm_pages_end
418  * The pointer past the last valid page in the VM pages array.
419  *
420  * @var vm_pages_count
421  * The number of elements in the VM pages array.
422  * (vm_pages + vm_pages_count == vm_pages_end).
423  *
424  * @var vm_pages_first_pnum
425  * For linear page arrays, the pnum of the first page in the array.
426  * In other words VM_PAGE_GET_PHYS_PAGE(&vm_pages_array()[0]).
427  */
428 extern vm_page_t        vm_pages_end;
429 extern uint32_t         vm_pages_count;
430 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
431 extern ppnum_t          vm_pages_first_pnum;
432 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
433 
434 /**
435  * Internal accessor which returns the raw vm_pages pointer.
436  *
437  * This pointer must not be indexed directly. Use vm_page_get instead when
438  * indexing into the array.
439  *
440  * __pure2 helps explain to the compiler that the value vm_pages is a constant.
441  */
442 __pure2
443 static inline struct vm_page *
vm_pages_array_internal(void)444 vm_pages_array_internal(void)
445 {
446 	extern vm_page_t vm_pages;
447 	return vm_pages;
448 }
449 
450 /**
451  * Get a pointer to page at index i.
452  *
453  * This getter is the only legal way to index into the vm_pages array.
454  */
/**
 * Get a pointer to the page at index i in the vm_pages array.
 *
 * This getter is the only legal way to index into the vm_pages array;
 * VM_FAR_ADD_PTR_UNBOUNDED (from <vm/vm_far.h>) performs the pointer
 * arithmetic.  No range check is done here -- callers are responsible
 * for keeping i within the array (see vm_pages_count).
 */
__pure2
static inline vm_page_t
vm_page_get(uint32_t i)
{
	return VM_FAR_ADD_PTR_UNBOUNDED(vm_pages_array_internal(), i);
}
461 
462 #if HAS_MTE
463 
464 /**
465  * Internal accessor which returns the raw vm_array_tag_storage pointer,
466  * which is a pointer inside the VM pages array pointing to the first tag
467  * storage page.
468  *
469  * This pointer must not be indexed directly. Use vm_tag_storage_page_get()
470  * instead when indexing into the array.
471  *
472  * __pure2 helps explain to the compiler that the value vm_pages is a constant.
473  */
474 __pure2
475 static inline struct vm_page *
vm_pages_tag_storage_array_internal(void)476 vm_pages_tag_storage_array_internal(void)
477 {
478 	extern vm_page_t vm_pages_tag_storage;
479 	return vm_pages_tag_storage;
480 }
481 
482 /**
483  * Get a pointer to tag storage page at index i.
484  *
485  * This getter is the only legal way to index into the vm_pages_tag_storage array.
486  */
/**
 * Get a pointer to the tag storage page at index i.
 *
 * This getter is the only legal way to index into the
 * vm_pages_tag_storage array.  No range check is performed here.
 */
__pure2
static inline vm_page_t
vm_tag_storage_page_get(uint32_t i)
{
	return VM_FAR_ADD_PTR_UNBOUNDED(vm_pages_tag_storage_array_internal(), i);
}
493 
494 __pure2
495 static inline bool
vm_page_in_tag_storage_array(const struct vm_page * m)496 vm_page_in_tag_storage_array(const struct vm_page *m)
497 {
498 	extern vm_page_t vm_pages_tag_storage_end;
499 	return vm_pages_tag_storage_array_internal() <= m &&
500 	       m < vm_pages_tag_storage_end;
501 }
502 
503 #endif /* HAS_MTE */
504 
505 __pure2
506 static inline bool
vm_page_in_array(const struct vm_page * m)507 vm_page_in_array(const struct vm_page *m)
508 {
509 	return vm_pages_array_internal() <= m && m < vm_pages_end;
510 }
511 
512 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
/*
 * A vm_page immediately followed by an explicit physical page number.
 * On linear-array configs, pages inside vm_pages[] derive their pnum
 * from their index; pages allocated elsewhere use this layout so that
 * VM_PAGE_GET_PHYS_PAGE() can still find their pnum.
 */
struct vm_page_with_ppnum {
	struct vm_page          vmp_page;       /* the page itself; must be first */
	ppnum_t                 vmp_phys_page;  /* explicit physical page number */
};
517 
518 /*!
519  * @abstract
520  * Looks up the canonical kernel page for a given physical page number.
521  *
522  * @discussion
523  * This function may return VM_PAGE_NULL for kernel pages that aren't managed
524  * by the VM.
525  *
526  * @param pnum          The page number to lookup.  It must be within
527  *                      [pmap_first_pnum, vm_pages_first_pnum + vm_pages_count)
528  */
529 extern vm_page_t vm_page_find_canonical(ppnum_t pnum) __pure2;
530 
531 extern vm_page_t vm_pages_radix_next(uint32_t *cursor, ppnum_t *pnum);
532 
/*
 * Iterate every page (or its pnum) reachable through
 * vm_pages_radix_next(), using __index as an opaque cursor.
 */
#define vm_pages_radix_for_each(mem) \
	for (uint32_t __index = 0; ((mem) = vm_pages_radix_next(&__index, NULL)); )

#define vm_pages_radix_for_each_pnum(pnum) \
	for (uint32_t __index = 0; vm_pages_radix_next(&__index, &pnum); )
538 
539 #else
540 #define vm_page_with_ppnum vm_page
541 #endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
542 typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;
543 
/*
 * Return the physical page number backing page "m".
 *
 * With a linear vm_pages[] array, pages inside the array derive their
 * pnum from their index (vm_pages_first_pnum + index); all other pages
 * (and every page on non-linear configs) carry it explicitly in
 * vmp_phys_page (see struct vm_page_with_ppnum).
 */
static inline ppnum_t
VM_PAGE_GET_PHYS_PAGE(const struct vm_page *m)
{
#if XNU_VM_HAS_LINEAR_PAGES_ARRAY
	if (vm_page_in_array(m)) {
		uintptr_t index = (uintptr_t)(m - vm_pages_array_internal());

		return (ppnum_t)(vm_pages_first_pnum + index);
	}
#endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
	/* pages outside the array always store an explicit pnum */
	return ((const struct vm_page_with_ppnum *)m)->vmp_phys_page;
}
556 
/*
 * Record the physical page number of "m" at initialization time.
 *
 * On linear-array configs, a page inside vm_pages[] derives its pnum
 * from its index, so for those this only asserts that the caller's
 * pnum matches; any other page stores pnum in vmp_phys_page.
 */
static inline void
VM_PAGE_INIT_PHYS_PAGE(struct vm_page *m, ppnum_t pnum)
{
#if XNU_VM_HAS_LINEAR_PAGES_ARRAY
	if (vm_page_in_array(m)) {
		assert(pnum == VM_PAGE_GET_PHYS_PAGE(m));
		return;
	}
#endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
	((vm_page_with_ppnum_t)(m))->vmp_phys_page = pnum;
}
568 
/*
 * Re-point "m" at a different physical page.
 *
 * Only legal for pages outside the vm_pages array that are not
 * canonical kernel pages (asserted below); such pages always carry
 * an explicit vmp_phys_page field.
 */
static inline void
VM_PAGE_SET_PHYS_PAGE(struct vm_page *m, ppnum_t pnum)
{
	assert(!vm_page_in_array(m) && !m->vmp_canonical);
	((vm_page_with_ppnum_t)(m))->vmp_phys_page = pnum;
}
575 
#if defined(__x86_64__)
extern unsigned int     vm_clump_mask, vm_clump_shift;
/* a clump groups 2^vm_clump_shift consecutive physical page numbers */
#define VM_PAGE_GET_CLUMP_PNUM(pn)      ((pn) >> vm_clump_shift)
#define VM_PAGE_GET_CLUMP(m)            VM_PAGE_GET_CLUMP_PNUM(VM_PAGE_GET_PHYS_PAGE(m))
/* page "color": derived from the clump number on x86_64 (vm_color_mask is defined elsewhere) */
#define VM_PAGE_GET_COLOR_PNUM(pn)      (VM_PAGE_GET_CLUMP_PNUM(pn) & vm_color_mask)
#define VM_PAGE_GET_COLOR(m)            VM_PAGE_GET_COLOR_PNUM(VM_PAGE_GET_PHYS_PAGE(m))
#else
/* page "color": derived directly from the page number (vm_color_mask is defined elsewhere) */
#define VM_PAGE_GET_COLOR_PNUM(pn)      ((pn) & vm_color_mask)
#define VM_PAGE_GET_COLOR(m)            VM_PAGE_GET_COLOR_PNUM(VM_PAGE_GET_PHYS_PAGE(m))
#endif
586 
587 /*
588  * Parameters for pointer packing
589  *
590  *
591  * VM Pages pointers might point to:
592  *
593  * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
594  *
595  * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
596  *
597  * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
598  *    aligned).
599  *
600  *
601  * The current scheme uses 31 bits of storage and 6 bits of shift using the
602  * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
603  * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
604  *
605  * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
606  */
#define VM_VPLQ_ALIGNMENT               128
#define VM_PAGE_PACKED_PTR_ALIGNMENT    64              /* must be a power of 2 */
#define VM_PAGE_PACKED_ALIGNED          __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
#define VM_PAGE_PACKED_PTR_BITS         31
#define VM_PAGE_PACKED_PTR_SHIFT        6
#ifndef __BUILDING_XNU_LIB_UNITTEST__
#define VM_PAGE_PACKED_PTR_BASE         ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)
#else
/* unit-test builds substitute a mock base address for packing */
extern uintptr_t mock_page_ptr_base;
#define VM_PAGE_PACKED_PTR_BASE         (mock_page_ptr_base)
#endif
/* top bit of a packed pointer: marks an index into the vm_pages array */
#define VM_PAGE_PACKED_FROM_ARRAY       0x80000000
619 
/*
 * Pack a vm_page pointer into 32 bits.
 *
 * Pointers into the vm_pages array are encoded as their array index
 * with the VM_PAGE_PACKED_FROM_ARRAY top bit set; all other pointers
 * use the generic VM_PACK_POINTER() scheme (see "Parameters for
 * pointer packing" above).  Inverse of vm_page_unpack_ptr().
 */
static inline vm_page_packed_t
vm_page_pack_ptr(uintptr_t p)
{
	if (vm_page_in_array(__unsafe_forge_single(vm_page_t, p))) {
		ptrdiff_t diff = (vm_page_t)p - vm_pages_array_internal();

		/* verify the index round-trips back to the same page */
		assert((vm_page_t)p == vm_page_get((uint32_t)diff));
		return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
	}

	VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
	vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
	return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
}
633 
634 
/*
 * Unpack a 32-bit packed page pointer (passed widened to uintptr_t).
 *
 * Values with the VM_PAGE_PACKED_FROM_ARRAY top bit set decode as an
 * index into the vm_pages array; everything else goes through the
 * generic VM_UNPACK_POINTER() scheme.  Inverse of vm_page_pack_ptr().
 */
static inline uintptr_t
vm_page_unpack_ptr(uintptr_t p)
{
	if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
		p &= ~VM_PAGE_PACKED_FROM_ARRAY;
		assert(p < (uintptr_t)vm_pages_count);
		return (uintptr_t)vm_page_get((uint32_t)p);
	}

	return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
}
646 
647 
#define VM_PAGE_PACK_PTR(p)     vm_page_pack_ptr((uintptr_t)(p))
#define VM_PAGE_UNPACK_PTR(p)   vm_page_unpack_ptr((uintptr_t)(p))

/* VM object pointers use only the generic packing scheme (never array indices) */
#define VM_OBJECT_PACK(o)       ((vm_page_object_t)VM_PACK_POINTER((uintptr_t)(o), VM_PAGE_PACKED_PTR))
#define VM_OBJECT_UNPACK(p)     ((vm_object_t)VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR))

#define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
#define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)


/* clears the page's queue linkage (vmp_snext aliases vmp_pageq in a union) */
#define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
MACRO_BEGIN                             \
	(p)->vmp_snext = 0;             \
MACRO_END


#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       VM_PAGE_PACK_PTR(p)
665 
666 
667 /*!
668  * @abstract
669  * The type for free queue heads that live in the kernel __DATA segment.
670  *
671  * @discussion
672  * This type must be used so that the queue is properly aligned
673  * for the VM Page packing to be able to represent pointers to this queue.
674  */
typedef struct vm_page_queue_free_head {
	vm_page_queue_head_t    qhead;  /* the queue itself; aligned so its address can be packed */
} VM_PAGE_PACKED_ALIGNED *vm_page_queue_free_head_t;
678 
679 /*
680  *	Macro:	vm_page_queue_init
681  *	Function:
682  *		Initialize the given queue.
683  *	Header:
684  *	void vm_page_queue_init(q)
685  *		vm_page_queue_t	q;	\* MODIFIED *\
686  */
#define vm_page_queue_init(q)               \
MACRO_BEGIN                                 \
	/* the head itself must be representable as a packed pointer */ \
	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
	(q)->next = VM_PAGE_PACK_PTR(q);        \
	(q)->prev = VM_PAGE_PACK_PTR(q);        \
MACRO_END
693 
694 
695 /*
696  * Macro: vm_page_queue_enter
697  * Function:
698  *     Insert a new element at the tail of the vm_page queue.
699  * Header:
700  *     void vm_page_queue_enter(q, elt, field)
701  *         queue_t q;
702  *         vm_page_t elt;
703  *         <field> is the list field in vm_page_t
704  *
705  * This macro's arguments have to match the generic "queue_enter()" macro which is
706  * what is used for this on 32 bit kernels.
707  */
#define vm_page_queue_enter(head, elt, field)                       \
MACRO_BEGIN                                                         \
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
	vm_page_packed_t __pck_prev = (head)->prev;                 \
                                                                    \
	if (__pck_head == __pck_prev) {                             \
	        /* queue was empty: head links directly to elt */   \
	        (head)->next = __pck_elt;                           \
	} else {                                                    \
	        /* link the current tail's next to elt */           \
	        vm_page_t __prev;                                   \
	        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
	        __prev->field.next = __pck_elt;                     \
	}                                                           \
	(elt)->field.prev = __pck_prev;                             \
	(elt)->field.next = __pck_head;                             \
	(head)->prev = __pck_elt;                                   \
MACRO_END
725 
726 
#if defined(__x86_64__)
/*
 * These are helper macros for vm_page_queue_enter_clump to assist
 * with conditional compilation (release / debug / development)
 */
#if DEVELOPMENT || DEBUG

/* sanity check: __prev and __p must be linked to each other both ways */
#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)                                             \
MACRO_BEGIN                                                                                   \
	if (__prev != NULL) {                                                                 \
	        assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next));                   \
	        assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
	}                                                                                     \
MACRO_END

/* sanity check: walking __n_free links from __first must land on __last_next */
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)                    \
MACRO_BEGIN                                                                     \
	unsigned int __i;                                                       \
	vm_page_queue_entry_t __tmp;                                            \
	for (__i = 0, __tmp = __first; __i < __n_free; __i++) {                 \
	        __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
	}                                                                       \
	assert(__tmp == __last_next);                                           \
MACRO_END

/* clump statistics, only maintained on DEVELOPMENT || DEBUG kernels */
#define __DEBUG_STAT_INCREMENT_INRANGE              vm_clump_inrange++
#define __DEBUG_STAT_INCREMENT_INSERTS              vm_clump_inserts++
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)   vm_clump_promotes+=__n_free

#else

/* no-op stand-ins for release kernels */
#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
#define __DEBUG_STAT_INCREMENT_INRANGE
#define __DEBUG_STAT_INCREMENT_INSERTS
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)

#endif  /* if DEVELOPMENT || DEBUG */

#endif /* __x86_64__ */
767 
768 /*
769  * Macro: vm_page_queue_enter_first
770  * Function:
771  *     Insert a new element at the head of the vm_page queue.
772  * Header:
773  *     void queue_enter_first(q, elt, , field)
774  *         queue_t q;
775  *         vm_page_t elt;
776  *         <field> is the linkage field in vm_page
777  *
778  * This macro's arguments have to match the generic "queue_enter_first()" macro which is
779  * what is used for this on 32 bit kernels.
780  */
781 #define vm_page_queue_enter_first(head, elt, field)                 \
782 MACRO_BEGIN                                                         \
783 	vm_page_packed_t __pck_next = (head)->next;                 \
784 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
785 	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
786                                                                     \
787 	if (__pck_head == __pck_next) {                             \
788 	        (head)->prev = __pck_elt;                           \
789 	} else {                                                    \
790 	        vm_page_t __next;                                   \
791 	        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
792 	        __next->field.prev = __pck_elt;                     \
793 	}                                                           \
794                                                                     \
795 	(elt)->field.next = __pck_next;                             \
796 	(elt)->field.prev = __pck_head;                             \
797 	(head)->next = __pck_elt;                                   \
798 MACRO_END
799 
800 
801 /*
802  * Macro:	vm_page_queue_remove
803  * Function:
804  *     Remove an arbitrary page from a vm_page queue.
805  * Header:
806  *     void vm_page_queue_remove(q, qe, field)
807  *         arguments as in vm_page_queue_enter
808  *
809  * This macro's arguments have to match the generic "queue_enter()" macro which is
810  * what is used for this on 32 bit kernels.
811  */
812 #define vm_page_queue_remove(head, elt, field)                          \
813 MACRO_BEGIN                                                             \
814 	vm_page_packed_t __pck_next = (elt)->field.next;                \
815 	vm_page_packed_t __pck_prev = (elt)->field.prev;                \
816 	vm_page_t        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
817 	vm_page_t        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
818                                                                         \
819 	if ((void *)(head) == (void *)__next) {                         \
820 	        (head)->prev = __pck_prev;                              \
821 	} else {                                                        \
822 	        __next->field.prev = __pck_prev;                        \
823 	}                                                               \
824                                                                         \
825 	if ((void *)(head) == (void *)__prev) {                         \
826 	        (head)->next = __pck_next;                              \
827 	} else {                                                        \
828 	        __prev->field.next = __pck_next;                        \
829 	}                                                               \
830                                                                         \
831 	(elt)->field.next = 0;                                          \
832 	(elt)->field.prev = 0;                                          \
833 MACRO_END
834 
835 
836 /*
837  * Macro: vm_page_queue_remove_first
838  *
839  * Function:
840  *     Remove and return the entry at the head of a vm_page queue.
841  *
842  * Header:
843  *     vm_page_queue_remove_first(head, entry, field)
844  *     N.B. entry is returned by reference
845  *
846  * This macro's arguments have to match the generic "queue_remove_first()" macro which is
847  * what is used for this on 32 bit kernels.
848  */
849 #define vm_page_queue_remove_first(head, entry, field)            \
850 MACRO_BEGIN                                                       \
851 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);     \
852 	vm_page_packed_t __pck_next;                              \
853 	vm_page_t        __next;                                  \
854                                                                   \
855 	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);    \
856 	__pck_next = (entry)->field.next;                         \
857 	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);       \
858                                                                   \
859 	if (__pck_head == __pck_next) {                           \
860 	        (head)->prev = __pck_head;                        \
861 	} else {                                                  \
862 	        __next->field.prev = __pck_head;                  \
863 	}                                                         \
864                                                                   \
865 	(head)->next = __pck_next;                                \
866 	(entry)->field.next = 0;                                  \
867 	(entry)->field.prev = 0;                                  \
868 MACRO_END
869 
870 
#if defined(__x86_64__)
/*
 * Macro:  vm_page_queue_remove_first_with_clump
 * Function:
 *     Remove and return the entry at the head of the free queue
 *     end is set to 1 to indicate that we just returned the last page in a clump
 *
 *     (end) is set when the removed page was either the last page on the
 *     queue, or when the following page belongs to a different clump
 *     (compared via VM_PAGE_GET_CLUMP).
 *
 * Header:
 *     vm_page_queue_remove_first_with_clump(head, entry, end)
 *     entry is returned by reference
 *     end is returned by reference
 */
#define vm_page_queue_remove_first_with_clump(head, entry, end)              \
MACRO_BEGIN                                                                  \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);                \
	vm_page_packed_t __pck_next;                                         \
	vm_page_t        __next;                                             \
                                                                             \
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);               \
	__pck_next = (entry)->vmp_pageq.next;                                \
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);                  \
                                                                             \
	(end) = 0;                                                           \
	if (__pck_head == __pck_next) {                                      \
	        (head)->prev = __pck_head;                                   \
	        (end) = 1;                                                   \
	} else {                                                             \
	        __next->vmp_pageq.prev = __pck_head;                         \
	        if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
	                (end) = 1;                                           \
	        }                                                            \
	}                                                                    \
                                                                             \
	(head)->next = __pck_next;                                           \
	(entry)->vmp_pageq.next = 0;                                         \
	(entry)->vmp_pageq.prev = 0;                                         \
MACRO_END
#endif  /* defined(__x86_64__) */
909 
910 /*
911  *	Macro:	vm_page_queue_end
912  *	Function:
913  *	Tests whether a new entry is really the end of
914  *		the queue.
915  *	Header:
916  *		boolean_t vm_page_queue_end(q, qe)
917  *			vm_page_queue_t q;
918  *			vm_page_queue_entry_t qe;
919  */
920 #define vm_page_queue_end(q, qe)        ((q) == (qe))
921 
922 
923 /*
924  *	Macro:	vm_page_queue_empty
925  *	Function:
926  *		Tests whether a queue is empty.
927  *	Header:
928  *		boolean_t vm_page_queue_empty(q)
929  *			vm_page_queue_t q;
930  */
931 #define vm_page_queue_empty(q)          vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
932 
933 
934 
935 /*
936  *	Macro:	vm_page_queue_first
937  *	Function:
938  *		Returns the first entry in the queue,
939  *	Header:
940  *		uintpr_t vm_page_queue_first(q)
941  *			vm_page_queue_t q;	\* IN *\
942  */
943 #define vm_page_queue_first(q)          (VM_PAGE_UNPACK_PTR((q)->next))
944 
945 
946 
947 /*
948  *	Macro:		vm_page_queue_last
949  *	Function:
950  *		Returns the last entry in the queue.
951  *	Header:
952  *		vm_page_queue_entry_t queue_last(q)
953  *			queue_t	q;		\* IN *\
954  */
955 #define vm_page_queue_last(q)           (VM_PAGE_UNPACK_PTR((q)->prev))
956 
957 
958 
959 /*
960  *	Macro:	vm_page_queue_next
961  *	Function:
962  *		Returns the entry after an item in the queue.
963  *	Header:
964  *		uintpr_t vm_page_queue_next(qc)
965  *			vm_page_queue_t qc;
966  */
967 #define vm_page_queue_next(qc)          (VM_PAGE_UNPACK_PTR((qc)->next))
968 
969 
970 
971 /*
972  *	Macro:	vm_page_queue_prev
973  *	Function:
974  *		Returns the entry before an item in the queue.
975  *	Header:
976  *		uinptr_t vm_page_queue_prev(qc)
977  *			vm_page_queue_t qc;
978  */
979 #define vm_page_queue_prev(qc)          (VM_PAGE_UNPACK_PTR((qc)->prev))
980 
981 
982 
983 /*
984  *	Macro:	vm_page_queue_iterate
985  *	Function:
986  *		iterate over each item in a vm_page queue.
987  *		Generates a 'for' loop, setting elt to
988  *		each item in turn (by reference).
989  *	Header:
990  *		vm_page_queue_iterate(q, elt, field)
991  *			queue_t q;
992  *			vm_page_t elt;
993  *			<field> is the chain field in vm_page_t
994  */
995 #define vm_page_queue_iterate(head, elt, field)                       \
996 	for ((elt) = (vm_page_t)vm_page_queue_first(head);            \
997 	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
998 	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))     \
999 
1000 
1001 /*
1002  * VM_PAGE_MIN_SPECULATIVE_AGE_Q through vm_page_max_speculative_age_q
1003  * represents a set of aging bins that are 'protected'...
1004  *
1005  * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
1006  * not yet been 'claimed' but have been aged out of the protective bins
1007  * this occurs in vm_page_speculate when it advances to the next bin
1008  * and discovers that it is still occupied... at that point, all of the
1009  * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
1010  * in that bin are all guaranteed to have reached at least the maximum age
1011  * we allow for a protected page... they can be older if there is no
1012  * memory pressure to pull them from the bin, or there are no new speculative pages
1013  * being generated to push them out.
1014  * this list is the one that vm_pageout_scan will prefer when looking
1015  * for pages to move to the underweight free list
1016  *
1017  * vm_page_max_speculative_age_q * VM_PAGE_SPECULATIVE_Q_AGE_MS
1018  * defines the amount of time a speculative page is normally
1019  * allowed to live in the 'protected' state (i.e. not available
1020  * to be stolen if vm_pageout_scan is running and looking for
1021  * pages)...  however, if the total number of speculative pages
1022  * in the protected state exceeds our limit (defined in vm_pageout.c)
1023  * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
1024  * vm_pageout_scan is allowed to steal pages from the protected
1025  * bucket even if they are underage.
1026  *
1027  * vm_pageout_scan is also allowed to pull pages from a protected
1028  * bin if the bin has reached the "age of consent" we've set
1029  */
1030 #define VM_PAGE_RESERVED_SPECULATIVE_AGE_Q      40
1031 #define VM_PAGE_DEFAULT_MAX_SPECULATIVE_AGE_Q   10
1032 #define VM_PAGE_MIN_SPECULATIVE_AGE_Q   1
1033 #define VM_PAGE_SPECULATIVE_AGED_Q      0
1034 
1035 #define VM_PAGE_SPECULATIVE_Q_AGE_MS    500
1036 
struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	vm_page_queue_head_t    age_q;
	/* NOTE(review): presumably the time at which this bin ages out — confirm in vm_page_speculate */
	mach_timespec_t age_ts;
} VM_PAGE_PACKED_ALIGNED;
1044 
1045 
1046 
1047 extern
1048 struct vm_speculative_age_q     vm_page_queue_speculative[];
1049 
1050 extern int                      speculative_steal_index;
1051 extern int                      speculative_age_index;
1052 extern unsigned int             vm_page_speculative_q_age_ms;
1053 extern unsigned int             vm_page_max_speculative_age_q;
1054 
1055 
/*
 * Each member is 64-byte aligned; the pad members presumably keep unrelated
 * data off the locks' cache lines to avoid false sharing — confirm against
 * the lock-initialization code.
 */
typedef struct vm_locks_array {
	char    pad  __attribute__ ((aligned(64)));
	lck_mtx_t       vm_page_queue_lock2 __attribute__ ((aligned(64)));
	lck_mtx_t       vm_page_queue_free_lock2 __attribute__ ((aligned(64)));
	char    pad2  __attribute__ ((aligned(64)));
} vm_locks_array_t;
1062 
1063 
1064 #define VM_PAGE_WIRED(m)        ((m)->vmp_q_state == VM_PAGE_IS_WIRED)
1065 #define NEXT_PAGE(m)            ((m)->vmp_snext)
1066 #define NEXT_PAGE_PTR(m)        (&(m)->vmp_snext)
1067 
1068 /*!
1069  * @abstract
1070  * Represents a singly linked list of pages with a count.
1071  *
1072  * @discussion
1073  * This type is used as a way to exchange transient collections of VM pages
1074  * by various subsystems.
1075  *
 * This type is designed to be less than sizeof(_Complex), which means
 * that it can be passed by value efficiently (either as a function argument
1078  * or its result).
1079  *
1080  *
1081  * @field vmpl_head
1082  * The head of the list, or VM_PAGE_NULL.
1083  *
1084  * @field vmpl_count
1085  * How many pages are on that list.
1086  *
1087  * @field vmpl_has_realtime
1088  * At least one page on the list has vmp_realtime set.
1089  */
1090 #if HAS_MTE
1091 /*
1092  * @field vmpl_has_untagged
1093  * Whether there are pages with the @c vmp_using_mte property unset on it.
1094  *
1095  * It can be used by callers to know that they need to adjust the tagging
1096  * properties of the list with @c pmap_{un,}make_tagged_pages().
1097  *
1098  * @field vmpl_has_tagged
1099  * Whether there are pages with the @c vmp_using_mte property set on it.
1100  *
1101  * It can be used by callers to know that they need to adjust the tagging
1102  * properties of the list with @c pmap_{un,}make_tagged_pages().
1103  */
1104 #endif
typedef struct {
	vm_page_t vmpl_head;            /* first page, or VM_PAGE_NULL */
	uint32_t  vmpl_count;           /* number of pages on the list */
	bool      vmpl_has_realtime;    /* some page has vmp_realtime set */
#if HAS_MTE
	bool      vmpl_has_untagged;    /* some page has vmp_using_mte unset */
	bool      vmpl_has_tagged;      /* some page has vmp_using_mte set */
#endif
} vm_page_list_t;
1114 
1115 
1116 /*!
1117  * @abstract
1118  * Low level function that pushes a page on a naked singly linked list of VM
1119  * pages.
1120  *
1121  * @param head          The list head.
1122  * @param mem           The page to push on the list.
1123  */
1124 static inline void
_vm_page_list_push(vm_page_t * head,vm_page_t mem)1125 _vm_page_list_push(vm_page_t *head, vm_page_t mem)
1126 {
1127 	NEXT_PAGE(mem) = *head;
1128 	*head = mem;
1129 }
1130 
1131 /*!
1132  * @abstract
1133  * Pushes a page onto a VM page list, adjusting its properties.
1134  *
1135  * @param list          The VM page list to push onto
1136  * @param mem           The page to push on the list.
1137  */
static inline void
vm_page_list_push(vm_page_list_t *list, vm_page_t mem)
{
	/* Chain the page onto the head and account for it. */
	_vm_page_list_push(&list->vmpl_head, mem);
	list->vmpl_count++;
	/* Sticky flag: records that at least one realtime page is present. */
	if (mem->vmp_realtime) {
		list->vmpl_has_realtime = true;
	}
#if HAS_MTE
	/* Track the mix of MTE-tagged vs. untagged pages (see vmpl_has_* docs). */
	if (mem->vmp_using_mte) {
		list->vmpl_has_tagged = true;
	} else {
		list->vmpl_has_untagged = true;
	}
#endif
}
1154 
1155 /*!
1156  * @abstract
1157  * Conveniency function that creates a VM page list from a single page.
1158  *
1159  * @param mem           The VM page to put on the list.
1160  */
static inline vm_page_list_t
vm_page_list_for_page(vm_page_t mem)
{
	/* The page must not already be chained onto some other list. */
	assert(NEXT_PAGE(mem) == VM_PAGE_NULL);
	/* Seed the list flags from this single page's properties. */
	return (vm_page_list_t){
		       .vmpl_head  = mem,
		       .vmpl_count = 1,
		       .vmpl_has_realtime = mem->vmp_realtime,
#if HAS_MTE
		       .vmpl_has_untagged = !mem->vmp_using_mte,
		       .vmpl_has_tagged = mem->vmp_using_mte,
#endif
	};
}
1175 
1176 /*!
1177  * @abstract
1178  * Low level function that pops a page from a naked singly linked list of VM
1179  * pages.
1180  *
1181  * @param head          The list head.
1182  *
1183  * @returns             The first page that was on the list
1184  *                      or VM_PAGE_NULL if it was empty.
1185  */
1186 static inline vm_page_t
_vm_page_list_pop(vm_page_t * head)1187 _vm_page_list_pop(vm_page_t *head)
1188 {
1189 	vm_page_t mem = *head;
1190 
1191 	if (mem) {
1192 		*head = NEXT_PAGE(mem);
1193 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
1194 	}
1195 
1196 	return mem;
1197 }
1198 
1199 /*!
1200  * @abstract
1201  * Pops a page from a VM page list, adjusting its properties.
1202  *
1203  * @param list          The VM page list to pop from.
1204  *
1205  * @returns             The first page that was on the list
1206  *                      or VM_PAGE_NULL if it was empty.
1207  */
1208 static inline vm_page_t
vm_page_list_pop(vm_page_list_t * list)1209 vm_page_list_pop(vm_page_list_t *list)
1210 {
1211 	if (list->vmpl_head) {
1212 		list->vmpl_count--;
1213 		return _vm_page_list_pop(&list->vmpl_head);
1214 	}
1215 	*list = (vm_page_list_t){ };
1216 	return VM_PAGE_NULL;
1217 }
1218 
1219 
1220 /*!
1221  * @abstract
1222  * Reverses a list of VM pages in place.
1223  *
1224  * @param list          The VM page list to reverse.
1225  */
1226 static inline void
vm_page_list_reverse(vm_page_list_t * list)1227 vm_page_list_reverse(vm_page_list_t *list)
1228 {
1229 	vm_page_t cur, next;
1230 
1231 	cur = list->vmpl_head;
1232 	list->vmpl_head = NULL;
1233 
1234 	while (cur) {
1235 		next = NEXT_PAGE(cur);
1236 		_vm_page_list_push(&list->vmpl_head, cur);
1237 		cur = next;
1238 	}
1239 }
1240 
1241 
1242 /*!
1243  * @abstract
1244  * Low level iterator over all pages on a naked singly linked list
1245  * of VM pages.
1246  *
1247  * @discussion
1248  * Mutating the list during enumeration is undefined.
1249  *
1250  * @param mem           The variable to use for iteration.
1251  * @param head          The list head.
1252  */
1253 #define _vm_page_list_foreach(mem, list) \
1254 	for ((mem) = (list); (mem); (mem) = NEXT_PAGE(mem))
1255 
1256 
1257 /*!
1258  * @abstract
1259  * Iterator over a VM page list.
1260  *
1261  * @discussion
1262  * Mutating the list during enumeration is undefined.
1263  *
1264  * @param mem           The variable to use for iteration.
1265  * @param head          The list head.
1266  */
1267 #define vm_page_list_foreach(mem, list) \
1268 	_vm_page_list_foreach(mem, (list).vmpl_head)
1269 
1270 
1271 /*!
1272  * @abstract
1273  * Low level iterator over all pages on a naked singly linked list
1274  * of VM pages, that also consumes the list as it iterates.
1275  *
1276  * @discussion
1277  * Each element is removed from the list as it is being iterated.
1278  *
1279  * @param mem           The variable to use for iteration.
1280  * @param head          The list head.
1281  */
1282 #define _vm_page_list_foreach_consume(mem, list) \
1283 	while (((mem) = _vm_page_list_pop((list))))
1284 
1285 /*!
1286  * @abstract
1287  * Iterator over a VM page list, that consumes the list.
1288  *
1289  * @discussion
1290  * Each element is removed from the list as it is being iterated.
1291  *
1292  * @param mem           The variable to use for iteration.
1293  * @param head          The list head.
1294  */
1295 #define vm_page_list_foreach_consume(mem, list) \
1296 	while (((mem) = vm_page_list_pop((list))))
1297 
1298 
1299 /*
1300  * XXX	The unusual bit should not be necessary.  Most of the bit
1301  * XXX	fields above really want to be masks.
1302  */
1303 
1304 /*
1305  *	For debugging, this macro can be defined to perform
1306  *	some useful check on a page structure.
1307  *	INTENTIONALLY left as a no-op so that the
1308  *	current call-sites can be left intact for future uses.
1309  */
1310 
1311 #define VM_PAGE_CHECK(mem)                      \
1312 	MACRO_BEGIN                             \
1313 	MACRO_END
1314 
1315 /*     Page coloring:
1316  *
1317  *     The free page list is actually n lists, one per color,
1318  *     where the number of colors is a function of the machine's
1319  *     cache geometry set at system initialization.  To disable
1320  *     coloring, set vm_colors to 1 and vm_color_mask to 0.
1321  *     The boot-arg "colors" may be used to override vm_colors.
1322  *     Note that there is little harm in having more colors than needed.
1323  */
1324 
1325 #define MAX_COLORS      128
1326 #define DEFAULT_COLORS  32
1327 
1328 /*
1329  * Page free queue type.  Abstracts the notion of a free queue of pages, that
1330  * contains free pages of a particular memory class, and maintains a count of
1331  * the number of pages in the free queue.
1332  *
1333  * Pages in the queue will be marked VM_PAGE_ON_FREE_Q when they are added to
1334  * the free queue, and VM_PAGE_NOT_ON_Q when they are removed.
1335  *
1336  * These free queues will color pages, consistent with MachVMs color mask.
1337  */
typedef struct vm_page_free_queue {
	struct vm_page_queue_free_head vmpfq_queues[MAX_COLORS]; /* one free list per color */
	uint32_t                       vmpfq_count;              /* total pages across all colors */
} *vm_page_free_queue_t;
1342 
1343 extern unsigned int    vm_colors;              /* must be in range 1..MAX_COLORS */
1344 extern unsigned int    vm_color_mask;          /* must be (vm_colors-1) */
1345 extern unsigned int    vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
1346 extern unsigned int    vm_free_magazine_refill_limit;
1347 
1348 /*
1349  * Wired memory is a very limited resource and we can't let users exhaust it
1350  * and deadlock the entire system.  We enforce the following limits:
1351  *
1352  * vm_per_task_user_wire_limit
1353  *      how much memory can be user-wired in one user task
1354  *
1355  * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
1356  *      how much memory can be user-wired in all user tasks
1357  *
1358  * These values are set to defaults based on the number of pages managed
 * by the VM system. They can be overridden via sysctls.
1360  * See kmem_set_user_wire_limits for details on the default values.
1361  *
1362  * Regardless of the amount of memory in the system, we never reserve
1363  * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
1364  */
1365 #define VM_NOT_USER_WIREABLE_MAX (32ULL*1024*1024*1024)     /* 32GB */
1366 
1367 extern vm_map_size_t   vm_per_task_user_wire_limit;
1368 extern vm_map_size_t   vm_global_user_wire_limit;
1369 extern uint64_t        vm_add_wire_count_over_global_limit;
1370 extern uint64_t        vm_add_wire_count_over_user_limit;
1371 
1372 /*
1373  *	Each pageable resident page falls into one of three lists:
1374  *
1375  *	free
1376  *		Available for allocation now.  The free list is
1377  *		actually an array of lists, one per color.
1378  *	inactive
1379  *		Not referenced in any map, but still has an
1380  *		object/offset-page mapping, and may be dirty.
1381  *		This is the list of pages that should be
1382  *		paged out next.  There are actually two
1383  *		inactive lists, one for pages brought in from
1384  *		disk or other backing store, and another
1385  *		for "zero-filled" pages.  See vm_pageout_scan()
1386  *		for the distinction and usage.
1387  *	active
1388  *		A list of pages which have been placed in
1389  *		at least one physical map.  This list is
1390  *		ordered, in LRU-like fashion.
1391  */
1392 
1393 
1394 #define VPL_LOCK_SPIN 1
1395 
struct vpl {
	vm_page_queue_head_t    vpl_queue;              /* queue of local pages */
	unsigned int    vpl_count;                      /* total pages on vpl_queue */
	unsigned int    vpl_internal_count;             /* of those, internal (anonymous) pages */
	unsigned int    vpl_external_count;             /* of those, external (file-backed) pages */
	lck_spin_t      vpl_lock;                       /* protects this structure */
};
1403 
1404 extern
1405 struct vpl     * /* __zpercpu */ vm_page_local_q;
1406 extern
1407 unsigned int    vm_page_local_q_soft_limit;
1408 extern
1409 unsigned int    vm_page_local_q_hard_limit;
1410 extern
1411 vm_locks_array_t vm_page_locks;
1412 
1413 extern
1414 vm_page_queue_head_t    vm_page_queue_active;   /* active memory queue */
1415 extern
1416 vm_page_queue_head_t    vm_page_queue_inactive; /* inactive memory queue for normal pages */
1417 #if CONFIG_SECLUDED_MEMORY
1418 extern
1419 vm_page_queue_head_t    vm_page_queue_secluded; /* reclaimable pages secluded for Camera */
1420 #endif /* CONFIG_SECLUDED_MEMORY */
1421 extern
1422 vm_page_queue_head_t    vm_page_queue_cleaned; /* clean-queue inactive memory */
1423 extern
1424 vm_page_queue_head_t    vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
1425 extern
1426 vm_page_queue_head_t    vm_page_queue_throttled;        /* memory queue for throttled pageout pages */
1427 
1428 extern
1429 queue_head_t    vm_objects_wired;
1430 extern
1431 lck_spin_t      vm_objects_wired_lock;
1432 
1433 #define VM_PAGE_DONATE_DISABLED     0
1434 #define VM_PAGE_DONATE_ENABLED      1
1435 extern
1436 uint32_t        vm_page_donate_mode;
1437 extern
1438 bool        vm_page_donate_queue_ripe;
1439 
1440 #define VM_PAGE_BACKGROUND_TARGET_MAX   50000
1441 #define VM_PAGE_BG_DISABLED     0
1442 #define VM_PAGE_BG_ENABLED     1
1443 
1444 extern
1445 vm_page_queue_head_t    vm_page_queue_background;
1446 extern
1447 uint64_t        vm_page_background_promoted_count;
1448 extern
1449 uint32_t        vm_page_background_count;
1450 extern
1451 uint32_t        vm_page_background_target;
1452 extern
1453 uint32_t        vm_page_background_internal_count;
1454 extern
1455 uint32_t        vm_page_background_external_count;
1456 extern
1457 uint32_t        vm_page_background_mode;
1458 extern
1459 uint32_t        vm_page_background_exclude_external;
1460 
1461 extern
1462 vm_page_queue_head_t    vm_page_queue_donate;
1463 extern
1464 uint32_t        vm_page_donate_count;
1465 extern
1466 uint32_t        vm_page_donate_target_low;
1467 extern
1468 uint32_t        vm_page_donate_target_high;
1469 #define VM_PAGE_DONATE_TARGET_LOWWATER  (100)
1470 #define VM_PAGE_DONATE_TARGET_HIGHWATER ((unsigned int)(atop_64(max_mem) / 8))
1471 
1472 extern
1473 vm_offset_t     first_phys_addr;        /* physical address for first_page */
1474 extern
1475 vm_offset_t     last_phys_addr;         /* physical address for last_page */
1476 
1477 extern
1478 unsigned int    vm_page_free_count;     /* How many pages are free? (sum of all colors) */
1479 extern
1480 unsigned int    vm_page_active_count;   /* How many pages are active? */
1481 extern
1482 unsigned int    vm_page_inactive_count; /* How many pages are inactive? */
1483 extern
1484 unsigned int vm_page_kernelcache_count; /* How many pages are used for the kernelcache? */
1485 extern
1486 unsigned int vm_page_realtime_count;    /* How many pages are used by realtime threads? */
1487 #if CONFIG_SECLUDED_MEMORY
1488 extern
1489 unsigned int    vm_page_secluded_count; /* How many pages are secluded? */
1490 extern
1491 unsigned int    vm_page_secluded_count_free; /* how many of them are free? */
1492 extern
1493 unsigned int    vm_page_secluded_count_inuse; /* how many of them are in use? */
1494 /*
1495  * We keep filling the secluded pool with new eligible pages and
1496  * we can overshoot our target by a lot.
1497  * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
1498  * pushing the extra secluded pages to the active or free queue.
1499  * Since these "over target" secluded pages are actually "available", jetsam
1500  * should consider them as such, so make them visible to jetsam via the
1501  * "vm_page_secluded_count_over_target" counter and update it whenever we
1502  * update vm_page_secluded_count or vm_page_secluded_target.
1503  */
1504 extern
1505 unsigned int    vm_page_secluded_count_over_target;
/* Recompute the "over target" count; see the discussion above. */
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()                     \
	MACRO_BEGIN                                                     \
	if (vm_page_secluded_count > vm_page_secluded_target) {         \
	        vm_page_secluded_count_over_target =                    \
	                (vm_page_secluded_count - vm_page_secluded_target); \
	} else {                                                        \
	        vm_page_secluded_count_over_target = 0;                 \
	}                                                               \
	MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() vm_page_secluded_count_over_target
#else /* CONFIG_SECLUDED_MEMORY */
/* No secluded pool: the update is a no-op and the count is always 0. */
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
	MACRO_BEGIN                                 \
	MACRO_END
#define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() 0
#endif /* CONFIG_SECLUDED_MEMORY */
1521 #endif /* CONFIG_SECLUDED_MEMORY */
1522 extern
1523 unsigned int    vm_page_cleaned_count; /* How many pages are in the clean queue? */
1524 extern
1525 unsigned int    vm_page_throttled_count;/* How many inactives are throttled */
1526 extern
1527 unsigned int    vm_page_speculative_count;      /* How many speculative pages are unclaimed? */
1528 extern unsigned int     vm_page_pageable_internal_count;
1529 extern unsigned int     vm_page_pageable_external_count;
1530 extern
1531 unsigned int    vm_page_xpmapped_external_count;        /* How many pages are mapped executable? */
1532 extern
1533 unsigned int    vm_page_external_count; /* How many pages are file-backed? */
1534 extern
1535 unsigned int    vm_page_internal_count; /* How many pages are anonymous? */
1536 extern
1537 unsigned int    vm_page_wire_count;             /* How many pages are wired? */
1538 extern
1539 unsigned int    vm_page_wire_count_initial;     /* How many pages wired at startup */
1540 extern
1541 unsigned int    vm_page_wire_count_on_boot;     /* even earlier than _initial */
1542 extern
1543 unsigned int    vm_page_free_target;    /* How many do we want free? */
1544 extern
1545 unsigned int    vm_page_free_min;       /* When to wakeup pageout */
1546 extern
1547 unsigned int    vm_page_throttle_limit; /* When to throttle new page creation */
1548 extern
1549 unsigned int    vm_page_inactive_target;/* How many do we want inactive? */
1550 #if CONFIG_SECLUDED_MEMORY
1551 extern
1552 unsigned int    vm_page_secluded_target;/* How many do we want secluded? */
1553 #endif /* CONFIG_SECLUDED_MEMORY */
1554 extern
1555 unsigned int    vm_page_anonymous_min;  /* When it's ok to pre-clean */
1556 extern
1557 unsigned int    vm_page_free_reserved;  /* How many pages reserved to do pageout */
1558 extern
1559 unsigned int    vm_page_gobble_count;
1560 extern
1561 unsigned int    vm_page_stolen_count;   /* Count of stolen pages not acccounted in zones */
1562 extern
1563 unsigned int    vm_page_kern_lpage_count;   /* Count of large pages used in early boot */
1564 
1565 
1566 #if DEVELOPMENT || DEBUG
1567 extern
1568 unsigned int    vm_page_speculative_used;
1569 #endif
1570 
1571 extern
1572 unsigned int    vm_page_purgeable_count;/* How many pages are purgeable now ? */
1573 extern
1574 unsigned int    vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
1575 extern
1576 uint64_t        vm_page_purged_count;   /* How many pages got purged so far ? */
1577 
1578 extern
1579 _Atomic unsigned int vm_page_swapped_count;
1580 /* How many pages are swapped to disk? */
1581 
1582 extern unsigned int     vm_page_free_wanted;
1583 /* how many threads are waiting for memory */
1584 
1585 extern unsigned int     vm_page_free_wanted_privileged;
1586 /* how many VM privileged threads are waiting for memory */
1587 #if CONFIG_SECLUDED_MEMORY
1588 extern unsigned int     vm_page_free_wanted_secluded;
1589 /* how many threads are waiting for secluded memory */
1590 #endif /* CONFIG_SECLUDED_MEMORY */
1591 
1592 extern const ppnum_t    vm_page_fictitious_addr;
1593 /* (fake) phys_addr of fictitious pages */
1594 
1595 extern const ppnum_t    vm_page_guard_addr;
1596 /* (fake) phys_addr of guard pages */
1597 
1598 
1599 extern boolean_t        vm_page_deactivate_hint;
1600 
1601 extern int              vm_compressor_mode;
1602 
1603 #if __x86_64__
1604 /*
1605  * Defaults to true, so highest memory is used first.
1606  */
1607 extern boolean_t        vm_himemory_mode;
1608 #else
1609 #define vm_himemory_mode TRUE
1610 #endif
1611 
1612 #if XNU_VM_HAS_LOPAGE
1613 extern bool             vm_lopage_needed;
1614 extern bool             vm_lopage_refill;
1615 extern uint32_t         vm_lopage_free_count;
1616 extern uint32_t         vm_lopage_free_limit;
1617 extern uint32_t         vm_lopage_lowater;
1618 #else
1619 #define vm_lopage_needed        0
1620 #define vm_lopage_free_count    0
1621 #endif
1622 extern uint64_t         max_valid_dma_address;
1623 extern ppnum_t          max_valid_low_ppnum;
1624 
1625 /*!
1626  * @abstract
1627  * Options that alter the behavior of vm_page_grab_options().
1628  *
1629  * @const VM_PAGE_GRAB_OPTIONS_NONE
1630  * The default value when no other specific options are required.
1631  *
1632  * @const VM_PAGE_GRAB_Q_LOCK_HELD
 * Denotes that the caller is holding the vm page queues lock.
1634  *
1635  * @const VM_PAGE_GRAB_NOPAGEWAIT
1636  * Denotes that the caller never wants @c vm_page_grab_options() to call
1637  * @c VM_PAGE_WAIT(), even if the thread is privileged.
1638  *
1639  * @const VM_PAGE_GRAB_SECLUDED
1640  * The caller is eligible to the secluded pool.
1641  */
1642 #if HAS_MTE
1643 /*
1644  * @const VM_PAGE_GRAB_MTE
1645  * The grabbed page must have MTE tagging enabled.
1646  *
1647  * @const VM_PAGE_GRAB_ALLOW_TAG_STORAGE
1648  * The grabbed page can be a claimed tag storage page.
1649  *
1650  * @const VM_PAGE_GRAB_WIREABLE
1651  * The grabbed page will likely be wired.
1652  * This flag makes the caller ineligible to claim tag storage pages.
1653  * Currently unused.
1654  */
1655 #endif /* HAS_MTE */
__enum_decl(vm_grab_options_t, uint32_t, {
	VM_PAGE_GRAB_OPTIONS_NONE               = 0x00000000, /* default: no special behavior */
	VM_PAGE_GRAB_Q_LOCK_HELD                = 0x00000001, /* caller holds the vm page queues lock */
	VM_PAGE_GRAB_NOPAGEWAIT                 = 0x00000002, /* never call VM_PAGE_WAIT(), even if privileged */

	/* architecture/platform-specific flags */
#if CONFIG_SECLUDED_MEMORY
	VM_PAGE_GRAB_SECLUDED                   = 0x00010000, /* caller may draw from the secluded pool */
#endif /* CONFIG_SECLUDED_MEMORY */
#if HAS_MTE
	VM_PAGE_GRAB_MTE                = 0x00020000, /* page must have MTE tagging enabled */
	VM_PAGE_GRAB_ALLOW_TAG_STORAGE  = 0x00040000, /* page may be a claimed tag storage page */
	VM_PAGE_GRAB_WIREABLE           = 0x00080000, /* page will likely be wired; no tag storage pages */
#endif /* HAS_MTE */
});
1671 
1672 /*
1673  * Prototypes for functions exported by this module.
1674  */
1675 
1676 extern void             vm_page_init_local_q(unsigned int num_cpus);
1677 
1678 extern vm_page_t        vm_page_create(ppnum_t phys_page, bool canonical, zalloc_flags_t flags);
1679 extern void             vm_page_create_canonical(ppnum_t pnum);
1680 
1681 extern void             vm_page_create_retired(ppnum_t pn);
1682 
1683 #if XNU_VM_HAS_DELAYED_PAGES
1684 extern void             vm_free_delayed_pages(void);
1685 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1686 
1687 extern void             vm_pages_array_finalize(void);
1688 
1689 extern void             vm_page_reactivate_all_throttled(void);
1690 
1691 extern void vm_pressure_response(void);
1692 
/*
 * Pages that are usable without involving the compressor:
 * the sum of the active, inactive, free and speculative queues.
 */
#define AVAILABLE_NON_COMPRESSED_MEMORY         (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
/* As above, but also counting pages held by the compressor. */
#define AVAILABLE_MEMORY                        (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)
1695 
#if CONFIG_JETSAM

/*
 * Report the current available-page count to the memorystatus (jetsam)
 * subsystem: pageable file-backed pages, plus free pages, plus any
 * secluded pages over target; purgeable pages are counted only when
 * dynamic paging is not enabled.
 */
#define VM_CHECK_MEMORYSTATUS \
	memorystatus_update_available_page_count( \
	        vm_page_pageable_external_count + \
	        vm_page_free_count +              \
	        VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \
	        (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
	        )

#else /* CONFIG_JETSAM */

#if !XNU_TARGET_OS_OSX

/* No jetsam and not macOS: nothing to report. */
#define VM_CHECK_MEMORYSTATUS do {} while(0)

#else /* !XNU_TARGET_OS_OSX */

/* macOS without jetsam: report the non-compressed available total. */
#define VM_CHECK_MEMORYSTATUS memorystatus_update_available_page_count(AVAILABLE_NON_COMPRESSED_MEMORY)

#endif /* !XNU_TARGET_OS_OSX */

#endif /* CONFIG_JETSAM */
1719 
1720 #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2)
1721 #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2)
1722 
1723 #ifdef MACH_KERNEL_PRIVATE
/*
 * vm_page_lock_queues:  acquire the global VM page queues lock as a
 * full (blocking) mutex.
 */
static inline void
vm_page_lock_queues(void)
{
	lck_mtx_lock(&vm_page_queue_lock);
}
1729 
1730 static inline boolean_t
vm_page_trylock_queues(void)1731 vm_page_trylock_queues(void)
1732 {
1733 	boolean_t ret;
1734 	ret = lck_mtx_try_lock(&vm_page_queue_lock);
1735 	return ret;
1736 }
1737 
/*
 * vm_page_unlock_queues:  release the global VM page queues lock.
 */
static inline void
vm_page_unlock_queues(void)
{
	lck_mtx_unlock(&vm_page_queue_lock);
}
1743 
/*
 * vm_page_lockspin_queues:  acquire the global VM page queues lock in
 * spin mode (lck_mtx_lock_spin).
 */
static inline void
vm_page_lockspin_queues(void)
{
	lck_mtx_lock_spin(&vm_page_queue_lock);
}
1749 
1750 static inline boolean_t
vm_page_trylockspin_queues(void)1751 vm_page_trylockspin_queues(void)
1752 {
1753 	boolean_t ret;
1754 	ret = lck_mtx_try_lock_spin(&vm_page_queue_lock);
1755 	return ret;
1756 }
1757 
1758 extern void kdp_vm_page_sleep_find_owner(
1759 	event64_t          wait_event,
1760 	thread_waitinfo_t *waitinfo);
1761 
1762 #endif /* MACH_KERNEL_PRIVATE */
1763 
1764 extern unsigned int vm_max_delayed_work_limit;
1765 
1766 #if CONFIG_SECLUDED_MEMORY
1767 extern uint64_t secluded_shutoff_trigger;
1768 extern uint64_t secluded_shutoff_headroom;
1769 extern void start_secluded_suppression(task_t);
1770 extern void stop_secluded_suppression(task_t);
1771 #endif /* CONFIG_SECLUDED_MEMORY */
1772 
1773 #endif  /* _VM_VM_PAGE_H_ */
1774