xref: /xnu-8796.101.5/osfmk/vm/vm_page.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.h
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *	Date:	1985
62  *
63  *	Resident memory system definitions.
64  */
65 
66 #ifndef _VM_VM_PAGE_H_
67 #define _VM_VM_PAGE_H_
68 
69 #include <debug.h>
70 #include <vm/vm_options.h>
71 #include <vm/vm_protos.h>
72 #include <mach/boolean.h>
73 #include <mach/vm_prot.h>
74 #include <mach/vm_param.h>
75 #include <mach/memory_object_types.h> /* for VMP_CS_BITS... */
76 
77 
78 #if    defined(__LP64__)
79 
80 /*
81  * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
82  * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
83  * vm_page_t's from doesn't span more than 256 Gbytes, we're safe.   There are live tests in the
84  * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
85  * pointers from the 2 ends of these spaces
86  */
87 typedef uint32_t        vm_page_packed_t;
88 
89 struct vm_page_packed_queue_entry {
90 	vm_page_packed_t        next;          /* next element */
91 	vm_page_packed_t        prev;          /* previous element */
92 };
93 
94 typedef struct vm_page_packed_queue_entry       *vm_page_queue_t;
95 typedef struct vm_page_packed_queue_entry       vm_page_queue_head_t;
96 typedef struct vm_page_packed_queue_entry       vm_page_queue_chain_t;
97 typedef struct vm_page_packed_queue_entry       *vm_page_queue_entry_t;
98 
99 typedef vm_page_packed_t                        vm_page_object_t;
100 
101 #else // __LP64__
102 
103 /*
104  * we can't do the packing trick on 32 bit architectures
105  * so just turn the macros into noops.
106  */
107 typedef struct vm_page          *vm_page_packed_t;
108 
109 #define vm_page_queue_t         queue_t
110 #define vm_page_queue_head_t    queue_head_t
111 #define vm_page_queue_chain_t   queue_chain_t
112 #define vm_page_queue_entry_t   queue_entry_t
113 
114 #define vm_page_object_t        vm_object_t
115 #endif // __LP64__
116 
117 
118 #include <vm/vm_object.h>
119 #include <kern/queue.h>
120 #include <kern/locks.h>
121 
122 #include <kern/macro_help.h>
123 #include <libkern/OSAtomic.h>
124 
125 
126 
127 #define VM_PAGE_COMPRESSOR_COUNT        (compressor_object->resident_page_count)
128 
129 /*
130  *	Management of resident (logical) pages.
131  *
132  *	A small structure is kept for each resident
133  *	page, indexed by page number.  Each structure
134  *	is an element of several lists:
135  *
136  *		A hash table bucket used to quickly
137  *		perform object/offset lookups
138  *
139  *		A list of all pages for a given object,
140  *		so they can be quickly deactivated at
141  *		time of deallocation.
142  *
143  *		An ordered list of pages due for pageout.
144  *
145  *	In addition, the structure contains the object
146  *	and offset to which this page belongs (for pageout),
147  *	and sundry status bits.
148  *
149  *	Fields in this structure are locked either by the lock on the
150  *	object that the page belongs to (O) or by the lock on the page
151  *	queues (P).  [Some fields require that both locks be held to
152  *	change that field; holding either lock is sufficient to read.]
153  */
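/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): the (O)/(P) annotations are honored by taking the corresponding
 * lock before touching a field.  This assumes the vm_object_lock()/
 * vm_object_unlock() and vm_page_lockspin_queues()/vm_page_unlock_queues()
 * interfaces declared elsewhere in the VM code.
 *
 *	vm_object_lock(object);            // (O) fields, e.g. vmp_busy
 *	m->vmp_busy = TRUE;
 *	vm_object_unlock(object);
 *
 *	vm_page_lockspin_queues();         // (P) fields, e.g. vmp_reference
 *	m->vmp_reference = TRUE;
 *	vm_page_unlock_queues();
 */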
154 
155 #define VM_PAGE_NULL            ((vm_page_t) 0)
156 
157 extern  char    vm_page_inactive_states[];
158 extern  char    vm_page_pageable_states[];
159 extern  char    vm_page_non_speculative_pageable_states[];
160 extern  char    vm_page_active_or_inactive_states[];
161 
162 
163 #define VM_PAGE_INACTIVE(m)                     (vm_page_inactive_states[m->vmp_q_state])
164 #define VM_PAGE_PAGEABLE(m)                     (vm_page_pageable_states[m->vmp_q_state])
165 #define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)     (vm_page_non_speculative_pageable_states[m->vmp_q_state])
166 #define VM_PAGE_ACTIVE_OR_INACTIVE(m)           (vm_page_active_or_inactive_states[m->vmp_q_state])
167 
168 
169 #define VM_PAGE_NOT_ON_Q                0               /* page is not present on any queue, nor is it wired... mainly a transient state */
170 #define VM_PAGE_IS_WIRED                1               /* page is currently wired */
171 #define VM_PAGE_USED_BY_COMPRESSOR      2               /* page is in use by the compressor to hold compressed data */
172 #define VM_PAGE_ON_FREE_Q               3               /* page is on the main free queue */
173 #define VM_PAGE_ON_FREE_LOCAL_Q         4               /* page is on one of the per-CPU free queues */
174 #define VM_PAGE_ON_FREE_LOPAGE_Q        5               /* page is on the lopage pool free list */
175 #define VM_PAGE_ON_THROTTLED_Q          6               /* page is on the throttled queue... we stash anonymous pages here when not paging */
176 #define VM_PAGE_ON_PAGEOUT_Q            7               /* page is on one of the pageout queues (internal/external) awaiting processing */
177 #define VM_PAGE_ON_SPECULATIVE_Q        8               /* page is on one of the speculative queues */
178 #define VM_PAGE_ON_ACTIVE_LOCAL_Q       9               /* page has recently been created and is being held in one of the per-CPU local queues */
179 #define VM_PAGE_ON_ACTIVE_Q             10              /* page is in global active queue */
180 #define VM_PAGE_ON_INACTIVE_INTERNAL_Q  11              /* page is on the inactive internal queue a.k.a.  anonymous queue */
181 #define VM_PAGE_ON_INACTIVE_EXTERNAL_Q  12              /* page is on the inactive external queue a.k.a.  file backed queue */
182 #define VM_PAGE_ON_INACTIVE_CLEANED_Q   13              /* page has been cleaned to a backing file and is ready to be stolen */
183 #define VM_PAGE_ON_SECLUDED_Q           14              /* page is on secluded queue */
184 #define VM_PAGE_Q_STATE_LAST_VALID_VALUE        14      /* we currently use 4 bits for the state... don't let this go beyond 15 */
185 
186 #define VM_PAGE_Q_STATE_ARRAY_SIZE      (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)
187 
188 
189 /*
190  * The structure itself. See the block comment above for what (O) and (P) mean.
191  */
192 #define vmp_pageq vmp_q_un.vmp_q_pageq
193 #define vmp_snext vmp_q_un.vmp_q_snext
194 
195 struct vm_page {
196 	union {
197 		vm_page_queue_chain_t vmp_q_pageq;           /* queue info for FIFO queue or free list (P) */
198 		struct vm_page        *vmp_q_snext;
199 	} vmp_q_un;
200 
201 	vm_page_queue_chain_t         vmp_listq;           /* all pages in same object (O) */
202 
203 	vm_page_queue_chain_t         vmp_specialq;     /* anonymous pages in the special queues (P) */
204 	vm_object_offset_t            vmp_offset;          /* offset into that object (O,P) */
205 
206 	vm_page_object_t              vmp_object;          /* which object am I in (O&P) */
207 
208 	/*
209 	 * The following word of flags used to be protected by the "page queues" lock.
210 	 * That's no longer true and what lock, if any, is needed may depend on the
211 	 * value of vmp_q_state.
212 	 *
213 	 * We use 'vmp_wire_count' to store the local queue id if local queues are enabled.
214 	 * See the comments at 'vm_page_queues_remove' as to why this is safe to do.
215 	 */
216 #define VM_PAGE_SPECIAL_Q_EMPTY (0)
217 #define VM_PAGE_SPECIAL_Q_BG (1)
218 #define VM_PAGE_SPECIAL_Q_DONATE (2)
219 #define VM_PAGE_SPECIAL_Q_FG (3)
220 #define vmp_local_id vmp_wire_count
221 	unsigned int vmp_wire_count:16,      /* how many wired down maps use me? (O&P) */
222 	    vmp_q_state:4,                   /* which q is the page on (P) */
223 	    vmp_on_specialq:2,
224 	    vmp_gobbled:1,                   /* page used internally (P) */
225 	    vmp_laundry:1,                   /* page is being cleaned now (P)*/
226 	    vmp_no_cache:1,                  /* page is not to be cached and should */
227 	                                     /* be reused ahead of other pages (P) */
228 	    vmp_private:1,                   /* Page should not be returned to the free list (P) */
229 	    vmp_reference:1,                 /* page has been used (P) */
230 	    vmp_lopage:1,
231 	    vmp_realtime:1,                  /* page used by realtime thread */
232 	    vmp_unused_page_bits:3;
233 
234 	/*
235 	 * MUST keep the 2 32 bit words used as bit fields
236 	 * separated since the compiler has a nasty habit
237 	 * of using 64 bit loads and stores on them as
238 	 * if they were a single 64 bit field... since
239 	 * they are protected by 2 different locks, this
240 	 * is a real problem
241 	 */
242 	vm_page_packed_t vmp_next_m;            /* VP bucket link (O) */
243 
244 	/*
245 	 * The following word of flags is protected by the "VM object" lock.
246 	 *
247 	 * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
248 	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
249 	 * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
250 	 * It's also ok to modify them behind just the VM object "exclusive" lock.
251 	 */
252 	unsigned int    vmp_busy:1,           /* page is in transit (O) */
253 	    vmp_wanted:1,                     /* someone is waiting for page (O) */
254 	    vmp_tabled:1,                     /* page is in VP table (O) */
255 	    vmp_hashed:1,                     /* page is in vm_page_buckets[] (O) + the bucket lock */
256 	    vmp_fictitious:1,                 /* Physical page doesn't exist (O) */
257 	    vmp_clustered:1,                  /* page is not the faulted page (O) or (O-shared AND pmap_page) */
258 	    vmp_pmapped:1,                    /* page has at some time been entered into a pmap (O) or */
259 	                                      /* (O-shared AND pmap_page) */
260 	    vmp_xpmapped:1,                   /* page has been entered with execute permission (O) or */
261 	                                      /* (O-shared AND pmap_page) */
262 	    vmp_wpmapped:1,                   /* page has been entered at some point into a pmap for write (O) */
263 	    vmp_free_when_done:1,             /* page is to be freed once cleaning is completed (O) */
264 	    vmp_absent:1,                     /* Data has been requested, but is not yet available (O) */
265 	    vmp_error:1,                      /* Data manager was unable to provide data due to error (O) */
266 	    vmp_dirty:1,                      /* Page must be cleaned (O) */
267 	    vmp_cleaning:1,                   /* Page clean has begun (O) */
268 	    vmp_precious:1,                   /* Page is precious; data must be returned even if clean (O) */
269 	    vmp_overwriting:1,                /* Request to unlock has been made without having data. (O) */
270 	                                      /* [See vm_fault_page_overwrite] */
271 	    vmp_restart:1,                    /* Page was pushed higher in shadow chain by copy_call-related pagers */
272 	                                      /* start again at top of chain */
273 	    vmp_unusual:1,                    /* Page is absent, error, restart or page locked */
274 	    vmp_cs_validated:VMP_CS_BITS, /* code-signing: page was checked */
275 	    vmp_cs_tainted:VMP_CS_BITS,   /* code-signing: page is tainted */
276 	    vmp_cs_nx:VMP_CS_BITS,        /* code-signing: page is nx */
277 	    vmp_reusable:1,
278 	    vmp_written_by_kernel:1;             /* page was written by kernel (i.e. decompressed) */
279 
280 #if    !defined(__arm64__)
281 	ppnum_t         vmp_phys_page;        /* Physical page number of the page */
282 #endif
283 };
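
/*
 * Illustrative notes on struct vm_page (editorial sketch, not part of the
 * original header):
 *
 * - vmp_local_id aliases vmp_wire_count.  A page sitting on a per-CPU local
 *   queue (vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) cannot also be wired,
 *   so the 16-bit wire count can safely carry the local queue id instead:
 *
 *	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
 *	        // m->vmp_local_id selects the owning entry of vm_page_local_q
 *	}
 *
 * - the two 32-bit flag words are deliberately separated by vmp_next_m so
 *   the compiler cannot fuse them into a single 64-bit load/store: they are
 *   protected by different locks, and a merged read-modify-write performed
 *   under one lock could clobber a concurrent update made under the other.
 */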
284 
285 typedef struct vm_page  *vm_page_t;
286 extern vm_page_t        vm_pages;
287 extern vm_page_t        vm_page_array_beginning_addr;
288 extern vm_page_t        vm_page_array_ending_addr;
289 
290 static inline int
291 VMP_CS_FOR_OFFSET(
292 	vm_map_offset_t fault_phys_offset)
293 {
294 	assertf(fault_phys_offset < PAGE_SIZE &&
295 	    !(fault_phys_offset & FOURK_PAGE_MASK),
296 	    "offset 0x%llx\n", (uint64_t)fault_phys_offset);
297 	return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
298 }
299 static inline bool
300 VMP_CS_VALIDATED(
301 	vm_page_t p,
302 	vm_map_size_t fault_page_size,
303 	vm_map_offset_t fault_phys_offset)
304 {
305 	assertf(fault_page_size <= PAGE_SIZE,
306 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
307 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
308 	if (fault_page_size == PAGE_SIZE) {
309 		return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
310 	}
311 	return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
312 }
313 static inline bool
314 VMP_CS_TAINTED(
315 	vm_page_t p,
316 	vm_map_size_t fault_page_size,
317 	vm_map_offset_t fault_phys_offset)
318 {
319 	assertf(fault_page_size <= PAGE_SIZE,
320 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
321 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
322 	if (fault_page_size == PAGE_SIZE) {
323 		return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
324 	}
325 	return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
326 }
327 static inline bool
328 VMP_CS_NX(
329 	vm_page_t p,
330 	vm_map_size_t fault_page_size,
331 	vm_map_offset_t fault_phys_offset)
332 {
333 	assertf(fault_page_size <= PAGE_SIZE,
334 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
335 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
336 	if (fault_page_size == PAGE_SIZE) {
337 		return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
338 	}
339 	return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
340 }
341 static inline void
342 VMP_CS_SET_VALIDATED(
343 	vm_page_t p,
344 	vm_map_size_t fault_page_size,
345 	vm_map_offset_t fault_phys_offset,
346 	boolean_t value)
347 {
348 	assertf(fault_page_size <= PAGE_SIZE,
349 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
350 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
351 	if (value) {
352 		if (fault_page_size == PAGE_SIZE) {
353 			p->vmp_cs_validated = VMP_CS_ALL_TRUE;
354 		}
355 		p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
356 	} else {
357 		if (fault_page_size == PAGE_SIZE) {
358 			p->vmp_cs_validated = VMP_CS_ALL_FALSE;
359 		}
360 		p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
361 	}
362 }
363 static inline void
364 VMP_CS_SET_TAINTED(
365 	vm_page_t p,
366 	vm_map_size_t fault_page_size,
367 	vm_map_offset_t fault_phys_offset,
368 	boolean_t value)
369 {
370 	assertf(fault_page_size <= PAGE_SIZE,
371 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
372 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
373 	if (value) {
374 		if (fault_page_size == PAGE_SIZE) {
375 			p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
376 		}
377 		p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
378 	} else {
379 		if (fault_page_size == PAGE_SIZE) {
380 			p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
381 		}
382 		p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
383 	}
384 }
385 static inline void
386 VMP_CS_SET_NX(
387 	vm_page_t p,
388 	vm_map_size_t fault_page_size,
389 	vm_map_offset_t fault_phys_offset,
390 	boolean_t value)
391 {
392 	assertf(fault_page_size <= PAGE_SIZE,
393 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
394 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
395 	if (value) {
396 		if (fault_page_size == PAGE_SIZE) {
397 			p->vmp_cs_nx = VMP_CS_ALL_TRUE;
398 		}
399 		p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
400 	} else {
401 		if (fault_page_size == PAGE_SIZE) {
402 			p->vmp_cs_nx = VMP_CS_ALL_FALSE;
403 		}
404 		p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
405 	}
406 }
407 
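/*
 * Worked example (editorial, illustrative only): on a configuration with 16K
 * kernel pages and 4K code-signing sub-pages (where VMP_CS_BITS would be 4),
 * VMP_CS_FOR_OFFSET() maps a fault offset to one bit per 4K sub-page:
 *
 *	VMP_CS_FOR_OFFSET(0x0000) == 0x1
 *	VMP_CS_FOR_OFFSET(0x1000) == 0x2
 *	VMP_CS_FOR_OFFSET(0x2000) == 0x4
 *	VMP_CS_FOR_OFFSET(0x3000) == 0x8
 *
 * A fault_page_size of PAGE_SIZE requires the full mask (VMP_CS_ALL_TRUE)
 * for the page to count as validated, while a 4K fault only checks or sets
 * the bit covering its own sub-page.
 */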
408 
409 #if defined(__arm64__)
410 
411 extern  unsigned int vm_first_phys_ppnum;
412 
413 struct vm_page_with_ppnum {
414 	struct  vm_page vm_page_wo_ppnum;
415 
416 	ppnum_t vmp_phys_page;
417 };
418 typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;
419 
420 
421 static inline ppnum_t
422 VM_PAGE_GET_PHYS_PAGE(vm_page_t m)
423 {
424 	if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) {
425 		return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum);
426 	} else {
427 		return ((vm_page_with_ppnum_t)m)->vmp_phys_page;
428 	}
429 }
430 
431 #define VM_PAGE_SET_PHYS_PAGE(m, ppnum)         \
432 	MACRO_BEGIN                             \
433 	if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr)     \
434 	        ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum;     \
435 	assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m));              \
436 	MACRO_END
437 
438 #define VM_PAGE_GET_COLOR(m)    (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask)
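
/*
 * Worked example (editorial, illustrative only): pages that live inside the
 * vm_pages[] array don't store a physical page number; it is recomputed from
 * the array index, so for the i-th entry
 *
 *	VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) == vm_first_phys_ppnum + i
 *
 * Only pages allocated outside that array (struct vm_page_with_ppnum) carry
 * an explicit vmp_phys_page field.
 */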
439 
440 #else   /* defined(__arm64__) */
441 
442 
443 struct vm_page_with_ppnum {
444 	struct  vm_page vm_page_with_ppnum;
445 };
446 typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;
447 
448 
449 #define VM_PAGE_GET_PHYS_PAGE(page)     (page)->vmp_phys_page
450 #define VM_PAGE_SET_PHYS_PAGE(page, ppnum)      \
451 	MACRO_BEGIN                             \
452 	(page)->vmp_phys_page = ppnum;          \
453 	MACRO_END
454 
455 #define VM_PAGE_GET_CLUMP(m)    ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
456 #define VM_PAGE_GET_COLOR(m)    ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)
457 
458 #endif  /* defined(__arm64__) */
459 
460 
461 
462 #if defined(__LP64__)
463 /*
464  * Parameters for pointer packing
465  *
466  *
467  * VM Pages pointers might point to:
468  *
469  * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
470  *
471  * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
472  *
473  * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
474  *    aligned).
475  *
476  *
477  * The current scheme uses 31 bits of storage and 6 bits of shift using the
478  * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
479  * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
480  *
481  * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
482  */
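/*
 * Worked arithmetic (editorial, illustrative only): with
 * VM_PAGE_PACKED_PTR_BITS == 31 and VM_PAGE_PACKED_PTR_SHIFT == 6, a packed
 * pointer can encode 2^31 * 2^6 = 2^37 bytes, i.e. the 128G reach quoted
 * above, measured from VM_PAGE_PACKED_PTR_BASE.  The remaining bit of the
 * 32-bit value is VM_PAGE_PACKED_FROM_ARRAY, which instead flags a packed
 * vm_pages[] index.
 */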
483 #define VM_VPLQ_ALIGNMENT               128
484 #define VM_PAGE_PACKED_PTR_ALIGNMENT    64              /* must be a power of 2 */
485 #define VM_PAGE_PACKED_ALIGNED          __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
486 #define VM_PAGE_PACKED_PTR_BITS         31
487 #define VM_PAGE_PACKED_PTR_SHIFT        6
488 #define VM_PAGE_PACKED_PTR_BASE         ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)
489 
490 #define VM_PAGE_PACKED_FROM_ARRAY       0x80000000
491 
492 static inline vm_page_packed_t
493 vm_page_pack_ptr(uintptr_t p)
494 {
495 	if (p >= (uintptr_t)vm_page_array_beginning_addr &&
496 	    p < (uintptr_t)vm_page_array_ending_addr) {
497 		ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr;
498 		assert((vm_page_t)p == &vm_pages[diff]);
499 		return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
500 	}
501 
502 	VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
503 	vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
504 	return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
505 }
506 
507 
508 static inline uintptr_t
509 vm_page_unpack_ptr(uintptr_t p)
510 {
511 	extern unsigned int vm_pages_count;
512 
513 	if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
514 		p &= ~VM_PAGE_PACKED_FROM_ARRAY;
515 		assert(p < (uintptr_t)vm_pages_count);
516 		return (uintptr_t)&vm_pages[p];
517 	}
518 
519 	return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
520 }
521 
522 
523 #define VM_PAGE_PACK_PTR(p)     vm_page_pack_ptr((uintptr_t)(p))
524 #define VM_PAGE_UNPACK_PTR(p)   vm_page_unpack_ptr((uintptr_t)(p))
525 
526 #define VM_OBJECT_PACK(o)       ((vm_page_object_t)VM_PACK_POINTER((uintptr_t)(o), VM_PAGE_PACKED_PTR))
527 #define VM_OBJECT_UNPACK(p)     ((vm_object_t)VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR))
528 
529 #define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
530 #define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)
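
/*
 * Usage sketch (editorial, illustrative only): packing is lossless for any
 * pointer that satisfies the constraints above, so a round trip recovers the
 * original pointer:
 *
 *	vm_page_packed_t pck = VM_PAGE_PACK_PTR(m);
 *	vm_page_t        m2  = (vm_page_t)VM_PAGE_UNPACK_PTR(pck);
 *	assert(m2 == m);
 *
 * Similarly, VM_PAGE_OBJECT(m) recovers the owning object that
 * VM_OBJECT_PACK() stored into m->vmp_object.
 */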
531 
532 
533 #define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
534 MACRO_BEGIN                             \
535 	(p)->vmp_snext = 0;             \
536 MACRO_END
537 
538 
539 #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       VM_PAGE_PACK_PTR(p)
540 
541 
542 static __inline__ void
543 vm_page_enqueue_tail(
544 	vm_page_queue_t         que,
545 	vm_page_queue_entry_t   elt)
546 {
547 	vm_page_queue_entry_t   old_tail;
548 
549 	old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
550 	elt->next = VM_PAGE_PACK_PTR(que);
551 	elt->prev = que->prev;
552 	que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
553 }
554 
555 
556 static __inline__ void
557 vm_page_remque(
558 	vm_page_queue_entry_t elt)
559 {
560 	vm_page_queue_entry_t next;
561 	vm_page_queue_entry_t prev;
562 	vm_page_packed_t      next_pck = elt->next;
563 	vm_page_packed_t      prev_pck = elt->prev;
564 
565 	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);
566 
567 	/* next may equal prev (and the queue head) if elt was the only element */
568 	prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);
569 
570 	next->prev = prev_pck;
571 	prev->next = next_pck;
572 
573 	elt->next = 0;
574 	elt->prev = 0;
575 }
576 
577 
578 /*
579  *	Macro:	vm_page_queue_init
580  *	Function:
581  *		Initialize the given queue.
582  *	Header:
583  *	void vm_page_queue_init(q)
584  *		vm_page_queue_t	q;	\* MODIFIED *\
585  */
586 #define vm_page_queue_init(q)               \
587 MACRO_BEGIN                                 \
588 	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
589 	(q)->next = VM_PAGE_PACK_PTR(q);        \
590 	(q)->prev = VM_PAGE_PACK_PTR(q);        \
591 MACRO_END
592 
593 
594 /*
595  * Macro: vm_page_queue_enter
596  * Function:
597  *     Insert a new element at the tail of the vm_page queue.
598  * Header:
599  *     void vm_page_queue_enter(q, elt, field)
600  *         queue_t q;
601  *         vm_page_t elt;
602  *         <field> is the list field in vm_page_t
603  *
604  * This macro's arguments have to match the generic "queue_enter()" macro which is
605  * what is used for this on 32 bit kernels.
606  */
607 #define vm_page_queue_enter(head, elt, field)                       \
608 MACRO_BEGIN                                                         \
609 	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
610 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
611 	vm_page_packed_t __pck_prev = (head)->prev;                 \
612                                                                     \
613 	if (__pck_head == __pck_prev) {                             \
614 	        (head)->next = __pck_elt;                           \
615 	} else {                                                    \
616 	        vm_page_t __prev;                                   \
617 	        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
618 	        __prev->field.next = __pck_elt;                     \
619 	}                                                           \
620 	(elt)->field.prev = __pck_prev;                             \
621 	(elt)->field.next = __pck_head;                             \
622 	(head)->prev = __pck_elt;                                   \
623 MACRO_END
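
/*
 * Usage sketch (editorial, illustrative only; the appropriate queue lock is
 * assumed to be held by the caller, and "myq" is a hypothetical queue head):
 *
 *	vm_page_queue_init(&myq);
 *	vm_page_queue_enter(&myq, m, vmp_pageq);     // append m at the tail
 *	...
 *	vm_page_queue_remove(&myq, m, vmp_pageq);    // unlink m again
 *
 * The <field> argument names which vm_page_queue_chain_t inside struct
 * vm_page provides the linkage (vmp_pageq, vmp_listq or vmp_specialq).
 */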
624 
625 
626 #if defined(__x86_64__)
627 /*
628  * These are helper macros for vm_page_queue_enter_clump to assist
629  * with conditional compilation (release / debug / development)
630  */
631 #if DEVELOPMENT || DEBUG
632 
633 #define __DEBUG_CHECK_BUDDIES(__prev, __p, field)                                             \
634 MACRO_BEGIN                                                                                   \
635 	if (__prev != NULL) {                                                                 \
636 	        assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next));                   \
637 	        assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
638 	}                                                                                     \
639 MACRO_END
640 
641 #define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)                    \
642 MACRO_BEGIN                                                                     \
643 	unsigned int __i;                                                       \
644 	vm_page_queue_entry_t __tmp;                                            \
645 	for (__i = 0, __tmp = __first; __i < __n_free; __i++) {                 \
646 	        __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
647 	}                                                                       \
648 	assert(__tmp == __last_next);                                           \
649 MACRO_END
650 
651 #define __DEBUG_STAT_INCREMENT_INRANGE              vm_clump_inrange++
652 #define __DEBUG_STAT_INCREMENT_INSERTS              vm_clump_inserts++
653 #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)   vm_clump_promotes+=__n_free
654 
655 #else
656 
657 #define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
658 #define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
659 #define __DEBUG_STAT_INCREMENT_INRANGE
660 #define __DEBUG_STAT_INCREMENT_INSERTS
661 #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)
662 
663 #endif  /* if DEVELOPMENT || DEBUG */
664 
665 /*
666  * Insert a new page into a free queue and clump pages within the same 16K boundary together
667  */
668 static inline void
669 vm_page_queue_enter_clump(
670 	vm_page_queue_t       head,
671 	vm_page_t             elt)
672 {
673 	vm_page_queue_entry_t first = NULL;    /* first page in the clump */
674 	vm_page_queue_entry_t last = NULL;     /* last page in the clump */
675 	vm_page_queue_entry_t prev = NULL;
676 	vm_page_queue_entry_t next;
677 	uint_t                n_free = 1;
678 	extern unsigned int   vm_pages_count;
679 	extern unsigned int   vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
680 	extern unsigned long  vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
681 
682 	/*
683 	 * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
684 	 */
685 	if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
686 		vm_page_t p;
687 		uint_t    i;
688 		uint_t    n;
689 		ppnum_t   clump_num;
690 
691 		first = last = (vm_page_queue_entry_t)elt;
692 		clump_num = VM_PAGE_GET_CLUMP(elt);
693 		n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;
694 
695 		/*
696 		 * Check for preceding vm_pages[] entries in the same chunk
697 		 */
698 		for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
699 			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
700 				if (prev == NULL) {
701 					prev = (vm_page_queue_entry_t)p;
702 				}
703 				first = (vm_page_queue_entry_t)p;
704 				n_free++;
705 			}
706 		}
707 
708 		/*
709 		 * Check the following vm_pages[] entries in the same chunk
710 		 */
711 		for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
712 			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
713 				if (last == (vm_page_queue_entry_t)elt) {               /* first one only */
714 					__DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
715 				}
716 
717 				if (prev == NULL) {
718 					prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
719 				}
720 				last = (vm_page_queue_entry_t)p;
721 				n_free++;
722 			}
723 		}
724 		__DEBUG_STAT_INCREMENT_INRANGE;
725 	}
726 
727 	/* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
728 	if (prev == NULL) {
729 		prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
730 	}
731 
732 	/* insert the element */
733 	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
734 	elt->vmp_pageq.next = prev->next;
735 	elt->vmp_pageq.prev = next->prev;
736 	prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
737 	__DEBUG_STAT_INCREMENT_INSERTS;
738 
739 	/*
740 	 * Check if clump needs to be promoted to head.
741 	 */
742 	if (n_free >= vm_clump_promote_threshold && n_free > 1) {
743 		vm_page_queue_entry_t first_prev;
744 
745 		first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);
746 
747 		/* If not at head already */
748 		if (first_prev != head) {
749 			vm_page_queue_entry_t last_next;
750 			vm_page_queue_entry_t head_next;
751 
752 			last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);
753 
754 			/* verify that the links within the clump are consistent */
755 			__DEBUG_VERIFY_LINKS(first, n_free, last_next);
756 
757 			/* promote clump to head */
758 			first_prev->next = last->next;
759 			last_next->prev = first->prev;
760 			first->prev = VM_PAGE_PACK_PTR(head);
761 			last->next = head->next;
762 
763 			head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
764 			head_next->prev = VM_PAGE_PACK_PTR(last);
765 			head->next = VM_PAGE_PACK_PTR(first);
766 			__DEBUG_STAT_INCREMENT_PROMOTES(n_free);
767 		}
768 	}
769 }
770 #endif
771 
772 /*
773  * Macro: vm_page_queue_enter_first
774  * Function:
775  *     Insert a new element at the head of the vm_page queue.
776  * Header:
777  *     void vm_page_queue_enter_first(q, elt, field)
778  *         queue_t q;
779  *         vm_page_t elt;
780  *         <field> is the linkage field in vm_page
781  *
782  * This macro's arguments have to match the generic "queue_enter_first()" macro which is
783  * what is used for this on 32 bit kernels.
784  */
785 #define vm_page_queue_enter_first(head, elt, field)                 \
786 MACRO_BEGIN                                                         \
787 	vm_page_packed_t __pck_next = (head)->next;                 \
788 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
789 	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
790                                                                     \
791 	if (__pck_head == __pck_next) {                             \
792 	        (head)->prev = __pck_elt;                           \
793 	} else {                                                    \
794 	        vm_page_t __next;                                   \
795 	        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
796 	        __next->field.prev = __pck_elt;                     \
797 	}                                                           \
798                                                                     \
799 	(elt)->field.next = __pck_next;                             \
800 	(elt)->field.prev = __pck_head;                             \
801 	(head)->next = __pck_elt;                                   \
802 MACRO_END
803 
804 
805 /*
806  * Macro:	vm_page_queue_remove
807  * Function:
808  *     Remove an arbitrary page from a vm_page queue.
809  * Header:
810  *     void vm_page_queue_remove(q, qe, field)
811  *         arguments as in vm_page_queue_enter
812  *
813  * This macro's arguments have to match the generic "queue_remove()" macro which is
814  * what is used for this on 32 bit kernels.
815  */
816 #define vm_page_queue_remove(head, elt, field)                          \
817 MACRO_BEGIN                                                             \
818 	vm_page_packed_t __pck_next = (elt)->field.next;                \
819 	vm_page_packed_t __pck_prev = (elt)->field.prev;                \
820 	vm_page_t        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
821 	vm_page_t        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
822                                                                         \
823 	if ((void *)(head) == (void *)__next) {                         \
824 	        (head)->prev = __pck_prev;                              \
825 	} else {                                                        \
826 	        __next->field.prev = __pck_prev;                        \
827 	}                                                               \
828                                                                         \
829 	if ((void *)(head) == (void *)__prev) {                         \
830 	        (head)->next = __pck_next;                              \
831 	} else {                                                        \
832 	        __prev->field.next = __pck_next;                        \
833 	}                                                               \
834                                                                         \
835 	(elt)->field.next = 0;                                          \
836 	(elt)->field.prev = 0;                                          \
837 MACRO_END
838 
839 
840 /*
841  * Macro: vm_page_queue_remove_first
842  *
843  * Function:
844  *     Remove and return the entry at the head of a vm_page queue.
845  *
846  * Header:
847  *     vm_page_queue_remove_first(head, entry, field)
848  *     N.B. entry is returned by reference
849  *
850  * This macro's arguments have to match the generic "queue_remove_first()" macro which is
851  * what is used for this on 32 bit kernels.
852  */
853 #define vm_page_queue_remove_first(head, entry, field)            \
854 MACRO_BEGIN                                                       \
855 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);     \
856 	vm_page_packed_t __pck_next;                              \
857 	vm_page_t        __next;                                  \
858                                                                   \
859 	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);    \
860 	__pck_next = (entry)->field.next;                         \
861 	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);       \
862                                                                   \
863 	if (__pck_head == __pck_next) {                           \
864 	        (head)->prev = __pck_head;                        \
865 	} else {                                                  \
866 	        __next->field.prev = __pck_head;                  \
867 	}                                                         \
868                                                                   \
869 	(head)->next = __pck_next;                                \
870 	(entry)->field.next = 0;                                  \
871 	(entry)->field.prev = 0;                                  \
872 MACRO_END
873 
874 
875 #if defined(__x86_64__)
876 /*
877  * Macro:  vm_page_queue_remove_first_with_clump
878  * Function:
879  *     Remove and return the entry at the head of the free queue
880  *     end is set to 1 to indicate that we just returned the last page in a clump
881  *
882  * Header:
883  *     vm_page_queue_remove_first_with_clump(head, entry, end)
884  *     entry is returned by reference
885  *     end is returned by reference
886  */
887 #define vm_page_queue_remove_first_with_clump(head, entry, end)              \
888 MACRO_BEGIN                                                                  \
889 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);                \
890 	vm_page_packed_t __pck_next;                                         \
891 	vm_page_t        __next;                                             \
892                                                                              \
893 	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);               \
894 	__pck_next = (entry)->vmp_pageq.next;                                \
895 	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);                  \
896                                                                              \
897 	(end) = 0;                                                           \
898 	if (__pck_head == __pck_next) {                                      \
899 	        (head)->prev = __pck_head;                                   \
900 	        (end) = 1;                                                   \
901 	} else {                                                             \
902 	        __next->vmp_pageq.prev = __pck_head;                         \
903 	        if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
904 	                (end) = 1;                                           \
905 	        }                                                            \
906 	}                                                                    \
907                                                                              \
908 	(head)->next = __pck_next;                                           \
909 	(entry)->vmp_pageq.next = 0;                                         \
910 	(entry)->vmp_pageq.prev = 0;                                         \
911 MACRO_END
912 #endif
913 
914 /*
915  *	Macro:	vm_page_queue_end
916  *	Function:
917  *	Tests whether a new entry is really the end of
918  *		the queue.
919  *	Header:
920  *		boolean_t vm_page_queue_end(q, qe)
921  *			vm_page_queue_t q;
922  *			vm_page_queue_entry_t qe;
923  */
924 #define vm_page_queue_end(q, qe)        ((q) == (qe))
925 
926 
927 /*
928  *	Macro:	vm_page_queue_empty
929  *	Function:
930  *		Tests whether a queue is empty.
931  *	Header:
932  *		boolean_t vm_page_queue_empty(q)
933  *			vm_page_queue_t q;
934  */
935 #define vm_page_queue_empty(q)          vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
936 
937 
938 
939 /*
940  *	Macro:	vm_page_queue_first
941  *	Function:
942  *		Returns the first entry in the queue.
943  *	Header:
944  *		uintptr_t vm_page_queue_first(q)
945  *			vm_page_queue_t q;	\* IN *\
946  */
947 #define vm_page_queue_first(q)          (VM_PAGE_UNPACK_PTR((q)->next))
948 
949 
950 
951 /*
952  *	Macro:		vm_page_queue_last
953  *	Function:
954  *		Returns the last entry in the queue.
955  *	Header:
956  *		vm_page_queue_entry_t vm_page_queue_last(q)
957  *			queue_t	q;		\* IN *\
958  */
959 #define vm_page_queue_last(q)           (VM_PAGE_UNPACK_PTR((q)->prev))
960 
961 
962 
963 /*
964  *	Macro:	vm_page_queue_next
965  *	Function:
966  *		Returns the entry after an item in the queue.
967  *	Header:
968  *		uintptr_t vm_page_queue_next(qc)
969  *			vm_page_queue_t qc;
970  */
971 #define vm_page_queue_next(qc)          (VM_PAGE_UNPACK_PTR((qc)->next))
972 
973 
974 
975 /*
976  *	Macro:	vm_page_queue_prev
977  *	Function:
978  *		Returns the entry before an item in the queue.
979  *	Header:
980  *		uintptr_t vm_page_queue_prev(qc)
981  *			vm_page_queue_t qc;
982  */
983 #define vm_page_queue_prev(qc)          (VM_PAGE_UNPACK_PTR((qc)->prev))
984 
985 
986 
987 /*
988  *	Macro:	vm_page_queue_iterate
989  *	Function:
990  *		iterate over each item in a vm_page queue.
991  *		Generates a 'for' loop, setting elt to
992  *		each item in turn (by reference).
993  *	Header:
994  *		vm_page_queue_iterate(q, elt, field)
995  *			queue_t q;
996  *			vm_page_t elt;
997  *			<field> is the chain field in vm_page_t
998  */
999 #define vm_page_queue_iterate(head, elt, field)                       \
1000 	for ((elt) = (vm_page_t)vm_page_queue_first(head);            \
1001 	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
1002 	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))     \
1003 
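/*
 * Usage sketch (editorial, illustrative only; the appropriate queue lock is
 * assumed to be held):
 *
 *	vm_page_t m;
 *	vm_page_queue_iterate(&vm_page_queue_active, m, vmp_pageq) {
 *	        // visits every page on the active queue, in order
 *	}
 *
 * The loop body must not unlink 'm' from the queue, since the macro reads
 * m->vmp_pageq.next to advance to the next element.
 */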
1004 #else // LP64
1005 
1006 #define VM_VPLQ_ALIGNMENT               128
1007 #define VM_PAGE_PACKED_PTR_ALIGNMENT    sizeof(vm_offset_t)
1008 #define VM_PAGE_PACKED_ALIGNED
1009 #define VM_PAGE_PACKED_PTR_BITS         32
1010 #define VM_PAGE_PACKED_PTR_SHIFT        0
1011 #define VM_PAGE_PACKED_PTR_BASE         0
1012 
1013 #define VM_PAGE_PACKED_FROM_ARRAY       0
1014 
1015 #define VM_PAGE_PACK_PTR(p)     (p)
1016 #define VM_PAGE_UNPACK_PTR(p)   ((uintptr_t)(p))
1017 
1018 #define VM_OBJECT_PACK(o)       ((vm_page_object_t)(o))
1019 #define VM_OBJECT_UNPACK(p)     ((vm_object_t)(p))
1020 
1021 #define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)
1022 #define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
1023 
1024 
1025 #define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
1026 MACRO_BEGIN                             \
1027 	(p)->vmp_pageq.next = 0;                \
1028 	(p)->vmp_pageq.prev = 0;                \
1029 MACRO_END
1030 
1031 #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)   ((queue_entry_t)(p))
1032 
1033 #define vm_page_remque                      remque
1034 #define vm_page_enqueue_tail                enqueue_tail
1035 #define vm_page_queue_init                  queue_init
1036 #define vm_page_queue_enter(h, e, f)        queue_enter(h, e, vm_page_t, f)
1037 #define vm_page_queue_enter_first(h, e, f)  queue_enter_first(h, e, vm_page_t, f)
1038 #define vm_page_queue_remove(h, e, f)       queue_remove(h, e, vm_page_t, f)
1039 #define vm_page_queue_remove_first(h, e, f) queue_remove_first(h, e, vm_page_t, f)
1040 #define vm_page_queue_end                   queue_end
1041 #define vm_page_queue_empty                 queue_empty
1042 #define vm_page_queue_first                 queue_first
1043 #define vm_page_queue_last                  queue_last
1044 #define vm_page_queue_next                  queue_next
1045 #define vm_page_queue_prev                  queue_prev
1046 #define vm_page_queue_iterate(h, e, f)      queue_iterate(h, e, vm_page_t, f)
1047 
1048 #endif // __LP64__
1049 
1050 
1051 
1052 /*
1053  * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
1054  * represents a set of aging bins that are 'protected'...
1055  *
1056  * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
1057  * not yet been 'claimed' but have been aged out of the protective bins
1058  * this occurs in vm_page_speculate when it advances to the next bin
1059  * and discovers that it is still occupied... at that point, all of the
1060  * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
1061  * in that bin are all guaranteed to have reached at least the maximum age
1062  * we allow for a protected page... they can be older if there is no
1063  * memory pressure to pull them from the bin, or there are no new speculative pages
1064  * being generated to push them out.
1065  * this list is the one that vm_pageout_scan will prefer when looking
1066  * for pages to move to the underweight free list
1067  *
1068  * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
1069  * defines the amount of time a speculative page is normally
1070  * allowed to live in the 'protected' state (i.e. not available
1071  * to be stolen if vm_pageout_scan is running and looking for
1072  * pages)...  however, if the total number of speculative pages
1073  * in the protected state exceeds our limit (defined in vm_pageout.c)
1074  * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
1075  * vm_pageout_scan is allowed to steal pages from the protected
1076  * bucket even if they are underage.
1077  *
1078  * vm_pageout_scan is also allowed to pull pages from a protected
1079  * bin if the bin has reached the "age of consent" we've set
1080  */
1081 #define VM_PAGE_MAX_SPECULATIVE_AGE_Q   10
1082 #define VM_PAGE_MIN_SPECULATIVE_AGE_Q   1
1083 #define VM_PAGE_SPECULATIVE_AGED_Q      0
1084 
1085 #define VM_PAGE_SPECULATIVE_Q_AGE_MS    500
1086 
1087 struct vm_speculative_age_q {
1088 	/*
1089 	 * memory queue for speculative pages via clustered pageins
1090 	 */
1091 	vm_page_queue_head_t    age_q;
1092 	mach_timespec_t age_ts;
1093 } VM_PAGE_PACKED_ALIGNED;
1094 
1095 
1096 
1097 extern
1098 struct vm_speculative_age_q     vm_page_queue_speculative[];
1099 
1100 extern int                      speculative_steal_index;
1101 extern int                      speculative_age_index;
1102 extern unsigned int             vm_page_speculative_q_age_ms;
1103 
1104 
1105 typedef struct vm_locks_array {
1106 	char    pad  __attribute__ ((aligned(64)));
1107 	lck_mtx_t       vm_page_queue_lock2 __attribute__ ((aligned(64)));
1108 	lck_mtx_t       vm_page_queue_free_lock2 __attribute__ ((aligned(64)));
1109 	char    pad2  __attribute__ ((aligned(64)));
1110 } vm_locks_array_t;
1111 
1112 
1113 extern  void    vm_page_assign_special_state(vm_page_t mem, int mode);
1114 extern  void    vm_page_update_special_state(vm_page_t mem);
1115 extern  void    vm_page_add_to_specialq(vm_page_t mem, boolean_t first);
1116 extern  void    vm_page_remove_from_specialq(vm_page_t mem);
1117 
1118 #define VM_PAGE_WIRED(m)        ((m)->vmp_q_state == VM_PAGE_IS_WIRED)
1119 #define NEXT_PAGE(m)            ((m)->vmp_snext)
1120 #define NEXT_PAGE_PTR(m)        (&(m)->vmp_snext)
1121 
1122 /*
1123  * XXX	The unusual bit should not be necessary.  Most of the bit
1124  * XXX	fields above really want to be masks.
1125  */
1126 
1127 /*
1128  *	For debugging, this macro can be defined to perform
1129  *	some useful check on a page structure.
1130  *	INTENTIONALLY left as a no-op so that the
1131  *	current call-sites can be left intact for future uses.
1132  */
1133 
1134 #define VM_PAGE_CHECK(mem)                      \
1135 	MACRO_BEGIN                             \
1136 	MACRO_END
1137 
1138 /*     Page coloring:
1139  *
1140  *     The free page list is actually n lists, one per color,
1141  *     where the number of colors is a function of the machine's
1142  *     cache geometry set at system initialization.  To disable
1143  *     coloring, set vm_colors to 1 and vm_color_mask to 0.
1144  *     The boot-arg "colors" may be used to override vm_colors.
1145  *     Note that there is little harm in having more colors than needed.
1146  */
1147 
1148 #define MAX_COLORS      128
1149 #define DEFAULT_COLORS  32
1150 
1151 extern
1152 unsigned int    vm_colors;              /* must be in range 1..MAX_COLORS */
1153 extern
1154 unsigned int    vm_color_mask;          /* must be (vm_colors-1) */
1155 extern
1156 unsigned int    vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
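
/*
 * Illustrative sketch (editorial, not part of the original header): a page's
 * free-list color is simply its physical page number (its clump number on
 * x86_64) masked by vm_color_mask, so with the default of 32 colors:
 *
 *	color = VM_PAGE_GET_COLOR(m);      // value in 0 .. vm_colors-1
 *
 * and the page is kept on the free list for that color.
 */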
1157 
1158 /*
1159  * Wired memory is a very limited resource and we can't let users exhaust it
1160  * and deadlock the entire system.  We enforce the following limits:
1161  *
1162  * vm_per_task_user_wire_limit
1163  *      how much memory can be user-wired in one user task
1164  *
1165  * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
1166  *      how much memory can be user-wired in all user tasks
1167  *
1168  * These values are set to defaults based on the number of pages managed
1169  * by the VM system. They can be overridden via sysctls.
1170  * See kmem_set_user_wire_limits for details on the default values.
1171  *
1172  * Regardless of the amount of memory in the system, we never reserve
1173  * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
1174  */
1175 #if defined(__LP64__)
1176 #define VM_NOT_USER_WIREABLE_MAX (32ULL*1024*1024*1024)     /* 32GB */
1177 #else
1178 #define VM_NOT_USER_WIREABLE_MAX (1UL*1024*1024*1024)     /* 1GB */
1179 #endif /* __LP64__ */
1180 extern
1181 vm_map_size_t   vm_per_task_user_wire_limit;
1182 extern
1183 vm_map_size_t   vm_global_user_wire_limit;
1184 extern
1185 uint64_t        vm_add_wire_count_over_global_limit;
1186 extern
1187 uint64_t        vm_add_wire_count_over_user_limit;
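
/*
 * Illustrative sketch (editorial; the real checks live in the wiring path,
 * e.g. vm_map_wire, and "task_wired"/"global_wired" are placeholders for the
 * actual accounting): a user wire request is refused when it would push a
 * limit past its ceiling, and the matching overflow counter is bumped:
 *
 *	if (task_wired + size > vm_per_task_user_wire_limit) {
 *	        vm_add_wire_count_over_user_limit++;      // refused: per-task limit
 *	} else if (global_wired + size > vm_global_user_wire_limit) {
 *	        vm_add_wire_count_over_global_limit++;    // refused: global limit
 *	}
 */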
1188 
1189 /*
1190  *	Each pageable resident page falls into one of three lists:
1191  *
1192  *	free
1193  *		Available for allocation now.  The free list is
1194  *		actually an array of lists, one per color.
1195  *	inactive
1196  *		Not referenced in any map, but still has an
1197  *		object/offset-page mapping, and may be dirty.
1198  *		This is the list of pages that should be
1199  *		paged out next.  There are actually two
1200  *		inactive lists, one for pages brought in from
1201  *		disk or other backing store, and another
1202  *		for "zero-filled" pages.  See vm_pageout_scan()
1203  *		for the distinction and usage.
1204  *	active
1205  *		A list of pages which have been placed in
1206  *		at least one physical map.  This list is
1207  *		ordered, in LRU-like fashion.
1208  */
1209 
1210 
1211 #define VPL_LOCK_SPIN 1
1212 
1213 struct vpl {
1214 	vm_page_queue_head_t    vpl_queue;
1215 	unsigned int    vpl_count;
1216 	unsigned int    vpl_internal_count;
1217 	unsigned int    vpl_external_count;
1218 	lck_spin_t      vpl_lock;
1219 };
1220 
1221 extern
1222 struct vpl     * /* __zpercpu */ vm_page_local_q;
1223 extern
1224 unsigned int    vm_page_local_q_soft_limit;
1225 extern
1226 unsigned int    vm_page_local_q_hard_limit;
1227 extern
1228 vm_locks_array_t vm_page_locks;
1229 
1230 extern
1231 vm_page_queue_head_t    vm_lopage_queue_free;           /* low memory free queue */
1232 extern
1233 vm_page_queue_head_t    vm_page_queue_active;   /* active memory queue */
1234 extern
1235 vm_page_queue_head_t    vm_page_queue_inactive; /* inactive memory queue for normal pages */
1236 #if CONFIG_SECLUDED_MEMORY
1237 extern
1238 vm_page_queue_head_t    vm_page_queue_secluded; /* reclaimable pages secluded for Camera */
1239 #endif /* CONFIG_SECLUDED_MEMORY */
1240 extern
1241 vm_page_queue_head_t    vm_page_queue_cleaned; /* clean-queue inactive memory */
1242 extern
1243 vm_page_queue_head_t    vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
1244 extern
1245 vm_page_queue_head_t    vm_page_queue_throttled;        /* memory queue for throttled pageout pages */
1246 
1247 extern
1248 queue_head_t    vm_objects_wired;
1249 extern
1250 lck_spin_t      vm_objects_wired_lock;
1251 
1252 #define VM_PAGE_DONATE_DISABLED     0
1253 #define VM_PAGE_DONATE_ENABLED      1
1254 extern
1255 uint32_t        vm_page_donate_mode;
1256 extern
1257 bool        vm_page_donate_queue_ripe;
1258 
1259 #define VM_PAGE_BACKGROUND_TARGET_MAX   50000
1260 #define VM_PAGE_BG_DISABLED     0
1261 #define VM_PAGE_BG_ENABLED     1
1262 
1263 extern
1264 vm_page_queue_head_t    vm_page_queue_background;
1265 extern
1266 uint64_t        vm_page_background_promoted_count;
1267 extern
1268 uint32_t        vm_page_background_count;
1269 extern
1270 uint32_t        vm_page_background_target;
1271 extern
1272 uint32_t        vm_page_background_internal_count;
1273 extern
1274 uint32_t        vm_page_background_external_count;
1275 extern
1276 uint32_t        vm_page_background_mode;
1277 extern
1278 uint32_t        vm_page_background_exclude_external;
1279 
1280 extern
1281 vm_page_queue_head_t    vm_page_queue_donate;
1282 extern
1283 uint32_t        vm_page_donate_count;
1284 extern
1285 uint32_t        vm_page_donate_target_low;
1286 extern
1287 uint32_t        vm_page_donate_target_high;
1288 #define VM_PAGE_DONATE_TARGET_LOWWATER  (100)
1289 #define VM_PAGE_DONATE_TARGET_HIGHWATER ((unsigned int)(atop_64(max_mem) / 8))
1290 
1291 extern
1292 vm_offset_t     first_phys_addr;        /* physical address for first_page */
1293 extern
1294 vm_offset_t     last_phys_addr;         /* physical address for last_page */
1295 
1296 extern
1297 unsigned int    vm_page_free_count;     /* How many pages are free? (sum of all colors) */
1298 extern
1299 unsigned int    vm_page_active_count;   /* How many pages are active? */
1300 extern
1301 unsigned int    vm_page_inactive_count; /* How many pages are inactive? */
1302 extern
1303 unsigned int vm_page_kernelcache_count; /* How many pages are used for the kernelcache? */
1304 extern
1305 unsigned int vm_page_realtime_count;    /* How many pages are used by realtime threads? */
1306 #if CONFIG_SECLUDED_MEMORY
1307 extern
1308 unsigned int    vm_page_secluded_count; /* How many pages are secluded? */
1309 extern
1310 unsigned int    vm_page_secluded_count_free; /* how many of them are free? */
1311 extern
1312 unsigned int    vm_page_secluded_count_inuse; /* how many of them are in use? */
1313 /*
1314  * We keep filling the secluded pool with new eligible pages and
1315  * we can overshoot our target by a lot.
1316  * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
1317  * pushing the extra secluded pages to the active or free queue.
1318  * Since these "over target" secluded pages are actually "available", jetsam
1319  * should consider them as such, so make them visible to jetsam via the
1320  * "vm_page_secluded_count_over_target" counter and update it whenever we
1321  * update vm_page_secluded_count or vm_page_secluded_target.
1322  */
1323 extern
1324 unsigned int    vm_page_secluded_count_over_target;
1325 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()                     \
1326 	MACRO_BEGIN                                                     \
1327 	if (vm_page_secluded_count > vm_page_secluded_target) {         \
1328 	        vm_page_secluded_count_over_target =                    \
1329 	                (vm_page_secluded_count - vm_page_secluded_target); \
1330 	} else {                                                        \
1331 	        vm_page_secluded_count_over_target = 0;                 \
1332 	}                                                               \
1333 	MACRO_END
1334 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() vm_page_secluded_count_over_target
1335 #else /* CONFIG_SECLUDED_MEMORY */
1336 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
1337 	MACRO_BEGIN                                 \
1338 	MACRO_END
1339 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() 0
1340 #endif /* CONFIG_SECLUDED_MEMORY */
1341 extern
1342 unsigned int    vm_page_cleaned_count; /* How many pages are in the clean queue? */
1343 extern
1344 unsigned int    vm_page_throttled_count;/* How many inactives are throttled */
1345 extern
1346 unsigned int    vm_page_speculative_count;      /* How many speculative pages are unclaimed? */
1347 extern unsigned int     vm_page_pageable_internal_count;
1348 extern unsigned int     vm_page_pageable_external_count;
1349 extern
1350 unsigned int    vm_page_xpmapped_external_count;        /* How many pages are mapped executable? */
1351 extern
1352 unsigned int    vm_page_external_count; /* How many pages are file-backed? */
1353 extern
1354 unsigned int    vm_page_internal_count; /* How many pages are anonymous? */
1355 extern
1356 unsigned int    vm_page_wire_count;             /* How many pages are wired? */
1357 extern
1358 unsigned int    vm_page_wire_count_initial;     /* How many pages wired at startup */
1359 extern
1360 unsigned int    vm_page_wire_count_on_boot;     /* even earlier than _initial */
1361 extern
1362 unsigned int    vm_page_free_target;    /* How many do we want free? */
1363 extern
1364 unsigned int    vm_page_free_min;       /* When to wakeup pageout */
1365 extern
1366 unsigned int    vm_page_throttle_limit; /* When to throttle new page creation */
1367 extern
1368 unsigned int    vm_page_inactive_target;/* How many do we want inactive? */
1369 #if CONFIG_SECLUDED_MEMORY
1370 extern
1371 unsigned int    vm_page_secluded_target;/* How many do we want secluded? */
1372 #endif /* CONFIG_SECLUDED_MEMORY */
1373 extern
1374 unsigned int    vm_page_anonymous_min;  /* When it's ok to pre-clean */
1375 extern
1376 unsigned int    vm_page_free_reserved;  /* How many pages reserved to do pageout */
1377 extern
1378 unsigned int    vm_page_gobble_count;
1379 extern
1380 unsigned int    vm_page_stolen_count;   /* Count of stolen pages not accounted in zones */
1381 extern
1382 unsigned int    vm_page_kern_lpage_count;   /* Count of large pages used in early boot */
1383 
1384 
1385 #if DEVELOPMENT || DEBUG
1386 extern
1387 unsigned int    vm_page_speculative_used;
1388 #endif
1389 
1390 extern
1391 unsigned int    vm_page_purgeable_count;/* How many pages are purgeable now ? */
1392 extern
1393 unsigned int    vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
1394 extern
1395 uint64_t        vm_page_purged_count;   /* How many pages got purged so far ? */
1396 
1397 extern unsigned int     vm_page_free_wanted;
1398 /* how many threads are waiting for memory */
1399 
1400 extern unsigned int     vm_page_free_wanted_privileged;
1401 /* how many VM privileged threads are waiting for memory */
1402 #if CONFIG_SECLUDED_MEMORY
1403 extern unsigned int     vm_page_free_wanted_secluded;
1404 /* how many threads are waiting for secluded memory */
1405 #endif /* CONFIG_SECLUDED_MEMORY */
1406 
1407 extern const ppnum_t    vm_page_fictitious_addr;
1408 /* (fake) phys_addr of fictitious pages */
1409 
1410 extern const ppnum_t    vm_page_guard_addr;
1411 /* (fake) phys_addr of guard pages */
1412 
1413 
1414 extern boolean_t        vm_page_deactivate_hint;
1415 
1416 extern int              vm_compressor_mode;
1417 
1418 /*
1419  * Defaults to true, so highest memory is used first.
1420  */
1421 extern boolean_t        vm_himemory_mode;
1422 
1423 extern boolean_t        vm_lopage_needed;
1424 extern uint32_t         vm_lopage_free_count;
1425 extern uint32_t         vm_lopage_free_limit;
1426 extern uint32_t         vm_lopage_lowater;
1427 extern boolean_t        vm_lopage_refill;
1428 extern uint64_t         max_valid_dma_address;
1429 extern ppnum_t          max_valid_low_ppnum;
1430 
1431 /*
1432  * Prototypes for functions exported by this module.
1433  */
1434 extern void             vm_page_bootstrap(
1435 	vm_offset_t     *startp,
1436 	vm_offset_t     *endp);
1437 
1438 extern void             vm_page_init_local_q(unsigned int num_cpus);
1439 
1440 extern void             vm_page_create(
1441 	ppnum_t         start,
1442 	ppnum_t         end);
1443 
1444 extern void             vm_page_create_retired(
1445 	ppnum_t         pn);
1446 
1447 extern vm_page_t        kdp_vm_page_lookup(
1448 	vm_object_t             object,
1449 	vm_object_offset_t      offset);
1450 
1451 extern vm_page_t        vm_page_lookup(
1452 	vm_object_t             object,
1453 	vm_object_offset_t      offset);
1454 
1455 extern vm_page_t        vm_page_grab_fictitious(boolean_t canwait);
1456 
1457 extern vm_page_t        vm_page_grab_guard(boolean_t canwait);
1458 
1459 extern void             vm_page_release_fictitious(
1460 	vm_page_t page);
1461 
1462 extern void             vm_free_delayed_pages(void);
1463 
1464 extern bool             vm_pool_low(void);
1465 
1466 extern vm_page_t        vm_page_grab(void);
1467 extern vm_page_t        vm_page_grab_options(int flags);
1468 
1469 #define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000
1470 #if CONFIG_SECLUDED_MEMORY
1471 #define VM_PAGE_GRAB_SECLUDED     0x00000001
1472 #endif /* CONFIG_SECLUDED_MEMORY */
1473 #define VM_PAGE_GRAB_Q_LOCK_HELD  0x00000002
1474 
1475 extern vm_page_t        vm_page_grablo(void);
1476 
1477 extern void             vm_page_release(
1478 	vm_page_t       page,
1479 	boolean_t       page_queues_locked);
1480 
1481 extern boolean_t        vm_page_wait(
1482 	int             interruptible );
1483 
1484 extern vm_page_t        vm_page_alloc(
1485 	vm_object_t             object,
1486 	vm_object_offset_t      offset);
1487 
1488 extern void             vm_page_init(
1489 	vm_page_t       page,
1490 	ppnum_t         phys_page,
1491 	boolean_t       lopage);
1492 
1493 extern void             vm_page_free(
1494 	vm_page_t       page);
1495 
1496 extern void             vm_page_free_unlocked(
1497 	vm_page_t       page,
1498 	boolean_t       remove_from_hash);
1499 
1500 extern void             vm_page_balance_inactive(
1501 	int             max_to_move);
1502 
1503 extern void             vm_page_activate(
1504 	vm_page_t       page);
1505 
1506 extern void             vm_page_deactivate(
1507 	vm_page_t       page);
1508 
1509 extern void             vm_page_deactivate_internal(
1510 	vm_page_t       page,
1511 	boolean_t       clear_hw_reference);
1512 
1513 extern void             vm_page_enqueue_cleaned(vm_page_t page);
1514 
1515 extern void             vm_page_lru(
1516 	vm_page_t       page);
1517 
1518 extern void             vm_page_speculate(
1519 	vm_page_t       page,
1520 	boolean_t       new);
1521 
1522 extern void             vm_page_speculate_ageit(
1523 	struct vm_speculative_age_q *aq);
1524 
1525 extern void             vm_page_reactivate_all_throttled(void);
1526 
1527 extern void             vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);
1528 
1529 extern void             vm_page_rename(
1530 	vm_page_t               page,
1531 	vm_object_t             new_object,
1532 	vm_object_offset_t      new_offset);
1533 
1534 extern void             vm_page_insert(
1535 	vm_page_t               page,
1536 	vm_object_t             object,
1537 	vm_object_offset_t      offset);
1538 
1539 extern void             vm_page_insert_wired(
1540 	vm_page_t               page,
1541 	vm_object_t             object,
1542 	vm_object_offset_t      offset,
1543 	vm_tag_t                tag);
1544 
1545 extern void             vm_page_insert_internal(
1546 	vm_page_t               page,
1547 	vm_object_t             object,
1548 	vm_object_offset_t      offset,
1549 	vm_tag_t                tag,
1550 	boolean_t               queues_lock_held,
1551 	boolean_t               insert_in_hash,
1552 	boolean_t               batch_pmap_op,
1553 	boolean_t               delayed_accounting,
1554 	uint64_t                *delayed_ledger_update);
1555 
1556 extern void             vm_page_replace(
1557 	vm_page_t               mem,
1558 	vm_object_t             object,
1559 	vm_object_offset_t      offset);
1560 
1561 extern void             vm_page_remove(
1562 	vm_page_t       page,
1563 	boolean_t       remove_from_hash);
1564 
1565 extern void             vm_page_zero_fill(
1566 	vm_page_t       page);
1567 
1568 extern void             vm_page_part_zero_fill(
1569 	vm_page_t       m,
1570 	vm_offset_t     m_pa,
1571 	vm_size_t       len);
1572 
1573 extern void             vm_page_copy(
1574 	vm_page_t       src_page,
1575 	vm_page_t       dest_page);
1576 
1577 extern void             vm_page_part_copy(
1578 	vm_page_t       src_m,
1579 	vm_offset_t     src_pa,
1580 	vm_page_t       dst_m,
1581 	vm_offset_t     dst_pa,
1582 	vm_size_t       len);
1583 
1584 extern void             vm_page_wire(
1585 	vm_page_t       page,
1586 	vm_tag_t        tag,
1587 	boolean_t       check_memorystatus);
1588 
1589 extern void             vm_page_unwire(
1590 	vm_page_t       page,
1591 	boolean_t       queueit);
1592 
1593 extern void             vm_set_page_size(void);
1594 
1595 extern void             vm_page_gobble(
1596 	vm_page_t      page);
1597 
1598 extern void             vm_page_validate_cs(
1599 	vm_page_t       page,
1600 	vm_map_size_t   fault_page_size,
1601 	vm_map_offset_t fault_phys_offset);
1602 extern void             vm_page_validate_cs_mapped(
1603 	vm_page_t       page,
1604 	vm_map_size_t   fault_page_size,
1605 	vm_map_offset_t fault_phys_offset,
1606 	const void      *kaddr);
1607 extern void             vm_page_validate_cs_mapped_slow(
1608 	vm_page_t       page,
1609 	const void      *kaddr);
1610 extern void             vm_page_validate_cs_mapped_chunk(
1611 	vm_page_t       page,
1612 	const void      *kaddr,
1613 	vm_offset_t     chunk_offset,
1614 	vm_size_t       chunk_size,
1615 	boolean_t       *validated,
1616 	unsigned        *tainted);
1617 
1618 extern void             vm_page_free_prepare_queues(
1619 	vm_page_t       page);
1620 
1621 extern void             vm_page_free_prepare_object(
1622 	vm_page_t       page,
1623 	boolean_t       remove_from_hash);
1624 
1625 #if CONFIG_IOSCHED
1626 extern wait_result_t    vm_page_sleep(
1627 	vm_object_t     object,
1628 	vm_page_t       m,
1629 	int     interruptible);
1630 #endif
1631 
1632 extern void vm_pressure_response(void);
1633 
1634 #if CONFIG_JETSAM
1635 extern void memorystatus_pages_update(unsigned int pages_avail);
1636 
1637 #define VM_CHECK_MEMORYSTATUS do { \
1638 	memorystatus_pages_update(              \
1639 	        vm_page_pageable_external_count + \
1640 	        vm_page_free_count +            \
1641 	        VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \
1642 	        (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
1643 	        ); \
1644 	} while(0)
1645 
1646 #else /* CONFIG_JETSAM */
1647 
1648 #if !XNU_TARGET_OS_OSX
1649 
1650 #define VM_CHECK_MEMORYSTATUS do {} while(0)
1651 
1652 #else /* !XNU_TARGET_OS_OSX */
1653 
1654 #define VM_CHECK_MEMORYSTATUS   vm_pressure_response()
1655 
1656 #endif /* !XNU_TARGET_OS_OSX */
1657 
1658 #endif /* CONFIG_JETSAM */
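
/*
 * Usage sketch (an assumption about typical call sites, not mandated by
 * this header): code that materially changes the counters summed above,
 * such as a bulk page free, would follow up with
 *
 *	VM_CHECK_MEMORYSTATUS;
 *
 * so that jetsam (or the pressure handler on macOS) sees the new total of
 * available pages.
 */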
1659 
1660 /*
1661  * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
1662  * protected by the object lock.
1663  */
1664 
1665 #if !XNU_TARGET_OS_OSX
1666 #define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
1667 	        MACRO_BEGIN                                             \
1668 	        vm_page_t __page__ = (m);                               \
1669 	        if (__page__->vmp_pmapped == TRUE &&                    \
1670 	            __page__->vmp_wpmapped == TRUE &&                   \
1671 	            __page__->vmp_dirty == FALSE &&                     \
1672 	            (set_pmap_modified)) {                              \
1673 	                pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
1674 	        }                                                       \
1675 	        __page__->vmp_dirty = TRUE;                             \
1676 	        MACRO_END
1677 #else /* !XNU_TARGET_OS_OSX */
1678 #define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
1679 	        MACRO_BEGIN                                             \
1680 	        vm_page_t __page__ = (m);                               \
1681 	        __page__->vmp_dirty = TRUE;                             \
1682 	        MACRO_END
1683 #endif /* !XNU_TARGET_OS_OSX */
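
/*
 * Hypothetical usage sketch: with the owning object locked, a write fault
 * path would mark the page dirty and, on non-macOS targets, let the macro
 * forward the modify bit to the pmap layer when the page is already
 * pmapped/wpmapped:
 *
 *	vm_object_lock(object);
 *	SET_PAGE_DIRTY(m, TRUE);
 *	vm_object_unlock(object);
 */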
1684 
1685 #define PAGE_ASSERT_WAIT(m, interruptible)                      \
1686 	        (((m)->vmp_wanted = TRUE),                      \
1687 	         assert_wait((event_t) (m), (interruptible)))
1688 
1689 #if CONFIG_IOSCHED
1690 #define PAGE_SLEEP(o, m, interruptible)                         \
1691 	        vm_page_sleep(o, m, interruptible)
1692 #else
1693 #define PAGE_SLEEP(o, m, interruptible)                         \
1694 	(((m)->vmp_wanted = TRUE),                              \
1695 	 thread_sleep_vm_object((o), (m), (interruptible)))
1696 #endif
1697 
1698 #define PAGE_WAKEUP_DONE(m)                                     \
1699 	        MACRO_BEGIN                                     \
1700 	        (m)->vmp_busy = FALSE;                          \
1701 	        if ((m)->vmp_wanted) {                          \
1702 	                (m)->vmp_wanted = FALSE;                \
1703 	                thread_wakeup((event_t) (m));           \
1704 	        }                                               \
1705 	        MACRO_END
1706 
1707 #define PAGE_WAKEUP(m)                                          \
1708 	        MACRO_BEGIN                                     \
1709 	        if ((m)->vmp_wanted) {                          \
1710 	                (m)->vmp_wanted = FALSE;                \
1711 	                thread_wakeup((event_t) (m));           \
1712 	        }                                               \
1713 	        MACRO_END
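
/*
 * Illustrative sketch of the busy/wanted handshake (object lock held, per
 * the comment above): a thread that needs a busy page waits on it, and the
 * thread that owns the page wakes any waiters when it is finished:
 *
 *	while (m->vmp_busy) {
 *	        PAGE_SLEEP(object, m, THREAD_UNINT);
 *	}
 *	... operate on the page ...
 *	PAGE_WAKEUP_DONE(m);
 *
 * This is a simplified sketch; real callers typically re-validate the page
 * after waking, since the object lock is dropped while sleeping.
 */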
1714 
1715 #define VM_PAGE_FREE(p)                         \
1716 	        MACRO_BEGIN                     \
1717 	        vm_page_free_unlocked(p, TRUE); \
1718 	        MACRO_END
1719 
1720 #define VM_PAGE_WAIT()          ((void)vm_page_wait(THREAD_UNINT))
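
/*
 * Hypothetical allocation sketch built from the helpers above: grab a free
 * page and, if none is available, block until the free list is replenished,
 * then retry:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *	        VM_PAGE_WAIT();
 *	}
 */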
1721 
1722 #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2)
1723 #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2)
1724 
1725 static inline void
1726 vm_free_page_lock(void)
1727 {
1728 	lck_mtx_lock(&vm_page_queue_free_lock);
1729 }
1730 
1731 static inline void
1732 vm_free_page_lock_spin(void)
1733 {
1734 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
1735 }
1736 
1737 static inline void
1738 vm_free_page_unlock(void)
1739 {
1740 	lck_mtx_unlock(&vm_page_queue_free_lock);
1741 }
1742 
1743 
1744 static inline void
1745 vm_page_lock_queues(void)
1746 {
1747 	lck_mtx_lock(&vm_page_queue_lock);
1748 }
1749 
1750 static inline boolean_t
1751 vm_page_trylock_queues(void)
1752 {
1753 	boolean_t ret;
1754 	ret = lck_mtx_try_lock(&vm_page_queue_lock);
1755 	return ret;
1756 }
1757 
1758 static inline void
1759 vm_page_unlock_queues(void)
1760 {
1761 	lck_mtx_unlock(&vm_page_queue_lock);
1762 }
1763 
1764 static inline void
1765 vm_page_lockspin_queues(void)
1766 {
1767 	lck_mtx_lock_spin(&vm_page_queue_lock);
1768 }
1769 
1770 static inline boolean_t
1771 vm_page_trylockspin_queues(void)
1772 {
1773 	boolean_t ret;
1774 	ret = lck_mtx_try_lock_spin(&vm_page_queue_lock);
1775 	return ret;
1776 }
1777 #define vm_page_lockconvert_queues()    lck_mtx_convert_spin(&vm_page_queue_lock)
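
/*
 * Minimal locking sketch (an assumption about caller convention, shown only
 * for illustration): page queue manipulations are bracketed by the helpers
 * above, e.g.
 *
 *	vm_page_lock_queues();
 *	vm_page_activate(m);
 *	vm_page_unlock_queues();
 *
 * with vm_page_lockspin_queues()/vm_page_lockconvert_queues() available when
 * a critical section starts out short but may later need to block.
 */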
1778 
1779 #ifdef  VPL_LOCK_SPIN
1780 extern lck_grp_t vm_page_lck_grp_local;
1781 
1782 #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
1783 #define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
1784 #define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
1785 #else
1786 #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
1787 #define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
1788 #define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
1789 #endif
1790 
1791 
1792 #if DEVELOPMENT || DEBUG
1793 #define VM_PAGE_SPECULATIVE_USED_ADD()                          \
1794 	MACRO_BEGIN                                             \
1795 	OSAddAtomic(1, &vm_page_speculative_used);              \
1796 	MACRO_END
1797 #else
1798 #define VM_PAGE_SPECULATIVE_USED_ADD()
1799 #endif
1800 
1801 
1802 #define VM_PAGE_CONSUME_CLUSTERED(mem)                          \
1803 	MACRO_BEGIN                                             \
1804 	ppnum_t	__phys_page;                                    \
1805 	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem);               \
1806 	pmap_lock_phys_page(__phys_page);                       \
1807 	if (mem->vmp_clustered) {                               \
1808 	        vm_object_t o;                                  \
1809 	        o = VM_PAGE_OBJECT(mem);                        \
1810 	        assert(o);                                      \
1811 	        o->pages_used++;                                \
1812 	        mem->vmp_clustered = FALSE;                     \
1813 	        VM_PAGE_SPECULATIVE_USED_ADD();                 \
1814 	}                                                       \
1815 	pmap_unlock_phys_page(__phys_page);                     \
1816 	MACRO_END
1817 
1818 
1819 #define VM_PAGE_COUNT_AS_PAGEIN(mem)                            \
1820 	MACRO_BEGIN                                             \
1821 	{                                                       \
1822 	vm_object_t o;                                          \
1823 	o = VM_PAGE_OBJECT(mem);                                \
1824 	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);           \
1825 	counter_inc(&current_task()->pageins);                  \
1826 	if (o->internal) {                                      \
1827 	        DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);       \
1828 	} else {                                                \
1829 	        DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \
1830 	}                                                       \
1831 	}                                                       \
1832 	MACRO_END
1833 
1834 /* adjust for stolen pages accounted elsewhere */
1835 #define VM_PAGE_MOVE_STOLEN(page_count)                         \
1836 	MACRO_BEGIN                                             \
1837 	vm_page_stolen_count -=	(page_count);                   \
1838 	vm_page_wire_count_initial -= (page_count);             \
1839 	MACRO_END
1840 
1841 #define DW_vm_page_unwire               0x01
1842 #define DW_vm_page_wire                 0x02
1843 #define DW_vm_page_free                 0x04
1844 #define DW_vm_page_activate             0x08
1845 #define DW_vm_page_deactivate_internal  0x10
1846 #define DW_vm_page_speculate            0x20
1847 #define DW_vm_page_lru                  0x40
1848 #define DW_vm_pageout_throttle_up       0x80
1849 #define DW_PAGE_WAKEUP                  0x100
1850 #define DW_clear_busy                   0x200
1851 #define DW_clear_reference              0x400
1852 #define DW_set_reference                0x800
1853 #define DW_move_page                    0x1000
1854 #define DW_VM_PAGE_QUEUES_REMOVE        0x2000
1855 #define DW_enqueue_cleaned              0x4000
1856 #define DW_vm_phantom_cache_update      0x8000
1857 
1858 struct vm_page_delayed_work {
1859 	vm_page_t       dw_m;
1860 	int             dw_mask;
1861 };
1862 
1863 #define DEFAULT_DELAYED_WORK_LIMIT      32
1864 
1865 struct vm_page_delayed_work_ctx {
1866 	struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT];
1867 	thread_t                    delayed_owner;
1868 };
1869 
1870 void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);
1871 
1872 extern unsigned int vm_max_delayed_work_limit;
1873 
1874 #define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))
1875 
1876 /*
1877  * vm_page_do_delayed_work may need to drop the object lock...
1878  * if it does, the pages it's looking at need to be kept stable
1879  * via the busy bit.  So if busy isn't already set, we set it here
1880  * and ask vm_page_do_delayed_work to clear it and wake up anyone
1881  * who might have blocked on the page once we're done processing
1882  * it.
1883  */
1884 
1885 #define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)              \
1886 	MACRO_BEGIN                                             \
1887 	if (mem->vmp_busy == FALSE) {                           \
1888 	        mem->vmp_busy = TRUE;                           \
1889 	        if ( !(dwp->dw_mask & DW_vm_page_free))         \
1890 	                dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
1891 	}                                                       \
1892 	dwp->dw_m = mem;                                        \
1893 	dwp++;                                                  \
1894 	dw_cnt++;                                               \
1895 	MACRO_END
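
/*
 * Batching sketch (illustrative, using only the definitions above): while
 * walking an object's resident pages with the object lock held, callers
 * accumulate per-page work and flush the batch in a single
 * vm_page_do_delayed_work() call once it fills up:
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	dwp->dw_mask = DW_vm_page_deactivate_internal | DW_clear_reference;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 *	if (dw_count >= dw_limit) {
 *	        vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *	                                &dw_array[0], dw_count);
 *	        dwp = &dw_array[0];
 *	        dw_count = 0;
 *	}
 */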
1896 
1897 extern vm_page_t vm_object_page_grab(vm_object_t);
1898 
1899 #if VM_PAGE_BUCKETS_CHECK
1900 extern void vm_page_buckets_check(void);
1901 #endif /* VM_PAGE_BUCKETS_CHECK */
1902 
1903 extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq);
1904 extern void vm_page_remove_internal(vm_page_t page);
1905 extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
1906 extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
1907 extern void vm_page_check_pageable_safe(vm_page_t page);
1908 
1909 #if CONFIG_SECLUDED_MEMORY
1910 extern uint64_t secluded_shutoff_trigger;
1911 extern uint64_t secluded_shutoff_headroom;
1912 extern void start_secluded_suppression(task_t);
1913 extern void stop_secluded_suppression(task_t);
1914 #endif /* CONFIG_SECLUDED_MEMORY */
1915 
1916 extern void vm_retire_boot_pages(void);
1917 
1918 
1919 #define VMP_ERROR_GET(p) ((p)->vmp_error)
1920 
1921 
1922 #endif  /* _VM_VM_PAGE_H_ */
1923