1 /*
2  * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.h
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *	Date:	1985
62  *
63  *	Resident memory system definitions.
64  */
65 
66 #ifndef _VM_VM_PAGE_H_
67 #define _VM_VM_PAGE_H_
68 
69 #include <debug.h>
70 #include <vm/vm_options.h>
71 #include <vm/vm_protos.h>
72 #include <mach/boolean.h>
73 #include <mach/vm_prot.h>
74 #include <mach/vm_param.h>
75 #include <mach/memory_object_types.h> /* for VMP_CS_BITS... */
76 
77 
78 #if    defined(__LP64__)
79 
80 /*
81  * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
82  * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
83  * vm_page_t's from doesn't span more than 256 Gbytes, we're safe.   There are live tests in the
84  * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
85  * pointers from the 2 ends of these spaces
86  */
87 typedef uint32_t        vm_page_packed_t;
88 
89 struct vm_page_packed_queue_entry {
90 	vm_page_packed_t        next;          /* next element */
91 	vm_page_packed_t        prev;          /* previous element */
92 };
93 
94 typedef struct vm_page_packed_queue_entry       *vm_page_queue_t;
95 typedef struct vm_page_packed_queue_entry       vm_page_queue_head_t;
96 typedef struct vm_page_packed_queue_entry       vm_page_queue_chain_t;
97 typedef struct vm_page_packed_queue_entry       *vm_page_queue_entry_t;
98 
99 typedef vm_page_packed_t                        vm_page_object_t;
100 
101 #else // __LP64__
102 
103 /*
104  * we can't do the packing trick on 32 bit architectures
105  * so just turn the macros into noops.
106  */
107 typedef struct vm_page          *vm_page_packed_t;
108 
109 #define vm_page_queue_t         queue_t
110 #define vm_page_queue_head_t    queue_head_t
111 #define vm_page_queue_chain_t   queue_chain_t
112 #define vm_page_queue_entry_t   queue_entry_t
113 
114 #define vm_page_object_t        vm_object_t
115 #endif // __LP64__
116 
117 
118 #include <vm/vm_object.h>
119 #include <kern/queue.h>
120 #include <kern/locks.h>
121 
122 #include <kern/macro_help.h>
123 #include <libkern/OSAtomic.h>
124 
125 
126 
127 #define VM_PAGE_COMPRESSOR_COUNT        (compressor_object->resident_page_count)
128 
129 /*
130  *	Management of resident (logical) pages.
131  *
132  *	A small structure is kept for each resident
133  *	page, indexed by page number.  Each structure
134  *	is an element of several lists:
135  *
136  *		A hash table bucket used to quickly
137  *		perform object/offset lookups
138  *
139  *		A list of all pages for a given object,
140  *		so they can be quickly deactivated at
141  *		time of deallocation.
142  *
143  *		An ordered list of pages due for pageout.
144  *
145  *	In addition, the structure contains the object
146  *	and offset to which this page belongs (for pageout),
147  *	and sundry status bits.
148  *
149  *	Fields in this structure are locked either by the lock on the
150  *	object that the page belongs to (O) or by the lock on the page
151  *	queues (P).  [Some fields require that both locks be held to
152  *	change that field; holding either lock is sufficient to read.]
153  */
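
/*
 * Illustrative sketch of the locking rule above; this is an example added
 * for clarity, not code from the original header, and it is guarded out
 * because vm_page_t and the page-queues lock macros are only defined
 * further down.  A writer of a field marked (O&P), e.g. vmp_wire_count,
 * holds both locks; a reader may hold either one.
 */
#if 0   /* example only */
	vm_object_lock(object);                 /* (O): the owning object's lock */
	vm_page_lock_queues();                  /* (P): the page queues lock     */
	m->vmp_wire_count++;                    /* (O&P): both held to modify    */
	vm_page_unlock_queues();
	vm_object_unlock(object);
#endif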
154 
155 #define VM_PAGE_NULL            ((vm_page_t) 0)
156 
157 extern  char    vm_page_inactive_states[];
158 extern  char    vm_page_pageable_states[];
159 extern  char    vm_page_non_speculative_pageable_states[];
160 extern  char    vm_page_active_or_inactive_states[];
161 
162 
163 #define VM_PAGE_INACTIVE(m)                     (vm_page_inactive_states[m->vmp_q_state])
164 #define VM_PAGE_PAGEABLE(m)                     (vm_page_pageable_states[m->vmp_q_state])
165 #define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)     (vm_page_non_speculative_pageable_states[m->vmp_q_state])
166 #define VM_PAGE_ACTIVE_OR_INACTIVE(m)           (vm_page_active_or_inactive_states[m->vmp_q_state])
167 
168 
169 #define VM_PAGE_NOT_ON_Q                0               /* page is not present on any queue, nor is it wired... mainly a transient state */
170 #define VM_PAGE_IS_WIRED                1               /* page is currently wired */
171 #define VM_PAGE_USED_BY_COMPRESSOR      2               /* page is in use by the compressor to hold compressed data */
172 #define VM_PAGE_ON_FREE_Q               3               /* page is on the main free queue */
173 #define VM_PAGE_ON_FREE_LOCAL_Q         4               /* page is on one of the per-CPU free queues */
174 #define VM_PAGE_ON_FREE_LOPAGE_Q        5               /* page is on the lopage pool free list */
175 #define VM_PAGE_ON_THROTTLED_Q          6               /* page is on the throttled queue... we stash anonymous pages here when not paging */
176 #define VM_PAGE_ON_PAGEOUT_Q            7               /* page is on one of the pageout queues (internal/external) awaiting processing */
177 #define VM_PAGE_ON_SPECULATIVE_Q        8               /* page is on one of the speculative queues */
178 #define VM_PAGE_ON_ACTIVE_LOCAL_Q       9               /* page has recently been created and is being held in one of the per-CPU local queues */
179 #define VM_PAGE_ON_ACTIVE_Q             10              /* page is in global active queue */
180 #define VM_PAGE_ON_INACTIVE_INTERNAL_Q  11              /* page is on the inactive internal queue a.k.a.  anonymous queue */
181 #define VM_PAGE_ON_INACTIVE_EXTERNAL_Q  12              /* page is on the inactive external queue a.k.a.  file backed queue */
182 #define VM_PAGE_ON_INACTIVE_CLEANED_Q   13              /* page has been cleaned to a backing file and is ready to be stolen */
183 #define VM_PAGE_ON_SECLUDED_Q           14              /* page is on secluded queue */
184 #define VM_PAGE_Q_STATE_LAST_VALID_VALUE        14      /* we currently use 4 bits for the state... don't let this go beyond 15 */
185 
186 #define VM_PAGE_Q_STATE_ARRAY_SIZE      (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)
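
/*
 * Illustrative compile-time guard, not present in the original header: the
 * note above caps the last valid value at 15 because vmp_q_state is a
 * 4-bit field of struct vm_page below.
 */
_Static_assert(VM_PAGE_Q_STATE_LAST_VALID_VALUE <= 15,
    "vmp_q_state only has 4 bits available");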
187 
188 
189 /*
190  * The structure itself. See the block comment above for what (O) and (P) mean.
191  */
192 #define vmp_pageq vmp_q_un.vmp_q_pageq
193 #define vmp_snext vmp_q_un.vmp_q_snext
194 
195 struct vm_page {
196 	union {
197 		vm_page_queue_chain_t vmp_q_pageq;           /* queue info for FIFO queue or free list (P) */
198 		struct vm_page        *vmp_q_snext;
199 	} vmp_q_un;
200 
201 	vm_page_queue_chain_t         vmp_listq;           /* all pages in same object (O) */
202 
203 #if CONFIG_BACKGROUND_QUEUE
204 	vm_page_queue_chain_t         vmp_backgroundq;     /* anonymous pages in the background pool (P) */
205 #endif
206 
207 	vm_object_offset_t            vmp_offset;          /* offset into that object (O,P) */
208 	vm_page_object_t              vmp_object;          /* which object am I in (O&P) */
209 
210 	/*
211 	 * The following word of flags is always protected by the "page queues" lock.
212 	 *
213 	 * We use 'vmp_wire_count' to store the local queue id if local queues are enabled.
214 	 * See the comments at 'vm_page_queues_remove' as to why this is safe to do.
215 	 */
216 #define vmp_local_id vmp_wire_count
217 	unsigned int vmp_wire_count:16,      /* how many wired down maps use me? (O&P) */
218 	    vmp_q_state:4,                   /* which q is the page on (P) */
219 	    vmp_in_background:1,
220 	    vmp_on_backgroundq:1,
221 	    vmp_gobbled:1,                   /* page used internally (P) */
222 	    vmp_laundry:1,                   /* page is being cleaned now (P)*/
223 	    vmp_no_cache:1,                  /* page is not to be cached and should */
224 	                                     /* be reused ahead of other pages (P) */
225 	    vmp_private:1,                   /* Page should not be returned to the free list (P) */
226 	    vmp_reference:1,                 /* page has been used (P) */
227 	    vmp_lopage:1,
228 	    vmp_unused_page_bits:4;
229 
230 	/*
231 	 * MUST keep the 2 32 bit words used as bit fields
232 	 * separated since the compiler has a nasty habit
233 	 * of using 64 bit loads and stores on them as
234 	 * if they were a single 64 bit field... since
235 	 * they are protected by 2 different locks, this
236 	 * is a real problem
237 	 */
238 	vm_page_packed_t vmp_next_m;            /* VP bucket link (O) */
239 
240 	/*
241 	 * The following word of flags is protected by the "VM object" lock.
242 	 *
243 	 * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
244 	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
245 	 * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
246 	 * It's also ok to modify them behind just the VM object "exclusive" lock.
247 	 */
248 	unsigned int    vmp_busy:1,           /* page is in transit (O) */
249 	    vmp_wanted:1,                     /* someone is waiting for page (O) */
250 	    vmp_tabled:1,                     /* page is in VP table (O) */
251 	    vmp_hashed:1,                     /* page is in vm_page_buckets[] (O) + the bucket lock */
252 	    vmp_fictitious:1,                 /* Physical page doesn't exist (O) */
253 	    vmp_clustered:1,                  /* page is not the faulted page (O) or (O-shared AND pmap_page) */
254 	    vmp_pmapped:1,                    /* page has at some time been entered into a pmap (O) or */
255 	                                      /* (O-shared AND pmap_page) */
256 	    vmp_xpmapped:1,                   /* page has been entered with execute permission (O) or */
257 	                                      /* (O-shared AND pmap_page) */
258 	    vmp_wpmapped:1,                   /* page has been entered at some point into a pmap for write (O) */
259 	    vmp_free_when_done:1,             /* page is to be freed once cleaning is completed (O) */
260 	    vmp_absent:1,                     /* Data has been requested, but is not yet available (O) */
261 	    vmp_error:1,                      /* Data manager was unable to provide data due to error (O) */
262 	    vmp_dirty:1,                      /* Page must be cleaned (O) */
263 	    vmp_cleaning:1,                   /* Page clean has begun (O) */
264 	    vmp_precious:1,                   /* Page is precious; data must be returned even if clean (O) */
265 	    vmp_overwriting:1,                /* Request to unlock has been made without having data. (O) */
266 	                                      /* [See vm_fault_page_overwrite] */
267 	    vmp_restart:1,                    /* Page was pushed higher in shadow chain by copy_call-related pagers */
268 	                                      /* start again at top of chain */
269 	    vmp_unusual:1,                    /* Page is absent, error, restart or page locked */
270 	    vmp_cs_validated:VMP_CS_BITS, /* code-signing: page was checked */
271 	    vmp_cs_tainted:VMP_CS_BITS,   /* code-signing: page is tainted */
272 	    vmp_cs_nx:VMP_CS_BITS,        /* code-signing: page is nx */
273 	    vmp_reusable:1,
274 	    vmp_written_by_kernel:1;             /* page was written by kernel (i.e. decompressed) */
275 
276 #if    !defined(__arm__) && !defined(__arm64__)
277 	ppnum_t         vmp_phys_page;        /* Physical page number of the page */
278 #endif
279 };
280 
281 typedef struct vm_page  *vm_page_t;
282 extern vm_page_t        vm_pages;
283 extern vm_page_t        vm_page_array_beginning_addr;
284 extern vm_page_t        vm_page_array_ending_addr;
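
/*
 * Illustrative helper, not part of the original header: the bounds test
 * that VM_PAGE_GET_PHYS_PAGE() and vm_page_pack_ptr() below use to tell a
 * vm_pages[] array element apart from a separately allocated vm_page.
 */
static inline bool
vm_page_example_is_in_array(vm_page_t m)
{
	return m >= vm_page_array_beginning_addr &&
	       m < vm_page_array_ending_addr;
}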
285 
286 static inline int
287 VMP_CS_FOR_OFFSET(
288 	vm_map_offset_t fault_phys_offset)
289 {
290 	assertf(fault_phys_offset < PAGE_SIZE &&
291 	    !(fault_phys_offset & FOURK_PAGE_MASK),
292 	    "offset 0x%llx\n", (uint64_t)fault_phys_offset);
293 	return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
294 }
295 static inline bool
296 VMP_CS_VALIDATED(
297 	vm_page_t p,
298 	vm_map_size_t fault_page_size,
299 	vm_map_offset_t fault_phys_offset)
300 {
301 	assertf(fault_page_size <= PAGE_SIZE,
302 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
303 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
304 	if (fault_page_size == PAGE_SIZE) {
305 		return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
306 	}
307 	return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
308 }
309 static inline bool
310 VMP_CS_TAINTED(
311 	vm_page_t p,
312 	vm_map_size_t fault_page_size,
313 	vm_map_offset_t fault_phys_offset)
314 {
315 	assertf(fault_page_size <= PAGE_SIZE,
316 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
317 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
318 	if (fault_page_size == PAGE_SIZE) {
319 		return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
320 	}
321 	return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
322 }
323 static inline bool
324 VMP_CS_NX(
325 	vm_page_t p,
326 	vm_map_size_t fault_page_size,
327 	vm_map_offset_t fault_phys_offset)
328 {
329 	assertf(fault_page_size <= PAGE_SIZE,
330 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
331 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
332 	if (fault_page_size == PAGE_SIZE) {
333 		return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
334 	}
335 	return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
336 }
337 static inline void
338 VMP_CS_SET_VALIDATED(
339 	vm_page_t p,
340 	vm_map_size_t fault_page_size,
341 	vm_map_offset_t fault_phys_offset,
342 	boolean_t value)
343 {
344 	assertf(fault_page_size <= PAGE_SIZE,
345 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
346 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
347 	if (value) {
348 		if (fault_page_size == PAGE_SIZE) {
349 			p->vmp_cs_validated = VMP_CS_ALL_TRUE;
350 		}
351 		p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
352 	} else {
353 		if (fault_page_size == PAGE_SIZE) {
354 			p->vmp_cs_validated = VMP_CS_ALL_FALSE;
355 		}
356 		p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
357 	}
358 }
359 static inline void
360 VMP_CS_SET_TAINTED(
361 	vm_page_t p,
362 	vm_map_size_t fault_page_size,
363 	vm_map_offset_t fault_phys_offset,
364 	boolean_t value)
365 {
366 	assertf(fault_page_size <= PAGE_SIZE,
367 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
368 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
369 	if (value) {
370 		if (fault_page_size == PAGE_SIZE) {
371 			p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
372 		}
373 		p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
374 	} else {
375 		if (fault_page_size == PAGE_SIZE) {
376 			p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
377 		}
378 		p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
379 	}
380 }
381 static inline void
382 VMP_CS_SET_NX(
383 	vm_page_t p,
384 	vm_map_size_t fault_page_size,
385 	vm_map_offset_t fault_phys_offset,
386 	boolean_t value)
387 {
388 	assertf(fault_page_size <= PAGE_SIZE,
389 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
390 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
391 	if (value) {
392 		if (fault_page_size == PAGE_SIZE) {
393 			p->vmp_cs_nx = VMP_CS_ALL_TRUE;
394 		}
395 		p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
396 	} else {
397 		if (fault_page_size == PAGE_SIZE) {
398 			p->vmp_cs_nx = VMP_CS_ALL_FALSE;
399 		}
400 		p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
401 	}
402 }
403 
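/*
 * Hypothetical usage sketch, not taken from this file: after the code
 * signature of the 4 KB granule that faulted inside a larger kernel page
 * has been verified, record the result for just that granule.  The 4 KB
 * granule size is derived from FOURK_PAGE_SHIFT, matching the helpers
 * above; the function name is illustrative.
 */
static inline void
vm_page_example_mark_granule_validated(
	vm_page_t       p,
	vm_map_offset_t fault_phys_offset)
{
	vm_map_size_t granule = (vm_map_size_t)1 << FOURK_PAGE_SHIFT;

	VMP_CS_SET_VALIDATED(p, granule, fault_phys_offset, TRUE);
	assert(VMP_CS_VALIDATED(p, granule, fault_phys_offset));
}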
404 
405 #if defined(__arm__) || defined(__arm64__)
406 
407 extern  unsigned int vm_first_phys_ppnum;
408 
409 struct vm_page_with_ppnum {
410 	struct  vm_page vm_page_wo_ppnum;
411 
412 	ppnum_t vmp_phys_page;
413 };
414 typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;
415 
416 
417 static inline ppnum_t
418 VM_PAGE_GET_PHYS_PAGE(vm_page_t m)
419 {
420 	if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) {
421 		return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum);
422 	} else {
423 		return ((vm_page_with_ppnum_t)m)->vmp_phys_page;
424 	}
425 }
426 
427 #define VM_PAGE_SET_PHYS_PAGE(m, ppnum)         \
428 	MACRO_BEGIN                             \
429 	if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr)     \
430 	        ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum;     \
431 	assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m));              \
432 	MACRO_END
433 
434 #define VM_PAGE_GET_COLOR(m)    (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask)
435 
436 #else   /* defined(__arm__) || defined(__arm64__) */
437 
438 
439 struct vm_page_with_ppnum {
440 	struct  vm_page vm_page_with_ppnum;
441 };
442 typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;
443 
444 
445 #define VM_PAGE_GET_PHYS_PAGE(page)     (page)->vmp_phys_page
446 #define VM_PAGE_SET_PHYS_PAGE(page, ppnum)      \
447 	MACRO_BEGIN                             \
448 	(page)->vmp_phys_page = ppnum;          \
449 	MACRO_END
450 
451 #define VM_PAGE_GET_CLUMP(m)    ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
452 #define VM_PAGE_GET_COLOR(m)    ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)
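
/*
 * Worked example, illustrative only and not in the original header: with
 * 4 KB pages grouped into 16 KB clumps (i.e. vm_clump_shift == 2, the
 * configuration assumed by vm_page_queue_enter_clump() below), physical
 * pages 0x1230 through 0x1233 all map to clump 0x48c, so they are kept
 * together on the free list and share a color.
 */
static inline ppnum_t
vm_page_example_clump_of(vm_page_t m)
{
	extern unsigned int vm_clump_shift;     /* same local-extern style used below */

	return VM_PAGE_GET_CLUMP(m);            /* e.g. 0x1230 >> 2 == 0x48c */
}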
453 
454 #endif  /* defined(__arm__) || defined(__arm64__) */
455 
456 
457 
458 #if defined(__LP64__)
459 /*
460  * Parameters for pointer packing
461  *
462  *
463  * VM page pointers might point to:
464  *
465  * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
466  *
467  * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
468  *
469  * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
470  *    aligned).
471  *
472  *
473  * The current scheme uses 31 bits of storage and 6 bits of shift using the
474  * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
475  * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
476  *
477  * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
478  */
479 #define VM_VPLQ_ALIGNMENT               128
480 #define VM_PAGE_PACKED_PTR_ALIGNMENT    64              /* must be a power of 2 */
481 #define VM_PAGE_PACKED_ALIGNED          __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
482 #define VM_PAGE_PACKED_PTR_BITS         31
483 #define VM_PAGE_PACKED_PTR_SHIFT        6
484 #define VM_PAGE_PACKED_PTR_BASE         ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)
485 
486 #define VM_PAGE_PACKED_FROM_ARRAY       0x80000000
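
/*
 * Compile-time restatement of the reach described above, added purely as an
 * illustrative check: 31 payload bits shifted left by 6 span a 2^37-byte
 * window, i.e. 128 GB starting at VM_PAGE_PACKED_PTR_BASE.  The 32nd bit is
 * reserved for VM_PAGE_PACKED_FROM_ARRAY.
 */
_Static_assert(((1ULL << VM_PAGE_PACKED_PTR_BITS) << VM_PAGE_PACKED_PTR_SHIFT) ==
    (128ULL << 30), "packed vm_page pointers reach 128 GB");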
487 
488 static inline vm_page_packed_t
489 vm_page_pack_ptr(uintptr_t p)
490 {
491 	if (p >= (uintptr_t)vm_page_array_beginning_addr &&
492 	    p < (uintptr_t)vm_page_array_ending_addr) {
493 		ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr;
494 		assert((vm_page_t)p == &vm_pages[diff]);
495 		return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
496 	}
497 
498 	VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
499 	vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
500 	return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
501 }
502 
503 
504 static inline uintptr_t
505 vm_page_unpack_ptr(uintptr_t p)
506 {
507 	extern unsigned int vm_pages_count;
508 
509 	if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
510 		p &= ~VM_PAGE_PACKED_FROM_ARRAY;
511 		assert(p < (uintptr_t)vm_pages_count);
512 		return (uintptr_t)&vm_pages[p];
513 	}
514 
515 	return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
516 }
517 
518 
519 #define VM_PAGE_PACK_PTR(p)     vm_page_pack_ptr((uintptr_t)(p))
520 #define VM_PAGE_UNPACK_PTR(p)   vm_page_unpack_ptr((uintptr_t)(p))
521 
522 #define VM_OBJECT_PACK(o)       ((vm_page_object_t)VM_PACK_POINTER((uintptr_t)(o), VM_PAGE_PACKED_PTR))
523 #define VM_OBJECT_UNPACK(p)     ((vm_object_t)VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR))
524 
525 #define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
526 #define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)
527 
528 
529 #define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
530 MACRO_BEGIN                             \
531 	(p)->vmp_snext = 0;             \
532 MACRO_END
533 
534 
535 #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       VM_PAGE_PACK_PTR(p)
536 
537 
538 static __inline__ void
539 vm_page_enqueue_tail(
540 	vm_page_queue_t         que,
541 	vm_page_queue_entry_t   elt)
542 {
543 	vm_page_queue_entry_t   old_tail;
544 
545 	old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
546 	elt->next = VM_PAGE_PACK_PTR(que);
547 	elt->prev = que->prev;
548 	que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
549 }
550 
551 
552 static __inline__ void
553 vm_page_remque(
554 	vm_page_queue_entry_t elt)
555 {
556 	vm_page_queue_entry_t next;
557 	vm_page_queue_entry_t prev;
558 	vm_page_packed_t      next_pck = elt->next;
559 	vm_page_packed_t      prev_pck = elt->prev;
560 
561 	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);
562 
563 	/* next may equal prev (and the queue head) if elt was the only element */
564 	prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);
565 
566 	next->prev = prev_pck;
567 	prev->next = next_pck;
568 
569 	elt->next = 0;
570 	elt->prev = 0;
571 }
572 
573 
574 /*
575  *	Macro:	vm_page_queue_init
576  *	Function:
577  *		Initialize the given queue.
578  *	Header:
579  *	void vm_page_queue_init(q)
580  *		vm_page_queue_t	q;	\* MODIFIED *\
581  */
582 #define vm_page_queue_init(q)               \
583 MACRO_BEGIN                                 \
584 	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
585 	(q)->next = VM_PAGE_PACK_PTR(q);        \
586 	(q)->prev = VM_PAGE_PACK_PTR(q);        \
587 MACRO_END
588 
589 
590 /*
591  * Macro: vm_page_queue_enter
592  * Function:
593  *     Insert a new element at the tail of the vm_page queue.
594  * Header:
595  *     void vm_page_queue_enter(q, elt, field)
596  *         queue_t q;
597  *         vm_page_t elt;
598  *         <field> is the list field in vm_page_t
599  *
600  * This macro's arguments have to match the generic "queue_enter()" macro which is
601  * what is used for this on 32 bit kernels.
602  */
603 #define vm_page_queue_enter(head, elt, field)                       \
604 MACRO_BEGIN                                                         \
605 	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
606 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
607 	vm_page_packed_t __pck_prev = (head)->prev;                 \
608                                                                     \
609 	if (__pck_head == __pck_prev) {                             \
610 	        (head)->next = __pck_elt;                           \
611 	} else {                                                    \
612 	        vm_page_t __prev;                                   \
613 	        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
614 	        __prev->field.next = __pck_elt;                     \
615 	}                                                           \
616 	(elt)->field.prev = __pck_prev;                             \
617 	(elt)->field.next = __pck_head;                             \
618 	(head)->prev = __pck_elt;                                   \
619 MACRO_END
620 
621 
622 #if defined(__x86_64__)
623 /*
624  * These are helper macros for vm_page_queue_enter_clump to assist
625  * with conditional compilation (release / debug / development)
626  */
627 #if DEVELOPMENT || DEBUG
628 
629 #define __DEBUG_CHECK_BUDDIES(__prev, __p, field)                                             \
630 MACRO_BEGIN                                                                                   \
631 	if (__prev != NULL) {                                                                 \
632 	        assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next));                   \
633 	        assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
634 	}                                                                                     \
635 MACRO_END
636 
637 #define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)                    \
638 MACRO_BEGIN                                                                     \
639 	unsigned int __i;                                                       \
640 	vm_page_queue_entry_t __tmp;                                            \
641 	for (__i = 0, __tmp = __first; __i < __n_free; __i++) {                 \
642 	        __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
643 	}                                                                       \
644 	assert(__tmp == __last_next);                                           \
645 MACRO_END
646 
647 #define __DEBUG_STAT_INCREMENT_INRANGE              vm_clump_inrange++
648 #define __DEBUG_STAT_INCREMENT_INSERTS              vm_clump_inserts++
649 #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)   vm_clump_promotes+=__n_free
650 
651 #else
652 
653 #define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
654 #define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
655 #define __DEBUG_STAT_INCREMENT_INRANGE
656 #define __DEBUG_STAT_INCREMENT_INSERTS
657 #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)
658 
659 #endif  /* if DEVELOPMENT || DEBUG */
660 
661 /*
662  * Insert a new page into a free queue and clump pages within the same 16K boundary together
663  */
664 static inline void
665 vm_page_queue_enter_clump(
666 	vm_page_queue_t       head,
667 	vm_page_t             elt)
668 {
669 	vm_page_queue_entry_t first = NULL;    /* first page in the clump */
670 	vm_page_queue_entry_t last = NULL;     /* last page in the clump */
671 	vm_page_queue_entry_t prev = NULL;
672 	vm_page_queue_entry_t next;
673 	uint_t                n_free = 1;
674 	extern unsigned int   vm_pages_count;
675 	extern unsigned int   vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
676 	extern unsigned long  vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
677 
678 	/*
679 	 * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
680 	 */
681 	if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
682 		vm_page_t p;
683 		uint_t    i;
684 		uint_t    n;
685 		ppnum_t   clump_num;
686 
687 		first = last = (vm_page_queue_entry_t)elt;
688 		clump_num = VM_PAGE_GET_CLUMP(elt);
689 		n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;
690 
691 		/*
692 		 * Check for preceding vm_pages[] entries in the same chunk
693 		 */
694 		for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
695 			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
696 				if (prev == NULL) {
697 					prev = (vm_page_queue_entry_t)p;
698 				}
699 				first = (vm_page_queue_entry_t)p;
700 				n_free++;
701 			}
702 		}
703 
704 		/*
705 		 * Check the following vm_pages[] entries in the same chunk
706 		 */
707 		for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
708 			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
709 				if (last == (vm_page_queue_entry_t)elt) {               /* first one only */
710 					__DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
711 				}
712 
713 				if (prev == NULL) {
714 					prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
715 				}
716 				last = (vm_page_queue_entry_t)p;
717 				n_free++;
718 			}
719 		}
720 		__DEBUG_STAT_INCREMENT_INRANGE;
721 	}
722 
723 	/* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
724 	if (prev == NULL) {
725 		prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
726 	}
727 
728 	/* insert the element */
729 	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
730 	elt->vmp_pageq.next = prev->next;
731 	elt->vmp_pageq.prev = next->prev;
732 	prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
733 	__DEBUG_STAT_INCREMENT_INSERTS;
734 
735 	/*
736 	 * Check if clump needs to be promoted to head.
737 	 */
738 	if (n_free >= vm_clump_promote_threshold && n_free > 1) {
739 		vm_page_queue_entry_t first_prev;
740 
741 		first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);
742 
743 		/* If not at head already */
744 		if (first_prev != head) {
745 			vm_page_queue_entry_t last_next;
746 			vm_page_queue_entry_t head_next;
747 
748 			last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);
749 
750 			/* verify that the links within the clump are consistent */
751 			__DEBUG_VERIFY_LINKS(first, n_free, last_next);
752 
753 			/* promote clump to head */
754 			first_prev->next = last->next;
755 			last_next->prev = first->prev;
756 			first->prev = VM_PAGE_PACK_PTR(head);
757 			last->next = head->next;
758 
759 			head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
760 			head_next->prev = VM_PAGE_PACK_PTR(last);
761 			head->next = VM_PAGE_PACK_PTR(first);
762 			__DEBUG_STAT_INCREMENT_PROMOTES(n_free);
763 		}
764 	}
765 }
766 #endif
767 
768 /*
769  * Macro: vm_page_queue_enter_first
770  * Function:
771  *     Insert a new element at the head of the vm_page queue.
772  * Header:
773  *     void vm_page_queue_enter_first(q, elt, field)
774  *         queue_t q;
775  *         vm_page_t elt;
776  *         <field> is the linkage field in vm_page
777  *
778  * This macro's arguments have to match the generic "queue_enter_first()" macro which is
779  * what is used for this on 32 bit kernels.
780  */
781 #define vm_page_queue_enter_first(head, elt, field)                 \
782 MACRO_BEGIN                                                         \
783 	vm_page_packed_t __pck_next = (head)->next;                 \
784 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
785 	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
786                                                                     \
787 	if (__pck_head == __pck_next) {                             \
788 	        (head)->prev = __pck_elt;                           \
789 	} else {                                                    \
790 	        vm_page_t __next;                                   \
791 	        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
792 	        __next->field.prev = __pck_elt;                     \
793 	}                                                           \
794                                                                     \
795 	(elt)->field.next = __pck_next;                             \
796 	(elt)->field.prev = __pck_head;                             \
797 	(head)->next = __pck_elt;                                   \
798 MACRO_END
799 
800 
801 /*
802  * Macro:	vm_page_queue_remove
803  * Function:
804  *     Remove an arbitrary page from a vm_page queue.
805  * Header:
806  *     void vm_page_queue_remove(q, qe, field)
807  *         arguments as in vm_page_queue_enter
808  *
809  * This macro's arguments have to match the generic "queue_remove()" macro which is
810  * what is used for this on 32 bit kernels.
811  */
812 #define vm_page_queue_remove(head, elt, field)                          \
813 MACRO_BEGIN                                                             \
814 	vm_page_packed_t __pck_next = (elt)->field.next;                \
815 	vm_page_packed_t __pck_prev = (elt)->field.prev;                \
816 	vm_page_t        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
817 	vm_page_t        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
818                                                                         \
819 	if ((void *)(head) == (void *)__next) {                         \
820 	        (head)->prev = __pck_prev;                              \
821 	} else {                                                        \
822 	        __next->field.prev = __pck_prev;                        \
823 	}                                                               \
824                                                                         \
825 	if ((void *)(head) == (void *)__prev) {                         \
826 	        (head)->next = __pck_next;                              \
827 	} else {                                                        \
828 	        __prev->field.next = __pck_next;                        \
829 	}                                                               \
830                                                                         \
831 	(elt)->field.next = 0;                                          \
832 	(elt)->field.prev = 0;                                          \
833 MACRO_END
834 
835 
836 /*
837  * Macro: vm_page_queue_remove_first
838  *
839  * Function:
840  *     Remove and return the entry at the head of a vm_page queue.
841  *
842  * Header:
843  *     vm_page_queue_remove_first(head, entry, field)
844  *     N.B. entry is returned by reference
845  *
846  * This macro's arguments have to match the generic "queue_remove_first()" macro which is
847  * what is used for this on 32 bit kernels.
848  */
849 #define vm_page_queue_remove_first(head, entry, field)            \
850 MACRO_BEGIN                                                       \
851 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);     \
852 	vm_page_packed_t __pck_next;                              \
853 	vm_page_t        __next;                                  \
854                                                                   \
855 	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);    \
856 	__pck_next = (entry)->field.next;                         \
857 	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);       \
858                                                                   \
859 	if (__pck_head == __pck_next) {                           \
860 	        (head)->prev = __pck_head;                        \
861 	} else {                                                  \
862 	        __next->field.prev = __pck_head;                  \
863 	}                                                         \
864                                                                   \
865 	(head)->next = __pck_next;                                \
866 	(entry)->field.next = 0;                                  \
867 	(entry)->field.prev = 0;                                  \
868 MACRO_END
869 
870 
871 #if defined(__x86_64__)
872 /*
873  * Macro:  vm_page_queue_remove_first_with_clump
874  * Function:
875  *     Remove and return the entry at the head of the free queue
876  *     end is set to 1 to indicate that we just returned the last page in a clump
877  *
878  * Header:
879  *     vm_page_queue_remove_first_with_clump(head, entry, end)
880  *     entry is returned by reference
881  *     end is returned by reference
882  */
883 #define vm_page_queue_remove_first_with_clump(head, entry, end)              \
884 MACRO_BEGIN                                                                  \
885 	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);                \
886 	vm_page_packed_t __pck_next;                                         \
887 	vm_page_t        __next;                                             \
888                                                                              \
889 	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);               \
890 	__pck_next = (entry)->vmp_pageq.next;                                \
891 	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);                  \
892                                                                              \
893 	(end) = 0;                                                           \
894 	if (__pck_head == __pck_next) {                                      \
895 	        (head)->prev = __pck_head;                                   \
896 	        (end) = 1;                                                   \
897 	} else {                                                             \
898 	        __next->vmp_pageq.prev = __pck_head;                         \
899 	        if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
900 	                (end) = 1;                                           \
901 	        }                                                            \
902 	}                                                                    \
903                                                                              \
904 	(head)->next = __pck_next;                                           \
905 	(entry)->vmp_pageq.next = 0;                                         \
906 	(entry)->vmp_pageq.prev = 0;                                         \
907 MACRO_END
908 #endif
909 
910 /*
911  *	Macro:	vm_page_queue_end
912  *	Function:
913  *	Tests whether a new entry is really the end of
914  *		the queue.
915  *	Header:
916  *		boolean_t vm_page_queue_end(q, qe)
917  *			vm_page_queue_t q;
918  *			vm_page_queue_entry_t qe;
919  */
920 #define vm_page_queue_end(q, qe)        ((q) == (qe))
921 
922 
923 /*
924  *	Macro:	vm_page_queue_empty
925  *	Function:
926  *		Tests whether a queue is empty.
927  *	Header:
928  *		boolean_t vm_page_queue_empty(q)
929  *			vm_page_queue_t q;
930  */
931 #define vm_page_queue_empty(q)          vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
932 
933 
934 
935 /*
936  *	Macro:	vm_page_queue_first
937  *	Function:
938  *		Returns the first entry in the queue.
939  *	Header:
940  *		uintptr_t vm_page_queue_first(q)
941  *			vm_page_queue_t q;	\* IN *\
942  */
943 #define vm_page_queue_first(q)          (VM_PAGE_UNPACK_PTR((q)->next))
944 
945 
946 
947 /*
948  *	Macro:		vm_page_queue_last
949  *	Function:
950  *		Returns the last entry in the queue.
951  *	Header:
952  *		vm_page_queue_entry_t queue_last(q)
953  *			queue_t	q;		\* IN *\
954  */
955 #define vm_page_queue_last(q)           (VM_PAGE_UNPACK_PTR((q)->prev))
956 
957 
958 
959 /*
960  *	Macro:	vm_page_queue_next
961  *	Function:
962  *		Returns the entry after an item in the queue.
963  *	Header:
964  *		uintptr_t vm_page_queue_next(qc)
965  *			vm_page_queue_t qc;
966  */
967 #define vm_page_queue_next(qc)          (VM_PAGE_UNPACK_PTR((qc)->next))
968 
969 
970 
971 /*
972  *	Macro:	vm_page_queue_prev
973  *	Function:
974  *		Returns the entry before an item in the queue.
975  *	Header:
976  *		uintptr_t vm_page_queue_prev(qc)
977  *			vm_page_queue_t qc;
978  */
979 #define vm_page_queue_prev(qc)          (VM_PAGE_UNPACK_PTR((qc)->prev))
980 
981 
982 
983 /*
984  *	Macro:	vm_page_queue_iterate
985  *	Function:
986  *		iterate over each item in a vm_page queue.
987  *		Generates a 'for' loop, setting elt to
988  *		each item in turn (by reference).
989  *	Header:
990  *		vm_page_queue_iterate(q, elt, field)
991  *			queue_t q;
992  *			vm_page_t elt;
993  *			<field> is the chain field in vm_page_t
994  */
995 #define vm_page_queue_iterate(head, elt, field)                       \
996 	for ((elt) = (vm_page_t)vm_page_queue_first(head);            \
997 	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
998 	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))     \
999 
1000 #else // LP64
1001 
1002 #define VM_VPLQ_ALIGNMENT               128
1003 #define VM_PAGE_PACKED_PTR_ALIGNMENT    sizeof(vm_offset_t)
1004 #define VM_PAGE_PACKED_ALIGNED
1005 #define VM_PAGE_PACKED_PTR_BITS         32
1006 #define VM_PAGE_PACKED_PTR_SHIFT        0
1007 #define VM_PAGE_PACKED_PTR_BASE         0
1008 
1009 #define VM_PAGE_PACKED_FROM_ARRAY       0
1010 
1011 #define VM_PAGE_PACK_PTR(p)     (p)
1012 #define VM_PAGE_UNPACK_PTR(p)   ((uintptr_t)(p))
1013 
1014 #define VM_OBJECT_PACK(o)       ((vm_page_object_t)(o))
1015 #define VM_OBJECT_UNPACK(p)     ((vm_object_t)(p))
1016 
1017 #define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)
1018 #define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
1019 
1020 
1021 #define VM_PAGE_ZERO_PAGEQ_ENTRY(p)     \
1022 MACRO_BEGIN                             \
1023 	(p)->vmp_pageq.next = 0;                \
1024 	(p)->vmp_pageq.prev = 0;                \
1025 MACRO_END
1026 
1027 #define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)   ((queue_entry_t)(p))
1028 
1029 #define vm_page_remque                      remque
1030 #define vm_page_enqueue_tail                enqueue_tail
1031 #define vm_page_queue_init                  queue_init
1032 #define vm_page_queue_enter(h, e, f)        queue_enter(h, e, vm_page_t, f)
1033 #define vm_page_queue_enter_first(h, e, f)  queue_enter_first(h, e, vm_page_t, f)
1034 #define vm_page_queue_remove(h, e, f)       queue_remove(h, e, vm_page_t, f)
1035 #define vm_page_queue_remove_first(h, e, f) queue_remove_first(h, e, vm_page_t, f)
1036 #define vm_page_queue_end                   queue_end
1037 #define vm_page_queue_empty                 queue_empty
1038 #define vm_page_queue_first                 queue_first
1039 #define vm_page_queue_last                  queue_last
1040 #define vm_page_queue_next                  queue_next
1041 #define vm_page_queue_prev                  queue_prev
1042 #define vm_page_queue_iterate(h, e, f)      queue_iterate(h, e, vm_page_t, f)
1043 
1044 #endif // __LP64__
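
/*
 * Illustrative usage of the vm_page queue API above (hypothetical helper,
 * not part of the original header).  The caller is assumed to hold the
 * appropriate page-queues lock and to pass a queue head living in
 * pointer-packable memory (e.g. a VM_PAGE_PACKED_ALIGNED global), as
 * vm_page_queue_init() requires on LP64.
 */
static inline unsigned int
vm_page_example_queue_usage(vm_page_queue_head_t *q, vm_page_t m)
{
	vm_page_t    p;
	unsigned int count = 0;

	vm_page_queue_init(q);                   /* empty, circular queue     */
	vm_page_queue_enter(q, m, vmp_pageq);    /* append at the tail        */

	vm_page_queue_iterate(q, p, vmp_pageq) { /* FIFO walk over all pages  */
		count++;
	}

	vm_page_queue_remove(q, m, vmp_pageq);   /* unlink and zero the links */
	return count;
}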
1045 
1046 
1047 
1048 /*
1049  * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
1050  * represents a set of aging bins that are 'protected'...
1051  *
1052  * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
1053  * not yet been 'claimed' but have been aged out of the protective bins
1054  * this occurs in vm_page_speculate when it advances to the next bin
1055  * and discovers that it is still occupied... at that point, all of the
1056  * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
1057  * in that bin are all guaranteed to have reached at least the maximum age
1058  * we allow for a protected page... they can be older if there is no
1059  * memory pressure to pull them from the bin, or there are no new speculative pages
1060  * being generated to push them out.
1061  * this list is the one that vm_pageout_scan will prefer when looking
1062  * for pages to move to the underweight free list
1063  *
1064  * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
1065  * defines the amount of time a speculative page is normally
1066  * allowed to live in the 'protected' state (i.e. not available
1067  * to be stolen if vm_pageout_scan is running and looking for
1068  * pages)...  however, if the total number of speculative pages
1069  * in the protected state exceeds our limit (defined in vm_pageout.c)
1070  * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
1071  * vm_pageout_scan is allowed to steal pages from the protected
1072  * bucket even if they are underage.
1073  *
1074  * vm_pageout_scan is also allowed to pull pages from a protected
1075  * bin if the bin has reached the "age of consent" we've set
1076  */
1077 #define VM_PAGE_MAX_SPECULATIVE_AGE_Q   10
1078 #define VM_PAGE_MIN_SPECULATIVE_AGE_Q   1
1079 #define VM_PAGE_SPECULATIVE_AGED_Q      0
1080 
1081 #define VM_PAGE_SPECULATIVE_Q_AGE_MS    500
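
/*
 * Worked example of the defaults above (illustrative helper, not in the
 * original header): a speculative page normally stays protected for
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * = 10 * 500 ms = 5 seconds before aging into VM_PAGE_SPECULATIVE_AGED_Q.
 */
static inline uint32_t
vm_page_example_speculative_protected_ms(void)
{
	return VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS;
}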
1082 
1083 struct vm_speculative_age_q {
1084 	/*
1085 	 * memory queue for speculative pages via clustered pageins
1086 	 */
1087 	vm_page_queue_head_t    age_q;
1088 	mach_timespec_t age_ts;
1089 } VM_PAGE_PACKED_ALIGNED;
1090 
1091 
1092 
1093 extern
1094 struct vm_speculative_age_q     vm_page_queue_speculative[];
1095 
1096 extern int                      speculative_steal_index;
1097 extern int                      speculative_age_index;
1098 extern unsigned int             vm_page_speculative_q_age_ms;
1099 
1100 
1101 typedef struct vm_locks_array {
1102 	char    pad  __attribute__ ((aligned(64)));
1103 	lck_mtx_t       vm_page_queue_lock2 __attribute__ ((aligned(64)));
1104 	lck_mtx_t       vm_page_queue_free_lock2 __attribute__ ((aligned(64)));
1105 	char    pad2  __attribute__ ((aligned(64)));
1106 } vm_locks_array_t;
1107 
1108 
1109 #if CONFIG_BACKGROUND_QUEUE
1110 extern  void    vm_page_assign_background_state(vm_page_t mem);
1111 extern  void    vm_page_update_background_state(vm_page_t mem);
1112 extern  void    vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first);
1113 extern  void    vm_page_remove_from_backgroundq(vm_page_t mem);
1114 #endif
1115 
1116 #define VM_PAGE_WIRED(m)        ((m)->vmp_q_state == VM_PAGE_IS_WIRED)
1117 #define NEXT_PAGE(m)            ((m)->vmp_snext)
1118 #define NEXT_PAGE_PTR(m)        (&(m)->vmp_snext)
1119 
1120 /*
1121  * XXX	The unusual bit should not be necessary.  Most of the bit
1122  * XXX	fields above really want to be masks.
1123  */
1124 
1125 /*
1126  *	For debugging, this macro can be defined to perform
1127  *	some useful check on a page structure.
1128  *	INTENTIONALLY left as a no-op so that the
1129  *	current call-sites can be left intact for future uses.
1130  */
1131 
1132 #define VM_PAGE_CHECK(mem)                      \
1133 	MACRO_BEGIN                             \
1134 	MACRO_END
1135 
1136 /*     Page coloring:
1137  *
1138  *     The free page list is actually n lists, one per color,
1139  *     where the number of colors is a function of the machine's
1140  *     cache geometry set at system initialization.  To disable
1141  *     coloring, set vm_colors to 1 and vm_color_mask to 0.
1142  *     The boot-arg "colors" may be used to override vm_colors.
1143  *     Note that there is little harm in having more colors than needed.
1144  */
1145 
1146 #define MAX_COLORS      128
1147 #define DEFAULT_COLORS  32
1148 
1149 extern
1150 unsigned int    vm_colors;              /* must be in range 1..MAX_COLORS */
1151 extern
1152 unsigned int    vm_color_mask;          /* must be (vm_colors-1) */
1153 extern
1154 unsigned int    vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
1155 
1156 /*
1157  * Wired memory is a very limited resource and we can't let users exhaust it
1158  * and deadlock the entire system.  We enforce the following limits:
1159  *
1160  * vm_per_task_user_wire_limit
1161  *      how much memory can be user-wired in one user task
1162  *
1163  * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
1164  *      how much memory can be user-wired in all user tasks
1165  *
1166  * These values are set to defaults based on the number of pages managed
1167  * by the VM system. They can be overridden via sysctls.
1168  * See kmem_set_user_wire_limits for details on the default values.
1169  *
1170  * Regardless of the amount of memory in the system, we never reserve
1171  * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
1172  */
1173 #if defined(__LP64__)
1174 #define VM_NOT_USER_WIREABLE_MAX (32ULL*1024*1024*1024)     /* 32GB */
1175 #else
1176 #define VM_NOT_USER_WIREABLE_MAX (1UL*1024*1024*1024)     /* 1GB */
1177 #endif /* __LP64__ */
1178 extern
1179 vm_map_size_t   vm_per_task_user_wire_limit;
1180 extern
1181 vm_map_size_t   vm_global_user_wire_limit;
1182 extern
1183 uint64_t        vm_add_wire_count_over_global_limit;
1184 extern
1185 uint64_t        vm_add_wire_count_over_user_limit;
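
/*
 * Illustrative sketch only; the real enforcement lives in the VM map wiring
 * path, not in this header.  This hypothetical helper shows how the two
 * limits conceptually gate a user wiring request of 'size' bytes.
 */
static inline boolean_t
vm_page_example_user_wire_allowed(
	vm_map_size_t task_user_wired,       /* bytes already wired by this task */
	vm_map_size_t global_user_wired,     /* bytes wired across all user tasks */
	vm_map_size_t size)
{
	if (task_user_wired + size > vm_per_task_user_wire_limit) {
		return FALSE;                /* would exceed the per-task cap */
	}
	if (global_user_wired + size > vm_global_user_wire_limit) {
		return FALSE;                /* would exceed the global cap */
	}
	return TRUE;
}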
1186 
1187 /*
1188  *	Each pageable resident page falls into one of three lists:
1189  *
1190  *	free
1191  *		Available for allocation now.  The free list is
1192  *		actually an array of lists, one per color.
1193  *	inactive
1194  *		Not referenced in any map, but still has an
1195  *		object/offset-page mapping, and may be dirty.
1196  *		This is the list of pages that should be
1197  *		paged out next.  There are actually two
1198  *		inactive lists, one for pages brought in from
1199  *		disk or other backing store, and another
1200  *		for "zero-filled" pages.  See vm_pageout_scan()
1201  *		for the distinction and usage.
1202  *	active
1203  *		A list of pages which have been placed in
1204  *		at least one physical map.  This list is
1205  *		ordered, in LRU-like fashion.
1206  */
1207 
1208 
1209 #define VPL_LOCK_SPIN 1
1210 
1211 struct vpl {
1212 	vm_page_queue_head_t    vpl_queue;
1213 	unsigned int    vpl_count;
1214 	unsigned int    vpl_internal_count;
1215 	unsigned int    vpl_external_count;
1216 #ifdef  VPL_LOCK_SPIN
1217 	lck_spin_t      vpl_lock;
1218 #else
1219 	lck_mtx_t       vpl_lock;
1220 	lck_mtx_ext_t   vpl_lock_ext;
1221 #endif
1222 };
1223 
1224 extern
1225 struct vpl     * /* __zpercpu */ vm_page_local_q;
1226 extern
1227 unsigned int    vm_page_local_q_soft_limit;
1228 extern
1229 unsigned int    vm_page_local_q_hard_limit;
1230 extern
1231 vm_locks_array_t vm_page_locks;
1232 
1233 extern
1234 vm_page_queue_head_t    vm_lopage_queue_free;           /* low memory free queue */
1235 extern
1236 vm_page_queue_head_t    vm_page_queue_active;   /* active memory queue */
1237 extern
1238 vm_page_queue_head_t    vm_page_queue_inactive; /* inactive memory queue for normal pages */
1239 #if CONFIG_SECLUDED_MEMORY
1240 extern
1241 vm_page_queue_head_t    vm_page_queue_secluded; /* reclaimable pages secluded for Camera */
1242 #endif /* CONFIG_SECLUDED_MEMORY */
1243 extern
1244 vm_page_queue_head_t    vm_page_queue_cleaned; /* clean-queue inactive memory */
1245 extern
1246 vm_page_queue_head_t    vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
1247 extern
1248 vm_page_queue_head_t    vm_page_queue_throttled;        /* memory queue for throttled pageout pages */
1249 
1250 extern
1251 queue_head_t    vm_objects_wired;
1252 extern
1253 lck_spin_t      vm_objects_wired_lock;
1254 
1255 #if CONFIG_BACKGROUND_QUEUE
1256 
1257 #define VM_PAGE_BACKGROUND_TARGET_MAX   50000
1258 
1259 #define VM_PAGE_BG_DISABLED     0
1260 #define VM_PAGE_BG_LEVEL_1      1
1261 
1262 extern
1263 vm_page_queue_head_t    vm_page_queue_background;
1264 extern
1265 uint64_t        vm_page_background_promoted_count;
1266 extern
1267 uint32_t        vm_page_background_count;
1268 extern
1269 uint32_t        vm_page_background_target;
1270 extern
1271 uint32_t        vm_page_background_internal_count;
1272 extern
1273 uint32_t        vm_page_background_external_count;
1274 extern
1275 uint32_t        vm_page_background_mode;
1276 extern
1277 uint32_t        vm_page_background_exclude_external;
1278 
1279 #endif
1280 
1281 extern
1282 vm_offset_t     first_phys_addr;        /* physical address for first_page */
1283 extern
1284 vm_offset_t     last_phys_addr;         /* physical address for last_page */
1285 
1286 extern
1287 unsigned int    vm_page_free_count;     /* How many pages are free? (sum of all colors) */
1288 extern
1289 unsigned int    vm_page_active_count;   /* How many pages are active? */
1290 extern
1291 unsigned int    vm_page_inactive_count; /* How many pages are inactive? */
1292 extern
1293 unsigned int vm_page_kernelcache_count; /* How many pages are used for the kernelcache? */
1294 #if CONFIG_SECLUDED_MEMORY
1295 extern
1296 unsigned int    vm_page_secluded_count; /* How many pages are secluded? */
1297 extern
1298 unsigned int    vm_page_secluded_count_free; /* how many of them are free? */
1299 extern
1300 unsigned int    vm_page_secluded_count_inuse; /* how many of them are in use? */
1301 /*
1302  * We keep filling the secluded pool with new eligible pages and
1303  * we can overshoot our target by a lot.
1304  * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
1305  * pushing the extra secluded pages to the active or free queue.
1306  * Since these "over target" secluded pages are actually "available", jetsam
1307  * should consider them as such, so make them visible to jetsam via the
1308  * "vm_page_secluded_count_over_target" counter and update it whenever we
1309  * update vm_page_secluded_count or vm_page_secluded_target.
1310  */
1311 extern
1312 unsigned int    vm_page_secluded_count_over_target;
1313 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE()                     \
1314 	MACRO_BEGIN                                                     \
1315 	if (vm_page_secluded_count > vm_page_secluded_target) {         \
1316 	        vm_page_secluded_count_over_target =                    \
1317 	                (vm_page_secluded_count - vm_page_secluded_target); \
1318 	} else {                                                        \
1319 	        vm_page_secluded_count_over_target = 0;                 \
1320 	}                                                               \
1321 	MACRO_END
1322 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() vm_page_secluded_count_over_target
1323 #else /* CONFIG_SECLUDED_MEMORY */
1324 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
1325 	MACRO_BEGIN                                 \
1326 	MACRO_END
1327 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() 0
1328 #endif /* CONFIG_SECLUDED_MEMORY */
1329 extern
1330 unsigned int    vm_page_cleaned_count; /* How many pages are in the clean queue? */
1331 extern
1332 unsigned int    vm_page_throttled_count;/* How many inactives are throttled */
1333 extern
1334 unsigned int    vm_page_speculative_count;      /* How many speculative pages are unclaimed? */
1335 extern unsigned int     vm_page_pageable_internal_count;
1336 extern unsigned int     vm_page_pageable_external_count;
1337 extern
1338 unsigned int    vm_page_xpmapped_external_count;        /* How many pages are mapped executable? */
1339 extern
1340 unsigned int    vm_page_external_count; /* How many pages are file-backed? */
1341 extern
1342 unsigned int    vm_page_internal_count; /* How many pages are anonymous? */
1343 extern
1344 unsigned int    vm_page_wire_count;             /* How many pages are wired? */
1345 extern
1346 unsigned int    vm_page_wire_count_initial;     /* How many pages wired at startup */
1347 extern
1348 unsigned int    vm_page_wire_count_on_boot;     /* even earlier than _initial */
1349 extern
1350 unsigned int    vm_page_free_target;    /* How many do we want free? */
1351 extern
1352 unsigned int    vm_page_free_min;       /* When to wakeup pageout */
1353 extern
1354 unsigned int    vm_page_throttle_limit; /* When to throttle new page creation */
1355 extern
1356 unsigned int    vm_page_inactive_target;/* How many do we want inactive? */
1357 #if CONFIG_SECLUDED_MEMORY
1358 extern
1359 unsigned int    vm_page_secluded_target;/* How many do we want secluded? */
1360 #endif /* CONFIG_SECLUDED_MEMORY */
1361 extern
1362 unsigned int    vm_page_anonymous_min;  /* When it's ok to pre-clean */
1363 extern
1364 unsigned int    vm_page_free_reserved;  /* How many pages reserved to do pageout */
1365 extern
1366 unsigned int    vm_page_gobble_count;
1367 extern
1368 unsigned int    vm_page_stolen_count;   /* Count of stolen pages not accounted for in zones */
1369 extern
1370 unsigned int    vm_page_kern_lpage_count;   /* Count of large pages used in early boot */
1371 
1372 
1373 #if DEVELOPMENT || DEBUG
1374 extern
1375 unsigned int    vm_page_speculative_used;
1376 #endif
1377 
1378 extern
1379 unsigned int    vm_page_purgeable_count;/* How many pages are purgeable now ? */
1380 extern
1381 unsigned int    vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
1382 extern
1383 uint64_t        vm_page_purged_count;   /* How many pages got purged so far ? */
1384 
1385 extern unsigned int     vm_page_free_wanted;
1386 /* how many threads are waiting for memory */
1387 
1388 extern unsigned int     vm_page_free_wanted_privileged;
1389 /* how many VM privileged threads are waiting for memory */
1390 #if CONFIG_SECLUDED_MEMORY
1391 extern unsigned int     vm_page_free_wanted_secluded;
1392 /* how many threads are waiting for secluded memory */
1393 #endif /* CONFIG_SECLUDED_MEMORY */
1394 
1395 extern const ppnum_t    vm_page_fictitious_addr;
1396 /* (fake) phys_addr of fictitious pages */
1397 
1398 extern const ppnum_t    vm_page_guard_addr;
1399 /* (fake) phys_addr of guard pages */
1400 
1401 
1402 extern boolean_t        vm_page_deactivate_hint;
1403 
1404 extern int              vm_compressor_mode;
1405 
1406 /*
1407  * Defaults to true, so highest memory is used first.
1408  */
1409 extern boolean_t        vm_himemory_mode;
1410 
1411 extern boolean_t        vm_lopage_needed;
1412 extern uint32_t         vm_lopage_free_count;
1413 extern uint32_t         vm_lopage_free_limit;
1414 extern uint32_t         vm_lopage_lowater;
1415 extern boolean_t        vm_lopage_refill;
1416 extern uint64_t         max_valid_dma_address;
1417 extern ppnum_t          max_valid_low_ppnum;
1418 
1419 /*
1420  * Prototypes for functions exported by this module.
1421  */
1422 extern void             vm_page_bootstrap(
1423 	vm_offset_t     *startp,
1424 	vm_offset_t     *endp);
1425 
1426 extern void             vm_page_init_local_q(unsigned int num_cpus);
1427 
1428 extern void             vm_page_create(
1429 	ppnum_t         start,
1430 	ppnum_t         end);
1431 
1432 extern void             vm_page_create_retired(
1433 	ppnum_t         pn);
1434 
1435 extern vm_page_t        kdp_vm_page_lookup(
1436 	vm_object_t             object,
1437 	vm_object_offset_t      offset);
1438 
1439 extern vm_page_t        vm_page_lookup(
1440 	vm_object_t             object,
1441 	vm_object_offset_t      offset);
1442 
1443 extern vm_page_t        vm_page_grab_fictitious(boolean_t canwait);
1444 
1445 extern vm_page_t        vm_page_grab_guard(boolean_t canwait);
1446 
1447 extern void             vm_page_release_fictitious(
1448 	vm_page_t page);
1449 
1450 extern void             vm_free_delayed_pages(void);
1451 
1452 extern bool             vm_pool_low(void);
1453 
1454 extern vm_page_t        vm_page_grab(void);
1455 extern vm_page_t        vm_page_grab_options(int flags);
1456 
1457 #define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000
1458 #if CONFIG_SECLUDED_MEMORY
1459 #define VM_PAGE_GRAB_SECLUDED     0x00000001
1460 #endif /* CONFIG_SECLUDED_MEMORY */
1461 #define VM_PAGE_GRAB_Q_LOCK_HELD  0x00000002
1462 
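/*
 * Hedged usage sketch (illustrative only, not a definition from this header):
 * a caller that already holds the page queues lock might grab a page with
 *
 *	vm_page_t m = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
 *	if (m == VM_PAGE_NULL) {
 *	        // no page available right now; the caller decides whether to
 *	        // VM_PAGE_WAIT() and retry, or fail the operation
 *	}
 *
 * vm_page_grab() is presumably the same as passing VM_PAGE_GRAB_OPTIONS_NONE.
 */
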
1463 extern vm_page_t        vm_page_grablo(void);
1464 
1465 extern void             vm_page_release(
1466 	vm_page_t       page,
1467 	boolean_t       page_queues_locked);
1468 
1469 extern boolean_t        vm_page_wait(
1470 	int             interruptible );
1471 
1472 extern vm_page_t        vm_page_alloc(
1473 	vm_object_t             object,
1474 	vm_object_offset_t      offset);
1475 
1476 extern void             vm_page_init(
1477 	vm_page_t       page,
1478 	ppnum_t         phys_page,
1479 	boolean_t       lopage);
1480 
1481 extern void             vm_page_free(
1482 	vm_page_t       page);
1483 
1484 extern void             vm_page_free_unlocked(
1485 	vm_page_t       page,
1486 	boolean_t       remove_from_hash);
1487 
1488 extern void             vm_page_balance_inactive(
1489 	int             max_to_move);
1490 
1491 extern void             vm_page_activate(
1492 	vm_page_t       page);
1493 
1494 extern void             vm_page_deactivate(
1495 	vm_page_t       page);
1496 
1497 extern void             vm_page_deactivate_internal(
1498 	vm_page_t       page,
1499 	boolean_t       clear_hw_reference);
1500 
1501 extern void             vm_page_enqueue_cleaned(vm_page_t page);
1502 
1503 extern void             vm_page_lru(
1504 	vm_page_t       page);
1505 
1506 extern void             vm_page_speculate(
1507 	vm_page_t       page,
1508 	boolean_t       new);
1509 
1510 extern void             vm_page_speculate_ageit(
1511 	struct vm_speculative_age_q *aq);
1512 
1513 extern void             vm_page_reactivate_all_throttled(void);
1514 
1515 extern void             vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);
1516 
1517 extern void             vm_page_rename(
1518 	vm_page_t               page,
1519 	vm_object_t             new_object,
1520 	vm_object_offset_t      new_offset);
1521 
1522 extern void             vm_page_insert(
1523 	vm_page_t               page,
1524 	vm_object_t             object,
1525 	vm_object_offset_t      offset);
1526 
1527 extern void             vm_page_insert_wired(
1528 	vm_page_t               page,
1529 	vm_object_t             object,
1530 	vm_object_offset_t      offset,
1531 	vm_tag_t                tag);
1532 
1533 extern void             vm_page_insert_internal(
1534 	vm_page_t               page,
1535 	vm_object_t             object,
1536 	vm_object_offset_t      offset,
1537 	vm_tag_t                tag,
1538 	boolean_t               queues_lock_held,
1539 	boolean_t               insert_in_hash,
1540 	boolean_t               batch_pmap_op,
1541 	boolean_t               delayed_accounting,
1542 	uint64_t                *delayed_ledger_update);
1543 
1544 extern void             vm_page_replace(
1545 	vm_page_t               mem,
1546 	vm_object_t             object,
1547 	vm_object_offset_t      offset);
1548 
1549 extern void             vm_page_remove(
1550 	vm_page_t       page,
1551 	boolean_t       remove_from_hash);
1552 
1553 extern void             vm_page_zero_fill(
1554 	vm_page_t       page);
1555 
1556 extern void             vm_page_part_zero_fill(
1557 	vm_page_t       m,
1558 	vm_offset_t     m_pa,
1559 	vm_size_t       len);
1560 
1561 extern void             vm_page_copy(
1562 	vm_page_t       src_page,
1563 	vm_page_t       dest_page);
1564 
1565 extern void             vm_page_part_copy(
1566 	vm_page_t       src_m,
1567 	vm_offset_t     src_pa,
1568 	vm_page_t       dst_m,
1569 	vm_offset_t     dst_pa,
1570 	vm_size_t       len);
1571 
1572 extern void             vm_page_wire(
1573 	vm_page_t       page,
1574 	vm_tag_t        tag,
1575 	boolean_t       check_memorystatus);
1576 
1577 extern void             vm_page_unwire(
1578 	vm_page_t       page,
1579 	boolean_t       queueit);
1580 
1581 extern void             vm_set_page_size(void);
1582 
1583 extern void             vm_page_gobble(
1584 	vm_page_t      page);
1585 
1586 extern void             vm_page_validate_cs(
1587 	vm_page_t       page,
1588 	vm_map_size_t   fault_page_size,
1589 	vm_map_offset_t fault_phys_offset);
1590 extern void             vm_page_validate_cs_mapped(
1591 	vm_page_t       page,
1592 	vm_map_size_t   fault_page_size,
1593 	vm_map_offset_t fault_phys_offset,
1594 	const void      *kaddr);
1595 extern void             vm_page_validate_cs_mapped_slow(
1596 	vm_page_t       page,
1597 	const void      *kaddr);
1598 extern void             vm_page_validate_cs_mapped_chunk(
1599 	vm_page_t       page,
1600 	const void      *kaddr,
1601 	vm_offset_t     chunk_offset,
1602 	vm_size_t       chunk_size,
1603 	boolean_t       *validated,
1604 	unsigned        *tainted);
1605 
1606 extern void             vm_page_free_prepare_queues(
1607 	vm_page_t       page);
1608 
1609 extern void             vm_page_free_prepare_object(
1610 	vm_page_t       page,
1611 	boolean_t       remove_from_hash);
1612 
1613 #if CONFIG_IOSCHED
1614 extern wait_result_t    vm_page_sleep(
1615 	vm_object_t     object,
1616 	vm_page_t       m,
1617 	int     interruptible);
1618 #endif
1619 
1620 extern void vm_pressure_response(void);
1621 
1622 #if CONFIG_JETSAM
1623 extern void memorystatus_pages_update(unsigned int pages_avail);
1624 
1625 #define VM_CHECK_MEMORYSTATUS do { \
1626 	memorystatus_pages_update(              \
1627 	        vm_page_pageable_external_count + \
1628 	        vm_page_free_count +            \
1629 	        VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \
1630 	        (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
1631 	        ); \
1632 	} while(0)
1633 
1634 #else /* CONFIG_JETSAM */
1635 
1636 #if !XNU_TARGET_OS_OSX
1637 
1638 #define VM_CHECK_MEMORYSTATUS do {} while(0)
1639 
1640 #else /* !XNU_TARGET_OS_OSX */
1641 
1642 #define VM_CHECK_MEMORYSTATUS   vm_pressure_response()
1643 
1644 #endif /* !XNU_TARGET_OS_OSX */
1645 
1646 #endif /* CONFIG_JETSAM */
1647 
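/*
 * Hedged sketch of intended use (illustrative; "nfreed" is a placeholder):
 * paths that materially change the available-page picture invoke the macro
 * after updating the relevant counters, e.g.
 *
 *	vm_page_free_count += nfreed;
 *	VM_CHECK_MEMORYSTATUS;
 *
 * Under CONFIG_JETSAM this feeds memorystatus_pages_update(); on macOS builds
 * without jetsam it calls vm_pressure_response(); on other non-jetsam builds
 * it is a no-op.
 */
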
1648 /*
1649  * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
1650  * protected by the object lock.
1651  */
1652 
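/*
 * SET_PAGE_DIRTY: on non-macOS targets, if the page has been entered in a
 * pmap with write permission (vmp_pmapped && vmp_wpmapped) and is not yet
 * marked dirty, the "modified" state is pushed into the pmap layer via
 * pmap_set_modify() before the software dirty bit is set; on macOS only the
 * software dirty bit is set.
 */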
1653 #if !XNU_TARGET_OS_OSX
1654 #define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
1655 	        MACRO_BEGIN                                             \
1656 	        vm_page_t __page__ = (m);                               \
1657 	        if (__page__->vmp_pmapped == TRUE &&                    \
1658 	            __page__->vmp_wpmapped == TRUE &&                   \
1659 	            __page__->vmp_dirty == FALSE &&                     \
1660 	            (set_pmap_modified)) {                              \
1661 	                pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
1662 	        }                                                       \
1663 	        __page__->vmp_dirty = TRUE;                             \
1664 	        MACRO_END
1665 #else /* !XNU_TARGET_OS_OSX */
1666 #define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
1667 	        MACRO_BEGIN                                             \
1668 	        vm_page_t __page__ = (m);                               \
1669 	        __page__->vmp_dirty = TRUE;                             \
1670 	        MACRO_END
1671 #endif /* !XNU_TARGET_OS_OSX */
1672 
1673 #define PAGE_ASSERT_WAIT(m, interruptible)                      \
1674 	        (((m)->vmp_wanted = TRUE),                      \
1675 	         assert_wait((event_t) (m), (interruptible)))
1676 
1677 #if CONFIG_IOSCHED
1678 #define PAGE_SLEEP(o, m, interruptible)                         \
1679 	        vm_page_sleep(o, m, interruptible)
1680 #else
1681 #define PAGE_SLEEP(o, m, interruptible)                         \
1682 	(((m)->vmp_wanted = TRUE),                              \
1683 	 thread_sleep_vm_object((o), (m), (interruptible)))
1684 #endif
1685 
1686 #define PAGE_WAKEUP_DONE(m)                                     \
1687 	        MACRO_BEGIN                                     \
1688 	        (m)->vmp_busy = FALSE;                          \
1689 	        if ((m)->vmp_wanted) {                          \
1690 	                (m)->vmp_wanted = FALSE;                \
1691 	                thread_wakeup((event_t) (m));           \
1692 	        }                                               \
1693 	        MACRO_END
1694 
1695 #define PAGE_WAKEUP(m)                                          \
1696 	        MACRO_BEGIN                                     \
1697 	        if ((m)->vmp_wanted) {                          \
1698 	                (m)->vmp_wanted = FALSE;                \
1699 	                thread_wakeup((event_t) (m));           \
1700 	        }                                               \
1701 	        MACRO_END
1702 
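/*
 * Hedged sketch of the busy/wanted handshake these macros implement
 * (illustrative only; the object lock is assumed held while setting up the
 * wait and while waking the waiters):
 *
 *	// waiter: object locked, page found busy
 *	PAGE_ASSERT_WAIT(m, THREAD_UNINT);
 *	vm_object_unlock(object);
 *	thread_block(THREAD_CONTINUE_NULL);
 *
 *	// owner: object locked, done with the page
 *	PAGE_WAKEUP_DONE(m);
 */
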
1703 #define VM_PAGE_FREE(p)                         \
1704 	        MACRO_BEGIN                     \
1705 	        vm_page_free_unlocked(p, TRUE); \
1706 	        MACRO_END
1707 
1708 #define VM_PAGE_WAIT()          ((void)vm_page_wait(THREAD_UNINT))
1709 
1710 #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2)
1711 #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2)
1712 
1713 #define vm_page_lock_queues()   lck_mtx_lock(&vm_page_queue_lock)
1714 #define vm_page_trylock_queues() lck_mtx_try_lock(&vm_page_queue_lock)
1715 #define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock)
1716 
1717 #define vm_page_lockspin_queues()       lck_mtx_lock_spin(&vm_page_queue_lock)
1718 #define vm_page_trylockspin_queues()    lck_mtx_try_lock_spin(&vm_page_queue_lock)
1719 #define vm_page_lockconvert_queues()    lck_mtx_convert_spin(&vm_page_queue_lock)
1720 
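/*
 * Hedged usage sketch (illustrative): short queue manipulations typically
 * take the spin variant and drop it with the common unlock, e.g.
 *
 *	vm_page_lockspin_queues();
 *	... manipulate page queues ...
 *	vm_page_unlock_queues();
 *
 * A holder of the spin-acquired lock that may block can first call
 * vm_page_lockconvert_queues() to convert it to a full mutex hold.
 */
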
1721 #ifdef  VPL_LOCK_SPIN
1722 extern lck_grp_t vm_page_lck_grp_local;
1723 
1724 #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
1725 #define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
1726 #define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
1727 #else
1728 #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
1729 #define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
1730 #define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
1731 #endif
1732 
1733 
1734 #if DEVELOPMENT || DEBUG
1735 #define VM_PAGE_SPECULATIVE_USED_ADD()                          \
1736 	MACRO_BEGIN                                             \
1737 	OSAddAtomic(1, &vm_page_speculative_used);              \
1738 	MACRO_END
1739 #else
1740 #define VM_PAGE_SPECULATIVE_USED_ADD()
1741 #endif
1742 
1743 
1744 #define VM_PAGE_CONSUME_CLUSTERED(mem)                          \
1745 	MACRO_BEGIN                                             \
1746 	ppnum_t	__phys_page;                                    \
1747 	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem);               \
1748 	pmap_lock_phys_page(__phys_page);                       \
1749 	if (mem->vmp_clustered) {                               \
1750 	        vm_object_t o;                                  \
1751 	        o = VM_PAGE_OBJECT(mem);                        \
1752 	        assert(o);                                      \
1753 	        o->pages_used++;                                \
1754 	        mem->vmp_clustered = FALSE;                     \
1755 	        VM_PAGE_SPECULATIVE_USED_ADD();                 \
1756 	}                                                       \
1757 	pmap_unlock_phys_page(__phys_page);                     \
1758 	MACRO_END
1759 
1760 
1761 #define VM_PAGE_COUNT_AS_PAGEIN(mem)                            \
1762 	MACRO_BEGIN                                             \
1763 	{                                                       \
1764 	vm_object_t o;                                          \
1765 	o = VM_PAGE_OBJECT(mem);                                \
1766 	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);           \
1767 	counter_inc(&current_task()->pageins);                  \
1768 	if (o->internal) {                                      \
1769 	        DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);       \
1770 	} else {                                                \
1771 	        DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \
1772 	}                                                       \
1773 	}                                                       \
1774 	MACRO_END
1775 
1776 /* adjust for stolen pages accounted elsewhere */
1777 #define VM_PAGE_MOVE_STOLEN(page_count)                         \
1778 	MACRO_BEGIN                                             \
1779 	vm_page_stolen_count -=	(page_count);                   \
1780 	vm_page_wire_count_initial -= (page_count);             \
1781 	MACRO_END
1782 
1783 #define DW_vm_page_unwire               0x01
1784 #define DW_vm_page_wire                 0x02
1785 #define DW_vm_page_free                 0x04
1786 #define DW_vm_page_activate             0x08
1787 #define DW_vm_page_deactivate_internal  0x10
1788 #define DW_vm_page_speculate            0x20
1789 #define DW_vm_page_lru                  0x40
1790 #define DW_vm_pageout_throttle_up       0x80
1791 #define DW_PAGE_WAKEUP                  0x100
1792 #define DW_clear_busy                   0x200
1793 #define DW_clear_reference              0x400
1794 #define DW_set_reference                0x800
1795 #define DW_move_page                    0x1000
1796 #define DW_VM_PAGE_QUEUES_REMOVE        0x2000
1797 #define DW_enqueue_cleaned              0x4000
1798 #define DW_vm_phantom_cache_update      0x8000
1799 
1800 struct vm_page_delayed_work {
1801 	vm_page_t       dw_m;
1802 	int             dw_mask;
1803 };
1804 
1805 #define DEFAULT_DELAYED_WORK_LIMIT      32
1806 
1807 struct vm_page_delayed_work_ctx {
1808 	struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT];
1809 	thread_t delayed_owner;
1810 };
1811 
1812 void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);
1813 
1814 extern unsigned int vm_max_delayed_work_limit;
1815 
1816 extern void vm_page_delayed_work_init_ctx(void);
1817 
1818 #define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= (max) ? (max) : vm_max_delayed_work_limit))
1819 
1820 /*
1821  * vm_page_do_delayed_work may need to drop the object lock.
1822  * If it does, the pages it is working on must be held stable
1823  * via the busy bit.  So, if a page's busy bit isn't already set,
1824  * we set it here and ask vm_page_do_delayed_work to clear it
1825  * and wake up anyone who might have blocked on it once we're
1826  * done processing the page.
1827  */
1828 
1829 #define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)              \
1830 	MACRO_BEGIN                                             \
1831 	if (mem->vmp_busy == FALSE) {                           \
1832 	        mem->vmp_busy = TRUE;                           \
1833 	        if ( !(dwp->dw_mask & DW_vm_page_free))         \
1834 	                dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
1835 	}                                                       \
1836 	dwp->dw_m = mem;                                        \
1837 	dwp++;                                                  \
1838 	dw_cnt++;                                               \
1839 	MACRO_END
1840 
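/*
 * Hedged sketch of the accumulate-and-flush pattern the comment above
 * describes (illustrative; `object' and `mem' stand for a locked object and
 * one of its pages, and VM_KERN_MEMORY_NONE is just an example tag):
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	// for each page `mem' of the locked `object' ...
 *	dwp->dw_mask = DW_vm_page_deactivate_internal | DW_clear_reference;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *	if (dw_count >= dw_limit) {
 *	        vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *	            &dw_array[0], dw_count);
 *	        dwp = &dw_array[0];
 *	        dw_count = 0;
 *	}
 *	// ... after the loop, flush any remaining entries the same way
 */
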
1841 extern vm_page_t vm_object_page_grab(vm_object_t);
1842 
1843 #if VM_PAGE_BUCKETS_CHECK
1844 extern void vm_page_buckets_check(void);
1845 #endif /* VM_PAGE_BUCKETS_CHECK */
1846 
1847 extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq);
1848 extern void vm_page_remove_internal(vm_page_t page);
1849 extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
1850 extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
1851 extern void vm_page_check_pageable_safe(vm_page_t page);
1852 
1853 #if CONFIG_SECLUDED_MEMORY
1854 extern uint64_t secluded_shutoff_trigger;
1855 extern uint64_t secluded_shutoff_headroom;
1856 extern void start_secluded_suppression(task_t);
1857 extern void stop_secluded_suppression(task_t);
1858 #endif /* CONFIG_SECLUDED_MEMORY */
1859 
1860 extern void vm_retire_boot_pages(void);
1861 extern uint32_t vm_retired_pages_count(void);
1862 
1863 #endif  /* _VM_VM_PAGE_H_ */
1864