1 /*
2 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_page.h
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Resident memory system definitions.
64 */
65
66 #ifndef _VM_VM_PAGE_H_
67 #define _VM_VM_PAGE_H_
68
69 #include <debug.h>
70 #include <stdbool.h>
71 #include <vm/vm_options.h>
72 #include <vm/vm_protos.h>
73 #include <vm/vm_far.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_param.h>
77 #include <mach/memory_object_types.h> /* for VMP_CS_BITS... */
78 #include <kern/thread.h>
79 #include <kern/queue.h>
80 #include <kern/locks.h>
81 #include <sys/kern_memorystatus_xnu.h>
82
83 #if __x86_64__
84 #define XNU_VM_HAS_DELAYED_PAGES 1
85 #define XNU_VM_HAS_LINEAR_PAGES_ARRAY 0
86 #else
87 #define XNU_VM_HAS_DELAYED_PAGES 0
88 #define XNU_VM_HAS_LINEAR_PAGES_ARRAY 1
89 #endif
90
91
92
/*
 * In order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe. There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
typedef uint32_t vm_page_packed_t;

/*
 * A queue linkage whose next/prev links are stored in packed (32-bit)
 * form; see vm_page_pack_ptr()/vm_page_unpack_ptr() for the encoding.
 */
struct vm_page_packed_queue_entry {
	vm_page_packed_t next;          /* next element */
	vm_page_packed_t prev;          /* previous element */
};

typedef struct vm_page_packed_queue_entry       *vm_page_queue_t;
typedef struct vm_page_packed_queue_entry       vm_page_queue_head_t;
typedef struct vm_page_packed_queue_entry       vm_page_queue_chain_t;
typedef struct vm_page_packed_queue_entry       *vm_page_queue_entry_t;

/* A packed pointer to the VM object a page belongs to (see VM_OBJECT_PACK/VM_OBJECT_UNPACK). */
typedef vm_page_packed_t vm_page_object_t;
113
114
/*
 * vm_relocate_reason_t:
 * A type to describe why a page relocation is being attempted. Depending on
 * the reason, certain pages may or may not be relocatable.
 *
 * VM_RELOCATE_REASON_CONTIGUOUS:
 * The relocation is on behalf of the contiguous allocator; it is likely to be
 * wired, so do not consider pages that cannot be wired for any reason.
 */
__enum_closed_decl(vm_relocate_reason_t, unsigned int, {
	VM_RELOCATE_REASON_CONTIGUOUS,

	VM_RELOCATE_REASON_COUNT,       /* number of valid relocation reasons; not a reason itself */
});
129
/*
 * vm_remove_reason_t:
 * A type to describe why a page is being removed from a global free queue.
 *
 * VM_REMOVE_REASON_USE:
 * The page is going to be used by the system (likely through the vm_page_grab
 * path). Do any state updates to the page that are relevant.
 *
 * VM_REMOVE_REASON_REBALANCE:
 * The page is going to be put onto a different free queue. Don't do any state
 * updates to the page; the client will do such updates. Structured this way
 * because rebalance operations are likely to be done in bulk, so this allows
 * clients to perform any operations in bulk.
 */
__enum_closed_decl(vm_remove_reason_t, unsigned int, {
	VM_REMOVE_REASON_USE,
	VM_REMOVE_REASON_REBALANCE,

	VM_REMOVE_REASON_COUNT,         /* number of valid removal reasons; not a reason itself */
});
150
/*
 * vm_memory_class_t:
 * A type to describe what kind of memory a page represents.
 *
 * VM_MEMORY_CLASS_REGULAR:
 * Normal memory, which should participate in the normal page lifecycle.
 */
__enum_closed_decl(vm_memory_class_t, unsigned int, {
	VM_MEMORY_CLASS_REGULAR,

	VM_MEMORY_CLASS_COUNT,          /* number of valid memory classes; not a class itself */
});
163
164 /* pages of compressed data */
165 #define VM_PAGE_COMPRESSOR_COUNT os_atomic_load(&compressor_object->resident_page_count, relaxed)
166
167 /*
168 * Management of resident (logical) pages.
169 *
170 * A small structure is kept for each resident
171 * page, indexed by page number. Each structure
172 * is an element of several lists:
173 *
174 * A hash table bucket used to quickly
175 * perform object/offset lookups
176 *
177 * A list of all pages for a given object,
178 * so they can be quickly deactivated at
179 * time of deallocation.
180 *
181 * An ordered list of pages due for pageout.
182 *
183 * In addition, the structure contains the object
184 * and offset to which this page belongs (for pageout),
185 * and sundry status bits.
186 *
187 * Fields in this structure are locked either by the lock on the
188 * object that the page belongs to (O) or by the lock on the page
189 * queues (P). [Some fields require that both locks be held to
190 * change that field; holding either lock is sufficient to read.]
191 */
192
#define VM_PAGE_NULL            ((vm_page_t) 0)


/*
 * Predicates over a page's vmp_q_state, implemented as lookups into the
 * const boolean tables declared below (indexed by queue state).
 */
#define VM_PAGE_INACTIVE(m)                     (vm_page_inactive_states[m->vmp_q_state])
#define VM_PAGE_PAGEABLE(m)                     (vm_page_pageable_states[m->vmp_q_state])
#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m)     (vm_page_non_speculative_pageable_states[m->vmp_q_state])
#define VM_PAGE_ACTIVE_OR_INACTIVE(m)           (vm_page_active_or_inactive_states[m->vmp_q_state])


/*
 * Values for vmp_q_state: which queue (if any) the page currently lives on.
 */
#define VM_PAGE_NOT_ON_Q                0       /* page is not present on any queue, nor is it wired... mainly a transient state */
#define VM_PAGE_IS_WIRED                1       /* page is currently wired */
#define VM_PAGE_USED_BY_COMPRESSOR      2       /* page is in use by the compressor to hold compressed data */
#define VM_PAGE_ON_FREE_Q               3       /* page is on the main free queue */
#define VM_PAGE_ON_FREE_LOCAL_Q         4       /* page is on one of the per-CPU free queues */
#define VM_PAGE_ON_FREE_LOPAGE_Q        5       /* page is on the lopage pool free list */
#define VM_PAGE_ON_THROTTLED_Q          6       /* page is on the throttled queue... we stash anonymous pages here when not paging */
#define VM_PAGE_ON_PAGEOUT_Q            7       /* page is on one of the pageout queues (internal/external) awaiting processing */
#define VM_PAGE_ON_SPECULATIVE_Q        8       /* page is on one of the speculative queues */
#define VM_PAGE_ON_ACTIVE_LOCAL_Q       9       /* page has recently been created and is being held in one of the per-CPU local queues */
#define VM_PAGE_ON_ACTIVE_Q             10      /* page is in global active queue */
#define VM_PAGE_ON_INACTIVE_INTERNAL_Q  11      /* page is on the inactive internal queue a.k.a. anonymous queue */
#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q  12      /* page is on the inactive external queue a.k.a. file backed queue */
#define VM_PAGE_ON_INACTIVE_CLEANED_Q   13      /* page has been cleaned to a backing file and is ready to be stolen */
#define VM_PAGE_ON_SECLUDED_Q           14      /* page is on secluded queue */
#define VM_PAGE_Q_STATE_LAST_VALID_VALUE        14      /* we currently use 4 bits for the state... don't let this go beyond 15 */

#define VM_PAGE_Q_STATE_ARRAY_SIZE      (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1)

/* Per-queue-state predicate tables backing the VM_PAGE_* macros above. */
extern const bool vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
extern const bool vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
extern const bool vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
extern const bool vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
225
226
/*
 * The structure itself. See the block comment above for what (O) and (P) mean.
 */
#define vmp_pageq vmp_q_un.vmp_q_pageq
#define vmp_snext vmp_q_un.vmp_q_snext

struct vm_page {
	union {
		vm_page_queue_chain_t vmp_q_pageq;  /* queue info for FIFO queue or free list (P) */
		struct vm_page        *vmp_q_snext; /* singly-linked list linkage (see NEXT_PAGE) */
	} vmp_q_un;

	vm_page_queue_chain_t vmp_listq;        /* all pages in same object (O) */

	vm_page_queue_chain_t vmp_specialq;     /* anonymous pages in the special queues (P) */
	vm_object_offset_t    vmp_offset;       /* offset into that object (O,P) */

	vm_page_object_t      vmp_object;       /* which object am I in (O&P) */

	/*
	 * The following word of flags used to be protected by the "page queues" lock.
	 * That's no longer true and what lock, if any, is needed may depend on the
	 * value of vmp_q_state.
	 *
	 * We use 'vmp_wire_count' to store the local queue id if local queues are enabled.
	 * See the comments at 'vm_page_queues_remove' as to why this is safe to do.
	 */
#define VM_PAGE_SPECIAL_Q_EMPTY (0)
#define VM_PAGE_SPECIAL_Q_BG (1)
#define VM_PAGE_SPECIAL_Q_DONATE (2)
#define VM_PAGE_SPECIAL_Q_FG (3)
#define vmp_local_id vmp_wire_count
	unsigned int vmp_wire_count:16,         /* how many wired down maps use me? (O&P) */
	    vmp_q_state:4,                      /* which q is the page on (P); one of the VM_PAGE_*_Q values */
	    vmp_on_specialq:2,                  /* one of the VM_PAGE_SPECIAL_Q_* values above */
	    vmp_canonical:1,                    /* this page is a canonical kernel page (immutable) */
	    vmp_gobbled:1,                      /* page used internally (P) */
	    vmp_laundry:1,                      /* page is being cleaned now (P)*/
	    vmp_no_cache:1,                     /* page is not to be cached and should */
	                                        /* be reused ahead of other pages (P) */
	    vmp_reference:1,                    /* page has been used (P) */
	    vmp_lopage:1,                       /* page belongs to the low-address page pool */
	    vmp_realtime:1,                     /* page used by realtime thread */
#if !CONFIG_TRACK_UNMODIFIED_ANON_PAGES
	    vmp_unused_page_bits:3;
#else /* ! CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
	    vmp_unmodified_ro:1,                /* Tracks if an anonymous page is modified after a decompression (O&P).*/
	    vmp_unused_page_bits:2;
#endif /* ! CONFIG_TRACK_UNMODIFIED_ANON_PAGES */

	/*
	 * MUST keep the 2 32 bit words used as bit fields
	 * separated since the compiler has a nasty habit
	 * of using 64 bit loads and stores on them as
	 * if they were a single 64 bit field... since
	 * they are protected by 2 different locks, this
	 * is a real problem
	 */
	vm_page_packed_t vmp_next_m;            /* VP bucket link (O) */

	/*
	 * The following word of flags is protected by the "VM object" lock.
	 *
	 * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the
	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
	 * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro.
	 * It's also ok to modify them behind just the VM object "exclusive" lock.
	 */
	unsigned int vmp_busy:1,                /* page is in transit (O) */
	    vmp_wanted:1,                       /* someone is waiting for page (O) */
	    vmp_tabled:1,                       /* page is in VP table (O) */
	    vmp_hashed:1,                       /* page is in vm_page_buckets[] (O) + the bucket lock */
	    __vmp_unused : 1,
	    vmp_clustered:1,                    /* page is not the faulted page (O) or (O-shared AND pmap_page) */
	    vmp_pmapped:1,                      /* page has at some time been entered into a pmap (O) or */
	                                        /* (O-shared AND pmap_page) */
	    vmp_xpmapped:1,                     /* page has been entered with execute permission (O) or */
	                                        /* (O-shared AND pmap_page) */
	    vmp_wpmapped:1,                     /* page has been entered at some point into a pmap for write (O) */
	    vmp_free_when_done:1,               /* page is to be freed once cleaning is completed (O) */
	    vmp_absent:1,                       /* Data has been requested, but is not yet available (O) */
	    vmp_error:1,                        /* Data manager was unable to provide data due to error (O) */
	    vmp_dirty:1,                        /* Page must be cleaned (O) */
	    vmp_cleaning:1,                     /* Page clean has begun (O) */
	    vmp_precious:1,                     /* Page is precious; data must be returned even if clean (O) */
	    vmp_overwriting:1,                  /* Request to unlock has been made without having data. (O) */
	                                        /* [See vm_fault_page_overwrite] */
	    vmp_restart:1,                      /* Page was pushed higher in shadow chain by copy_call-related pagers */
	                                        /* start again at top of chain */
	    vmp_unusual:1,                      /* Page is absent, error, restart or page locked */
	    vmp_cs_validated:VMP_CS_BITS,       /* code-signing: page was checked */
	    vmp_cs_tainted:VMP_CS_BITS,         /* code-signing: page is tainted */
	    vmp_cs_nx:VMP_CS_BITS,              /* code-signing: page is nx */
	    vmp_reusable:1,
	    vmp_written_by_kernel:1;            /* page was written by kernel (i.e. decompressed) */

#if !XNU_VM_HAS_LINEAR_PAGES_ARRAY
	/*
	 * Physical number of the page
	 *
	 * Setting this value to or away from vm_page_fictitious_addr
	 * must be done with (P) held
	 */
	ppnum_t vmp_phys_page;
#endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
};
333
/*!
 * @var vm_pages
 * The so called VM pages array
 *
 * @var vm_pages_end
 * The pointer past the last valid page in the VM pages array.
 *
 * @var vm_pages_count
 * The number of elements in the VM pages array.
 * (vm_pages + vm_pages_count == vm_pages_end).
 *
 * @var vm_pages_first_pnum
 * For linear page arrays, the pnum of the first page in the array.
 * In other words VM_PAGE_GET_PHYS_PAGE(&vm_pages_array()[0]).
 *
 * Note: vm_pages itself is deliberately not declared here; access it
 * through vm_pages_array_internal() / vm_page_get() below.
 */
extern vm_page_t vm_pages_end;
extern uint32_t vm_pages_count;
#if XNU_VM_HAS_LINEAR_PAGES_ARRAY
extern ppnum_t vm_pages_first_pnum;
#endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
354
/**
 * Internal accessor which returns the raw vm_pages pointer.
 *
 * This pointer must not be indexed directly. Use vm_page_get instead when
 * indexing into the array.
 *
 * __pure2 helps explain to the compiler that the value vm_pages is a constant.
 */
__pure2
static inline struct vm_page *
vm_pages_array_internal(void)
{
	/* the definition lives elsewhere; declaring it function-local keeps
	 * direct (unchecked) indexing of vm_pages out of the rest of the file */
	extern vm_page_t vm_pages;
	return vm_pages;
}
370
/**
 * Get a pointer to the page at index i.
 *
 * This getter is the only legal way to index into the vm_pages array.
 * Note: the arithmetic is unbounded; callers are responsible for keeping
 * i within [0, vm_pages_count).
 */
__pure2
static inline vm_page_t
vm_page_get(uint32_t i)
{
	return VM_FAR_ADD_PTR_UNBOUNDED(vm_pages_array_internal(), i);
}
382
383 __pure2
384 static inline bool
vm_page_in_array(const struct vm_page * m)385 vm_page_in_array(const struct vm_page *m)
386 {
387 return vm_pages_array_internal() <= m && m < vm_pages_end;
388 }
389
#if XNU_VM_HAS_LINEAR_PAGES_ARRAY
/*
 * A vm_page carrying an explicit physical page number. Pages in the linear
 * vm_pages array derive their pnum from their index and don't need this;
 * pages allocated outside the array store it in vmp_phys_page.
 */
struct vm_page_with_ppnum {
	struct vm_page  vmp_page;
	ppnum_t         vmp_phys_page;
};

/*!
 * @abstract
 * Looks up the canonical kernel page for a given physical page number.
 *
 * @discussion
 * This function may return VM_PAGE_NULL for kernel pages that aren't managed
 * by the VM.
 *
 * @param pnum The page number to lookup. It must be within
 * [pmap_first_pnum, vm_pages_first_pnum + vm_pages_count)
 */
extern vm_page_t vm_page_find_canonical(ppnum_t pnum) __pure2;
#else
/* without a linear array, every vm_page carries its own vmp_phys_page field */
#define vm_page_with_ppnum vm_page
#endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t;
412
/*
 * Return the physical page number of "m". Pages in the linear array derive
 * it from their index; other pages store it explicitly in vmp_phys_page.
 */
static inline ppnum_t
VM_PAGE_GET_PHYS_PAGE(const struct vm_page *m)
{
#if XNU_VM_HAS_LINEAR_PAGES_ARRAY
	if (vm_page_in_array(m)) {
		uintptr_t index = (uintptr_t)(m - vm_pages_array_internal());

		return (ppnum_t)(vm_pages_first_pnum + index);
	}
#endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
	return ((const struct vm_page_with_ppnum *)m)->vmp_phys_page;
}
425
/*
 * Initialize the physical page number of "m". For pages in the linear array
 * the pnum is implied by the index, so this only asserts consistency and
 * returns; otherwise it stores pnum into the explicit field.
 */
static inline void
VM_PAGE_INIT_PHYS_PAGE(struct vm_page *m, ppnum_t pnum)
{
#if XNU_VM_HAS_LINEAR_PAGES_ARRAY
	if (vm_page_in_array(m)) {
		assert(pnum == VM_PAGE_GET_PHYS_PAGE(m));
		return;
	}
#endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
	((vm_page_with_ppnum_t)(m))->vmp_phys_page = pnum;
}
437
/*
 * Overwrite the physical page number of "m". Only legal for pages that are
 * neither in the linear array nor canonical (their pnum is immutable).
 */
static inline void
VM_PAGE_SET_PHYS_PAGE(struct vm_page *m, ppnum_t pnum)
{
	assert(!vm_page_in_array(m) && !m->vmp_canonical);
	((vm_page_with_ppnum_t)(m))->vmp_phys_page = pnum;
}
444
#if defined(__x86_64__)
/*
 * On x86_64 free-list "color" is computed per clump of physically
 * contiguous pages; elsewhere it comes straight from the pnum.
 */
extern unsigned int vm_clump_mask, vm_clump_shift;
#define VM_PAGE_GET_CLUMP(m) ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
#define VM_PAGE_GET_COLOR(m) ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)
#else
#define VM_PAGE_GET_COLOR(m) (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask)
#endif
452
453 /*
454 * Parameters for pointer packing
455 *
456 *
457 * VM Pages pointers might point to:
458 *
459 * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
460 *
461 * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
462 *
463 * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
464 * aligned).
465 *
466 *
467 * The current scheme uses 31 bits of storage and 6 bits of shift using the
468 * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
469 * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
470 *
471 * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
472 */
473 #define VM_VPLQ_ALIGNMENT 128
474 #define VM_PAGE_PACKED_PTR_ALIGNMENT 64 /* must be a power of 2 */
475 #define VM_PAGE_PACKED_ALIGNED __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
476 #define VM_PAGE_PACKED_PTR_BITS 31
477 #define VM_PAGE_PACKED_PTR_SHIFT 6
478 #define VM_PAGE_PACKED_PTR_BASE ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)
479
480 #define VM_PAGE_PACKED_FROM_ARRAY 0x80000000
481
/*
 * Pack a vm_page pointer into 32 bits. Pointers into the vm_pages array are
 * encoded as (index | VM_PAGE_PACKED_FROM_ARRAY); all other pointers must
 * satisfy the VM_PAGE_PACKED_PTR packing constraints (alignment/reach).
 */
static inline vm_page_packed_t
vm_page_pack_ptr(uintptr_t p)
{
	if (vm_page_in_array((vm_page_t)p)) {
		ptrdiff_t diff = (vm_page_t)p - vm_pages_array_internal();
		/* round-trip check: the index must map back to the same page */
		assert((vm_page_t)p == vm_page_get((uint32_t)diff));
		return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
	}

	VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
	vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
	return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
}
495
496
/*
 * Inverse of vm_page_pack_ptr(): recover the full pointer from a 32-bit
 * packed value. Array-encoded values (top bit set) are resolved through
 * vm_page_get(); everything else goes through the generic unpacker.
 */
static inline uintptr_t
vm_page_unpack_ptr(uintptr_t p)
{
	if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
		p &= ~VM_PAGE_PACKED_FROM_ARRAY;
		assert(p < (uintptr_t)vm_pages_count);
		return (uintptr_t)vm_page_get((uint32_t)p);
	}

	return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
}
508
509
/* Convenience wrappers that take/return arbitrary pointer expressions. */
#define VM_PAGE_PACK_PTR(p)     vm_page_pack_ptr((uintptr_t)(p))
#define VM_PAGE_UNPACK_PTR(p)   vm_page_unpack_ptr((uintptr_t)(p))

/* VM objects are never in the vm_pages array, so they use the generic packer directly. */
#define VM_OBJECT_PACK(o)       ((vm_page_object_t)VM_PACK_POINTER((uintptr_t)(o), VM_PAGE_PACKED_PTR))
#define VM_OBJECT_UNPACK(p)     ((vm_object_t)VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR))

#define VM_PAGE_OBJECT(p)       VM_OBJECT_UNPACK((p)->vmp_object)
#define VM_PAGE_PACK_OBJECT(o)  VM_OBJECT_PACK(o)


/* Reset the singly-linked (free list) linkage of a page. */
#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
MACRO_BEGIN                         \
	(p)->vmp_snext = 0;         \
MACRO_END


#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p)       VM_PAGE_PACK_PTR(p)
527
/*
 *	Macro:	vm_page_queue_init
 *	Function:
 *		Initialize the given queue to the empty state: both packed
 *		links point back at the queue head itself.
 *	Header:
 *		void vm_page_queue_init(q)
 *			vm_page_queue_t	q;	\* MODIFIED *\
 */
#define vm_page_queue_init(q)                                             \
MACRO_BEGIN                                                               \
	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
	(q)->next = VM_PAGE_PACK_PTR(q);                                  \
	(q)->prev = VM_PAGE_PACK_PTR(q);                                  \
MACRO_END
542
543
/*
 *	Macro:	vm_page_queue_enter
 *	Function:
 *		Insert a new element at the tail of the vm_page queue.
 *		(A queue whose head's prev equals its own packed pointer is
 *		empty, hence the special case below.)
 *	Header:
 *		void vm_page_queue_enter(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the list field in vm_page_t
 *
 *	This macro's arguments have to match the generic "queue_enter()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter(head, elt, field)                       \
MACRO_BEGIN                                                         \
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
	vm_page_packed_t __pck_prev = (head)->prev;                 \
	                                                            \
	if (__pck_head == __pck_prev) {                             \
	        (head)->next = __pck_elt;                           \
	} else {                                                    \
	        vm_page_t __prev;                                   \
	        __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
	        __prev->field.next = __pck_elt;                     \
	}                                                           \
	(elt)->field.prev = __pck_prev;                             \
	(elt)->field.next = __pck_head;                             \
	(head)->prev = __pck_elt;                                   \
MACRO_END
574
575
#if defined(__x86_64__)
/*
 * These are helper macros for vm_page_queue_enter_clump to assist
 * with conditional compilation (release / debug / development).
 * On release builds they all compile away to nothing.
 */
#if DEVELOPMENT || DEBUG

/* Assert that __prev and __p are properly cross-linked neighbors. */
#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)                                         \
MACRO_BEGIN                                                                               \
	if (__prev != NULL) {                                                             \
	        assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next));               \
	        assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
	}                                                                                 \
MACRO_END

/* Walk __n_free links from __first and assert we land on __last_next. */
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)                    \
MACRO_BEGIN                                                                     \
	unsigned int __i;                                                       \
	vm_page_queue_entry_t __tmp;                                            \
	for (__i = 0, __tmp = __first; __i < __n_free; __i++) {                 \
	        __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
	}                                                                       \
	assert(__tmp == __last_next);                                           \
MACRO_END

#define __DEBUG_STAT_INCREMENT_INRANGE          vm_clump_inrange++
#define __DEBUG_STAT_INCREMENT_INSERTS          vm_clump_inserts++
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)       vm_clump_promotes+=__n_free

#else

#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
#define __DEBUG_STAT_INCREMENT_INRANGE
#define __DEBUG_STAT_INCREMENT_INSERTS
#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)

#endif  /* if DEVELOPMENT || DEBUG */

#endif
616
/*
 *	Macro:	vm_page_queue_enter_first
 *	Function:
 *		Insert a new element at the head of the vm_page queue.
 *	Header:
 *		void vm_page_queue_enter_first(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the linkage field in vm_page
 *
 *	This macro's arguments have to match the generic "queue_enter_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_enter_first(head, elt, field)                 \
MACRO_BEGIN                                                         \
	vm_page_packed_t __pck_next = (head)->next;                 \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
	vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt);         \
	                                                            \
	if (__pck_head == __pck_next) {                             \
	        (head)->prev = __pck_elt;                           \
	} else {                                                    \
	        vm_page_t __next;                                   \
	        __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
	        __next->field.prev = __pck_elt;                     \
	}                                                           \
	                                                            \
	(elt)->field.next = __pck_next;                             \
	(elt)->field.prev = __pck_head;                             \
	(head)->next = __pck_elt;                                   \
MACRO_END
648
649
/*
 *	Macro:	vm_page_queue_remove
 *	Function:
 *		Remove an arbitrary page from a vm_page queue, zeroing the
 *		removed element's linkage.
 *	Header:
 *		void vm_page_queue_remove(q, qe, field)
 *			arguments as in vm_page_queue_enter
 *
 *	This macro's arguments have to match the generic "queue_remove()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove(head, elt, field)                          \
MACRO_BEGIN                                                             \
	vm_page_packed_t __pck_next = (elt)->field.next;                \
	vm_page_packed_t __pck_prev = (elt)->field.prev;                \
	vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);   \
	vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev);   \
	                                                                \
	if ((void *)(head) == (void *)__next) {                         \
	        (head)->prev = __pck_prev;                              \
	} else {                                                        \
	        __next->field.prev = __pck_prev;                        \
	}                                                               \
	                                                                \
	if ((void *)(head) == (void *)__prev) {                         \
	        (head)->next = __pck_next;                              \
	} else {                                                        \
	        __prev->field.next = __pck_next;                        \
	}                                                               \
	                                                                \
	(elt)->field.next = 0;                                          \
	(elt)->field.prev = 0;                                          \
MACRO_END
683
684
/*
 *	Macro:	vm_page_queue_remove_first
 *
 *	Function:
 *		Remove and return the entry at the head of a vm_page queue.
 *		The queue must not be empty.
 *
 *	Header:
 *		vm_page_queue_remove_first(head, entry, field)
 *		N.B. entry is returned by reference
 *
 *	This macro's arguments have to match the generic "queue_remove_first()" macro which is
 *	what is used for this on 32 bit kernels.
 */
#define vm_page_queue_remove_first(head, entry, field)              \
MACRO_BEGIN                                                         \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);       \
	vm_page_packed_t __pck_next;                                \
	vm_page_t __next;                                           \
	                                                            \
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);      \
	__pck_next = (entry)->field.next;                           \
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);         \
	                                                            \
	if (__pck_head == __pck_next) {                             \
	        (head)->prev = __pck_head;                          \
	} else {                                                    \
	        __next->field.prev = __pck_head;                    \
	}                                                           \
	                                                            \
	(head)->next = __pck_next;                                  \
	(entry)->field.next = 0;                                    \
	(entry)->field.prev = 0;                                    \
MACRO_END
718
719
#if defined(__x86_64__)
/*
 *	Macro:	vm_page_queue_remove_first_with_clump
 *	Function:
 *		Remove and return the entry at the head of the free queue.
 *		end is set to 1 to indicate that we just returned the last
 *		page in a clump (or emptied the queue). The queue must not
 *		be empty.
 *
 *	Header:
 *		vm_page_queue_remove_first_with_clump(head, entry, end)
 *		entry is returned by reference
 *		end is returned by reference
 */
#define vm_page_queue_remove_first_with_clump(head, entry, end)             \
MACRO_BEGIN                                                                 \
	vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head);               \
	vm_page_packed_t __pck_next;                                        \
	vm_page_t __next;                                                   \
	                                                                    \
	(entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next);              \
	__pck_next = (entry)->vmp_pageq.next;                               \
	__next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next);                 \
	                                                                    \
	(end) = 0;                                                          \
	if (__pck_head == __pck_next) {                                     \
	        (head)->prev = __pck_head;                                  \
	        (end) = 1;                                                  \
	} else {                                                            \
	        __next->vmp_pageq.prev = __pck_head;                        \
	        if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
	                (end) = 1;                                          \
	        }                                                           \
	}                                                                   \
	                                                                    \
	(head)->next = __pck_next;                                          \
	(entry)->vmp_pageq.next = 0;                                        \
	(entry)->vmp_pageq.prev = 0;                                        \
MACRO_END
#endif
758
/*
 *	Macro:	vm_page_queue_end
 *	Function:
 *		Tests whether an entry is really the end of
 *		the queue (i.e. it is the queue head itself).
 *	Header:
 *		boolean_t vm_page_queue_end(q, qe)
 *			vm_page_queue_t q;
 *			vm_page_queue_entry_t qe;
 */
#define vm_page_queue_end(q, qe)        ((q) == (qe))
770
771
/*
 *	Macro:	vm_page_queue_empty
 *	Function:
 *		Tests whether a queue is empty (its first entry is the head).
 *	Header:
 *		boolean_t vm_page_queue_empty(q)
 *			vm_page_queue_t q;
 */
#define vm_page_queue_empty(q)          vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
781
782
783
/*
 *	Macro:	vm_page_queue_first
 *	Function:
 *		Returns the first entry in the queue (unpacked).
 *	Header:
 *		uintptr_t vm_page_queue_first(q)
 *			vm_page_queue_t q;	\* IN *\
 */
#define vm_page_queue_first(q)          (VM_PAGE_UNPACK_PTR((q)->next))
793
794
795
/*
 *	Macro:	vm_page_queue_last
 *	Function:
 *		Returns the last entry in the queue (unpacked).
 *	Header:
 *		uintptr_t vm_page_queue_last(q)
 *			vm_page_queue_t q;	\* IN *\
 */
#define vm_page_queue_last(q)           (VM_PAGE_UNPACK_PTR((q)->prev))
805
806
807
/*
 *	Macro:	vm_page_queue_next
 *	Function:
 *		Returns the entry after an item in the queue (unpacked).
 *	Header:
 *		uintptr_t vm_page_queue_next(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_next(qc)          (VM_PAGE_UNPACK_PTR((qc)->next))
817
818
819
/*
 *	Macro:	vm_page_queue_prev
 *	Function:
 *		Returns the entry before an item in the queue (unpacked).
 *	Header:
 *		uintptr_t vm_page_queue_prev(qc)
 *			vm_page_queue_t qc;
 */
#define vm_page_queue_prev(qc)          (VM_PAGE_UNPACK_PTR((qc)->prev))
829
830
831
/*
 *	Macro:	vm_page_queue_iterate
 *	Function:
 *		Iterate over each item in a vm_page queue.
 *		Generates a 'for' loop, setting elt to
 *		each item in turn (by reference).
 *		NOTE: the body must not remove elt from the queue
 *		(the next link is read from elt at each step).
 *	Header:
 *		vm_page_queue_iterate(q, elt, field)
 *			queue_t q;
 *			vm_page_t elt;
 *			<field> is the chain field in vm_page_t
 */
#define vm_page_queue_iterate(head, elt, field)                         \
	for ((elt) = (vm_page_t)vm_page_queue_first(head);              \
	    !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt));   \
	    (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field))       \
848
849
850 /*
851 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through vm_page_max_speculative_age_q
852 * represents a set of aging bins that are 'protected'...
853 *
854 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
855 * not yet been 'claimed' but have been aged out of the protective bins
856 * this occurs in vm_page_speculate when it advances to the next bin
857 * and discovers that it is still occupied... at that point, all of the
858 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages
859 * in that bin are all guaranteed to have reached at least the maximum age
860 * we allow for a protected page... they can be older if there is no
861 * memory pressure to pull them from the bin, or there are no new speculative pages
862 * being generated to push them out.
863 * this list is the one that vm_pageout_scan will prefer when looking
864 * for pages to move to the underweight free list
865 *
866 * vm_page_max_speculative_age_q * VM_PAGE_SPECULATIVE_Q_AGE_MS
867 * defines the amount of time a speculative page is normally
868 * allowed to live in the 'protected' state (i.e. not available
869 * to be stolen if vm_pageout_scan is running and looking for
870 * pages)... however, if the total number of speculative pages
871 * in the protected state exceeds our limit (defined in vm_pageout.c)
872 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
873 * vm_pageout_scan is allowed to steal pages from the protected
874 * bucket even if they are underage.
875 *
876 * vm_pageout_scan is also allowed to pull pages from a protected
877 * bin if the bin has reached the "age of consent" we've set
878 */
/* See the block comment above for the aging-bin scheme these values define. */
#define VM_PAGE_RESERVED_SPECULATIVE_AGE_Q      40      /* number of bins reserved (upper bound on max age q) */
#define VM_PAGE_DEFAULT_MAX_SPECULATIVE_AGE_Q   10      /* default value for vm_page_max_speculative_age_q */
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q           1       /* first 'protected' bin */
#define VM_PAGE_SPECULATIVE_AGED_Q              0       /* bin for pages aged out of the protected bins */

#define VM_PAGE_SPECULATIVE_Q_AGE_MS            500     /* per-bin lifetime, in milliseconds */
885
/* One speculative aging bin: its page queue plus its expiration timestamp. */
struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	vm_page_queue_head_t    age_q;
	mach_timespec_t         age_ts;         /* time at which this bin's pages age out */
} VM_PAGE_PACKED_ALIGNED;
893
894
895
896 extern
897 struct vm_speculative_age_q vm_page_queue_speculative[];
898
899 extern int speculative_steal_index;
900 extern int speculative_age_index;
901 extern unsigned int vm_page_speculative_q_age_ms;
902 extern unsigned int vm_page_max_speculative_age_q;
903
904
/*
 * Container for the global VM page-queue mutexes.  Each lock is forced
 * onto its own 64-byte cache line, with pad bytes before and after
 * (presumably to avoid false sharing between the locks and their
 * neighbors).  Callers do not touch the *2 members directly; they use
 * the vm_page_queue_lock / vm_page_queue_free_lock macros defined
 * further down in this file.
 */
typedef struct vm_locks_array {
	char            pad __attribute__ ((aligned(64)));
	lck_mtx_t       vm_page_queue_lock2 __attribute__ ((aligned(64)));
	lck_mtx_t       vm_page_queue_free_lock2 __attribute__ ((aligned(64)));
	char            pad2 __attribute__ ((aligned(64)));
} vm_locks_array_t;
911
912
/* TRUE iff the page's queue state marks it as wired */
#define VM_PAGE_WIRED(m)        ((m)->vmp_q_state == VM_PAGE_IS_WIRED)
/* head / head-pointer accessors for singly-linked (vmp_snext) page lists */
#define NEXT_PAGE(m)            ((m)->vmp_snext)
#define NEXT_PAGE_PTR(m)        (&(m)->vmp_snext)
916
917 static inline vm_page_t
vm_page_list_pop(vm_page_t * list)918 vm_page_list_pop(vm_page_t *list)
919 {
920 vm_page_t mem = *list;
921
922 if (mem) {
923 *list = NEXT_PAGE(mem);
924 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
925 }
926 return mem;
927 }
928
/*
 * Push a page onto the head of a NEXT_PAGE-linked (vmp_snext) page
 * list.  Counterpart of vm_page_list_pop() above.
 */
static inline void
vm_page_list_push(vm_page_t *list, vm_page_t mem)
{
	/* old head becomes the new page's successor, then mem becomes head */
	mem->vmp_snext = *list;
	*list = mem;
}
935
/* Iterate 'm' over every page of a vmp_snext-linked list, non-destructively. */
#define vm_page_list_foreach(m, list) \
	for ((m) = (list); (m); (m) = (m)->vmp_snext)

/* Pop pages from 'list' into 'it' one at a time until the list is empty. */
#define vm_page_list_foreach_consume(it, list) \
	while (((it) = vm_page_list_pop((list))))
941
942 /*
943 * XXX The unusual bit should not be necessary. Most of the bit
944 * XXX fields above really want to be masks.
945 */
946
947 /*
948 * For debugging, this macro can be defined to perform
949 * some useful check on a page structure.
950 * INTENTIONALLY left as a no-op so that the
951 * current call-sites can be left intact for future uses.
952 */
953
954 #define VM_PAGE_CHECK(mem) \
955 MACRO_BEGIN \
956 MACRO_END
957
958 /* Page coloring:
959 *
960 * The free page list is actually n lists, one per color,
961 * where the number of colors is a function of the machine's
962 * cache geometry set at system initialization. To disable
963 * coloring, set vm_colors to 1 and vm_color_mask to 0.
964 * The boot-arg "colors" may be used to override vm_colors.
965 * Note that there is little harm in having more colors than needed.
966 */
967
968 #define MAX_COLORS 128
969 #define DEFAULT_COLORS 32
970
971 extern unsigned int vm_colors; /* must be in range 1..MAX_COLORS */
972 extern unsigned int vm_color_mask; /* must be (vm_colors-1) */
973 extern unsigned int vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
974
975
976 /*
977 * Wired memory is a very limited resource and we can't let users exhaust it
978 * and deadlock the entire system. We enforce the following limits:
979 *
980 * vm_per_task_user_wire_limit
981 * how much memory can be user-wired in one user task
982 *
983 * vm_global_user_wire_limit (default: same as vm_per_task_user_wire_limit)
984 * how much memory can be user-wired in all user tasks
985 *
986 * These values are set to defaults based on the number of pages managed
 * by the VM system. They can be overridden via sysctls.
988 * See kmem_set_user_wire_limits for details on the default values.
989 *
990 * Regardless of the amount of memory in the system, we never reserve
991 * more than VM_NOT_USER_WIREABLE_MAX bytes as unlockable.
992 */
993 #define VM_NOT_USER_WIREABLE_MAX (32ULL*1024*1024*1024) /* 32GB */
994
995 extern vm_map_size_t vm_per_task_user_wire_limit;
996 extern vm_map_size_t vm_global_user_wire_limit;
997 extern uint64_t vm_add_wire_count_over_global_limit;
998 extern uint64_t vm_add_wire_count_over_user_limit;
999
1000 /*
1001 * Each pageable resident page falls into one of three lists:
1002 *
1003 * free
1004 * Available for allocation now. The free list is
1005 * actually an array of lists, one per color.
1006 * inactive
1007 * Not referenced in any map, but still has an
1008 * object/offset-page mapping, and may be dirty.
1009 * This is the list of pages that should be
1010 * paged out next. There are actually two
1011 * inactive lists, one for pages brought in from
1012 * disk or other backing store, and another
1013 * for "zero-filled" pages. See vm_pageout_scan()
1014 * for the distinction and usage.
1015 * active
1016 * A list of pages which have been placed in
1017 * at least one physical map. This list is
1018 * ordered, in LRU-like fashion.
1019 */
1020
1021
1022 #define VPL_LOCK_SPIN 1
1023
/*
 * A "local" page queue (see the __zpercpu vm_page_local_q array below),
 * with separate counts for internal (anonymous) and external
 * (file-backed) pages, protected by its own spin lock.
 */
struct vpl {
	vm_page_queue_head_t    vpl_queue;
	unsigned int            vpl_count;              /* total pages on vpl_queue */
	unsigned int            vpl_internal_count;     /* ...of which anonymous */
	unsigned int            vpl_external_count;     /* ...of which file-backed */
	lck_spin_t              vpl_lock;
};
1031
1032 extern
1033 struct vpl * /* __zpercpu */ vm_page_local_q;
1034 extern
1035 unsigned int vm_page_local_q_soft_limit;
1036 extern
1037 unsigned int vm_page_local_q_hard_limit;
1038 extern
1039 vm_locks_array_t vm_page_locks;
1040
1041 extern
1042 vm_page_queue_head_t vm_lopage_queue_free; /* low memory free queue */
1043 extern
1044 vm_page_queue_head_t vm_page_queue_active; /* active memory queue */
1045 extern
1046 vm_page_queue_head_t vm_page_queue_inactive; /* inactive memory queue for normal pages */
1047 #if CONFIG_SECLUDED_MEMORY
1048 extern
1049 vm_page_queue_head_t vm_page_queue_secluded; /* reclaimable pages secluded for Camera */
1050 #endif /* CONFIG_SECLUDED_MEMORY */
1051 extern
1052 vm_page_queue_head_t vm_page_queue_cleaned; /* clean-queue inactive memory */
1053 extern
1054 vm_page_queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */
1055 extern
1056 vm_page_queue_head_t vm_page_queue_throttled; /* memory queue for throttled pageout pages */
1057
1058 extern
1059 queue_head_t vm_objects_wired;
1060 extern
1061 lck_spin_t vm_objects_wired_lock;
1062
1063 #define VM_PAGE_DONATE_DISABLED 0
1064 #define VM_PAGE_DONATE_ENABLED 1
1065 extern
1066 uint32_t vm_page_donate_mode;
1067 extern
1068 bool vm_page_donate_queue_ripe;
1069
1070 #define VM_PAGE_BACKGROUND_TARGET_MAX 50000
1071 #define VM_PAGE_BG_DISABLED 0
1072 #define VM_PAGE_BG_ENABLED 1
1073
1074 extern
1075 vm_page_queue_head_t vm_page_queue_background;
1076 extern
1077 uint64_t vm_page_background_promoted_count;
1078 extern
1079 uint32_t vm_page_background_count;
1080 extern
1081 uint32_t vm_page_background_target;
1082 extern
1083 uint32_t vm_page_background_internal_count;
1084 extern
1085 uint32_t vm_page_background_external_count;
1086 extern
1087 uint32_t vm_page_background_mode;
1088 extern
1089 uint32_t vm_page_background_exclude_external;
1090
1091 extern
1092 vm_page_queue_head_t vm_page_queue_donate;
1093 extern
1094 uint32_t vm_page_donate_count;
1095 extern
1096 uint32_t vm_page_donate_target_low;
1097 extern
1098 uint32_t vm_page_donate_target_high;
1099 #define VM_PAGE_DONATE_TARGET_LOWWATER (100)
1100 #define VM_PAGE_DONATE_TARGET_HIGHWATER ((unsigned int)(atop_64(max_mem) / 8))
1101
1102 extern
1103 vm_offset_t first_phys_addr; /* physical address for first_page */
1104 extern
1105 vm_offset_t last_phys_addr; /* physical address for last_page */
1106
1107 extern
1108 unsigned int vm_page_free_count; /* How many pages are free? (sum of all colors) */
1109 extern
1110 unsigned int vm_page_active_count; /* How many pages are active? */
1111 extern
1112 unsigned int vm_page_inactive_count; /* How many pages are inactive? */
1113 extern
1114 unsigned int vm_page_kernelcache_count; /* How many pages are used for the kernelcache? */
1115 extern
1116 unsigned int vm_page_realtime_count; /* How many pages are used by realtime threads? */
1117 #if CONFIG_SECLUDED_MEMORY
1118 extern
1119 unsigned int vm_page_secluded_count; /* How many pages are secluded? */
1120 extern
1121 unsigned int vm_page_secluded_count_free; /* how many of them are free? */
1122 extern
1123 unsigned int vm_page_secluded_count_inuse; /* how many of them are in use? */
1124 /*
1125 * We keep filling the secluded pool with new eligible pages and
1126 * we can overshoot our target by a lot.
1127 * When there's memory pressure, vm_pageout_scan() will re-balance the queues,
1128 * pushing the extra secluded pages to the active or free queue.
1129 * Since these "over target" secluded pages are actually "available", jetsam
1130 * should consider them as such, so make them visible to jetsam via the
1131 * "vm_page_secluded_count_over_target" counter and update it whenever we
1132 * update vm_page_secluded_count or vm_page_secluded_target.
1133 */
1134 extern
1135 unsigned int vm_page_secluded_count_over_target;
1136 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
1137 MACRO_BEGIN \
1138 if (vm_page_secluded_count > vm_page_secluded_target) { \
1139 vm_page_secluded_count_over_target = \
1140 (vm_page_secluded_count - vm_page_secluded_target); \
1141 } else { \
1142 vm_page_secluded_count_over_target = 0; \
1143 } \
1144 MACRO_END
1145 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() vm_page_secluded_count_over_target
1146 #else /* CONFIG_SECLUDED_MEMORY */
1147 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE() \
1148 MACRO_BEGIN \
1149 MACRO_END
1150 #define VM_PAGE_SECLUDED_COUNT_OVER_TARGET() 0
1151 #endif /* CONFIG_SECLUDED_MEMORY */
1152 extern
1153 unsigned int vm_page_cleaned_count; /* How many pages are in the clean queue? */
1154 extern
1155 unsigned int vm_page_throttled_count;/* How many inactives are throttled */
1156 extern
1157 unsigned int vm_page_speculative_count; /* How many speculative pages are unclaimed? */
1158 extern unsigned int vm_page_pageable_internal_count;
1159 extern unsigned int vm_page_pageable_external_count;
1160 extern
1161 unsigned int vm_page_xpmapped_external_count; /* How many pages are mapped executable? */
1162 extern
1163 unsigned int vm_page_external_count; /* How many pages are file-backed? */
1164 extern
1165 unsigned int vm_page_internal_count; /* How many pages are anonymous? */
1166 extern
1167 unsigned int vm_page_wire_count; /* How many pages are wired? */
1168 extern
1169 unsigned int vm_page_wire_count_initial; /* How many pages wired at startup */
1170 extern
1171 unsigned int vm_page_wire_count_on_boot; /* even earlier than _initial */
1172 extern
1173 unsigned int vm_page_free_target; /* How many do we want free? */
1174 extern
1175 unsigned int vm_page_free_min; /* When to wakeup pageout */
1176 extern
1177 unsigned int vm_page_throttle_limit; /* When to throttle new page creation */
1178 extern
1179 unsigned int vm_page_inactive_target;/* How many do we want inactive? */
1180 #if CONFIG_SECLUDED_MEMORY
1181 extern
1182 unsigned int vm_page_secluded_target;/* How many do we want secluded? */
1183 #endif /* CONFIG_SECLUDED_MEMORY */
1184 extern
1185 unsigned int vm_page_anonymous_min; /* When it's ok to pre-clean */
1186 extern
1187 unsigned int vm_page_free_reserved; /* How many pages reserved to do pageout */
1188 extern
1189 unsigned int vm_page_gobble_count;
1190 extern
unsigned int    vm_page_stolen_count;   /* Count of stolen pages not accounted in zones */
1192 extern
1193 unsigned int vm_page_kern_lpage_count; /* Count of large pages used in early boot */
1194
1195
1196 #if DEVELOPMENT || DEBUG
1197 extern
1198 unsigned int vm_page_speculative_used;
1199 #endif
1200
1201 extern
1202 unsigned int vm_page_purgeable_count;/* How many pages are purgeable now ? */
1203 extern
1204 unsigned int vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
1205 extern
1206 uint64_t vm_page_purged_count; /* How many pages got purged so far ? */
1207
1208 extern unsigned int vm_page_free_wanted;
1209 /* how many threads are waiting for memory */
1210
1211 extern unsigned int vm_page_free_wanted_privileged;
1212 /* how many VM privileged threads are waiting for memory */
1213 #if CONFIG_SECLUDED_MEMORY
1214 extern unsigned int vm_page_free_wanted_secluded;
1215 /* how many threads are waiting for secluded memory */
1216 #endif /* CONFIG_SECLUDED_MEMORY */
1217
1218 extern const ppnum_t vm_page_fictitious_addr;
1219 /* (fake) phys_addr of fictitious pages */
1220
1221 extern const ppnum_t vm_page_guard_addr;
1222 /* (fake) phys_addr of guard pages */
1223
1224
1225 extern boolean_t vm_page_deactivate_hint;
1226
1227 extern int vm_compressor_mode;
1228
1229 #if __x86_64__
1230 /*
1231 * Defaults to true, so highest memory is used first.
1232 */
1233 extern boolean_t vm_himemory_mode;
1234 #else
1235 #define vm_himemory_mode TRUE
1236 #endif
1237
1238 extern boolean_t vm_lopage_needed;
1239 extern uint32_t vm_lopage_free_count;
1240 extern uint32_t vm_lopage_free_limit;
1241 extern uint32_t vm_lopage_lowater;
1242 extern boolean_t vm_lopage_refill;
1243 extern uint64_t max_valid_dma_address;
1244 extern ppnum_t max_valid_low_ppnum;
1245
1246 /*
1247 * Prototypes for functions exported by this module.
1248 */
1249
1250 extern void vm_page_init_local_q(unsigned int num_cpus);
1251
1252 extern void vm_page_create_canonical(ppnum_t pnum);
1253
1254 extern void vm_page_create_retired(ppnum_t pn);
1255
1256 #if XNU_VM_HAS_DELAYED_PAGES
1257 extern void vm_free_delayed_pages(void);
1258 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1259
1260 extern void vm_pages_array_finalize(void);
1261
1262 extern vm_page_t vm_page_alloc(
1263 vm_object_t object,
1264 vm_object_offset_t offset);
1265
1266 extern void vm_page_reactivate_all_throttled(void);
1267
1268 extern void vm_pressure_response(void);
1269
/*
 * Pages available without the compressor's help: the sum of the active,
 * inactive, free and speculative queues.  AVAILABLE_MEMORY additionally
 * counts pages accounted to the compressor (VM_PAGE_COMPRESSOR_COUNT,
 * defined elsewhere).
 */
#define AVAILABLE_NON_COMPRESSED_MEMORY         (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY                (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)
1272
1273 #if CONFIG_JETSAM
1274
1275 #define VM_CHECK_MEMORYSTATUS \
1276 memorystatus_update_available_page_count( \
1277 vm_page_pageable_external_count + \
1278 vm_page_free_count + \
1279 VM_PAGE_SECLUDED_COUNT_OVER_TARGET() + \
1280 (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \
1281 )
1282
1283 #else /* CONFIG_JETSAM */
1284
1285 #if !XNU_TARGET_OS_OSX
1286
1287 #define VM_CHECK_MEMORYSTATUS do {} while(0)
1288
1289 #else /* !XNU_TARGET_OS_OSX */
1290
1291 #define VM_CHECK_MEMORYSTATUS memorystatus_update_available_page_count(AVAILABLE_NON_COMPRESSED_MEMORY)
1292
1293 #endif /* !XNU_TARGET_OS_OSX */
1294
1295 #endif /* CONFIG_JETSAM */
1296
1297 #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2)
1298 #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2)
1299
1300 #ifdef MACH_KERNEL_PRIVATE
/* Take the global page-queues mutex, blocking until it is acquired. */
static inline void
vm_page_lock_queues(void)
{
	lck_mtx_lock(&vm_page_queue_lock);
}
1306
1307 static inline boolean_t
vm_page_trylock_queues(void)1308 vm_page_trylock_queues(void)
1309 {
1310 boolean_t ret;
1311 ret = lck_mtx_try_lock(&vm_page_queue_lock);
1312 return ret;
1313 }
1314
/* Release the global page-queues mutex taken by one of the lock routines above. */
static inline void
vm_page_unlock_queues(void)
{
	lck_mtx_unlock(&vm_page_queue_lock);
}
1320
/* Take the global page-queues mutex in spin mode (lck_mtx_lock_spin). */
static inline void
vm_page_lockspin_queues(void)
{
	lck_mtx_lock_spin(&vm_page_queue_lock);
}
1326
1327 static inline boolean_t
vm_page_trylockspin_queues(void)1328 vm_page_trylockspin_queues(void)
1329 {
1330 boolean_t ret;
1331 ret = lck_mtx_try_lock_spin(&vm_page_queue_lock);
1332 return ret;
1333 }
1334
1335 extern void kdp_vm_page_sleep_find_owner(
1336 event64_t wait_event,
1337 thread_waitinfo_t *waitinfo);
1338
1339 #endif /* MACH_KERNEL_PRIVATE */
1340
1341 extern unsigned int vm_max_delayed_work_limit;
1342
1343 #if CONFIG_SECLUDED_MEMORY
1344 extern uint64_t secluded_shutoff_trigger;
1345 extern uint64_t secluded_shutoff_headroom;
1346 extern void start_secluded_suppression(task_t);
1347 extern void stop_secluded_suppression(task_t);
1348 #endif /* CONFIG_SECLUDED_MEMORY */
1349
1350 #endif /* _VM_VM_PAGE_H_ */
1351