/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_PAGE_INTERNAL_H_
#define _VM_VM_PAGE_INTERNAL_H_

#include <sys/cdefs.h>
#include <vm/vm_page.h>

__BEGIN_DECLS
#ifdef XNU_KERNEL_PRIVATE

struct vm_page_queue_free_head {
	vm_page_queue_head_t    qhead;
} VM_PAGE_PACKED_ALIGNED;

extern struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];

static inline int
VMP_CS_FOR_OFFSET(
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_phys_offset < PAGE_SIZE &&
	    !(fault_phys_offset & FOURK_PAGE_MASK),
	    "offset 0x%llx\n", (uint64_t)fault_phys_offset);
	return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
}
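
/*
 * Illustrative sketch (not part of the interface): with a 16K PAGE_SIZE
 * and 4K code-signing sub-pages, VMP_CS_FOR_OFFSET() maps each 4K-aligned
 * offset within the page to one bit of a per-page bitmap:
 *
 *	VMP_CS_FOR_OFFSET(0x0000) == 0x1
 *	VMP_CS_FOR_OFFSET(0x1000) == 0x2
 *	VMP_CS_FOR_OFFSET(0x2000) == 0x4
 *	VMP_CS_FOR_OFFSET(0x3000) == 0x8
 *
 * The VMP_CS_* accessors below test or update those bits, and treat the
 * whole-page case (fault_page_size == PAGE_SIZE) as all bits at once.
 */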
static inline bool
VMP_CS_VALIDATED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (fault_page_size == PAGE_SIZE) {
		return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
	}
	return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_TAINTED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (fault_page_size == PAGE_SIZE) {
		return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
	}
	return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_NX(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (fault_page_size == PAGE_SIZE) {
		return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
	}
	return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline void
VMP_CS_SET_VALIDATED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	boolean_t value)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (value) {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_validated = VMP_CS_ALL_TRUE;
		}
		p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
	} else {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_validated = VMP_CS_ALL_FALSE;
		}
		p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
	}
}
static inline void
VMP_CS_SET_TAINTED(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	boolean_t value)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (value) {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
		}
		p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
	} else {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
		}
		p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
	}
}
static inline void
VMP_CS_SET_NX(
	vm_page_t p,
	vm_map_size_t fault_page_size,
	vm_map_offset_t fault_phys_offset,
	boolean_t value)
{
	assertf(fault_page_size <= PAGE_SIZE,
	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
	if (value) {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_nx = VMP_CS_ALL_TRUE;
		}
		p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
	} else {
		if (fault_page_size == PAGE_SIZE) {
			p->vmp_cs_nx = VMP_CS_ALL_FALSE;
		}
		p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
	}
}
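
/*
 * Usage sketch (hypothetical fault-handling fragment; everything other
 * than the VMP_CS_* accessors and FOURK_PAGE_SIZE is a placeholder):
 *
 *	// mark the 4K sub-page at fault_phys_offset as validated
 *	VMP_CS_SET_VALIDATED(m, FOURK_PAGE_SIZE, fault_phys_offset, TRUE);
 *
 *	// later, test only that sub-page
 *	if (VMP_CS_VALIDATED(m, FOURK_PAGE_SIZE, fault_phys_offset)) {
 *	        // the sub-page passed code-signing validation
 *	}
 *
 * Passing fault_page_size == PAGE_SIZE switches every helper above to
 * whole-page semantics instead.
 */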


#if defined(__LP64__)
static __inline__ void
vm_page_enqueue_tail(
	vm_page_queue_t         que,
	vm_page_queue_entry_t   elt)
{
	vm_page_queue_entry_t   old_tail;

	old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
	elt->next = VM_PAGE_PACK_PTR(que);
	elt->prev = que->prev;
	que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
}

static __inline__ void
vm_page_remque(
	vm_page_queue_entry_t elt)
{
	vm_page_queue_entry_t next;
	vm_page_queue_entry_t prev;
	vm_page_packed_t      next_pck = elt->next;
	vm_page_packed_t      prev_pck = elt->prev;

	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);

	/* next may equal prev (and the queue head) if elt was the only element */
	prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);

	next->prev = prev_pck;
	prev->next = next_pck;

	elt->next = 0;
	elt->prev = 0;
}
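
/*
 * Note on the VM_PAGE_PACK_PTR()/VM_PAGE_UNPACK_PTR() round-trips above:
 * vm_page queue links are stored in packed (compressed) form rather than
 * as raw pointers, so every link must be unpacked before it is followed
 * and packed before it is stored. A minimal sketch of the invariant:
 *
 *	vm_page_queue_entry_t e = ...;   // any packable queue entry
 *	vm_page_packed_t      p = VM_PAGE_PACK_PTR(e);
 *
 *	assert((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p) == e);
 */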

#if defined(__x86_64__)
/*
 * Insert a new page into a free queue and clump pages within the same 16K boundary together
 */
static inline void
vm_page_queue_enter_clump(
	vm_page_queue_t       head,
	vm_page_t             elt)
{
	vm_page_queue_entry_t first = NULL;    /* first page in the clump */
	vm_page_queue_entry_t last = NULL;     /* last page in the clump */
	vm_page_queue_entry_t prev = NULL;
	vm_page_queue_entry_t next;
	uint_t                n_free = 1;
	extern unsigned int   vm_clump_size, vm_clump_promote_threshold;
	extern unsigned long  vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

	/*
	 * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
	 */
	if (vm_page_in_array(elt)) {
		vm_page_t p;
		uint_t    i;
		uint_t    n;
		ppnum_t   clump_num;

		first = last = (vm_page_queue_entry_t)elt;
		clump_num = VM_PAGE_GET_CLUMP(elt);
		n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;

		/*
		 * Check for preceding vm_pages[] entries in the same chunk
		 */
		for (i = 0, p = elt - 1; i < n && vm_page_get(0) <= p; i++, p--) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)p;
				}
				first = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}

		/*
		 * Check the following vm_pages[] entries in the same chunk
		 */
		for (i = n + 1, p = elt + 1; i < vm_clump_size && p < vm_page_get(vm_pages_count); i++, p++) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (last == (vm_page_queue_entry_t)elt) {               /* first one only */
					__DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
				}

				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
				}
				last = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}
		__DEBUG_STAT_INCREMENT_INRANGE;
	}

	/* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
	if (prev == NULL) {
		prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
	}

	/* insert the element */
	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
	elt->vmp_pageq.next = prev->next;
	elt->vmp_pageq.prev = next->prev;
	prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
	__DEBUG_STAT_INCREMENT_INSERTS;

	/*
	 * Check if clump needs to be promoted to head.
	 */
	if (n_free >= vm_clump_promote_threshold && n_free > 1) {
		vm_page_queue_entry_t first_prev;

		first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

		/* If not at head already */
		if (first_prev != head) {
			vm_page_queue_entry_t last_next;
			vm_page_queue_entry_t head_next;

			last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

			/* verify that the links within the clump are consistent */
			__DEBUG_VERIFY_LINKS(first, n_free, last_next);

			/* promote clump to head */
			first_prev->next = last->next;
			last_next->prev = first->prev;
			first->prev = VM_PAGE_PACK_PTR(head);
			last->next = head->next;

			head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
			head_next->prev = VM_PAGE_PACK_PTR(last);
			head->next = VM_PAGE_PACK_PTR(first);
			__DEBUG_STAT_INCREMENT_PROMOTES(n_free);
		}
	}
}
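
/*
 * Worked example (illustrative; assumes 4K pages and vm_clump_size == 4,
 * i.e. 16K clumps): freeing the page with physical page number 0x1002
 * gives n == (0x1002 & vm_clump_mask) == 2, so the first loop above scans
 * the vm_pages[] neighbors for 0x1001 and 0x1000 and the second loop
 * scans for 0x1003. Any buddy already on the free queue in the same
 * clump is linked next to the new page, and once n_free reaches
 * vm_clump_promote_threshold the whole clump is moved to the head of
 * the free queue so it becomes the preferred allocation target.
 */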
#endif /* __x86_64__ */
#endif /* __LP64__ */


extern  void    vm_page_assign_special_state(vm_page_t mem, int mode);
extern  void    vm_page_update_special_state(vm_page_t mem);
extern  void    vm_page_add_to_specialq(vm_page_t mem, boolean_t first);
extern  void    vm_page_remove_from_specialq(vm_page_t mem);


/*
 * Prototypes for functions exported by this module.
 */
extern void             vm_page_bootstrap(
	vm_offset_t     *startp,
	vm_offset_t     *endp);

extern vm_page_t        kdp_vm_page_lookup(
	vm_object_t             object,
	vm_object_offset_t      offset);

extern vm_page_t        vm_page_lookup(
	vm_object_t             object,
	vm_object_offset_t      offset);

/*!
 * @abstract
 * Creates a fictitious page.
 *
 * @discussion
 * This function never returns VM_PAGE_NULL.
 *
 * Pages made by this function have the @c vm_page_fictitious_addr
 * fake physical address.
 */
extern vm_page_t        vm_page_create_fictitious(void);

/*!
 * @abstract
 * Returns a kernel guard page (used by @c kmem_alloc_guard()).
 *
 * @discussion
 * Pages returned by this function have the @c vm_page_guard_addr
 * fake physical address.
 *
 * @param canwait       Whether the caller can wait; if true,
 *                      this function never returns VM_PAGE_NULL.
 */
extern vm_page_t        vm_page_create_guard(bool canwait);

/*!
 * @abstract
 * Creates a private VM page.
 *
 * @discussion
 * These pages allow for non-canonical references to the same physical page.
 * The page's @c VM_PAGE_GET_PHYS_PAGE() will be @c base_page.
 *
 * Such pages must not be released back to the free queues directly;
 * @c vm_page_reset_private() must be called first.
 *
 * This function never returns VM_PAGE_NULL.
 *
 * @param base_page     The physical page this private page represents.
 */
extern vm_page_t        vm_page_create_private(ppnum_t base_page);

/*!
 * @abstract
 * Returns whether this is the canonical page for a regular managed kernel page.
 *
 * @discussion
 * A kernel page is the canonical @c vm_page_t for a given pmap-managed physical
 * page.  These pages are made at startup, or when @c ml_static_mfree() is
 * called, and are never freed.
 *
 * The page's @c VM_PAGE_GET_PHYS_PAGE() will be a valid @c ppnum_t value.
 *
 * A page can either be:
 * - a canonical page (@c vm_page_is_canonical())
 * - a fictitious page (@c vm_page_is_fictitious()),
 *   of which guard pages are a special case (@c vm_page_is_guard())
 * - a private page (@c vm_page_is_private())
 */
extern bool             vm_page_is_canonical(const struct vm_page *m) __pure2;

/*!
 * @abstract
 * Returns whether this page is fictitious (made by @c vm_page_create_guard()
 * or by @c vm_page_create_fictitious()).
 *
 * @discussion
 * A page can either be:
 * - a canonical page (@c vm_page_is_canonical())
 * - a fictitious page (@c vm_page_is_fictitious()),
 *   of which guard pages are a special case (@c vm_page_is_guard())
 * - a private page (@c vm_page_is_private())
 */
extern bool             vm_page_is_fictitious(const struct vm_page *m);

/*!
 * @abstract
 * Returns whether this is a kernel guard page that was made by
 * @c vm_page_create_guard().
 */
extern bool             vm_page_is_guard(const struct vm_page *m) __pure2;

/*!
 * @abstract
 * Returns whether a page is private (made by @c vm_page_create_private(),
 * or converted from a fictitious page by @c vm_page_make_private()).
 *
 * @discussion
 * A page can either be:
 * - a canonical page (@c vm_page_is_canonical())
 * - a fictitious page (@c vm_page_is_fictitious()),
 *   of which guard pages are a special case (@c vm_page_is_guard())
 * - a private page (@c vm_page_is_private())
 */
extern bool             vm_page_is_private(const struct vm_page *m);

/*!
 * @abstract
 * Converts a fictitious page made by @c vm_page_create_fictitious()
 * into a private page.
 *
 * @param m             The fictitious page to convert into a private one.
 * @param base_page     The physical page that this page will represent
 *                      (@c vm_page_create_private()).
 */
extern void             vm_page_make_private(vm_page_t m, ppnum_t base_page);

/*!
 * @abstract
 * Converts a private page into a fictitious page (as if made by
 * @c vm_page_create_fictitious()).
 *
 * @discussion
 * Private pages can't be released with @c vm_page_release()
 * without being turned into a fictitious page first using this function.
 */
extern void             vm_page_reset_private(vm_page_t m);
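
/*
 * Lifecycle sketch (illustrative; error handling elided). A private page
 * aliases an existing physical page, and the alias must be undone before
 * the page can be freed:
 *
 *	vm_page_t m = vm_page_create_private(base_page);
 *	...                              // use m as an alias of base_page
 *	vm_page_reset_private(m);        // back to a fictitious page
 *	vm_page_release(m, FALSE);       // now safe to release
 */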

extern bool             vm_pool_low(void);

extern vm_page_t        vm_page_grab(void);
extern vm_page_t        vm_page_grab_options(int flags);

#define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000
#if CONFIG_SECLUDED_MEMORY
#define VM_PAGE_GRAB_SECLUDED     0x00000001
#endif /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_GRAB_Q_LOCK_HELD  0x00000002
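
/*
 * Sketch of the relationship between the two grab entry points; in this
 * scheme vm_page_grab() is simply the no-options case:
 *
 *	vm_page_t m = vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
 *	// behaves like: vm_page_t m = vm_page_grab();
 */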

extern vm_page_t        vm_page_grablo(void);

extern void             vm_page_release(
	vm_page_t       page,
	boolean_t       page_queues_locked);

extern boolean_t        vm_page_wait(
	int             interruptible);

extern void             vm_page_init(
	vm_page_t       page,
	ppnum_t         phys_page);

extern void             vm_page_free(
	vm_page_t       page);

extern void             vm_page_free_unlocked(
	vm_page_t       page,
	boolean_t       remove_from_hash);

/*
 * vm_page_get_memory_class:
 * Given a page, returns the memory class of that page.
 */
extern vm_memory_class_t        vm_page_get_memory_class(
	vm_page_t               page);

/*
 * vm_page_steal_free_page:
 * Given a VM_PAGE_ON_FREE_Q page, steals it from its free queue.
 */
extern void                     vm_page_steal_free_page(
	vm_page_t               page,
	vm_remove_reason_t      remove_reason);

/*!
 * @typedef vmp_free_list_result_t
 *
 * @discussion
 * This data structure is used by vm_page_put_list_on_free_queue to track
 * how many pages were freed to which free lists, so that it can then drive
 * which waiters we are going to wake up.
 *
 * uint8_t counters are enough because we never free more than 64 pages at
 * a time, and this allows for the data structure to be passed by register.
 */
typedef struct {
	uint8_t vmpr_regular;
	uint8_t vmpr_lopage;
#if CONFIG_SECLUDED_MEMORY
	uint8_t vmpr_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
} vmp_free_list_result_t;
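
/*
 * Illustrative use (hypothetical caller): the counters report how many
 * pages landed on each global free list, which the caller can use to
 * decide which waiters to wake, e.g.:
 *
 *	vmp_free_list_result_t r;
 *
 *	r = vm_page_put_list_on_free_queue(list, FALSE);
 *	if (r.vmpr_regular > 0) {
 *	        // wake threads waiting for regular free pages
 *	}
 */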

/*
 * vm_page_put_list_on_free_queue:
 * Given a list of pages, put each page on whichever global free queue is
 * appropriate.
 *
 * Must be called with the VM free page lock held.
 */
extern vmp_free_list_result_t vm_page_put_list_on_free_queue(
	vm_page_t       list,
	bool            page_queues_locked);

extern void             vm_page_balance_inactive(
	int             max_to_move);

extern void             vm_page_activate(
	vm_page_t       page);

extern void             vm_page_deactivate(
	vm_page_t       page);

extern void             vm_page_deactivate_internal(
	vm_page_t       page,
	boolean_t       clear_hw_reference);

extern void             vm_page_enqueue_cleaned(vm_page_t page);

extern void             vm_page_lru(
	vm_page_t       page);

extern void             vm_page_speculate(
	vm_page_t       page,
	boolean_t       new);

extern void             vm_page_speculate_ageit(
	struct vm_speculative_age_q *aq);

extern void             vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void             vm_page_rename(
	vm_page_t               page,
	vm_object_t             new_object,
	vm_object_offset_t      new_offset);

extern void             vm_page_insert(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset);

extern void             vm_page_insert_wired(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_tag_t                tag);


extern void             vm_page_insert_internal(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_tag_t                tag,
	boolean_t               queues_lock_held,
	boolean_t               insert_in_hash,
	boolean_t               batch_pmap_op,
	boolean_t               delayed_accounting,
	uint64_t                *delayed_ledger_update);

extern void             vm_page_replace(
	vm_page_t               mem,
	vm_object_t             object,
	vm_object_offset_t      offset);

extern void             vm_page_remove(
	vm_page_t       page,
	boolean_t       remove_from_hash);

extern void             vm_page_zero_fill(
	vm_page_t       page);

extern void             vm_page_part_zero_fill(
	vm_page_t       m,
	vm_offset_t     m_pa,
	vm_size_t       len);

extern void             vm_page_copy(
	vm_page_t       src_page,
	vm_page_t       dest_page);

extern void             vm_page_part_copy(
	vm_page_t       src_m,
	vm_offset_t     src_pa,
	vm_page_t       dst_m,
	vm_offset_t     dst_pa,
	vm_size_t       len);

extern void             vm_page_wire(
	vm_page_t       page,
	vm_tag_t        tag,
	boolean_t       check_memorystatus);

extern void             vm_page_unwire(
	vm_page_t       page,
	boolean_t       queueit);

extern void             vm_set_page_size(void);

extern void             vm_page_validate_cs(
	vm_page_t       page,
	vm_map_size_t   fault_page_size,
	vm_map_offset_t fault_phys_offset);

extern void             vm_page_validate_cs_mapped(
	vm_page_t       page,
	vm_map_size_t   fault_page_size,
	vm_map_offset_t fault_phys_offset,
	const void      *kaddr);
extern void             vm_page_validate_cs_mapped_slow(
	vm_page_t       page,
	const void      *kaddr);
extern void             vm_page_validate_cs_mapped_chunk(
	vm_page_t       page,
	const void      *kaddr,
	vm_offset_t     chunk_offset,
	vm_size_t       chunk_size,
	boolean_t       *validated,
	unsigned        *tainted);

extern void             vm_page_free_prepare_queues(
	vm_page_t       page);

extern void             vm_page_free_prepare_object(
	vm_page_t       page,
	boolean_t       remove_from_hash);

extern wait_result_t    vm_page_sleep(
	vm_object_t        object,
	vm_page_t          m,
	wait_interrupt_t   interruptible,
	lck_sleep_action_t action);

extern void             vm_page_wakeup(
	vm_object_t        object,
	vm_page_t          m);

extern void             vm_page_wakeup_done(
	vm_object_t        object,
	vm_page_t          m);
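
/*
 * Sketch of the busy-page handshake (hypothetical fragment; the object
 * lock is held on entry by both sides):
 *
 *	// waiter: block until "m" is no longer busy
 *	while (m->vmp_busy) {
 *	        vm_page_sleep(object, m, THREAD_UNINT, LCK_SLEEP_DEFAULT);
 *	}
 *
 *	// owner: clear the busy bit and wake anyone who slept on it
 *	vm_page_wakeup_done(object, m);
 */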

typedef struct page_worker_token {
	thread_pri_floor_t pwt_floor_token;
	bool pwt_did_register_inheritor;
} page_worker_token_t;

extern void             vm_page_wakeup_done_with_inheritor(
	vm_object_t        object,
	vm_page_t          m,
	page_worker_token_t *token);

extern void             page_worker_register_worker(
	event_t            event,
	page_worker_token_t *out_token);

extern boolean_t        vm_page_is_relocatable(
	vm_page_t            m,
	vm_relocate_reason_t reloc_reason);

extern kern_return_t    vm_page_relocate(
	vm_page_t            m1,
	int                  *compressed_pages,
	vm_relocate_reason_t reason,
	vm_page_t            *new_page);

extern bool             vm_page_is_restricted(
	vm_page_t mem);

/*
 * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
 * protected by the object lock.
 */

#if !XNU_TARGET_OS_OSX
#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
	        MACRO_BEGIN                                             \
	        vm_page_t __page__ = (m);                               \
	        if (__page__->vmp_pmapped == TRUE &&                    \
	            __page__->vmp_wpmapped == TRUE &&                   \
	            __page__->vmp_dirty == FALSE &&                     \
	            (set_pmap_modified)) {                              \
	                pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
	        }                                                       \
	        __page__->vmp_dirty = TRUE;                             \
	        MACRO_END
#else /* !XNU_TARGET_OS_OSX */
#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
	        MACRO_BEGIN                                             \
	        vm_page_t __page__ = (m);                               \
	        __page__->vmp_dirty = TRUE;                             \
	        MACRO_END
#endif /* !XNU_TARGET_OS_OSX */

#define VM_PAGE_FREE(p)                         \
	        MACRO_BEGIN                     \
	        vm_page_free_unlocked(p, TRUE); \
	        MACRO_END


#define VM_PAGE_WAIT()          ((void)vm_page_wait(THREAD_UNINT))
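
/*
 * Typical allocation pattern (sketch): vm_page_grab() returns
 * VM_PAGE_NULL when no page is available, and VM_PAGE_WAIT() blocks
 * uninterruptibly until the free pool is replenished:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *	        VM_PAGE_WAIT();
 *	}
 */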

static inline void
vm_free_page_lock(void)
{
	lck_mtx_lock(&vm_page_queue_free_lock);
}

static inline void
vm_free_page_lock_spin(void)
{
	lck_mtx_lock_spin(&vm_page_queue_free_lock);
}

static inline void
vm_free_page_lock_convert(void)
{
	lck_mtx_convert_spin(&vm_page_queue_free_lock);
}

static inline void
vm_free_page_unlock(void)
{
	lck_mtx_unlock(&vm_page_queue_free_lock);
}
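
/*
 * The spin/convert pair supports the usual pattern of taking the free
 * page lock in spin mode for a quick check and converting to a full
 * mutex only when more work is needed (sketch; "need_more_work" is a
 * placeholder):
 *
 *	vm_free_page_lock_spin();
 *	if (need_more_work) {
 *	        vm_free_page_lock_convert();    // spin -> full mutex
 *	        // ... longer critical section ...
 *	}
 *	vm_free_page_unlock();
 */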


#define vm_page_lockconvert_queues()    lck_mtx_convert_spin(&vm_page_queue_lock)


#ifdef  VPL_LOCK_SPIN
extern lck_grp_t vm_page_lck_grp_local;

#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif

#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()                          \
	MACRO_BEGIN                                             \
	OSAddAtomic(1, &vm_page_speculative_used);              \
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif

#define VM_PAGE_CONSUME_CLUSTERED(mem)                          \
	MACRO_BEGIN                                             \
	ppnum_t	__phys_page;                                    \
	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem);               \
	pmap_lock_phys_page(__phys_page);                       \
	if (mem->vmp_clustered) {                               \
	        vm_object_t o;                                  \
	        o = VM_PAGE_OBJECT(mem);                        \
	        assert(o);                                      \
	        o->pages_used++;                                \
	        mem->vmp_clustered = FALSE;                     \
	        VM_PAGE_SPECULATIVE_USED_ADD();                 \
	}                                                       \
	pmap_unlock_phys_page(__phys_page);                     \
	MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem)                            \
	MACRO_BEGIN                                             \
	{                                                       \
	vm_object_t o;                                          \
	o = VM_PAGE_OBJECT(mem);                                \
	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);           \
	counter_inc(&current_task()->pageins);                  \
	if (o->internal) {                                      \
	        DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
	} else {                                                \
	        DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \
	}                                                       \
	}                                                       \
	MACRO_END


/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count)                         \
	MACRO_BEGIN                                             \
	vm_page_stolen_count -= (page_count);                   \
	vm_page_wire_count_initial -= (page_count);             \
	MACRO_END

extern kern_return_t pmap_enter_object_options_check(
	pmap_t           pmap,
	vm_map_address_t virtual_address,
	vm_map_offset_t  fault_phys_offset,
	vm_object_t      object,
	ppnum_t          pn,
	vm_prot_t        protection,
	vm_prot_t        fault_type,
	boolean_t        wired,
	unsigned int     options);

extern kern_return_t pmap_enter_options_check(
	pmap_t           pmap,
	vm_map_address_t virtual_address,
	vm_map_offset_t  fault_phys_offset,
	vm_page_t        page,
	vm_prot_t        protection,
	vm_prot_t        fault_type,
	boolean_t        wired,
	unsigned int     options);

extern kern_return_t pmap_enter_check(
	pmap_t           pmap,
	vm_map_address_t virtual_address,
	vm_page_t        page,
	vm_prot_t        protection,
	vm_prot_t        fault_type,
	boolean_t        wired);

#define DW_vm_page_unwire               0x01
#define DW_vm_page_wire                 0x02
#define DW_vm_page_free                 0x04
#define DW_vm_page_activate             0x08
#define DW_vm_page_deactivate_internal  0x10
#define DW_vm_page_speculate            0x20
#define DW_vm_page_lru                  0x40
#define DW_vm_pageout_throttle_up       0x80
#define DW_PAGE_WAKEUP                  0x100
#define DW_clear_busy                   0x200
#define DW_clear_reference              0x400
#define DW_set_reference                0x800
#define DW_move_page                    0x1000
#define DW_VM_PAGE_QUEUES_REMOVE        0x2000
#define DW_enqueue_cleaned              0x4000
#define DW_vm_phantom_cache_update      0x8000

struct vm_page_delayed_work {
	vm_page_t       dw_m;
	int             dw_mask;
};

#define DEFAULT_DELAYED_WORK_LIMIT      32

struct vm_page_delayed_work_ctx {
	struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT];
	thread_t                    delayed_owner;
};

extern void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to
 * be held stable via the busy bit, so if busy isn't already
 * set, we need to set it and ask vm_page_do_delayed_work
 * to clear it and wakeup anyone that might have blocked on
 * it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)              \
	MACRO_BEGIN                                             \
	if (mem->vmp_busy == FALSE) {                           \
	        mem->vmp_busy = TRUE;                           \
	        if ( !(dwp->dw_mask & DW_vm_page_free))         \
	                dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}                                                       \
	dwp->dw_m = mem;                                        \
	dwp++;                                                  \
	dw_cnt++;                                               \
	MACRO_END
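
/*
 * Batching sketch (hypothetical caller; "object" and "next_page_locked()"
 * are placeholders): per-page work is accumulated into the delayed-work
 * array and flushed once it fills, amortizing queue-lock acquisitions
 * across up to DEFAULT_DELAYED_WORK_LIMIT pages:
 *
 *	struct vm_page_delayed_work_ctx ctx;
 *	struct vm_page_delayed_work *dwp = &ctx.dwp[0];
 *	int dw_count = 0;
 *	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *	vm_page_t mem;
 *
 *	while ((mem = next_page_locked()) != VM_PAGE_NULL) {
 *	        dwp->dw_mask = DW_vm_page_activate;
 *	        VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *	        if (dw_count >= dw_limit) {
 *	                vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *	                    &ctx.dwp[0], dw_count);
 *	                dwp = &ctx.dwp[0];
 *	                dw_count = 0;
 *	        }
 *	}
 *	if (dw_count) {
 *	        vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *	            &ctx.dwp[0], dw_count);
 *	}
 */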


//todo int
extern vm_page_t vm_object_page_grab(vm_object_t);

//todo int
#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

//todo int
extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq);
extern void vm_page_remove_internal(vm_page_t page);
extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
extern void vm_page_check_pageable_safe(vm_page_t page);
//end int

//todo int
extern void vm_retire_boot_pages(void);

//todo all int

#define VMP_ERROR_GET(p) ((p)->vmp_error)


#endif /* XNU_KERNEL_PRIVATE */
__END_DECLS

#endif  /* _VM_VM_PAGE_INTERNAL_H_ */