xref: /xnu-11215.61.5/osfmk/vm/vm_page_internal.h (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 /*
2  * Copyright (c) 2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_PAGE_INTERNAL_H_
30 #define _VM_VM_PAGE_INTERNAL_H_
31 
32 #include <sys/cdefs.h>
33 #include <vm/vm_page.h>
34 
35 __BEGIN_DECLS
36 #ifdef XNU_KERNEL_PRIVATE
37 
38 static inline int
VMP_CS_FOR_OFFSET(vm_map_offset_t fault_phys_offset)39 VMP_CS_FOR_OFFSET(
40 	vm_map_offset_t fault_phys_offset)
41 {
42 	assertf(fault_phys_offset < PAGE_SIZE &&
43 	    !(fault_phys_offset & FOURK_PAGE_MASK),
44 	    "offset 0x%llx\n", (uint64_t)fault_phys_offset);
45 	return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
46 }
47 static inline bool
VMP_CS_VALIDATED(vm_page_t p,vm_map_size_t fault_page_size,vm_map_offset_t fault_phys_offset)48 VMP_CS_VALIDATED(
49 	vm_page_t p,
50 	vm_map_size_t fault_page_size,
51 	vm_map_offset_t fault_phys_offset)
52 {
53 	assertf(fault_page_size <= PAGE_SIZE,
54 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
55 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
56 	if (fault_page_size == PAGE_SIZE) {
57 		return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
58 	}
59 	return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
60 }
61 static inline bool
VMP_CS_TAINTED(vm_page_t p,vm_map_size_t fault_page_size,vm_map_offset_t fault_phys_offset)62 VMP_CS_TAINTED(
63 	vm_page_t p,
64 	vm_map_size_t fault_page_size,
65 	vm_map_offset_t fault_phys_offset)
66 {
67 	assertf(fault_page_size <= PAGE_SIZE,
68 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
69 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
70 	if (fault_page_size == PAGE_SIZE) {
71 		return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
72 	}
73 	return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
74 }
75 static inline bool
VMP_CS_NX(vm_page_t p,vm_map_size_t fault_page_size,vm_map_offset_t fault_phys_offset)76 VMP_CS_NX(
77 	vm_page_t p,
78 	vm_map_size_t fault_page_size,
79 	vm_map_offset_t fault_phys_offset)
80 {
81 	assertf(fault_page_size <= PAGE_SIZE,
82 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
83 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
84 	if (fault_page_size == PAGE_SIZE) {
85 		return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
86 	}
87 	return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
88 }
89 static inline void
VMP_CS_SET_VALIDATED(vm_page_t p,vm_map_size_t fault_page_size,vm_map_offset_t fault_phys_offset,boolean_t value)90 VMP_CS_SET_VALIDATED(
91 	vm_page_t p,
92 	vm_map_size_t fault_page_size,
93 	vm_map_offset_t fault_phys_offset,
94 	boolean_t value)
95 {
96 	assertf(fault_page_size <= PAGE_SIZE,
97 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
98 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
99 	if (value) {
100 		if (fault_page_size == PAGE_SIZE) {
101 			p->vmp_cs_validated = VMP_CS_ALL_TRUE;
102 		}
103 		p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
104 	} else {
105 		if (fault_page_size == PAGE_SIZE) {
106 			p->vmp_cs_validated = VMP_CS_ALL_FALSE;
107 		}
108 		p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
109 	}
110 }
111 static inline void
VMP_CS_SET_TAINTED(vm_page_t p,vm_map_size_t fault_page_size,vm_map_offset_t fault_phys_offset,boolean_t value)112 VMP_CS_SET_TAINTED(
113 	vm_page_t p,
114 	vm_map_size_t fault_page_size,
115 	vm_map_offset_t fault_phys_offset,
116 	boolean_t value)
117 {
118 	assertf(fault_page_size <= PAGE_SIZE,
119 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
120 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
121 	if (value) {
122 		if (fault_page_size == PAGE_SIZE) {
123 			p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
124 		}
125 		p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
126 	} else {
127 		if (fault_page_size == PAGE_SIZE) {
128 			p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
129 		}
130 		p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
131 	}
132 }
133 static inline void
VMP_CS_SET_NX(vm_page_t p,vm_map_size_t fault_page_size,vm_map_offset_t fault_phys_offset,boolean_t value)134 VMP_CS_SET_NX(
135 	vm_page_t p,
136 	vm_map_size_t fault_page_size,
137 	vm_map_offset_t fault_phys_offset,
138 	boolean_t value)
139 {
140 	assertf(fault_page_size <= PAGE_SIZE,
141 	    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
142 	    (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
143 	if (value) {
144 		if (fault_page_size == PAGE_SIZE) {
145 			p->vmp_cs_nx = VMP_CS_ALL_TRUE;
146 		}
147 		p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
148 	} else {
149 		if (fault_page_size == PAGE_SIZE) {
150 			p->vmp_cs_nx = VMP_CS_ALL_FALSE;
151 		}
152 		p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
153 	}
154 }
155 
156 
157 #if defined(__LP64__)
158 static __inline__ void
vm_page_enqueue_tail(vm_page_queue_t que,vm_page_queue_entry_t elt)159 vm_page_enqueue_tail(
160 	vm_page_queue_t         que,
161 	vm_page_queue_entry_t   elt)
162 {
163 	vm_page_queue_entry_t   old_tail;
164 
165 	old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
166 	elt->next = VM_PAGE_PACK_PTR(que);
167 	elt->prev = que->prev;
168 	que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
169 }
170 
171 static __inline__ void
vm_page_remque(vm_page_queue_entry_t elt)172 vm_page_remque(
173 	vm_page_queue_entry_t elt)
174 {
175 	vm_page_queue_entry_t next;
176 	vm_page_queue_entry_t prev;
177 	vm_page_packed_t      next_pck = elt->next;
178 	vm_page_packed_t      prev_pck = elt->prev;
179 
180 	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);
181 
182 	/* next may equal prev (and the queue head) if elt was the only element */
183 	prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);
184 
185 	next->prev = prev_pck;
186 	prev->next = next_pck;
187 
188 	elt->next = 0;
189 	elt->prev = 0;
190 }
191 
192 #if defined(__x86_64__)
193 /*
194  * Insert a new page into a free queue and clump pages within the same 16K boundary together
195  */
static inline void
vm_page_queue_enter_clump(
	vm_page_queue_t       head,
	vm_page_t             elt)
{
	vm_page_queue_entry_t first = NULL;    /* first page in the clump */
	vm_page_queue_entry_t last = NULL;     /* last page in the clump */
	vm_page_queue_entry_t prev = NULL;     /* insertion point: elt goes after this entry */
	vm_page_queue_entry_t next;
	uint_t                n_free = 1;      /* free pages in elt's clump, counting elt itself */
	extern unsigned int   vm_pages_count;
	extern unsigned int   vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
	extern unsigned long  vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

	/*
	 * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
	 */
	if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
		vm_page_t p;
		uint_t    i;
		uint_t    n;       /* elt's index within its clump */
		ppnum_t   clump_num;

		first = last = (vm_page_queue_entry_t)elt;
		clump_num = VM_PAGE_GET_CLUMP(elt);
		n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;

		/*
		 * Check for preceeding vm_pages[] entries in the same chunk
		 */
		for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				/* the first free buddy found walking backwards is the insertion point */
				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)p;
				}
				first = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}

		/*
		 * Check the following vm_pages[] entries in the same chunk
		 */
		for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
			if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
				if (last == (vm_page_queue_entry_t)elt) {               /* first one only */
					__DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
				}

				/* no earlier buddy: insert elt just before this following buddy */
				if (prev == NULL) {
					prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
				}
				last = (vm_page_queue_entry_t)p;
				n_free++;
			}
		}
		__DEBUG_STAT_INCREMENT_INRANGE;
	}

	/* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
	if (prev == NULL) {
		prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
	}

	/* insert the element */
	next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
	elt->vmp_pageq.next = prev->next;
	elt->vmp_pageq.prev = next->prev;
	prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
	__DEBUG_STAT_INCREMENT_INSERTS;

	/*
	 * Check if clump needs to be promoted to head.
	 */
	if (n_free >= vm_clump_promote_threshold && n_free > 1) {
		vm_page_queue_entry_t first_prev;

		first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

		/* If not at head already */
		if (first_prev != head) {
			vm_page_queue_entry_t last_next;
			vm_page_queue_entry_t head_next;

			last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

			/* verify that the links within the clump are consistent */
			__DEBUG_VERIFY_LINKS(first, n_free, last_next);

			/* promote clump to head: unlink [first..last], then relink at head */
			first_prev->next = last->next;
			last_next->prev = first->prev;
			first->prev = VM_PAGE_PACK_PTR(head);
			last->next = head->next;

			head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
			head_next->prev = VM_PAGE_PACK_PTR(last);
			head->next = VM_PAGE_PACK_PTR(first);
			__DEBUG_STAT_INCREMENT_PROMOTES(n_free);
		}
	}
}
298 #endif /* __x86_64__ */
299 #endif /* __LP64__ */
300 
301 
302 extern  void    vm_page_assign_special_state(vm_page_t mem, int mode);
303 extern  void    vm_page_update_special_state(vm_page_t mem);
304 extern  void    vm_page_add_to_specialq(vm_page_t mem, boolean_t first);
305 extern  void    vm_page_remove_from_specialq(vm_page_t mem);
306 
307 
308 /*
309  * Prototypes for functions exported by this module.
310  */
311 extern void             vm_page_bootstrap(
312 	vm_offset_t     *startp,
313 	vm_offset_t     *endp);
314 
315 extern vm_page_t        kdp_vm_page_lookup(
316 	vm_object_t             object,
317 	vm_object_offset_t      offset);
318 
319 extern vm_page_t        vm_page_lookup(
320 	vm_object_t             object,
321 	vm_object_offset_t      offset);
322 
323 extern vm_page_t        vm_page_grab_fictitious(boolean_t canwait);
324 
325 extern vm_page_t        vm_page_grab_guard(boolean_t canwait);
326 
327 extern void             vm_page_release_fictitious(
328 	vm_page_t page);
329 
330 extern bool             vm_pool_low(void);
331 
332 extern vm_page_t        vm_page_grab(void);
333 extern vm_page_t        vm_page_grab_options(int flags);
334 
335 #define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000
336 #if CONFIG_SECLUDED_MEMORY
337 #define VM_PAGE_GRAB_SECLUDED     0x00000001
338 #endif /* CONFIG_SECLUDED_MEMORY */
339 #define VM_PAGE_GRAB_Q_LOCK_HELD  0x00000002
340 
341 extern vm_page_t        vm_page_grablo(void);
342 
343 extern void             vm_page_release(
344 	vm_page_t       page,
345 	boolean_t       page_queues_locked);
346 
347 extern boolean_t        vm_page_wait(
348 	int             interruptible );
349 
350 extern void             vm_page_init(
351 	vm_page_t       page,
352 	ppnum_t         phys_page,
353 	boolean_t       lopage);
354 
355 extern void             vm_page_free(
356 	vm_page_t       page);
357 
358 extern void             vm_page_free_unlocked(
359 	vm_page_t       page,
360 	boolean_t       remove_from_hash);
361 
362 extern void             vm_page_balance_inactive(
363 	int             max_to_move);
364 
365 extern void             vm_page_activate(
366 	vm_page_t       page);
367 
368 extern void             vm_page_deactivate(
369 	vm_page_t       page);
370 
371 extern void             vm_page_deactivate_internal(
372 	vm_page_t       page,
373 	boolean_t       clear_hw_reference);
374 
375 extern void             vm_page_enqueue_cleaned(vm_page_t page);
376 
377 extern void             vm_page_lru(
378 	vm_page_t       page);
379 
380 extern void             vm_page_speculate(
381 	vm_page_t       page,
382 	boolean_t       new);
383 
384 extern void             vm_page_speculate_ageit(
385 	struct vm_speculative_age_q *aq);
386 
387 extern void             vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);
388 
389 extern void             vm_page_rename(
390 	vm_page_t               page,
391 	vm_object_t             new_object,
392 	vm_object_offset_t      new_offset);
393 
394 extern void             vm_page_insert(
395 	vm_page_t               page,
396 	vm_object_t             object,
397 	vm_object_offset_t      offset);
398 
399 extern void             vm_page_insert_wired(
400 	vm_page_t               page,
401 	vm_object_t             object,
402 	vm_object_offset_t      offset,
403 	vm_tag_t                tag);
404 
405 
406 extern void             vm_page_insert_internal(
407 	vm_page_t               page,
408 	vm_object_t             object,
409 	vm_object_offset_t      offset,
410 	vm_tag_t                tag,
411 	boolean_t               queues_lock_held,
412 	boolean_t               insert_in_hash,
413 	boolean_t               batch_pmap_op,
414 	boolean_t               delayed_accounting,
415 	uint64_t                *delayed_ledger_update);
416 
417 extern void             vm_page_replace(
418 	vm_page_t               mem,
419 	vm_object_t             object,
420 	vm_object_offset_t      offset);
421 
422 extern void             vm_page_remove(
423 	vm_page_t       page,
424 	boolean_t       remove_from_hash);
425 
426 extern void             vm_page_zero_fill(
427 	vm_page_t       page);
428 
429 extern void             vm_page_part_zero_fill(
430 	vm_page_t       m,
431 	vm_offset_t     m_pa,
432 	vm_size_t       len);
433 
434 extern void             vm_page_copy(
435 	vm_page_t       src_page,
436 	vm_page_t       dest_page);
437 
438 extern void             vm_page_part_copy(
439 	vm_page_t       src_m,
440 	vm_offset_t     src_pa,
441 	vm_page_t       dst_m,
442 	vm_offset_t     dst_pa,
443 	vm_size_t       len);
444 
445 extern void             vm_page_wire(
446 	vm_page_t       page,
447 	vm_tag_t        tag,
448 	boolean_t       check_memorystatus);
449 
450 extern void             vm_page_unwire(
451 	vm_page_t       page,
452 	boolean_t       queueit);
453 
454 extern void             vm_set_page_size(void);
455 
456 extern void             vm_page_validate_cs(
457 	vm_page_t       page,
458 	vm_map_size_t   fault_page_size,
459 	vm_map_offset_t fault_phys_offset);
460 
461 extern void             vm_page_validate_cs_mapped(
462 	vm_page_t       page,
463 	vm_map_size_t   fault_page_size,
464 	vm_map_offset_t fault_phys_offset,
465 	const void      *kaddr);
466 extern void             vm_page_validate_cs_mapped_slow(
467 	vm_page_t       page,
468 	const void      *kaddr);
469 extern void             vm_page_validate_cs_mapped_chunk(
470 	vm_page_t       page,
471 	const void      *kaddr,
472 	vm_offset_t     chunk_offset,
473 	vm_size_t       chunk_size,
474 	boolean_t       *validated,
475 	unsigned        *tainted);
476 
477 extern void             vm_page_free_prepare_queues(
478 	vm_page_t       page);
479 
480 extern void             vm_page_free_prepare_object(
481 	vm_page_t       page,
482 	boolean_t       remove_from_hash);
483 
484 extern wait_result_t    vm_page_sleep(
485 	vm_object_t        object,
486 	vm_page_t          m,
487 	wait_interrupt_t   interruptible,
488 	lck_sleep_action_t action);
489 
490 extern void             vm_page_wakeup(
491 	vm_object_t        object,
492 	vm_page_t          m);
493 
494 extern void             vm_page_wakeup_done(
495 	vm_object_t        object,
496 	vm_page_t          m);
497 
/*
 * State carried by a page worker across the wakeup-with-inheritor
 * protocol (see vm_page_wakeup_done_with_inheritor() and
 * page_worker_register_worker() below).
 */
typedef struct page_worker_token {
	thread_pri_floor_t pwt_floor_token;          /* priority-floor token held by the worker — presumably from thread_priority_floor_start(); confirm in vm_page.c */
	bool pwt_did_register_inheritor;             /* true once the worker registered as a turnstile inheritor — TODO confirm against implementation */
} page_worker_token_t;
502 
503 extern void             vm_page_wakeup_done_with_inheritor(
504 	vm_object_t        object,
505 	vm_page_t          m,
506 	page_worker_token_t *token);
507 
508 extern void             page_worker_register_worker(
509 	event_t            event,
510 	page_worker_token_t *out_token);
511 
512 /*
513  * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
514  * protected by the object lock.
515  */
516 
/*
 * SET_PAGE_DIRTY(m, set_pmap_modified): mark page m dirty.
 * On non-macOS targets, if the page has been entered writable into a pmap
 * (vmp_pmapped && vmp_wpmapped), is transitioning clean->dirty, and the
 * caller requested it, also push the modified bit down to the pmap layer.
 * m->vmp_dirty is protected by the object lock (see note above).
 */
#if !XNU_TARGET_OS_OSX
#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
	        MACRO_BEGIN                                             \
	        vm_page_t __page__ = (m);                               \
	        if (__page__->vmp_pmapped == TRUE &&                    \
	            __page__->vmp_wpmapped == TRUE &&                   \
	            __page__->vmp_dirty == FALSE &&                     \
	            (set_pmap_modified)) {                              \
	                pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \
	        }                                                       \
	        __page__->vmp_dirty = TRUE;                             \
	        MACRO_END
#else /* !XNU_TARGET_OS_OSX */
#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
	        MACRO_BEGIN                                             \
	        vm_page_t __page__ = (m);                               \
	        __page__->vmp_dirty = TRUE;                             \
	        MACRO_END
#endif /* !XNU_TARGET_OS_OSX */
536 
/*
 * VM_PAGE_FREE(p): free page p without the page queues locked,
 * removing it from the VM page hash (second argument TRUE).
 */
#define VM_PAGE_FREE(p)                         \
	        MACRO_BEGIN                     \
	        vm_page_free_unlocked(p, TRUE); \
	        MACRO_END
541 
542 
543 #define VM_PAGE_WAIT()          ((void)vm_page_wait(THREAD_UNINT))
544 
/* Acquire the free-page queue lock as a full (blocking) mutex. */
static inline void
vm_free_page_lock(void)
{
	lck_mtx_lock(&vm_page_queue_free_lock);
}
550 
/* Acquire the free-page queue lock in spin mode (no blocking on the mutex interlock). */
static inline void
vm_free_page_lock_spin(void)
{
	lck_mtx_lock_spin(&vm_page_queue_free_lock);
}
556 
/* Convert a spin-mode hold of the free-page queue lock to a full mutex hold. */
static inline void
vm_free_page_lock_convert(void)
{
	lck_mtx_convert_spin(&vm_page_queue_free_lock);
}
562 
/* Release the free-page queue lock. */
static inline void
vm_free_page_unlock(void)
{
	lck_mtx_unlock(&vm_page_queue_free_lock);
}
568 
569 
570 #define vm_page_lockconvert_queues()    lck_mtx_convert_spin(&vm_page_queue_lock)
571 
572 
/*
 * Per-CPU local page queue (vpl) locking: a spin lock when
 * VPL_LOCK_SPIN is configured, otherwise a mutex taken in spin mode.
 */
#ifdef  VPL_LOCK_SPIN
extern lck_grp_t vm_page_lck_grp_local;

#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif
584 
/*
 * Count a speculative page being consumed; statistics are only
 * maintained on DEVELOPMENT/DEBUG kernels.
 */
#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()                          \
	MACRO_BEGIN                                             \
	OSAddAtomic(1, &vm_page_speculative_used);              \
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif
593 
/*
 * VM_PAGE_CONSUME_CLUSTERED(mem): note that a page brought in by
 * clustered (read-ahead) pagein was actually used — credit its object's
 * pages_used counter and clear vmp_clustered, under the phys-page lock.
 */
#define VM_PAGE_CONSUME_CLUSTERED(mem)                          \
	MACRO_BEGIN                                             \
	ppnum_t	__phys_page;                                    \
	__phys_page = VM_PAGE_GET_PHYS_PAGE(mem);               \
	pmap_lock_phys_page(__phys_page);                       \
	if (mem->vmp_clustered) {                               \
	        vm_object_t o;                                  \
	        o = VM_PAGE_OBJECT(mem);                        \
	        assert(o);                                      \
	        o->pages_used++;                                \
	        mem->vmp_clustered = FALSE;                     \
	        VM_PAGE_SPECULATIVE_USED_ADD();                 \
	}                                                       \
	pmap_unlock_phys_page(__phys_page);                     \
	MACRO_END
609 
610 
/*
 * VM_PAGE_COUNT_AS_PAGEIN(mem): account one pagein against the current
 * task and fire the matching DTrace probes (anonymous vs. file-backed,
 * based on the owning object's internal flag).
 */
#define VM_PAGE_COUNT_AS_PAGEIN(mem)                            \
	MACRO_BEGIN                                             \
	{                                                       \
	vm_object_t o;                                          \
	o = VM_PAGE_OBJECT(mem);                                \
	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);           \
	counter_inc(&current_task()->pageins);                  \
	if (o->internal) {                                      \
	        DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);       \
	} else {                                                \
	        DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \
	}                                                       \
	}                                                       \
	MACRO_END
625 
626 
627 /* adjust for stolen pages accounted elsewhere */
/* VM_PAGE_MOVE_STOLEN(page_count): transfer accounting of page_count
 * stolen pages out of the stolen/initial-wired counters. */
#define VM_PAGE_MOVE_STOLEN(page_count)                         \
	MACRO_BEGIN                                             \
	vm_page_stolen_count -=	(page_count);                   \
	vm_page_wire_count_initial -= (page_count);             \
	MACRO_END
633 
634 extern kern_return_t pmap_enter_check(
635 	pmap_t           pmap,
636 	vm_map_address_t virtual_address,
637 	vm_page_t        page,
638 	vm_prot_t        protection,
639 	vm_prot_t        fault_type,
640 	unsigned int     flags,
641 	boolean_t        wired);
642 
643 #define DW_vm_page_unwire               0x01
644 #define DW_vm_page_wire                 0x02
645 #define DW_vm_page_free                 0x04
646 #define DW_vm_page_activate             0x08
647 #define DW_vm_page_deactivate_internal  0x10
648 #define DW_vm_page_speculate            0x20
649 #define DW_vm_page_lru                  0x40
650 #define DW_vm_pageout_throttle_up       0x80
651 #define DW_PAGE_WAKEUP                  0x100
652 #define DW_clear_busy                   0x200
653 #define DW_clear_reference              0x400
654 #define DW_set_reference                0x800
655 #define DW_move_page                    0x1000
656 #define DW_VM_PAGE_QUEUES_REMOVE        0x2000
657 #define DW_enqueue_cleaned              0x4000
658 #define DW_vm_phantom_cache_update      0x8000
659 
/* One unit of deferred page work: the page plus a DW_* bitmask of
 * operations to apply to it in vm_page_do_delayed_work(). */
struct vm_page_delayed_work {
	vm_page_t       dw_m;      /* page the delayed operations apply to */
	int             dw_mask;   /* OR of DW_* flags defined above */
};
664 
665 #define DEFAULT_DELAYED_WORK_LIMIT      32
666 
/* Fixed-capacity batch of delayed page work, tagged with the thread
 * that owns/filled it. */
struct vm_page_delayed_work_ctx {
	struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT]; /* pending entries */
	thread_t                    delayed_owner;                   /* thread that owns this batch */
};
671 
672 void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);
673 
674 #define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))
675 
676 /*
677  * vm_page_do_delayed_work may need to drop the object lock...
678  * if it does, we need the pages it's looking at to
679  * be held stable via the busy bit, so if busy isn't already
680  * set, we need to set it and ask vm_page_do_delayed_work
681  * to clear it and wakeup anyone that might have blocked on
682  * it once we're done processing the page.
683  */
684 
/*
 * VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt): record mem in the current
 * delayed-work entry and advance dwp/dw_cnt. Sets vmp_busy (unless the
 * page is being freed) so the page stays stable if the object lock is
 * dropped — see the rationale in the comment above.
 */
#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)              \
	MACRO_BEGIN                                             \
	if (mem->vmp_busy == FALSE) {                           \
	        mem->vmp_busy = TRUE;                           \
	        if ( !(dwp->dw_mask & DW_vm_page_free))         \
	                dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}                                                       \
	dwp->dw_m = mem;                                        \
	dwp++;                                                  \
	dw_cnt++;                                               \
	MACRO_END
696 
697 
698 //todo int
699 extern vm_page_t vm_object_page_grab(vm_object_t);
700 
701 //todo int
702 #if VM_PAGE_BUCKETS_CHECK
703 extern void vm_page_buckets_check(void);
704 #endif /* VM_PAGE_BUCKETS_CHECK */
705 
706 //todo int
707 extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq);
708 extern void vm_page_remove_internal(vm_page_t page);
709 extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
710 extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
711 extern void vm_page_check_pageable_safe(vm_page_t page);
712 //end int
713 
714 
715 //todo int
716 extern void vm_retire_boot_pages(void);
717 
718 //todo all int
719 
720 #define VMP_ERROR_GET(p) ((p)->vmp_error)
721 
722 
723 //todo int
724 
725 #endif /* XNU_KERNEL_PRIVATE */
726 __END_DECLS
727 
728 #endif  /* _VM_VM_PAGE_INTERNAL_H_ */
729