/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_PAGE_INTERNAL_H_
#define _VM_VM_PAGE_INTERNAL_H_

#include <sys/cdefs.h>
#include <vm/vm_page.h>

__BEGIN_DECLS
#ifdef XNU_KERNEL_PRIVATE

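/*
 * Code-signing state is tracked per 4K sub-page: each vm_page carries
 * small bitmaps (vmp_cs_validated, vmp_cs_tainted, vmp_cs_nx) with one
 * bit per 4K chunk of the page.  VMP_CS_FOR_OFFSET maps a physical
 * offset within the page (which must be 4K-aligned) to the bit covering
 * its chunk; the accessors below test the whole bitmap for full-page
 * faults and the single relevant bit for sub-page faults.
 */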
static inline int
VMP_CS_FOR_OFFSET(
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_phys_offset < PAGE_SIZE &&
        !(fault_phys_offset & FOURK_PAGE_MASK),
        "offset 0x%llx\n", (uint64_t)fault_phys_offset);
    return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT);
}
static inline bool
VMP_CS_VALIDATED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_validated == VMP_CS_ALL_TRUE;
    }
    return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_TAINTED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_tainted != VMP_CS_ALL_FALSE;
    }
    return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
static inline bool
VMP_CS_NX(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (fault_page_size == PAGE_SIZE) {
        return p->vmp_cs_nx != VMP_CS_ALL_FALSE;
    }
    return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset);
}
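/*
 * The setters mirror the accessors: a full-page fault sets or clears the
 * entire bitmap, while a sub-page fault touches only the bit for the 4K
 * chunk containing fault_phys_offset.
 */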
static inline void
VMP_CS_SET_VALIDATED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_validated = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_validated = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
static inline void
VMP_CS_SET_TAINTED(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_tainted = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_tainted = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}
static inline void
VMP_CS_SET_NX(
    vm_page_t p,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    boolean_t value)
{
    assertf(fault_page_size <= PAGE_SIZE,
        "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
        (uint64_t)fault_page_size, (uint64_t)fault_phys_offset);
    if (value) {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_nx = VMP_CS_ALL_TRUE;
        }
        p->vmp_cs_nx |= VMP_CS_FOR_OFFSET(fault_phys_offset);
    } else {
        if (fault_page_size == PAGE_SIZE) {
            p->vmp_cs_nx = VMP_CS_ALL_FALSE;
        }
        p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset);
    }
}


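/*
 * On LP64, the page queues store packed (compressed) pointers, so the
 * generic queue primitives cannot be used directly; the helpers below
 * pack and unpack links via VM_PAGE_PACK_PTR / VM_PAGE_UNPACK_PTR.
 */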
#if defined(__LP64__)
static __inline__ void
vm_page_enqueue_tail(
    vm_page_queue_t que,
    vm_page_queue_entry_t elt)
{
    vm_page_queue_entry_t old_tail;

    old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
    elt->next = VM_PAGE_PACK_PTR(que);
    elt->prev = que->prev;
    que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
}

static __inline__ void
vm_page_remque(
    vm_page_queue_entry_t elt)
{
    vm_page_queue_entry_t next;
    vm_page_queue_entry_t prev;
    vm_page_packed_t next_pck = elt->next;
    vm_page_packed_t prev_pck = elt->prev;

    next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);

    /* next may equal prev (and the queue head) if elt was the only element */
    prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);

    next->prev = prev_pck;
    prev->next = next_pck;

    elt->next = 0;
    elt->prev = 0;
}
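/*
 * Typical usage (a sketch, assuming the relevant page-queue lock is held
 * and using the vmp_pageq linkage of a vm_page as the queue entry):
 *
 *	vm_page_enqueue_tail(&vm_page_queue_active, &mem->vmp_pageq);
 *	...
 *	vm_page_remque(&mem->vmp_pageq);
 */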

#if defined(__x86_64__)
/*
 * Insert a new page into a free queue and clump pages within the same 16K boundary together.
 */
static inline void
vm_page_queue_enter_clump(
    vm_page_queue_t head,
    vm_page_t elt)
{
    vm_page_queue_entry_t first = NULL;     /* first page in the clump */
    vm_page_queue_entry_t last = NULL;      /* last page in the clump */
    vm_page_queue_entry_t prev = NULL;
    vm_page_queue_entry_t next;
    uint_t n_free = 1;
    extern unsigned int vm_pages_count;
    extern unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
    extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

    /*
     * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
     */
    if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
        vm_page_t p;
        uint_t i;
        uint_t n;
        ppnum_t clump_num;

        first = last = (vm_page_queue_entry_t)elt;
        clump_num = VM_PAGE_GET_CLUMP(elt);
        n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;

        /*
         * Check for preceding vm_pages[] entries in the same clump.
         */
        for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
            if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
                if (prev == NULL) {
                    prev = (vm_page_queue_entry_t)p;
                }
                first = (vm_page_queue_entry_t)p;
                n_free++;
            }
        }

        /*
         * Check the following vm_pages[] entries in the same clump.
         */
        for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
            if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
                if (last == (vm_page_queue_entry_t)elt) {       /* first one only */
                    __DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
                }

                if (prev == NULL) {
                    prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
                }
                last = (vm_page_queue_entry_t)p;
                n_free++;
            }
        }
        __DEBUG_STAT_INCREMENT_INRANGE;
    }

    /* if elt is not part of vm_pages[] or is the 1st page in the clump, insert at tail */
    if (prev == NULL) {
        prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
    }

    /* insert the element */
    next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
    elt->vmp_pageq.next = prev->next;
    elt->vmp_pageq.prev = next->prev;
    prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
    __DEBUG_STAT_INCREMENT_INSERTS;

    /*
     * Check if the clump needs to be promoted to the head.
     */
    if (n_free >= vm_clump_promote_threshold && n_free > 1) {
        vm_page_queue_entry_t first_prev;

        first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);

        /* If not at head already */
        if (first_prev != head) {
            vm_page_queue_entry_t last_next;
            vm_page_queue_entry_t head_next;

            last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);

            /* verify that the links within the clump are consistent */
            __DEBUG_VERIFY_LINKS(first, n_free, last_next);

            /* promote clump to head */
            first_prev->next = last->next;
            last_next->prev = first->prev;
            first->prev = VM_PAGE_PACK_PTR(head);
            last->next = head->next;

            head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
            head_next->prev = VM_PAGE_PACK_PTR(last);
            head->next = VM_PAGE_PACK_PTR(first);
            __DEBUG_STAT_INCREMENT_PROMOTES(n_free);
        }
    }
}
#endif /* __x86_64__ */
#endif /* __LP64__ */


extern void vm_page_assign_special_state(vm_page_t mem, int mode);
extern void vm_page_update_special_state(vm_page_t mem);
extern void vm_page_add_to_specialq(vm_page_t mem, boolean_t first);
extern void vm_page_remove_from_specialq(vm_page_t mem);


/*
 * Prototypes for functions exported by this module.
 */
extern void vm_page_bootstrap(
    vm_offset_t *startp,
    vm_offset_t *endp);

extern vm_page_t kdp_vm_page_lookup(
    vm_object_t object,
    vm_object_offset_t offset);

extern vm_page_t vm_page_lookup(
    vm_object_t object,
    vm_object_offset_t offset);

extern vm_page_t vm_page_grab_fictitious(boolean_t canwait);

extern vm_page_t vm_page_grab_guard(boolean_t canwait);

extern void vm_page_release_fictitious(
    vm_page_t page);

extern bool vm_pool_low(void);

extern vm_page_t vm_page_grab(void);
extern vm_page_t vm_page_grab_options(int flags);

#define VM_PAGE_GRAB_OPTIONS_NONE       0x00000000
#if CONFIG_SECLUDED_MEMORY
#define VM_PAGE_GRAB_SECLUDED           0x00000001
#endif /* CONFIG_SECLUDED_MEMORY */
#define VM_PAGE_GRAB_Q_LOCK_HELD        0x00000002
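/*
 * Note: vm_page_grab() is believed to be equivalent to
 * vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE); the flags above only
 * affect vm_page_grab_options().
 */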

extern vm_page_t vm_page_grablo(void);

extern void vm_page_release(
    vm_page_t page,
    boolean_t page_queues_locked);

extern boolean_t vm_page_wait(
    int interruptible);

extern void vm_page_init(
    vm_page_t page,
    ppnum_t phys_page,
    boolean_t lopage);

extern void vm_page_free(
    vm_page_t page);

extern void vm_page_free_unlocked(
    vm_page_t page,
    boolean_t remove_from_hash);

extern void vm_page_balance_inactive(
    int max_to_move);

extern void vm_page_activate(
    vm_page_t page);

extern void vm_page_deactivate(
    vm_page_t page);

extern void vm_page_deactivate_internal(
    vm_page_t page,
    boolean_t clear_hw_reference);

extern void vm_page_enqueue_cleaned(vm_page_t page);

extern void vm_page_lru(
    vm_page_t page);

extern void vm_page_speculate(
    vm_page_t page,
    boolean_t new);

extern void vm_page_speculate_ageit(
    struct vm_speculative_age_q *aq);

extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void vm_page_rename(
    vm_page_t page,
    vm_object_t new_object,
    vm_object_offset_t new_offset);

extern void vm_page_insert(
    vm_page_t page,
    vm_object_t object,
    vm_object_offset_t offset);

extern void vm_page_insert_wired(
    vm_page_t page,
    vm_object_t object,
    vm_object_offset_t offset,
    vm_tag_t tag);


extern void vm_page_insert_internal(
    vm_page_t page,
    vm_object_t object,
    vm_object_offset_t offset,
    vm_tag_t tag,
    boolean_t queues_lock_held,
    boolean_t insert_in_hash,
    boolean_t batch_pmap_op,
    boolean_t delayed_accounting,
    uint64_t *delayed_ledger_update);

extern void vm_page_replace(
    vm_page_t mem,
    vm_object_t object,
    vm_object_offset_t offset);

extern void vm_page_remove(
    vm_page_t page,
    boolean_t remove_from_hash);

extern void vm_page_zero_fill(
    vm_page_t page);

extern void vm_page_part_zero_fill(
    vm_page_t m,
    vm_offset_t m_pa,
    vm_size_t len);

extern void vm_page_copy(
    vm_page_t src_page,
    vm_page_t dest_page);

extern void vm_page_part_copy(
    vm_page_t src_m,
    vm_offset_t src_pa,
    vm_page_t dst_m,
    vm_offset_t dst_pa,
    vm_size_t len);

extern void vm_page_wire(
    vm_page_t page,
    vm_tag_t tag,
    boolean_t check_memorystatus);

extern void vm_page_unwire(
    vm_page_t page,
    boolean_t queueit);

extern void vm_set_page_size(void);

extern void vm_page_validate_cs(
    vm_page_t page,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset);

extern void vm_page_validate_cs_mapped(
    vm_page_t page,
    vm_map_size_t fault_page_size,
    vm_map_offset_t fault_phys_offset,
    const void *kaddr);
extern void vm_page_validate_cs_mapped_slow(
    vm_page_t page,
    const void *kaddr);
extern void vm_page_validate_cs_mapped_chunk(
    vm_page_t page,
    const void *kaddr,
    vm_offset_t chunk_offset,
    vm_size_t chunk_size,
    boolean_t *validated,
    unsigned *tainted);

extern void vm_page_free_prepare_queues(
    vm_page_t page);

extern void vm_page_free_prepare_object(
    vm_page_t page,
    boolean_t remove_from_hash);

extern wait_result_t vm_page_sleep(
    vm_object_t object,
    vm_page_t m,
    wait_interrupt_t interruptible,
    lck_sleep_action_t action);

extern void vm_page_wakeup(
    vm_object_t object,
    vm_page_t m);

extern void vm_page_wakeup_done(
    vm_object_t object,
    vm_page_t m);

/*
 * Functions implemented as macros. m->vmp_wanted and m->vmp_busy are
 * protected by the object lock.
 */

#if !XNU_TARGET_OS_OSX
#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
    MACRO_BEGIN                                                         \
    vm_page_t __page__ = (m);                                           \
    if (__page__->vmp_pmapped == TRUE &&                                \
        __page__->vmp_wpmapped == TRUE &&                               \
        __page__->vmp_dirty == FALSE &&                                 \
        (set_pmap_modified)) {                                          \
            pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__));           \
    }                                                                   \
    __page__->vmp_dirty = TRUE;                                         \
    MACRO_END
#else /* !XNU_TARGET_OS_OSX */
#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
    MACRO_BEGIN                                                         \
    vm_page_t __page__ = (m);                                           \
    __page__->vmp_dirty = TRUE;                                         \
    MACRO_END
#endif /* !XNU_TARGET_OS_OSX */
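/*
 * On non-macOS targets, SET_PAGE_DIRTY also pushes the modified state
 * down into the pmap layer when the page has been mapped writable but is
 * not yet marked dirty, keeping software and hardware dirty tracking in
 * sync before vmp_dirty is set.
 */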

#define VM_PAGE_FREE(p)                 \
    MACRO_BEGIN                         \
    vm_page_free_unlocked(p, TRUE);     \
    MACRO_END


#define VM_PAGE_WAIT()          ((void)vm_page_wait(THREAD_UNINT))

static inline void
vm_free_page_lock(void)
{
    lck_mtx_lock(&vm_page_queue_free_lock);
}

static inline void
vm_free_page_lock_spin(void)
{
    lck_mtx_lock_spin(&vm_page_queue_free_lock);
}

static inline void
vm_free_page_lock_convert(void)
{
    lck_mtx_convert_spin(&vm_page_queue_free_lock);
}

static inline void
vm_free_page_unlock(void)
{
    lck_mtx_unlock(&vm_page_queue_free_lock);
}
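/*
 * Typical pattern (a sketch, not a prescription): take the free-page
 * lock in spin mode for a short critical section and convert it to a
 * full mutex before doing anything that may block:
 *
 *	vm_free_page_lock_spin();
 *	if (must_block) {               // hypothetical condition
 *		vm_free_page_lock_convert();
 *		...                     // work that may block
 *	}
 *	vm_free_page_unlock();
 */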


#define vm_page_lockconvert_queues()    lck_mtx_convert_spin(&vm_page_queue_lock)


#ifdef VPL_LOCK_SPIN
extern lck_grp_t vm_page_lck_grp_local;

#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif

#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()                          \
    MACRO_BEGIN                                                 \
    OSAddAtomic(1, &vm_page_speculative_used);                  \
    MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif

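/*
 * VM_PAGE_CONSUME_CLUSTERED: if the page was brought in speculatively as
 * part of a cluster, credit the owning object's pages_used count and
 * clear the clustered flag, all under the physical-page lock.
 */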
#define VM_PAGE_CONSUME_CLUSTERED(mem)                          \
    MACRO_BEGIN                                                 \
    ppnum_t __phys_page;                                        \
    __phys_page = VM_PAGE_GET_PHYS_PAGE(mem);                   \
    pmap_lock_phys_page(__phys_page);                           \
    if (mem->vmp_clustered) {                                   \
            vm_object_t o;                                      \
            o = VM_PAGE_OBJECT(mem);                            \
            assert(o);                                          \
            o->pages_used++;                                    \
            mem->vmp_clustered = FALSE;                         \
            VM_PAGE_SPECULATIVE_USED_ADD();                     \
    }                                                           \
    pmap_unlock_phys_page(__phys_page);                         \
    MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem)                            \
    MACRO_BEGIN                                                 \
    {                                                           \
    vm_object_t o;                                              \
    o = VM_PAGE_OBJECT(mem);                                    \
    DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);               \
    counter_inc(&current_task()->pageins);                      \
    if (o->internal) {                                          \
            DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);   \
    } else {                                                    \
            DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);     \
    }                                                           \
    }                                                           \
    MACRO_END


/* adjust for stolen pages accounted elsewhere */
#define VM_PAGE_MOVE_STOLEN(page_count)                         \
    MACRO_BEGIN                                                 \
    vm_page_stolen_count -= (page_count);                       \
    vm_page_wire_count_initial -= (page_count);                 \
    MACRO_END

extern kern_return_t pmap_enter_check(
    pmap_t pmap,
    vm_map_address_t virtual_address,
    vm_page_t page,
    vm_prot_t protection,
    vm_prot_t fault_type,
    unsigned int flags,
    boolean_t wired);

#define DW_vm_page_unwire               0x01
#define DW_vm_page_wire                 0x02
#define DW_vm_page_free                 0x04
#define DW_vm_page_activate             0x08
#define DW_vm_page_deactivate_internal  0x10
#define DW_vm_page_speculate            0x20
#define DW_vm_page_lru                  0x40
#define DW_vm_pageout_throttle_up       0x80
#define DW_PAGE_WAKEUP                  0x100
#define DW_clear_busy                   0x200
#define DW_clear_reference              0x400
#define DW_set_reference                0x800
#define DW_move_page                    0x1000
#define DW_VM_PAGE_QUEUES_REMOVE        0x2000
#define DW_enqueue_cleaned              0x4000
#define DW_vm_phantom_cache_update      0x8000

struct vm_page_delayed_work {
    vm_page_t dw_m;
    int dw_mask;
};

#define DEFAULT_DELAYED_WORK_LIMIT      32

struct vm_page_delayed_work_ctx {
    struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT];
    thread_t delayed_owner;
};

void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count);

#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock.
 * If it does, the pages it is looking at must be held stable via the
 * busy bit, so if busy isn't already set, we set it here and ask
 * vm_page_do_delayed_work to clear it and wake up anyone who might have
 * blocked on it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)                      \
    MACRO_BEGIN                                                         \
    if (mem->vmp_busy == FALSE) {                                       \
            mem->vmp_busy = TRUE;                                       \
            if (!(dwp->dw_mask & DW_vm_page_free))                      \
                    dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);   \
    }                                                                   \
    dwp->dw_m = mem;                                                    \
    dwp++;                                                              \
    dw_cnt++;                                                           \
    MACRO_END
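/*
 * Typical usage (a sketch; the local names below are illustrative):
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *	dwp->dw_mask = DW_vm_page_activate;     // pick the deferred ops
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);
 *	if (dw_count >= dw_limit) {
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 */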


//todo int
extern vm_page_t vm_object_page_grab(vm_object_t);

//todo int
#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

//todo int
extern void vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq);
extern void vm_page_remove_internal(vm_page_t page);
extern void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first);
extern void vm_page_enqueue_active(vm_page_t mem, boolean_t first);
extern void vm_page_check_pageable_safe(vm_page_t page);
//end int


//todo int
extern void vm_retire_boot_pages(void);

//todo all int

#define VMP_ERROR_GET(p) ((p)->vmp_error)


//todo int

#endif /* XNU_KERNEL_PRIVATE */
__END_DECLS

#endif /* _VM_VM_PAGE_INTERNAL_H_ */