/* xref: /xnu-11215.41.3/osfmk/vm/vm_pageout_internal.h (revision 33de042d024d46de5ff4e89f2471de6608e37fa4) */
1 /*
2  * Copyright (c) 2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_PAGEOUT_INTERNAL_H_
30 #define _VM_VM_PAGEOUT_INTERNAL_H_
31 
32 #include <sys/cdefs.h>
33 #include <vm/vm_pageout_xnu.h>
34 
35 __BEGIN_DECLS
36 
37 #ifdef XNU_KERNEL_PRIVATE
38 
39 #ifdef MACH_KERNEL_PRIVATE
40 
/*
 * Continuation arguments for the VM garbage-collect thread.
 * The thread waits on VM_PAGEOUT_GC_EVENT, which is simply the address
 * of vm_pageout_garbage_collect() itself cast to an event_t.
 * INIT vs COLLECT presumably distinguish first-time thread setup from a
 * collection request -- TODO confirm against the implementation in
 * vm_pageout.c.
 */
#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
#define VM_PAGEOUT_GC_EVENT     ((event_t)&vm_pageout_garbage_collect)
/* Thread continuation entry point for the garbage-collect thread. */
extern void vm_pageout_garbage_collect(void *, wait_result_t);
45 
/* UPL exported routines and structures */

/*
 * Locking for the mutex embedded in a UPL (the ->Lock field).  The
 * mutex is initialized with the VM object lock group and attributes.
 */
#define upl_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)        lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)      lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)    lck_mtx_try_lock(&(object)->Lock)
/*
 * Block on `event` using the UPL's mutex, donating priority to `thread`
 * via the inheritor variant of lck_mtx_sleep.  The wait is
 * uninterruptible (THREAD_UNINT) and has no timeout
 * (TIMEOUT_WAIT_FOREVER).  Pair with upl_wakeup().
 */
#define upl_lock_sleep(object, event, thread)                           \
	lck_mtx_sleep_with_inheritor(&(object)->Lock,                   \
	              LCK_SLEEP_DEFAULT,                                \
	              (event_t) (event),                                \
	              (thread),                                         \
	              THREAD_UNINT,                                     \
	              TIMEOUT_WAIT_FOREVER)
/* Wake every thread blocked in upl_lock_sleep() on `event`. */
#define upl_wakeup(event) wakeup_all_with_inheritor((event), THREAD_AWAKENED)
61 
/*
 * Propagate the cache attributes of `object` to the pmap layer for the
 * `num_pages` entries of `user_page_list`.  `batch_pmap_op` presumably
 * selects a batched pmap update rather than per-page calls -- confirm
 * exact semantics in the implementation (vm_pageout.c).
 */
extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);
67 
/*
 * Map the pages covered by `upl` into `map`; on success *dst_addr
 * receives the address chosen for the mapping.
 * (should be just a regular vm_map_enter())
 */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);
73 
/*
 * Undo a vm_map_enter_upl(): remove the UPL's mapping from `map`.
 * (should be just a regular vm_map_remove())
 */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);
78 
/*
 * Range variant of vm_map_enter_upl(): map only the sub-range
 * [offset, offset + size) of `upl` into `map` with protection `prot`;
 * *dst_addr receives the resulting address.
 */
extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t             offset,
	vm_size_t               size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);
86 
/*
 * Range variant of vm_map_remove_upl(): unmap only the sub-range
 * [offset, offset + size) of `upl` from `map`.
 */
extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t             offset,
	vm_size_t               size);
92 

/*
 * Acquire a delayed-work context for batching page operations; release
 * it with vm_page_delayed_work_finish_ctx().  Ownership of the returned
 * context transfers to the caller until finish is called.
 */
extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

/* Release a context obtained from vm_page_delayed_work_get_ctx(). */
extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);
99 
/*
 * Release pageout throttling state associated with `page` (e.g. when
 * its laundry completes) -- NOTE(review): exact accounting is in
 * vm_pageout.c; confirm there.
 */
extern void vm_pageout_throttle_up(vm_page_t page);
101 
/*
 * Map `page` (or a range of `object` starting at `offset`) into kernel
 * virtual address space for paging I/O.
 *   can_unlock_object: permits the routine to drop the object lock
 *                      while establishing the mapping -- TODO confirm
 *                      exact contract in vm_pageout.c.
 *   size:              IN/OUT -- requested size in, actual mapped size out.
 *   address:           OUT   -- kernel VA of the mapping.
 *   need_unmap:        OUT   -- whether the caller must later call
 *                              vm_paging_unmap_object().
 */
extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
/*
 * Tear down a kernel mapping of `object` in [start, end) previously
 * established by vm_paging_map_object() (when *need_unmap was TRUE).
 */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
/* Simple lock guarding the paging-window state; defined in vm_pageout.c. */
decl_simple_lock_data(extern, vm_paging_lock);
116 

/*
 * Backing store throttle when BS is exhausted
 */
/* Non-zero indicates backing store is running low -- see vm_pageout.c. */
extern unsigned int    vm_backing_store_low;

/*
 * Reclaim `page` from the laundry (pending-pageout) state.
 * `queues_locked` tells the routine whether the caller already holds
 * the page queues lock, so it can avoid re-taking it.
 */
extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);
126 
127 
128 #endif /* MACH_KERNEL_PRIVATE */
129 
130 #endif /* XNU_KERNEL_PRIVATE */
131 __END_DECLS
132 
133 #endif  /* _VM_VM_PAGEOUT_INTERNAL_H_ */
134