xref: /xnu-12377.1.9/osfmk/vm/vm_pageout_internal.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_PAGEOUT_INTERNAL_H_
30 #define _VM_VM_PAGEOUT_INTERNAL_H_
31 
32 #include <sys/cdefs.h>
33 #include <vm/vm_pageout_xnu.h>
34 
35 __BEGIN_DECLS
36 
37 #ifdef XNU_KERNEL_PRIVATE
38 
39 #ifdef MACH_KERNEL_PRIVATE
40 
/*
 * Startup/wakeup tokens for the VM garbage-collection thread.  The first
 * parameter of vm_pageout_garbage_collect() is one of these two values:
 * VM_PAGEOUT_GC_INIT on the initial invocation, VM_PAGEOUT_GC_COLLECT on
 * subsequent wakeups requesting a collection pass.
 */
#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
/* Thread continuation for VM garbage collection (see tokens above). */
extern void vm_pageout_garbage_collect(void *, wait_result_t);
44 
/* UPL exported routines and structures */

/*
 * Locking for UPL objects.  Each upl embeds a mutex ("Lock") initialized
 * from the shared VM object lock group/attributes; these wrappers keep all
 * UPL call sites on the same group so lock statistics stay aggregated.
 */
#define upl_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)        lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)      lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)    lck_mtx_try_lock(&(object)->Lock)
/*
 * Block on `event` while atomically dropping the UPL lock, donating
 * priority to `thread` (the expected waker) via the inheritor mechanism.
 * The wait is uninterruptible (THREAD_UNINT) and has no timeout
 * (TIMEOUT_WAIT_FOREVER).
 */
#define upl_lock_sleep(object, event, thread)                           \
	lck_mtx_sleep_with_inheritor(&(object)->Lock,                   \
	              LCK_SLEEP_DEFAULT,                                \
	              (event_t) (event),                                \
	              (thread),                                         \
	              THREAD_UNINT,                                     \
	              TIMEOUT_WAIT_FOREVER)
/* Wake all threads blocked in upl_lock_sleep() on `event`. */
#define upl_wakeup(event) wakeup_all_with_inheritor((event), THREAD_AWAKENED)
60 
/*
 * Push the cache attributes for `num_pages` entries of `user_page_list`
 * (pages belonging to `object`) down to the pmap layer.
 * NOTE(review): `batch_pmap_op` presumably selects a batched pmap update
 * for the whole list rather than per-page calls — confirm against the
 * implementation in vm_pageout.c.
 */
extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);
66 
/*
 * Build an I/O page list (IOPL) UPL directly against `object`, covering
 * [offset, offset + size).  On success *upl_ptr receives the new UPL and,
 * when `user_page_list` is supplied, up to *page_list_count entries are
 * filled in (count updated on return).  `cntrl_flags` are the
 * UPL_* control flags; `tag` attributes the wired memory for accounting.
 * NOTE(review): exact flag semantics and failure codes live in the
 * implementation — not visible from this header.
 */
extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                  *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int           *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);
76 
/*
 * Map an entire UPL into `map`; the chosen address is returned through
 * *dst_addr.
 */
/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* Unmap a UPL previously mapped with vm_map_enter_upl(). */
/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);
87 
88 extern kern_return_t vm_map_enter_upl_range(
89 	vm_map_t                map,
90 	upl_t                   upl,
91 	vm_object_offset_t             offset,
92 	vm_size_t               size,
93 	vm_prot_t               prot,
94 	vm_map_offset_t         *dst_addr);
95 
96 extern kern_return_t vm_map_remove_upl_range(
97 	vm_map_t                map,
98 	upl_t                   upl,
99 	vm_object_offset_t             offset,
100 	vm_size_t               size);
101 
102 
/*
 * Obtain a delayed-work context for batching page operations.  The caller
 * owns the returned context and must hand it back with
 * vm_page_delayed_work_finish_ctx() when done.
 */
extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

/* Release a context obtained from vm_page_delayed_work_get_ctx(). */
extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

/*
 * Release `page` from laundry throttling.
 * NOTE(review): locking preconditions (page queues lock?) are enforced in
 * the implementation — not visible from this header.
 */
extern void vm_pageout_throttle_up(vm_page_t page);
110 
/*
 * Map `page` (or a run of pages of `object` starting at `offset`) into the
 * kernel pageout window with protection `protection`.  *size is the
 * requested length on input and the mapped length on output; *address
 * receives the kernel virtual address, and *need_unmap tells the caller
 * whether a matching vm_paging_unmap_object() is required.
 * `can_unlock_object` permits the routine to temporarily drop the object
 * lock while establishing the mapping.
 */
extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
/* Tear down a [start, end) mapping created by vm_paging_map_object(). */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
/* Protects the kernel pageout-window state used by the two routines above. */
decl_simple_lock_data(extern, vm_paging_lock);
125 
126 
/*
 * Backing store throttle when BS is exhausted
 */
/* Nonzero when backing store is running low; throttles new pageouts. */
extern unsigned int    vm_backing_store_low;

/*
 * Reclaim `page` from the laundry (pageout-in-progress) state.
 * `queues_locked` indicates whether the caller already holds the page
 * queues lock, so the routine knows whether to take/drop it itself.
 */
extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);
135 
136 
137 #endif /* MACH_KERNEL_PRIVATE */
138 
139 #endif /* XNU_KERNEL_PRIVATE */
140 __END_DECLS
141 
142 #endif  /* _VM_VM_PAGEOUT_INTERNAL_H_ */
143