/* xref: /xnu-10002.81.5/osfmk/vm/vm_fault.h (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587) */
/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_fault.h
 *
 *	Page fault handling module declarations.
 */

64 #ifndef _VM_VM_FAULT_H_
65 #define _VM_VM_FAULT_H_
66 
67 #include <mach/mach_types.h>
68 #include <mach/kern_return.h>
69 #include <mach/boolean.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_param.h>
72 #include <mach/vm_behavior.h>
73 
74 #ifdef  KERNEL_PRIVATE
75 
76 typedef kern_return_t   vm_fault_return_t;
77 
78 #define VM_FAULT_SUCCESS                0
79 #define VM_FAULT_RETRY                  1
80 #define VM_FAULT_INTERRUPTED            2
81 #define VM_FAULT_MEMORY_SHORTAGE        3
82 #define VM_FAULT_MEMORY_ERROR           5
83 #define VM_FAULT_SUCCESS_NO_VM_PAGE     6       /* success but no VM page */
84 
85 /*
86  *	Page fault handling based on vm_map (or entries therein)
87  */
88 
89 extern kern_return_t vm_fault(
90 	vm_map_t        map,
91 	vm_map_offset_t vaddr,
92 	vm_prot_t       fault_type,
93 	boolean_t       change_wiring,
94 #if XNU_KERNEL_PRIVATE
95 	vm_tag_t        wire_tag,                   /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
96 #endif
97 	int             interruptible,
98 	pmap_t          pmap,
99 	vm_map_offset_t pmap_addr)
100 #if XNU_KERNEL_PRIVATE
101 __XNU_INTERNAL(vm_fault)
102 #endif
103 ;
104 
105 extern void vm_pre_fault(vm_map_offset_t, vm_prot_t);
106 
107 #ifdef  MACH_KERNEL_PRIVATE
108 
109 #include <vm/vm_page.h>
110 #include <vm/vm_object.h>
111 #include <vm/vm_map.h>
112 
113 extern void vm_fault_init(void);
114 
115 /* exported kext version */
116 extern kern_return_t vm_fault_external(
117 	vm_map_t        map,
118 	vm_map_offset_t vaddr,
119 	vm_prot_t       fault_type,
120 	boolean_t       change_wiring,
121 	int             interruptible,
122 	pmap_t          caller_pmap,
123 	vm_map_offset_t caller_pmap_addr);
124 
125 /*
126  *	Page fault handling based on vm_object only.
127  */
128 
129 extern vm_fault_return_t vm_fault_page(
130 	/* Arguments: */
131 	vm_object_t     first_object,           /* Object to begin search */
132 	vm_object_offset_t first_offset,        /* Offset into object */
133 	vm_prot_t       fault_type,             /* What access is requested */
134 	boolean_t       must_be_resident,        /* Must page be resident? */
135 	boolean_t       caller_lookup,          /* caller looked up page */
136 	/* Modifies in place: */
137 	vm_prot_t       *protection,            /* Protection for mapping */
138 	vm_page_t       *result_page,           /* Page found, if successful */
139 	/* Returns: */
140 	vm_page_t       *top_page,              /* Page in top object, if
141                                                  * not result_page.  */
142 	int             *type_of_fault,         /* if non-zero, return COW, zero-filled, etc...
143                                                  * used by kernel trace point in vm_fault */
144 	/* More arguments: */
145 	kern_return_t   *error_code,            /* code if page is in error */
146 	boolean_t       no_zero_fill,           /* don't fill absent pages */
147 	vm_object_fault_info_t fault_info);
148 
149 extern void vm_fault_cleanup(
150 	vm_object_t     object,
151 	vm_page_t       top_page);
152 
153 extern kern_return_t vm_fault_wire(
154 	vm_map_t        map,
155 	vm_map_entry_t  entry,
156 	vm_prot_t       prot,
157 	vm_tag_t        wire_tag,
158 	pmap_t          pmap,
159 	vm_map_offset_t pmap_addr,
160 	ppnum_t         *physpage_p);
161 
162 extern void vm_fault_unwire(
163 	vm_map_t        map,
164 	vm_map_entry_t  entry,
165 	boolean_t       deallocate,
166 	pmap_t          pmap,
167 	vm_map_offset_t pmap_addr,
168 	vm_map_offset_t end_addr);
169 
170 extern kern_return_t    vm_fault_copy(
171 	vm_object_t             src_object,
172 	vm_object_offset_t      src_offset,
173 	vm_map_size_t           *copy_size,             /* INOUT */
174 	vm_object_t             dst_object,
175 	vm_object_offset_t      dst_offset,
176 	vm_map_t                dst_map,
177 	vm_map_version_t         *dst_version,
178 	int                     interruptible);
179 
180 extern kern_return_t vm_fault_enter(
181 	vm_page_t m,
182 	pmap_t pmap,
183 	vm_map_offset_t vaddr,
184 	vm_map_size_t fault_page_size,
185 	vm_map_offset_t fault_phys_offset,
186 	vm_prot_t prot,
187 	vm_prot_t fault_type,
188 	boolean_t wired,
189 	boolean_t change_wiring,
190 	vm_tag_t  wire_tag,             /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
191 	vm_object_fault_info_t fault_info,
192 	boolean_t *need_retry,
193 	int *type_of_fault,
194 	uint8_t *object_lock_type);
195 
196 extern vm_offset_t kdp_lightweight_fault(
197 	vm_map_t map,
198 	vm_offset_t cur_target_addr);
199 
200 #endif  /* MACH_KERNEL_PRIVATE */
201 
202 #if XNU_KERNEL_PRIVATE
203 
204 boolean_t NEED_TO_HARD_THROTTLE_THIS_TASK(void);
205 
206 #endif
207 
208 #endif  /* KERNEL_PRIVATE */
209 
210 #endif  /* _VM_VM_FAULT_H_ */
211