/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_FAULT_INTERNAL_H_
#define _VM_VM_FAULT_INTERNAL_H_


#include <sys/cdefs.h>
#include <vm/vm_fault_xnu.h>

__BEGIN_DECLS

#ifdef MACH_KERNEL_PRIVATE

/*
 * Page fault handling based on vm_object only.
 */

/*
 * Resolve a fault at (first_object, first_offset) for the requested
 * fault_type, walking the object shadow chain as needed.  On success the
 * located page is returned through *result_page (and *top_page when the
 * page found is not in the top-level object); *protection may be reduced
 * in place to the protection actually grantable for the mapping.
 */
extern vm_fault_return_t vm_fault_page(
	/* Arguments: */
	vm_object_t             first_object,   /* Object to begin search */
	vm_object_offset_t      first_offset,   /* Offset into object */
	vm_prot_t               fault_type,     /* What access is requested */
	boolean_t               must_be_resident, /* Must page be resident? */
	boolean_t               caller_lookup,  /* caller looked up page */
	/* Modifies in place: */
	vm_prot_t               *protection,    /* Protection for mapping */
	vm_page_t               *result_page,   /* Page found, if successful */
	/* Returns: */
	vm_page_t               *top_page,      /* Page in top object, if
	                                         * not result_page. */
	int                     *type_of_fault, /* if non-zero, return COW, zero-filled, etc...
	                                         * used by kernel trace point in vm_fault */
	/* More arguments: */
	kern_return_t           *error_code,    /* code if page is in error */
	boolean_t               no_zero_fill,   /* don't fill absent pages */
	vm_object_fault_info_t  fault_info);

/*
 * NOTE(review): presumably releases the object/page state handed back by
 * vm_fault_page() (object reference and any top_page) — confirm against
 * the definition in vm_fault.c.
 */
extern void vm_fault_cleanup(
	vm_object_t             object,
	vm_page_t               top_page);

/*
 * Fault in and wire the map entry's range into `pmap` starting at
 * pmap_addr, attributing the wired memory to `wire_tag`.  If physpage_p
 * is non-NULL it receives a physical page number — assumed to be that of
 * the (single) wired page; verify against callers.
 */
extern kern_return_t vm_fault_wire(
	vm_map_t                map,
	vm_map_entry_t          entry,
	vm_prot_t               prot,
	vm_tag_t                wire_tag,
	pmap_t                  pmap,
	vm_map_offset_t         pmap_addr,
	ppnum_t                 *physpage_p);

/*
 * Unwire the map entry's range from `pmap` ([pmap_addr, end_addr)).
 * When `deallocate` is TRUE the backing pages are presumably released
 * rather than merely unwired — confirm in vm_fault.c.
 */
extern void vm_fault_unwire(
	vm_map_t                map,
	vm_map_entry_t          entry,
	boolean_t               deallocate,
	pmap_t                  pmap,
	vm_map_offset_t         pmap_addr,
	vm_map_offset_t         end_addr);

/*
 * Copy *copy_size bytes from (src_object, src_offset) to
 * (dst_object, dst_offset); *copy_size is updated in place with the
 * amount actually copied.  `interruptible` selects the wait/interrupt
 * behavior while paging.
 */
extern kern_return_t vm_fault_copy(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_map_size_t           *copy_size,     /* INOUT */
	vm_object_t             dst_object,
	vm_object_offset_t      dst_offset,
	vm_map_t                dst_map,
	vm_map_version_t        *dst_version,
	int                     interruptible);

/*
 * Enter page `m` into `pmap` at `vaddr` with the given protections,
 * optionally wired.  *need_retry, *type_of_fault and *object_lock_type
 * are out/in-out parameters consumed by the fault path in vm_fault.c —
 * see the definition for their exact contract.
 */
extern kern_return_t vm_fault_enter(
	vm_page_t               m,
	pmap_t                  pmap,
	vm_map_offset_t         vaddr,
	vm_map_size_t           fault_page_size,
	vm_map_offset_t         fault_phys_offset,
	vm_prot_t               prot,
	vm_prot_t               fault_type,
	boolean_t               wired,
	vm_tag_t                wire_tag,       /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
	vm_object_fault_info_t  fault_info,
	boolean_t               *need_retry,
	int                     *type_of_fault,
	uint8_t                 *object_lock_type);

/*
 * NOTE(review): name suggests this pre-resolves a fault at `offset` in
 * `map` (e.g. to warm mappings ahead of access) using the supplied
 * fault_info — confirm semantics against the definition.
 */
extern kern_return_t vm_pre_fault_with_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_prot_t               prot,
	vm_object_fault_info_t  fault_info);

#endif /* MACH_KERNEL_PRIVATE */

__END_DECLS

#endif  /* _VM_VM_FAULT_INTERNAL_H_ */