xref: /xnu-8019.80.24/osfmk/vm/vm_map_store.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/backtrace.h>
30 #include <mach/sdt.h>
31 #include <vm/vm_map_store.h>
32 #include <vm/vm_pageout.h> /* for vm_debug_events */
33 
34 #if MACH_ASSERT
35 boolean_t
first_free_is_valid_store(vm_map_t map)36 first_free_is_valid_store( vm_map_t map )
37 {
38 	return first_free_is_valid_ll( map );
39 }
40 #endif
41 
42 boolean_t
vm_map_store_has_RB_support(struct vm_map_header * hdr)43 vm_map_store_has_RB_support( struct vm_map_header *hdr )
44 {
45 	if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
46 		return FALSE;
47 	}
48 	return TRUE;
49 }
50 
void
vm_map_store_init(struct vm_map_header *hdr)
{
	/* The linked-list store always backs a map header. */
	vm_map_store_init_ll(hdr);
#ifdef VM_MAP_STORE_USE_RB
	/* The RB tree is per-header optional; honor the opt-out sentinel. */
	if (vm_map_store_has_RB_support(hdr)) {
		vm_map_store_init_rb(hdr);
	}
#endif
}
61 
62 __attribute__((noinline))
63 boolean_t
vm_map_store_lookup_entry(vm_map_t map,vm_map_offset_t address,vm_map_entry_t * entry)64 vm_map_store_lookup_entry(
65 	vm_map_t                map,
66 	vm_map_offset_t         address,
67 	vm_map_entry_t          *entry)         /* OUT */
68 {
69 #ifdef VM_MAP_STORE_USE_LL
70 	return vm_map_store_lookup_entry_ll( map, address, entry );
71 #elif defined VM_MAP_STORE_USE_RB
72 	if (vm_map_store_has_RB_support( &map->hdr )) {
73 		return vm_map_store_lookup_entry_rb( map, address, entry );
74 	} else {
75 		panic("VM map lookups need RB tree support.");
76 		return FALSE; /* For compiler warning.*/
77 	}
78 #endif
79 }
80 
81 void
vm_map_store_update(vm_map_t map,vm_map_entry_t entry,int update_type)82 vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type )
83 {
84 	switch (update_type) {
85 	case VM_MAP_ENTRY_CREATE:
86 		break;
87 	case VM_MAP_ENTRY_DELETE:
88 		if ((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) {
89 			(map)->first_free = vm_map_to_entry(map);
90 		}
91 		if ((entry) == (map)->hint) {
92 			(map)->hint = vm_map_to_entry(map);
93 		}
94 		break;
95 	default:
96 		break;
97 	}
98 }
99 
100 /*
101  *  vm_map_store_find_last_free:
102  *
103  *  Finds and returns in O_ENTRY the entry *after* the last hole (if one exists) in MAP.
104  *  Returns NULL if map is full and no hole can be found.
105  */
106 void
vm_map_store_find_last_free(vm_map_t map,vm_map_entry_t * o_entry)107 vm_map_store_find_last_free(
108 	vm_map_t map,
109 	vm_map_entry_t *o_entry)        /* OUT */
110 {
111 	/* TODO: Provide a RB implementation for this routine. */
112 	vm_map_store_find_last_free_ll(map, o_entry);
113 }
114 
115 /*
116  *	vm_map_entry_{un,}link:
117  *
118  *	Insert/remove entries from maps (or map copies).
119  *	The _vm_map_store_entry_{un,}link variants are used at
120  *	some places where updating first_free is not needed &
121  *	copy maps are being modified. Also note the first argument
122  *	is the map header.
123  *	Modifying the vm_map_store_entry_{un,}link functions to
124  *	deal with these call sites made the interface confusing
125  *	and clunky.
126  */
127 
/*
 * _vm_map_store_entry_link:
 *
 * Insert ENTRY after AFTER_WHERE in every store backing MAPHDR
 * (linked list always; RB tree when the header supports it).
 * Does NOT update first_free -- see vm_map_store_entry_link for that.
 */
void
_vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	assert(entry->vme_start < entry->vme_end);
	if (__improbable(vm_debug_events)) {
		/* The header sits just past the map's lock; back up to recover the vm_map for the probe. */
		DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}

	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_link_rb(mapHdr, after_where, entry);
	}
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	/* Record the entry's original bounds once, then capture the insertion backtrace. */
	if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
		entry->vme_start_original = entry->vme_start;
		entry->vme_end_original = entry->vme_end;
	}
	backtrace(&entry->vme_insertion_bt[0],
	    (sizeof(entry->vme_insertion_bt) / sizeof(uintptr_t)), NULL, NULL);
#endif
}
151 
152 void
vm_map_store_entry_link(vm_map_t map,vm_map_entry_t after_where,vm_map_entry_t entry,vm_map_kernel_flags_t vmk_flags)153 vm_map_store_entry_link(
154 	vm_map_t                map,
155 	vm_map_entry_t          after_where,
156 	vm_map_entry_t          entry,
157 	vm_map_kernel_flags_t   vmk_flags)
158 {
159 	vm_map_t VMEL_map;
160 	vm_map_entry_t VMEL_entry;
161 	VMEL_map = (map);
162 	VMEL_entry = (entry);
163 
164 	if (entry->is_sub_map) {
165 		assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
166 		    "map %p (%d) entry %p submap %p (%d)\n",
167 		    map, VM_MAP_PAGE_SHIFT(map), entry,
168 		    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
169 	}
170 
171 	_vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);
172 	if (VMEL_map->disable_vmentry_reuse == TRUE) {
173 		UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry);
174 	} else {
175 		update_first_free_ll(VMEL_map, VMEL_map->first_free);
176 #ifdef VM_MAP_STORE_USE_RB
177 		if (vm_map_store_has_RB_support( &VMEL_map->hdr )) {
178 			update_first_free_rb(VMEL_map, entry, TRUE);
179 		}
180 #endif
181 	}
182 	(void) vmk_flags;
183 }
184 
/*
 * _vm_map_store_entry_unlink:
 *
 * Remove ENTRY from every store backing MAPHDR (linked list always;
 * RB tree when the header supports it).  Does NOT update first_free
 * -- see vm_map_store_entry_unlink for that.
 */
void
_vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry)
{
	if (__improbable(vm_debug_events)) {
		/* The header sits just past the map's lock; back up to recover the vm_map for the probe. */
		DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}

	vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_unlink_rb(mapHdr, entry);
	}
#endif
}
199 
200 void
vm_map_store_entry_unlink(vm_map_t map,vm_map_entry_t entry)201 vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry)
202 {
203 	vm_map_t VMEU_map;
204 	vm_map_entry_t VMEU_entry = NULL;
205 	vm_map_entry_t VMEU_first_free = NULL;
206 	VMEU_map = (map);
207 	VMEU_entry = (entry);
208 
209 	if (map->holelistenabled == FALSE) {
210 		if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
211 			VMEU_first_free = VMEU_entry->vme_prev;
212 		} else {
213 			VMEU_first_free = VMEU_map->first_free;
214 		}
215 	}
216 	_vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry);
217 	vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE);
218 	update_first_free_ll(VMEU_map, VMEU_first_free);
219 #ifdef VM_MAP_STORE_USE_RB
220 	if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
221 		update_first_free_rb(VMEU_map, entry, FALSE);
222 	}
223 #endif
224 }
225 
226 void
vm_map_store_copy_reset(vm_map_copy_t copy,vm_map_entry_t entry)227 vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
228 {
229 	int nentries = copy->cpy_hdr.nentries;
230 	vm_map_store_copy_reset_ll(copy, entry, nentries);
231 #ifdef VM_MAP_STORE_USE_RB
232 	if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
233 		vm_map_store_copy_reset_rb(copy, entry, nentries);
234 	}
235 #endif
236 }
237 
238 void
vm_map_store_update_first_free(vm_map_t map,vm_map_entry_t first_free_entry,boolean_t new_entry_creation)239 vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation)
240 {
241 	update_first_free_ll(map, first_free_entry);
242 #ifdef VM_MAP_STORE_USE_RB
243 	if (vm_map_store_has_RB_support( &map->hdr )) {
244 		update_first_free_rb(map, first_free_entry, new_entry_creation);
245 	}
246 #endif
247 }
248