xref: /xnu-8020.140.41/osfmk/vm/vm_map_store.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/backtrace.h>
30 #include <mach/sdt.h>
31 #include <vm/vm_map_store.h>
32 #include <vm/vm_pageout.h> /* for vm_debug_events */
33 
#if MACH_ASSERT
/*
 * Debug-only consistency check: defer to the linked-list store's
 * validation of the map's cached "first_free" entry.
 */
boolean_t
first_free_is_valid_store(vm_map_t map)
{
	return first_free_is_valid_ll(map);
}
#endif
41 
42 boolean_t
vm_map_store_has_RB_support(struct vm_map_header * hdr)43 vm_map_store_has_RB_support( struct vm_map_header *hdr )
44 {
45 	if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
46 		return FALSE;
47 	}
48 	return TRUE;
49 }
50 
/*
 * Initialize the backing stores for a map header: always the linked
 * list, and additionally the RB tree when this header supports one.
 */
void
vm_map_store_init(struct vm_map_header *hdr)
{
	vm_map_store_init_ll(hdr);
#ifdef VM_MAP_STORE_USE_RB
	if (!vm_map_store_has_RB_support(hdr)) {
		return;
	}
	vm_map_store_init_rb(hdr);
#endif
}
61 
62 static inline boolean_t
_vm_map_store_lookup_entry(vm_map_t map,vm_map_offset_t address,vm_map_entry_t * entry)63 _vm_map_store_lookup_entry(
64 	vm_map_t                map,
65 	vm_map_offset_t         address,
66 	vm_map_entry_t          *entry)         /* OUT */
67 {
68 #ifdef VM_MAP_STORE_USE_LL
69 	return vm_map_store_lookup_entry_ll( map, address, entry );
70 #elif defined VM_MAP_STORE_USE_RB
71 	if (vm_map_store_has_RB_support( &map->hdr )) {
72 		return vm_map_store_lookup_entry_rb( map, address, entry );
73 	} else {
74 		panic("VM map lookups need RB tree support.");
75 		return FALSE; /* For compiler warning.*/
76 	}
77 #endif
78 }
79 
80 __attribute__((noinline))
81 boolean_t
vm_map_store_lookup_entry(vm_map_t map,vm_map_offset_t address,vm_map_entry_t * entry)82 vm_map_store_lookup_entry(
83 	vm_map_t                map,
84 	vm_map_offset_t         address,
85 	vm_map_entry_t          *entry)         /* OUT */
86 {
87 	return _vm_map_store_lookup_entry(map, address, entry);
88 }
89 
90 void
vm_map_store_update(vm_map_t map,vm_map_entry_t entry,int update_type)91 vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type )
92 {
93 	switch (update_type) {
94 	case VM_MAP_ENTRY_CREATE:
95 		break;
96 	case VM_MAP_ENTRY_DELETE:
97 		if ((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) {
98 			(map)->first_free = vm_map_to_entry(map);
99 		}
100 		if ((entry) == (map)->hint) {
101 			(map)->hint = vm_map_to_entry(map);
102 		}
103 		break;
104 	default:
105 		break;
106 	}
107 }
108 
109 /*
110  *	vm_map_entry_{un,}link:
111  *
112  *	Insert/remove entries from maps (or map copies).
113  *	The _vm_map_store_entry_{un,}link variants are used at
114  *	some places where updating first_free is not needed &
115  *	copy maps are being modified. Also note the first argument
116  *	is the map header.
117  *	Modifying the vm_map_store_entry_{un,}link functions to
118  *	deal with these call sites made the interface confusing
119  *	and clunky.
120  */
121 
122 void
_vm_map_store_entry_link(struct vm_map_header * mapHdr,vm_map_entry_t after_where,vm_map_entry_t entry)123 _vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
124 {
125 	assert(entry->vme_start < entry->vme_end);
126 	if (__improbable(vm_debug_events)) {
127 		DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
128 	}
129 
130 	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
131 #ifdef VM_MAP_STORE_USE_RB
132 	if (vm_map_store_has_RB_support( mapHdr )) {
133 		vm_map_store_entry_link_rb(mapHdr, after_where, entry);
134 	}
135 #endif
136 #if MAP_ENTRY_INSERTION_DEBUG
137 	if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
138 		entry->vme_start_original = entry->vme_start;
139 		entry->vme_end_original = entry->vme_end;
140 	}
141 	btref_put(entry->vme_insertion_bt);
142 	entry->vme_insertion_bt = btref_get(__builtin_frame_address(0),
143 	    BTREF_GET_NOWAIT);
144 #endif
145 }
146 
147 void
vm_map_store_entry_link(vm_map_t map,vm_map_entry_t after_where,vm_map_entry_t entry,vm_map_kernel_flags_t vmk_flags)148 vm_map_store_entry_link(
149 	vm_map_t                map,
150 	vm_map_entry_t          after_where,
151 	vm_map_entry_t          entry,
152 	vm_map_kernel_flags_t   vmk_flags)
153 {
154 	vm_map_t VMEL_map;
155 	vm_map_entry_t VMEL_entry;
156 	VMEL_map = (map);
157 	VMEL_entry = (entry);
158 
159 	if (entry->is_sub_map) {
160 		assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
161 		    "map %p (%d) entry %p submap %p (%d)\n",
162 		    map, VM_MAP_PAGE_SHIFT(map), entry,
163 		    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
164 	}
165 
166 	_vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);
167 	if (VMEL_map->disable_vmentry_reuse == TRUE) {
168 		UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry);
169 	} else {
170 		update_first_free_ll(VMEL_map, VMEL_map->first_free);
171 #ifdef VM_MAP_STORE_USE_RB
172 		if (vm_map_store_has_RB_support( &VMEL_map->hdr )) {
173 			update_first_free_rb(VMEL_map, entry, TRUE);
174 		}
175 #endif
176 	}
177 	(void) vmk_flags;
178 }
179 
180 void
_vm_map_store_entry_unlink(struct vm_map_header * mapHdr,vm_map_entry_t entry)181 _vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry)
182 {
183 	if (__improbable(vm_debug_events)) {
184 		DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
185 	}
186 
187 	vm_map_store_entry_unlink_ll(mapHdr, entry);
188 #ifdef VM_MAP_STORE_USE_RB
189 	if (vm_map_store_has_RB_support( mapHdr )) {
190 		vm_map_store_entry_unlink_rb(mapHdr, entry);
191 	}
192 #endif
193 }
194 
195 void
vm_map_store_entry_unlink(vm_map_t map,vm_map_entry_t entry)196 vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry)
197 {
198 	vm_map_t VMEU_map;
199 	vm_map_entry_t VMEU_entry = NULL;
200 	vm_map_entry_t VMEU_first_free = NULL;
201 	VMEU_map = (map);
202 	VMEU_entry = (entry);
203 
204 	if (map->holelistenabled == FALSE) {
205 		if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
206 			VMEU_first_free = VMEU_entry->vme_prev;
207 		} else {
208 			VMEU_first_free = VMEU_map->first_free;
209 		}
210 	}
211 	_vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry);
212 	vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE);
213 	update_first_free_ll(VMEU_map, VMEU_first_free);
214 #ifdef VM_MAP_STORE_USE_RB
215 	if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
216 		update_first_free_rb(VMEU_map, entry, FALSE);
217 	}
218 #endif
219 }
220 
221 void
vm_map_store_copy_reset(vm_map_copy_t copy,vm_map_entry_t entry)222 vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
223 {
224 	int nentries = copy->cpy_hdr.nentries;
225 	vm_map_store_copy_reset_ll(copy, entry, nentries);
226 #ifdef VM_MAP_STORE_USE_RB
227 	if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
228 		vm_map_store_copy_reset_rb(copy, entry, nentries);
229 	}
230 #endif
231 }
232 
233 void
vm_map_store_update_first_free(vm_map_t map,vm_map_entry_t first_free_entry,boolean_t new_entry_creation)234 vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation)
235 {
236 	update_first_free_ll(map, first_free_entry);
237 #ifdef VM_MAP_STORE_USE_RB
238 	if (vm_map_store_has_RB_support( &map->hdr )) {
239 		update_first_free_rb(map, first_free_entry, new_entry_creation);
240 	}
241 #endif
242 }
243 
244 __abortlike
245 static void
__vm_map_store_find_space_holelist_corruption(vm_map_t map,vm_map_offset_t start,vm_map_entry_t entry)246 __vm_map_store_find_space_holelist_corruption(
247 	vm_map_t                map,
248 	vm_map_offset_t         start,
249 	vm_map_entry_t          entry)
250 {
251 	panic("Found an existing entry %p [0x%llx, 0x%llx) in map %p "
252 	    "instead of potential hole at address: 0x%llx.",
253 	    entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end,
254 	    map, (uint64_t)start);
255 }
256 
257 static void
vm_map_store_convert_hole_to_entry(vm_map_t map,vm_map_offset_t addr,vm_map_entry_t * entry_p)258 vm_map_store_convert_hole_to_entry(
259 	vm_map_t                map,
260 	vm_map_offset_t         addr,
261 	vm_map_entry_t         *entry_p)
262 {
263 	vm_map_entry_t entry = *entry_p;
264 
265 	if (_vm_map_store_lookup_entry(map, entry->vme_start, entry_p)) {
266 		__vm_map_store_find_space_holelist_corruption(map, addr, entry);
267 	}
268 }
269 
270 static struct vm_map_entry *
vm_map_store_find_space_backwards(vm_map_t map,vm_map_offset_t end,vm_map_offset_t lowest_addr,vm_map_offset_t guard_offset,vm_map_size_t size,vm_map_offset_t mask,vm_map_offset_t * addr_out)271 vm_map_store_find_space_backwards(
272 	vm_map_t                map,
273 	vm_map_offset_t         end,
274 	vm_map_offset_t         lowest_addr,
275 	vm_map_offset_t         guard_offset,
276 	vm_map_size_t           size,
277 	vm_map_offset_t         mask,
278 	vm_map_offset_t        *addr_out)
279 {
280 	const vm_map_offset_t map_mask  = VM_MAP_PAGE_MASK(map);
281 	const bool            use_holes = map->holelistenabled;
282 	vm_map_offset_t       start;
283 	vm_map_entry_t        entry;
284 
285 	/*
286 	 *	Find the entry we will scan from that is the closest
287 	 *	to our required scan hint "end".
288 	 */
289 
290 	if (use_holes) {
291 		entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
292 		if (entry == VM_MAP_ENTRY_NULL) {
293 			return VM_MAP_ENTRY_NULL;
294 		}
295 
296 		entry = entry->vme_prev;
297 
298 		while (end <= entry->vme_start) {
299 			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
300 				return VM_MAP_ENTRY_NULL;
301 			}
302 
303 			entry = entry->vme_prev;
304 		}
305 
306 		if (entry->vme_end < end) {
307 			end = entry->vme_end;
308 		}
309 	} else {
310 		if (map->max_offset <= end) {
311 			entry = vm_map_to_entry(map);
312 			end = map->max_offset;
313 		} else if (_vm_map_store_lookup_entry(map, end - 1, &entry)) {
314 			end = entry->vme_start;
315 		} else {
316 			entry = entry->vme_next;
317 		}
318 	}
319 
320 	for (;;) {
321 		/*
322 		 * The "entry" follows the proposed new region.
323 		 */
324 
325 		end    = vm_map_trunc_page(end, map_mask);
326 		start  = (end - size) & ~mask;
327 		start  = vm_map_trunc_page(start, map_mask);
328 		end    = start + size;
329 		start -= guard_offset;
330 
331 		if (end < start || start < lowest_addr) {
332 			/*
333 			 * Fail: reached our scan lowest address limit,
334 			 * without finding a large enough hole.
335 			 */
336 			return VM_MAP_ENTRY_NULL;
337 		}
338 
339 		if (use_holes) {
340 			if (entry->vme_start <= start) {
341 				/*
342 				 * Done: this hole is wide enough.
343 				 */
344 				vm_map_store_convert_hole_to_entry(map, start, &entry);
345 				break;
346 			}
347 
348 			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
349 				/*
350 				 * Fail: wrapped around, no more holes
351 				 */
352 				return VM_MAP_ENTRY_NULL;
353 			}
354 
355 			entry = entry->vme_prev;
356 			end = entry->vme_end;
357 		} else {
358 			entry = entry->vme_prev;
359 
360 			if (entry == vm_map_to_entry(map)) {
361 				/*
362 				 * Done: no more entries toward the start
363 				 * of the map, only a big enough void.
364 				 */
365 				break;
366 			}
367 
368 			if (entry->vme_end <= start) {
369 				/*
370 				 * Done: the gap between the two consecutive
371 				 * entries is large enough.
372 				 */
373 				break;
374 			}
375 
376 			end = entry->vme_start;
377 		}
378 	}
379 
380 	*addr_out = start;
381 	return entry;
382 }
383 
384 static struct vm_map_entry *
vm_map_store_find_space_forward(vm_map_t map,vm_map_offset_t start,vm_map_offset_t highest_addr,vm_map_offset_t guard_offset,vm_map_size_t size,vm_map_offset_t mask,vm_map_offset_t * addr_out)385 vm_map_store_find_space_forward(
386 	vm_map_t                map,
387 	vm_map_offset_t         start,
388 	vm_map_offset_t         highest_addr,
389 	vm_map_offset_t         guard_offset,
390 	vm_map_size_t           size,
391 	vm_map_offset_t         mask,
392 	vm_map_offset_t        *addr_out)
393 {
394 	const vm_map_offset_t map_mask  = VM_MAP_PAGE_MASK(map);
395 	const bool            use_holes = map->holelistenabled;
396 	vm_map_entry_t        entry;
397 
398 	/*
399 	 *	Find the entry we will scan from that is the closest
400 	 *	to our required scan hint "start".
401 	 */
402 
403 	if (__improbable(map->disable_vmentry_reuse)) {
404 		VM_MAP_HIGHEST_ENTRY(map, entry, start);
405 	} else if (use_holes) {
406 		entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
407 		if (entry == VM_MAP_ENTRY_NULL) {
408 			return VM_MAP_ENTRY_NULL;
409 		}
410 
411 		while (entry->vme_end <= start) {
412 			entry = entry->vme_next;
413 
414 			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
415 				return VM_MAP_ENTRY_NULL;
416 			}
417 		}
418 
419 		if (start < entry->vme_start) {
420 			start = entry->vme_start;
421 		}
422 	} else {
423 		vm_map_offset_t first_free_start;
424 
425 		assert(first_free_is_valid(map));
426 
427 		entry = map->first_free;
428 		if (entry == vm_map_to_entry(map)) {
429 			first_free_start = map->min_offset;
430 		} else {
431 			first_free_start = entry->vme_end;
432 		}
433 
434 		if (start <= first_free_start) {
435 			start = first_free_start;
436 		} else if (_vm_map_store_lookup_entry(map, start, &entry)) {
437 			start = entry->vme_end;
438 		}
439 	}
440 
441 	for (;;) {
442 		vm_map_offset_t orig_start = start;
443 		vm_map_offset_t end, desired_empty_end;
444 
445 		/*
446 		 * The "entry" precedes the proposed new region.
447 		 */
448 
449 		start  = (start + guard_offset + mask) & ~mask;
450 		start  = vm_map_round_page(start, map_mask);
451 		end    = start + size;
452 		start -= guard_offset;
453 		/*
454 		 * We want an entire page of empty space,
455 		 * but don't increase the allocation size.
456 		 */
457 		desired_empty_end = vm_map_round_page(end, map_mask);
458 
459 		if (start < orig_start || desired_empty_end < start ||
460 		    highest_addr < desired_empty_end) {
461 			/*
462 			 * Fail: reached our scan highest address limit,
463 			 * without finding a large enough hole.
464 			 */
465 			return VM_MAP_ENTRY_NULL;
466 		}
467 
468 		if (use_holes) {
469 			if (desired_empty_end <= entry->vme_end) {
470 				/*
471 				 * Done: this hole is wide enough.
472 				 */
473 				vm_map_store_convert_hole_to_entry(map, start, &entry);
474 				break;
475 			}
476 
477 			entry = entry->vme_next;
478 
479 			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
480 				/*
481 				 * Fail: wrapped around, no more holes
482 				 */
483 				return VM_MAP_ENTRY_NULL;
484 			}
485 
486 			start = entry->vme_start;
487 		} else {
488 			vm_map_entry_t next = entry->vme_next;
489 
490 			if (next == vm_map_to_entry(map)) {
491 				/*
492 				 * Done: no more entries toward the end
493 				 * of the map, only a big enough void.
494 				 */
495 				break;
496 			}
497 
498 			if (desired_empty_end <= next->vme_start) {
499 				/*
500 				 * Done: the gap between the two consecutive
501 				 * entries is large enough.
502 				 */
503 				break;
504 			}
505 
506 			entry = next;
507 			start = entry->vme_end;
508 		}
509 	}
510 
511 	*addr_out = start;
512 	return entry;
513 }
514 
515 struct vm_map_entry *
vm_map_store_find_space(vm_map_t map,vm_map_offset_t hint,vm_map_offset_t limit,boolean_t backwards,vm_map_offset_t guard_offset,vm_map_size_t size,vm_map_offset_t mask,vm_map_offset_t * addr_out)516 vm_map_store_find_space(
517 	vm_map_t                map,
518 	vm_map_offset_t         hint,
519 	vm_map_offset_t         limit,
520 	boolean_t               backwards,
521 	vm_map_offset_t         guard_offset,
522 	vm_map_size_t           size,
523 	vm_map_offset_t         mask,
524 	vm_map_offset_t        *addr_out)
525 {
526 	vm_map_entry_t entry;
527 
528 #if defined VM_MAP_STORE_USE_RB
529 	__builtin_assume((void*)map->hdr.rb_head_store.rbh_root !=
530 	    (void*)(int)SKIP_RB_TREE);
531 #endif
532 
533 	if (backwards) {
534 		entry = vm_map_store_find_space_backwards(map, hint, limit,
535 		    guard_offset, size, mask, addr_out);
536 	} else {
537 		entry = vm_map_store_find_space_forward(map, hint, limit,
538 		    guard_offset, size, mask, addr_out);
539 	}
540 
541 	return entry;
542 }
543