/*
 * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <mach/sdt.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h> /* for vm_debug_events */
#include <sys/code_signing.h>

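/*
 * vm_map_store.c multiplexes map entry store operations across the two
 * backing structures: the doubly-linked entry list ("ll") that every
 * map header carries, and the optional red-black tree ("rb") used to
 * accelerate address lookups.  The wrappers below always update the
 * linked list, and update the RB tree only when the header advertises
 * RB support.
 */
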
#if MACH_ASSERT
bool
first_free_is_valid_store( vm_map_t map )
{
	return first_free_is_valid_ll( map );
}
#endif

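/*
 * A map header opts out of red-black tree maintenance by storing the
 * SKIP_RB_TREE sentinel in place of its RB root pointer (transient
 * copy-map headers are set up this way, for instance), in which case
 * only the linked-list store is kept up to date.
 */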
bool
vm_map_store_has_RB_support( struct vm_map_header *hdr )
{
	if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
		return FALSE;
	}
	return TRUE;
}

void
vm_map_store_init( struct vm_map_header *hdr )
{
	vm_map_store_init_ll( hdr );
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( hdr )) {
		vm_map_store_init_rb( hdr );
	}
#endif
}

static inline bool
_vm_map_store_lookup_entry(
	vm_map_t                map,
	vm_map_offset_t         address,
	vm_map_entry_t          *entry)         /* OUT */
{
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		return vm_map_store_lookup_entry_rb( map, address, entry );
	} else {
		panic("VM map lookups need RB tree support.");
		return FALSE; /* For compiler warning. */
	}
#endif
}

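/*
 * Lookup contract (the caller is assumed to hold the map lock): on a
 * hit, TRUE is returned and *entry is the entry containing "address";
 * on a miss, FALSE is returned and *entry is the entry immediately
 * preceding the hole that contains "address".  A sketch:
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_store_lookup_entry(map, addr, &entry)) {
 *		assert(entry->vme_start <= addr && addr < entry->vme_end);
 *	} else {
 *		... link a new entry after "entry" to cover "addr" ...
 *	}
 */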
__attribute__((noinline))
bool
vm_map_store_lookup_entry(
	vm_map_t                map,
	vm_map_offset_t         address,
	vm_map_entry_t          *entry)         /* OUT */
{
	return _vm_map_store_lookup_entry(map, address, entry);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 *	The _vm_map_store_entry_{un,}link variants are used in
 *	places where updating first_free is not needed and where
 *	copy maps are being modified. Note that their first
 *	argument is the map header, not the map.
 *	Teaching the vm_map_store_entry_{un,}link functions to
 *	handle those call sites as well made the interface
 *	confusing and clunky, hence these separate variants.
 */

void
_vm_map_store_entry_link(
	struct vm_map_header   *mapHdr,
	vm_map_entry_t          after_where,
	vm_map_entry_t          entry)
{
	if (__improbable(entry->vme_end <= entry->vme_start)) {
		panic("maphdr %p entry %p start 0x%llx end 0x%llx\n", mapHdr, entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end);
	}

	assert(entry->vme_start < entry->vme_end);
	if (__improbable(vm_debug_events)) {
		DTRACE_VM4(map_entry_link,
		    vm_map_t, __container_of(mapHdr, struct _vm_map, hdr),
		    vm_map_entry_t, entry,
		    vm_address_t, entry->vme_start,
		    vm_address_t, entry->vme_end);
	}

	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_link_rb(mapHdr, entry);
	}
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
		entry->vme_start_original = entry->vme_start;
		entry->vme_end_original = entry->vme_end;
	}
	btref_put(entry->vme_insertion_bt);
	entry->vme_insertion_bt = btref_get(__builtin_frame_address(0),
	    BTREF_GET_NOWAIT);
#endif
}

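/*
 * The map-level wrapper below, in addition to linking the entry into
 * both stores, refreshes the free-space caches (highest_entry_end when
 * entry reuse is disabled, first_free otherwise) and, when a code
 * signing monitor is compiled in, lets it associate the new entry with
 * its code-signing state.
 */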
void
vm_map_store_entry_link(
	vm_map_t                map,
	vm_map_entry_t          after_where,
	vm_map_entry_t          entry,
	vm_map_kernel_flags_t   vmk_flags)
{
	if (entry->is_sub_map) {
		assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
		    "map %p (%d) entry %p submap %p (%d)\n",
		    map, VM_MAP_PAGE_SHIFT(map), entry,
		    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
	}

	_vm_map_store_entry_link(&map->hdr, after_where, entry);

	if (map->disable_vmentry_reuse == TRUE) {
		/*
		 * GuardMalloc support:
		 * Some of these entries are created with MAP_FIXED.
		 * Some are created with a very high hint address.
		 * So we use aliases and address ranges to make sure
		 * that those special regions (nano, jit etc) don't
		 * result in our highest hint being set to near
		 * the end of the map and future allocations getting
		 * KERN_NO_SPACE when running with guardmalloc.
		 */
		int alias = VME_ALIAS(entry);

		assert(!map->is_nested_map);
		if (alias != VM_MEMORY_MALLOC_NANO &&
		    alias != VM_MEMORY_MALLOC_TINY &&
		    alias != VM_MEMORY_MALLOC_SMALL &&
		    alias != VM_MEMORY_MALLOC_MEDIUM &&
		    alias != VM_MEMORY_MALLOC_LARGE &&
		    alias != VM_MEMORY_MALLOC_HUGE &&
		    entry->used_for_jit == 0 &&
		    (entry->vme_start < SHARED_REGION_BASE ||
		    entry->vme_start >= (SHARED_REGION_BASE + SHARED_REGION_SIZE)) &&
		    map->highest_entry_end < entry->vme_end) {
			map->highest_entry_end = entry->vme_end;
		}
	} else {
		update_first_free_ll(map, map->first_free);
#ifdef VM_MAP_STORE_USE_RB
		if (vm_map_store_has_RB_support(&map->hdr)) {
			update_first_free_rb(map, entry, TRUE);
		}
#endif
	}

#if CODE_SIGNING_MONITOR
	(void) vm_map_entry_cs_associate(map, entry, vmk_flags);
#else
	(void) vmk_flags;
#endif
}

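/*
 * Typical pairing at a call site (a sketch: "prev" and "new_entry" are
 * hypothetical locals, and the map must be write-locked around both
 * calls):
 *
 *	vm_map_store_entry_link(map, prev, new_entry, vmk_flags);
 *	...
 *	vm_map_store_entry_unlink(map, new_entry, true);
 */
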
void
_vm_map_store_entry_unlink(
	struct vm_map_header * mapHdr,
	vm_map_entry_t entry,
	bool check_permanent)
{
	if (__improbable(vm_debug_events)) {
		DTRACE_VM4(map_entry_unlink,
		    vm_map_t, __container_of(mapHdr, struct _vm_map, hdr),
		    vm_map_entry_t, entry,
		    vm_address_t, entry->vme_start,
		    vm_address_t, entry->vme_end);
	}

	/*
	 * We should never unlink a "permanent" entry.  The caller should
	 * clear "permanent" first if it wants it to be bypassed.
	 */
	if (check_permanent) {
		assertf(!entry->vme_permanent,
		    "mapHdr %p entry %p [ 0x%llx end 0x%llx ] prot 0x%x/0x%x submap %d\n",
		    mapHdr, entry,
		    (uint64_t)entry->vme_start, (uint64_t)entry->vme_end,
		    entry->protection, entry->max_protection, entry->is_sub_map);
	}

	vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_unlink_rb(mapHdr, entry);
	}
#endif
}

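/*
 * Beyond unlinking from both stores, the map-level variant repairs the
 * map caches that may reference the outgoing entry: the lookup hint is
 * reset, and on maps without a hole list, first_free is pulled back to
 * the predecessor when the removed entry sat at or before it.  (The
 * VMEU_* locals appear to be a holdover from the macro this function
 * replaced.)
 */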
void
vm_map_store_entry_unlink(
	vm_map_t map,
	vm_map_entry_t entry,
	bool check_permanent)
{
	vm_map_t VMEU_map;
	vm_map_entry_t VMEU_entry = NULL;
	vm_map_entry_t VMEU_first_free = NULL;
	VMEU_map = (map);
	VMEU_entry = (entry);

	if (entry == map->hint) {
		map->hint = vm_map_to_entry(map);
	}
	if (map->holelistenabled == FALSE) {
		if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
			VMEU_first_free = VMEU_entry->vme_prev;
		} else {
			VMEU_first_free = VMEU_map->first_free;
		}
	}
	_vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry, check_permanent);

	update_first_free_ll(VMEU_map, VMEU_first_free);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
		update_first_free_rb(VMEU_map, entry, FALSE);
	}
#endif
}

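/*
 * vm_map_store_copy_reset() detaches the chain of entries headed by
 * "entry" from both stores of a map copy in one shot, leaving the copy
 * header empty; the entries themselves are left for the caller to
 * dispose of.
 */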
void
vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
{
	int nentries = copy->cpy_hdr.nentries;
	vm_map_store_copy_reset_ll(copy, entry, nentries);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
		vm_map_store_copy_reset_rb(copy, entry, nentries);
	}
#endif
}

void
vm_map_store_update_first_free(
	vm_map_t                map,
	vm_map_entry_t          first_free_entry,
	bool                    new_entry_creation)
{
	update_first_free_ll(map, first_free_entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		update_first_free_rb(map, first_free_entry, new_entry_creation);
	}
#endif
}

__abortlike
static void
__vm_map_store_find_space_holelist_corruption(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_entry_t          entry)
{
	panic("Found an existing entry %p [0x%llx, 0x%llx) in map %p "
	    "instead of potential hole at address: 0x%llx.",
	    entry, entry->vme_start, entry->vme_end, map, start);
}

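/*
 * Hole-list nodes are pseudo entries describing gaps rather than real
 * map entries, so a hole selected by the scans below cannot be handed
 * back directly.  Re-running a lookup at the hole's start converts it:
 * a miss leaves *entry_p pointing at the real entry preceding the gap
 * (the insertion point), while a hit means some entry overlaps what
 * the hole list claimed was free, i.e. fatal corruption.
 */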
static void
vm_map_store_convert_hole_to_entry(
	vm_map_t                map,
	vm_map_offset_t         addr,
	vm_map_entry_t         *entry_p)
{
	vm_map_entry_t entry = *entry_p;

	if (_vm_map_store_lookup_entry(map, entry->vme_start, entry_p)) {
		__vm_map_store_find_space_holelist_corruption(map, addr, entry);
	}
}

static struct vm_map_entry *
vm_map_store_find_space_backwards(
	vm_map_t                map,
	vm_map_offset_t         end,
	vm_map_offset_t         lowest_addr,
	vm_map_offset_t         guard_offset,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_offset_t        *addr_out)
{
	const vm_map_offset_t map_mask  = VM_MAP_PAGE_MASK(map);
	const bool            use_holes = map->holelistenabled;
	vm_map_offset_t       start;
	vm_map_entry_t        entry;

	/*
	 *	Find the entry we will scan from that is the closest
	 *	to our required scan hint "end".
	 */

	if (use_holes) {
		entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		if (entry == VM_MAP_ENTRY_NULL) {
			return VM_MAP_ENTRY_NULL;
		}

		entry = entry->vme_prev;

		while (end <= entry->vme_start) {
			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				return VM_MAP_ENTRY_NULL;
			}

			entry = entry->vme_prev;
		}

		if (entry->vme_end < end) {
			end = entry->vme_end;
		}
	} else {
		if (map->max_offset <= end) {
			entry = vm_map_to_entry(map);
			end = map->max_offset;
		} else if (_vm_map_store_lookup_entry(map, end - 1, &entry)) {
			end = entry->vme_start;
		} else {
			entry = entry->vme_next;
		}
	}

	for (;;) {
		/*
		 * The "entry" follows the proposed new region.
		 */

		end    = vm_map_trunc_page(end, map_mask);
		start  = (end - size) & ~mask;
		start  = vm_map_trunc_page(start, map_mask);
		end    = start + size;
		start -= guard_offset;

		if (end < start || start < lowest_addr) {
			/*
			 * Fail: reached our scan's lowest address limit
			 * without finding a large enough hole.
			 */
			return VM_MAP_ENTRY_NULL;
		}

		if (use_holes) {
			if (entry->vme_start <= start) {
				/*
				 * Done: this hole is wide enough.
				 */
				vm_map_store_convert_hole_to_entry(map, start, &entry);
				break;
			}

			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Fail: wrapped around, no more holes.
				 */
				return VM_MAP_ENTRY_NULL;
			}

			entry = entry->vme_prev;
			end = entry->vme_end;
		} else {
			entry = entry->vme_prev;

			if (entry == vm_map_to_entry(map)) {
				/*
				 * Done: no more entries toward the start
				 * of the map, only a big enough void.
				 */
				break;
			}

			if (entry->vme_end <= start) {
				/*
				 * Done: the gap between the two consecutive
				 * entries is large enough.
				 */
				break;
			}

			end = entry->vme_start;
		}
	}

	*addr_out = start;
	return entry;
}

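/*
 * A worked instance of the backwards sizing arithmetic above (made-up
 * numbers: 4K pages so map_mask = 0xfff, a 16K alignment mask of
 * 0x3fff, size = 0x2000, guard_offset = 0, scanning down from
 * end = 0x7003):
 *
 *	end   = vm_map_trunc_page(0x7003)   -> 0x7000
 *	start = (0x7000 - 0x2000) & ~0x3fff -> 0x4000
 *	start = vm_map_trunc_page(0x4000)   -> 0x4000
 *	end   = 0x4000 + 0x2000             -> 0x6000
 *
 * The candidate block slides downward until it satisfies both the
 * caller's alignment and the map's page size; a nonzero guard_offset
 * would then be carved out below "start".
 */
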
static struct vm_map_entry *
vm_map_store_find_space_forward(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         highest_addr,
	vm_map_offset_t         guard_offset,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_offset_t        *addr_out)
{
	const vm_map_offset_t map_mask  = VM_MAP_PAGE_MASK(map);
	const bool            use_holes = map->holelistenabled;
	vm_map_entry_t        entry;

	/*
	 *	Find the entry we will scan from that is the closest
	 *	to our required scan hint "start".
	 */

	if (__improbable(map->disable_vmentry_reuse)) {
		assert(!map->is_nested_map);

		start = map->highest_entry_end + PAGE_SIZE_64;
		while (vm_map_lookup_entry(map, start, &entry)) {
			start = entry->vme_end + PAGE_SIZE_64;
		}
	} else if (use_holes) {
		entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		if (entry == VM_MAP_ENTRY_NULL) {
			return VM_MAP_ENTRY_NULL;
		}

		while (entry->vme_end <= start) {
			entry = entry->vme_next;

			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				return VM_MAP_ENTRY_NULL;
			}
		}

		if (start < entry->vme_start) {
			start = entry->vme_start;
		}
	} else {
		vm_map_offset_t first_free_start;

		assert(first_free_is_valid(map));

		entry = map->first_free;
		if (entry == vm_map_to_entry(map)) {
			first_free_start = map->min_offset;
		} else {
			first_free_start = entry->vme_end;
		}

		if (start <= first_free_start) {
			start = first_free_start;
		} else if (_vm_map_store_lookup_entry(map, start, &entry)) {
			start = entry->vme_end;
		}
	}

	for (;;) {
		vm_map_offset_t orig_start = start;
		vm_map_offset_t end, desired_empty_end;

		/*
		 * The "entry" precedes the proposed new region.
		 */

		start  = (start + guard_offset + mask) & ~mask;
		start  = vm_map_round_page(start, map_mask);
		end    = start + size;
		start -= guard_offset;
		/*
		 * We want an entire page of empty space,
		 * but don't increase the allocation size.
		 */
		desired_empty_end = vm_map_round_page(end, map_mask);

		if (start < orig_start || desired_empty_end < start ||
		    highest_addr < desired_empty_end) {
			/*
			 * Fail: reached our scan's highest address limit
			 * without finding a large enough hole.
			 */
			return VM_MAP_ENTRY_NULL;
		}

		if (use_holes) {
			if (desired_empty_end <= entry->vme_end) {
				/*
				 * Done: this hole is wide enough.
				 */
				vm_map_store_convert_hole_to_entry(map, start, &entry);
				break;
			}

			entry = entry->vme_next;

			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Fail: wrapped around, no more holes.
				 */
				return VM_MAP_ENTRY_NULL;
			}

			start = entry->vme_start;
		} else {
			vm_map_entry_t next = entry->vme_next;

			if (next == vm_map_to_entry(map)) {
				/*
				 * Done: no more entries toward the end
				 * of the map, only a big enough void.
				 */
				break;
			}

			if (desired_empty_end <= next->vme_start) {
				/*
				 * Done: the gap between the two consecutive
				 * entries is large enough.
				 */
				break;
			}

			entry = next;
			start = entry->vme_end;
		}
	}

	*addr_out = start;
	return entry;
}

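/*
 * vm_map_store_find_space() returns the entry after which a new entry
 * of "size" bytes can be linked, reporting the chosen address through
 * *addr_out, or VM_MAP_ENTRY_NULL when no gap between "hint" and
 * "limit" is large enough.  A sketch of a caller (hypothetical locals;
 * the map must be locked):
 *
 *	vm_map_offset_t addr;
 *	vm_map_entry_t  prev;
 *
 *	prev = vm_map_store_find_space(map, map->min_offset,
 *	    map->max_offset, false, 0, size, mask, &addr);
 *	if (prev != VM_MAP_ENTRY_NULL) {
 *		... initialize "new_entry" to cover [addr, addr + size) ...
 *		vm_map_store_entry_link(map, prev, new_entry, vmk_flags);
 *	}
 */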
struct vm_map_entry *
vm_map_store_find_space(
	vm_map_t                map,
	vm_map_offset_t         hint,
	vm_map_offset_t         limit,
	bool                    backwards,
	vm_map_offset_t         guard_offset,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_offset_t        *addr_out)
{
	vm_map_entry_t entry;

#if defined VM_MAP_STORE_USE_RB
	__builtin_assume((void*)map->hdr.rb_head_store.rbh_root !=
	    (void*)(int)SKIP_RB_TREE);
#endif

	if (backwards) {
		entry = vm_map_store_find_space_backwards(map, hint, limit,
		    guard_offset, size, mask, addr_out);
	} else {
		entry = vm_map_store_find_space_forward(map, hint, limit,
		    guard_offset, size, mask, addr_out);
	}

	return entry;
}
591