/*
 * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <mach/sdt.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h> /* for vm_debug_events */
#include <sys/code_signing.h>

#if MACH_ASSERT
bool
first_free_is_valid_store( vm_map_t map )
{
        return first_free_is_valid_ll( map );
}
#endif

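/*
 * vm_map_store_has_RB_support:
 *
 * Returns whether this map header participates in the red-black tree
 * store.  A header whose rb_head_store root is set to the SKIP_RB_TREE
 * sentinel is maintained as a linked list only.
 */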
bool
vm_map_store_has_RB_support( struct vm_map_header *hdr )
{
        if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
                return FALSE;
        }
        return TRUE;
}

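/*
 * vm_map_store_init:
 *
 * Initialize the backing stores for a map header: always the linked
 * list, plus the red-black tree when the header supports it.
 */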
void
vm_map_store_init( struct vm_map_header *hdr )
{
        vm_map_store_init_ll( hdr );
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( hdr )) {
                vm_map_store_init_rb( hdr );
        }
#endif
}

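/*
 * _vm_map_store_lookup_entry:
 *
 * Find the entry containing "address", or the entry immediately
 * preceding that address.  Returns TRUE and sets *entry when the
 * address falls within an existing entry; otherwise returns FALSE
 * and *entry points at the preceding entry (or the map header).
 */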
static inline bool
_vm_map_store_lookup_entry(
        vm_map_t        map,
        vm_map_offset_t address,
        vm_map_entry_t  *entry)         /* OUT */
{
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( &map->hdr )) {
                return vm_map_store_lookup_entry_rb( map, address, entry );
        } else {
                panic("VM map lookups need RB tree support.");
                return FALSE; /* For compiler warning.*/
        }
#endif
}

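/*
 * vm_map_store_lookup_entry:
 *
 * Out-of-line (noinline) wrapper around _vm_map_store_lookup_entry
 * for external callers.
 */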
__attribute__((noinline))
bool
vm_map_store_lookup_entry(
        vm_map_t        map,
        vm_map_offset_t address,
        vm_map_entry_t  *entry)         /* OUT */
{
        return _vm_map_store_lookup_entry(map, address, entry);
}

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps (or map copies).
 * The _vm_map_store_entry_{un,}link variants are used in
 * places where first_free does not need updating and where
 * map copies are being modified; note that their first
 * argument is the map header.
 * Folding those call sites into vm_map_store_entry_{un,}link
 * made the interface confusing and clunky, hence the
 * separate variants.
 */

void
_vm_map_store_entry_link(
        struct vm_map_header    *mapHdr,
        vm_map_entry_t          after_where,
        vm_map_entry_t          entry)
{
        assert(entry->vme_start < entry->vme_end);
        if (__improbable(vm_debug_events)) {
                DTRACE_VM4(map_entry_link,
                    vm_map_t, __container_of(mapHdr, struct _vm_map, hdr),
                    vm_map_entry_t, entry,
                    vm_address_t, entry->vme_start,
                    vm_address_t, entry->vme_end);
        }

        vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( mapHdr )) {
                vm_map_store_entry_link_rb(mapHdr, entry);
        }
#endif
#if MAP_ENTRY_INSERTION_DEBUG
        if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
                entry->vme_start_original = entry->vme_start;
                entry->vme_end_original = entry->vme_end;
        }
        btref_put(entry->vme_insertion_bt);
        entry->vme_insertion_bt = btref_get(__builtin_frame_address(0),
            BTREF_GET_NOWAIT);
#endif
}

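/*
 * vm_map_store_entry_link:
 *
 * Full-map variant of _vm_map_store_entry_link: links the entry into
 * the map's header, then updates the map's allocation hints
 * (highest_entry_end when vm_map_entry reuse is disabled, first_free
 * otherwise) and, when a code-signing monitor is built in, associates
 * code-signing state with the new entry.
 */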
void
vm_map_store_entry_link(
        vm_map_t                map,
        vm_map_entry_t          after_where,
        vm_map_entry_t          entry,
        vm_map_kernel_flags_t   vmk_flags)
{
        if (entry->is_sub_map) {
                assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
                    "map %p (%d) entry %p submap %p (%d)\n",
                    map, VM_MAP_PAGE_SHIFT(map), entry,
                    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
        }

        _vm_map_store_entry_link(&map->hdr, after_where, entry);

        if (map->disable_vmentry_reuse == TRUE) {
                /*
                 * GuardMalloc support:
                 * Some of these entries are created with MAP_FIXED.
                 * Some are created with a very high hint address.
                 * So we use aliases and address ranges to make sure
                 * that those special regions (nano, jit etc) don't
                 * result in our highest hint being set to near
                 * the end of the map and future allocations getting
                 * KERN_NO_SPACE when running with guardmalloc.
                 */
                int alias = VME_ALIAS(entry);

                assert(!map->is_nested_map);
                if (alias != VM_MEMORY_MALLOC_NANO &&
                    alias != VM_MEMORY_MALLOC_TINY &&
                    alias != VM_MEMORY_MALLOC_SMALL &&
                    alias != VM_MEMORY_MALLOC_MEDIUM &&
                    alias != VM_MEMORY_MALLOC_LARGE &&
                    alias != VM_MEMORY_MALLOC_HUGE &&
                    entry->used_for_jit == 0 &&
                    (entry->vme_start < SHARED_REGION_BASE ||
                    entry->vme_start >= (SHARED_REGION_BASE + SHARED_REGION_SIZE)) &&
                    map->highest_entry_end < entry->vme_end) {
                        map->highest_entry_end = entry->vme_end;
                }
        } else {
                update_first_free_ll(map, map->first_free);
#ifdef VM_MAP_STORE_USE_RB
                if (vm_map_store_has_RB_support(&map->hdr)) {
                        update_first_free_rb(map, entry, TRUE);
                }
#endif
        }

#if CODE_SIGNING_MONITOR
        (void) vm_map_entry_cs_associate(map, entry, vmk_flags);
#else
        (void) vmk_flags;
#endif
}

void
_vm_map_store_entry_unlink(
        struct vm_map_header    *mapHdr,
        vm_map_entry_t          entry,
        bool                    check_permanent)
{
        if (__improbable(vm_debug_events)) {
                DTRACE_VM4(map_entry_unlink,
                    vm_map_t, __container_of(mapHdr, struct _vm_map, hdr),
                    vm_map_entry_t, entry,
                    vm_address_t, entry->vme_start,
                    vm_address_t, entry->vme_end);
        }

        /*
         * We should never unlink a "permanent" entry. The caller should
         * clear "permanent" first if it wants it to be bypassed.
         */
        if (check_permanent) {
                assertf(!entry->vme_permanent,
                    "mapHdr %p entry %p [ 0x%llx end 0x%llx ] prot 0x%x/0x%x submap %d\n",
                    mapHdr, entry,
                    (uint64_t)entry->vme_start, (uint64_t)entry->vme_end,
                    entry->protection, entry->max_protection, entry->is_sub_map);
        }

        vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( mapHdr )) {
                vm_map_store_entry_unlink_rb(mapHdr, entry);
        }
#endif
}

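/*
 * vm_map_store_entry_unlink:
 *
 * Full-map variant of _vm_map_store_entry_unlink: resets the map's
 * lookup hint if it pointed at the entry being removed, unlinks the
 * entry from the map header, and then recomputes the map's free-space
 * hints.
 */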
void
vm_map_store_entry_unlink(
        vm_map_t        map,
        vm_map_entry_t  entry,
        bool            check_permanent)
{
        vm_map_t VMEU_map;
        vm_map_entry_t VMEU_entry = NULL;
        vm_map_entry_t VMEU_first_free = NULL;
        VMEU_map = (map);
        VMEU_entry = (entry);

        if (entry == map->hint) {
                map->hint = vm_map_to_entry(map);
        }
        if (map->holelistenabled == FALSE) {
                if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
                        VMEU_first_free = VMEU_entry->vme_prev;
                } else {
                        VMEU_first_free = VMEU_map->first_free;
                }
        }
        _vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry, check_permanent);

        update_first_free_ll(VMEU_map, VMEU_first_free);
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
                update_first_free_rb(VMEU_map, entry, FALSE);
        }
#endif
}

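/*
 * vm_map_store_copy_reset:
 *
 * Reset a map copy's backing stores (linked list, plus red-black tree
 * when supported) for all of the copy's entries, starting at "entry".
 */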
void
vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
{
        int nentries = copy->cpy_hdr.nentries;
        vm_map_store_copy_reset_ll(copy, entry, nentries);
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
                vm_map_store_copy_reset_rb(copy, entry, nentries);
        }
#endif
}

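/*
 * vm_map_store_update_first_free:
 *
 * Update the map's free-space hints in both backing stores after an
 * entry has been created ("new_entry_creation") or removed.
 */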
void
vm_map_store_update_first_free(
        vm_map_t        map,
        vm_map_entry_t  first_free_entry,
        bool            new_entry_creation)
{
        update_first_free_ll(map, first_free_entry);
#ifdef VM_MAP_STORE_USE_RB
        if (vm_map_store_has_RB_support( &map->hdr )) {
                update_first_free_rb(map, first_free_entry, new_entry_creation);
        }
#endif
}

__abortlike
static void
__vm_map_store_find_space_holelist_corruption(
        vm_map_t        map,
        vm_map_offset_t start,
        vm_map_entry_t  entry)
{
        panic("Found an existing entry %p [0x%llx, 0x%llx) in map %p "
            "instead of potential hole at address: 0x%llx.",
            entry, entry->vme_start, entry->vme_end, map, start);
}

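/*
 * vm_map_store_convert_hole_to_entry:
 *
 * Replace a hole-list pseudo-entry with the real map entry that
 * precedes the hole, by looking up the hole's start address in the
 * map.  If a real entry already covers that address, the hole list
 * is corrupt and we panic.
 */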
static void
vm_map_store_convert_hole_to_entry(
        vm_map_t        map,
        vm_map_offset_t addr,
        vm_map_entry_t  *entry_p)
{
        vm_map_entry_t entry = *entry_p;

        if (_vm_map_store_lookup_entry(map, entry->vme_start, entry_p)) {
                __vm_map_store_find_space_holelist_corruption(map, addr, entry);
        }
}

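/*
 * vm_map_store_find_space_backwards:
 *
 * Scan the map downward from "end" toward "lowest_addr" for a range
 * large enough to hold "size" bytes plus "guard_offset" of leading
 * guard space, subject to the alignment "mask".  On success, sets
 * *addr_out to the start of the range and returns the entry (possibly
 * the map header) that will precede the new range; returns
 * VM_MAP_ENTRY_NULL if no suitable space exists.
 */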
static struct vm_map_entry *
vm_map_store_find_space_backwards(
        vm_map_t        map,
        vm_map_offset_t end,
        vm_map_offset_t lowest_addr,
        vm_map_offset_t guard_offset,
        vm_map_size_t   size,
        vm_map_offset_t mask,
        vm_map_offset_t *addr_out)
{
        const vm_map_offset_t map_mask = VM_MAP_PAGE_MASK(map);
        const bool use_holes = map->holelistenabled;
        vm_map_offset_t start;
        vm_map_entry_t entry;

        /*
         * Find the entry we will scan from that is the closest
         * to our required scan hint "end".
         */

        if (use_holes) {
                entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
                if (entry == VM_MAP_ENTRY_NULL) {
                        return VM_MAP_ENTRY_NULL;
                }

                entry = entry->vme_prev;

                while (end <= entry->vme_start) {
                        if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                                return VM_MAP_ENTRY_NULL;
                        }

                        entry = entry->vme_prev;
                }

                if (entry->vme_end < end) {
                        end = entry->vme_end;
                }
        } else {
                if (map->max_offset <= end) {
                        entry = vm_map_to_entry(map);
                        end = map->max_offset;
                } else if (_vm_map_store_lookup_entry(map, end - 1, &entry)) {
                        end = entry->vme_start;
                } else {
                        entry = entry->vme_next;
                }
        }

        for (;;) {
                /*
                 * The "entry" follows the proposed new region.
                 */

                end = vm_map_trunc_page(end, map_mask);
                start = (end - size) & ~mask;
                start = vm_map_trunc_page(start, map_mask);
                end = start + size;
                start -= guard_offset;

                if (end < start || start < lowest_addr) {
                        /*
                         * Fail: reached our scan lowest address limit,
                         * without finding a large enough hole.
                         */
                        return VM_MAP_ENTRY_NULL;
                }

                if (use_holes) {
                        if (entry->vme_start <= start) {
                                /*
                                 * Done: this hole is wide enough.
                                 */
                                vm_map_store_convert_hole_to_entry(map, start, &entry);
                                break;
                        }

                        if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                                /*
                                 * Fail: wrapped around, no more holes
                                 */
                                return VM_MAP_ENTRY_NULL;
                        }

                        entry = entry->vme_prev;
                        end = entry->vme_end;
                } else {
                        entry = entry->vme_prev;

                        if (entry == vm_map_to_entry(map)) {
                                /*
                                 * Done: no more entries toward the start
                                 * of the map, only a big enough void.
                                 */
                                break;
                        }

                        if (entry->vme_end <= start) {
                                /*
                                 * Done: the gap between the two consecutive
                                 * entries is large enough.
                                 */
                                break;
                        }

                        end = entry->vme_start;
                }
        }

        *addr_out = start;
        return entry;
}

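/*
 * vm_map_store_find_space_forward:
 *
 * Scan the map upward from "start" toward "highest_addr" for a range
 * large enough to hold "size" bytes plus "guard_offset" of leading
 * guard space, subject to the alignment "mask".  On success, sets
 * *addr_out to the start of the range and returns the entry (possibly
 * the map header) that will precede the new range; returns
 * VM_MAP_ENTRY_NULL if no suitable space exists.
 */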
static struct vm_map_entry *
vm_map_store_find_space_forward(
        vm_map_t        map,
        vm_map_offset_t start,
        vm_map_offset_t highest_addr,
        vm_map_offset_t guard_offset,
        vm_map_size_t   size,
        vm_map_offset_t mask,
        vm_map_offset_t *addr_out)
{
        const vm_map_offset_t map_mask = VM_MAP_PAGE_MASK(map);
        const bool use_holes = map->holelistenabled;
        vm_map_entry_t entry;

        /*
         * Find the entry we will scan from that is the closest
         * to our required scan hint "start".
         */

        if (__improbable(map->disable_vmentry_reuse)) {
                assert(!map->is_nested_map);

                start = map->highest_entry_end + PAGE_SIZE_64;
                while (vm_map_lookup_entry(map, start, &entry)) {
                        start = entry->vme_end + PAGE_SIZE_64;
                }
        } else if (use_holes) {
                entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
                if (entry == VM_MAP_ENTRY_NULL) {
                        return VM_MAP_ENTRY_NULL;
                }

                while (entry->vme_end <= start) {
                        entry = entry->vme_next;

                        if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                                return VM_MAP_ENTRY_NULL;
                        }
                }

                if (start < entry->vme_start) {
                        start = entry->vme_start;
                }
        } else {
                vm_map_offset_t first_free_start;

                assert(first_free_is_valid(map));

                entry = map->first_free;
                if (entry == vm_map_to_entry(map)) {
                        first_free_start = map->min_offset;
                } else {
                        first_free_start = entry->vme_end;
                }

                if (start <= first_free_start) {
                        start = first_free_start;
                } else if (_vm_map_store_lookup_entry(map, start, &entry)) {
                        start = entry->vme_end;
                }
        }

        for (;;) {
                vm_map_offset_t orig_start = start;
                vm_map_offset_t end, desired_empty_end;

                /*
                 * The "entry" precedes the proposed new region.
                 */

                start = (start + guard_offset + mask) & ~mask;
                start = vm_map_round_page(start, map_mask);
                end = start + size;
                start -= guard_offset;
                /*
                 * We want an entire page of empty space,
                 * but don't increase the allocation size.
                 */
                desired_empty_end = vm_map_round_page(end, map_mask);

                if (start < orig_start || desired_empty_end < start ||
                    highest_addr < desired_empty_end) {
                        /*
                         * Fail: reached our scan highest address limit,
                         * without finding a large enough hole.
                         */
                        return VM_MAP_ENTRY_NULL;
                }

                if (use_holes) {
                        if (desired_empty_end <= entry->vme_end) {
                                /*
                                 * Done: this hole is wide enough.
                                 */
                                vm_map_store_convert_hole_to_entry(map, start, &entry);
                                break;
                        }

                        entry = entry->vme_next;

                        if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                                /*
                                 * Fail: wrapped around, no more holes
                                 */
                                return VM_MAP_ENTRY_NULL;
                        }

                        start = entry->vme_start;
                } else {
                        vm_map_entry_t next = entry->vme_next;

                        if (next == vm_map_to_entry(map)) {
                                /*
                                 * Done: no more entries toward the end
                                 * of the map, only a big enough void.
                                 */
                                break;
                        }

                        if (desired_empty_end <= next->vme_start) {
                                /*
                                 * Done: the gap between the two consecutive
                                 * entries is large enough.
                                 */
                                break;
                        }

                        entry = next;
                        start = entry->vme_end;
                }
        }

        *addr_out = start;
        return entry;
}

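/*
 * vm_map_store_find_space:
 *
 * Find an unused range of at least "size" bytes in the map, scanning
 * backwards from "hint" down to "limit" when "backwards" is set, or
 * forwards from "hint" up to "limit" otherwise.  On success, sets
 * *addr_out and returns the entry after which the new range should be
 * linked; returns VM_MAP_ENTRY_NULL when no space is available.
 */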
struct vm_map_entry *
vm_map_store_find_space(
        vm_map_t        map,
        vm_map_offset_t hint,
        vm_map_offset_t limit,
        bool            backwards,
        vm_map_offset_t guard_offset,
        vm_map_size_t   size,
        vm_map_offset_t mask,
        vm_map_offset_t *addr_out)
{
        vm_map_entry_t entry;

#if defined VM_MAP_STORE_USE_RB
        __builtin_assume((void*)map->hdr.rb_head_store.rbh_root !=
            (void*)(int)SKIP_RB_TREE);
#endif

        if (backwards) {
                entry = vm_map_store_find_space_backwards(map, hint, limit,
                    guard_offset, size, mask, addr_out);
        } else {
                entry = vm_map_store_find_space_forward(map, hint, limit,
                    guard_offset, size, mask, addr_out);
        }

        return entry;
}