1 /*
2 * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/backtrace.h>
30 #include <mach/sdt.h>
31 #include <vm/vm_map_store.h>
32 #include <vm/vm_pageout.h> /* for vm_debug_events */
33
#if MACH_ASSERT
/*
 * Debug-build sanity check: delegate to the linked-list store to
 * verify the map's cached "first_free" hint is still consistent.
 */
boolean_t
first_free_is_valid_store(vm_map_t map)
{
	return first_free_is_valid_ll(map);
}
#endif
41
42 boolean_t
vm_map_store_has_RB_support(struct vm_map_header * hdr)43 vm_map_store_has_RB_support( struct vm_map_header *hdr )
44 {
45 if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
46 return FALSE;
47 }
48 return TRUE;
49 }
50
/*
 * Initialize the entry stores backing a map header: the doubly
 * linked list is always set up; the red-black tree only when this
 * header has not opted out via SKIP_RB_TREE.
 */
void
vm_map_store_init(struct vm_map_header *hdr)
{
	vm_map_store_init_ll(hdr);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support(hdr)) {
		vm_map_store_init_rb(hdr);
	}
#endif
}
61
62 static inline boolean_t
_vm_map_store_lookup_entry(vm_map_t map,vm_map_offset_t address,vm_map_entry_t * entry)63 _vm_map_store_lookup_entry(
64 vm_map_t map,
65 vm_map_offset_t address,
66 vm_map_entry_t *entry) /* OUT */
67 {
68 #ifdef VM_MAP_STORE_USE_LL
69 return vm_map_store_lookup_entry_ll( map, address, entry );
70 #elif defined VM_MAP_STORE_USE_RB
71 if (vm_map_store_has_RB_support( &map->hdr )) {
72 return vm_map_store_lookup_entry_rb( map, address, entry );
73 } else {
74 panic("VM map lookups need RB tree support.");
75 return FALSE; /* For compiler warning.*/
76 }
77 #endif
78 }
79
80 __attribute__((noinline))
81 boolean_t
vm_map_store_lookup_entry(vm_map_t map,vm_map_offset_t address,vm_map_entry_t * entry)82 vm_map_store_lookup_entry(
83 vm_map_t map,
84 vm_map_offset_t address,
85 vm_map_entry_t *entry) /* OUT */
86 {
87 return _vm_map_store_lookup_entry(map, address, entry);
88 }
89
90 void
vm_map_store_update(vm_map_t map,vm_map_entry_t entry,int update_type)91 vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type )
92 {
93 switch (update_type) {
94 case VM_MAP_ENTRY_CREATE:
95 break;
96 case VM_MAP_ENTRY_DELETE:
97 if ((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) {
98 (map)->first_free = vm_map_to_entry(map);
99 }
100 if ((entry) == (map)->hint) {
101 (map)->hint = vm_map_to_entry(map);
102 }
103 break;
104 default:
105 break;
106 }
107 }
108
109 /*
110 * vm_map_entry_{un,}link:
111 *
112 * Insert/remove entries from maps (or map copies).
113 * The _vm_map_store_entry_{un,}link variants are used at
114 * some places where updating first_free is not needed &
115 * copy maps are being modified. Also note the first argument
116 * is the map header.
117 * Modifying the vm_map_store_entry_{un,}link functions to
118 * deal with these call sites made the interface confusing
119 * and clunky.
120 */
121
/*
 * Insert "entry" into the stores of "mapHdr", immediately after
 * "after_where" in list order.  This is the header-level variant:
 * it does not touch map-level state such as first_free (callers
 * that need that use vm_map_store_entry_link() instead).
 */
void
_vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	assert(entry->vme_start < entry->vme_end);
	/* Fire a DTrace probe when VM debug events are enabled. */
	if (__improbable(vm_debug_events)) {
		DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}

	/* Always link into the list; link into the RB tree when supported. */
	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_link_rb(mapHdr, after_where, entry);
	}
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	/*
	 * Debug bookkeeping: remember the entry's original bounds on
	 * first insertion, and record a backtrace of this insertion
	 * (releasing any backtrace from a previous insertion).
	 */
	if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
		entry->vme_start_original = entry->vme_start;
		entry->vme_end_original = entry->vme_end;
	}
	btref_put(entry->vme_insertion_bt);
	entry->vme_insertion_bt = btref_get(__builtin_frame_address(0),
	    BTREF_GET_NOWAIT);
#endif
}
146
147 void
vm_map_store_entry_link(vm_map_t map,vm_map_entry_t after_where,vm_map_entry_t entry,vm_map_kernel_flags_t vmk_flags)148 vm_map_store_entry_link(
149 vm_map_t map,
150 vm_map_entry_t after_where,
151 vm_map_entry_t entry,
152 vm_map_kernel_flags_t vmk_flags)
153 {
154 vm_map_t VMEL_map;
155 vm_map_entry_t VMEL_entry;
156 VMEL_map = (map);
157 VMEL_entry = (entry);
158
159 if (entry->is_sub_map) {
160 assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
161 "map %p (%d) entry %p submap %p (%d)\n",
162 map, VM_MAP_PAGE_SHIFT(map), entry,
163 VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
164 }
165
166 _vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);
167 if (VMEL_map->disable_vmentry_reuse == TRUE) {
168 UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry);
169 } else {
170 update_first_free_ll(VMEL_map, VMEL_map->first_free);
171 #ifdef VM_MAP_STORE_USE_RB
172 if (vm_map_store_has_RB_support( &VMEL_map->hdr )) {
173 update_first_free_rb(VMEL_map, entry, TRUE);
174 }
175 #endif
176 }
177 (void) vmk_flags;
178 }
179
/*
 * Remove "entry" from the stores of "mapHdr".  Header-level variant:
 * map-level state (first_free, hint) is not updated here; callers
 * that need that use vm_map_store_entry_unlink() instead.
 *
 * check_permanent: when true, assert that the entry is not marked
 * "permanent" before unlinking it.
 */
void
_vm_map_store_entry_unlink(
	struct vm_map_header * mapHdr,
	vm_map_entry_t entry,
	bool check_permanent)
{
	/* Fire a DTrace probe when VM debug events are enabled. */
	if (__improbable(vm_debug_events)) {
		DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}

	/*
	 * We should never unlink a "permanent" entry. The caller should
	 * clear "permanent" first if it wants it to be bypassed.
	 */
	if (check_permanent) {
		assertf(!entry->vme_permanent, "mapHdr %p entry %p [ 0x%llx end 0x%llx ] prot 0x%x/0x%x submap %d\n", mapHdr, entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end, entry->protection, entry->max_protection, entry->is_sub_map);
	}

	/* Always unlink from the list; unlink from the RB tree when supported. */
	vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_unlink_rb(mapHdr, entry);
	}
#endif
}
205
206 void
vm_map_store_entry_unlink(vm_map_t map,vm_map_entry_t entry,bool check_permanent)207 vm_map_store_entry_unlink(
208 vm_map_t map,
209 vm_map_entry_t entry,
210 bool check_permanent)
211 {
212 vm_map_t VMEU_map;
213 vm_map_entry_t VMEU_entry = NULL;
214 vm_map_entry_t VMEU_first_free = NULL;
215 VMEU_map = (map);
216 VMEU_entry = (entry);
217
218 if (map->holelistenabled == FALSE) {
219 if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
220 VMEU_first_free = VMEU_entry->vme_prev;
221 } else {
222 VMEU_first_free = VMEU_map->first_free;
223 }
224 }
225 _vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry, check_permanent);
226 vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE);
227 update_first_free_ll(VMEU_map, VMEU_first_free);
228 #ifdef VM_MAP_STORE_USE_RB
229 if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
230 update_first_free_rb(VMEU_map, entry, FALSE);
231 }
232 #endif
233 }
234
235 void
vm_map_store_copy_reset(vm_map_copy_t copy,vm_map_entry_t entry)236 vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
237 {
238 int nentries = copy->cpy_hdr.nentries;
239 vm_map_store_copy_reset_ll(copy, entry, nentries);
240 #ifdef VM_MAP_STORE_USE_RB
241 if (vm_map_store_has_RB_support( ©->c_u.hdr )) {
242 vm_map_store_copy_reset_rb(copy, entry, nentries);
243 }
244 #endif
245 }
246
247 void
vm_map_store_update_first_free(vm_map_t map,vm_map_entry_t first_free_entry,boolean_t new_entry_creation)248 vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation)
249 {
250 update_first_free_ll(map, first_free_entry);
251 #ifdef VM_MAP_STORE_USE_RB
252 if (vm_map_store_has_RB_support( &map->hdr )) {
253 update_first_free_rb(map, first_free_entry, new_entry_creation);
254 }
255 #endif
256 }
257
/*
 * Abort path taken when an address the hole list claimed was free
 * turns out to be covered by an existing map entry — i.e. the hole
 * list and the entry stores disagree, which indicates corruption.
 * Never returns (__abortlike).
 */
__abortlike
static void
__vm_map_store_find_space_holelist_corruption(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_entry_t  entry)
{
	panic("Found an existing entry %p [0x%llx, 0x%llx) in map %p "
	    "instead of potential hole at address: 0x%llx.",
	    entry, entry->vme_start, entry->vme_end, map, start);
}
269
270 static void
vm_map_store_convert_hole_to_entry(vm_map_t map,vm_map_offset_t addr,vm_map_entry_t * entry_p)271 vm_map_store_convert_hole_to_entry(
272 vm_map_t map,
273 vm_map_offset_t addr,
274 vm_map_entry_t *entry_p)
275 {
276 vm_map_entry_t entry = *entry_p;
277
278 if (_vm_map_store_lookup_entry(map, entry->vme_start, entry_p)) {
279 __vm_map_store_find_space_holelist_corruption(map, addr, entry);
280 }
281 }
282
/*
 * vm_map_store_find_space_backwards:
 *
 * Scan the map downward from "end" toward "lowest_addr" for a free
 * gap able to hold "size" bytes preceded by "guard_offset" bytes of
 * guard space, aligned to "mask" and to the map's page mask.  Uses
 * the hole list when the map has one, otherwise walks the entry
 * list directly.
 *
 * On success, stores the chosen start address (guard included) in
 * *addr_out and returns a map entry anchoring the insertion point
 * for the caller.  Returns VM_MAP_ENTRY_NULL on failure.
 */
static struct vm_map_entry *
vm_map_store_find_space_backwards(
	vm_map_t        map,
	vm_map_offset_t end,
	vm_map_offset_t lowest_addr,
	vm_map_offset_t guard_offset,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	vm_map_offset_t *addr_out)
{
	const vm_map_offset_t map_mask = VM_MAP_PAGE_MASK(map);
	const bool use_holes = map->holelistenabled;
	vm_map_offset_t start;
	vm_map_entry_t entry;

	/*
	 * Find the entry we will scan from that is the closest
	 * to our required scan hint "end".
	 */

	if (use_holes) {
		/* Hole list is circular; start from the last hole. */
		entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		if (entry == VM_MAP_ENTRY_NULL) {
			/* No holes at all: the map is full. */
			return VM_MAP_ENTRY_NULL;
		}

		entry = entry->vme_prev;

		/* Skip holes that lie entirely at or above "end". */
		while (end <= entry->vme_start) {
			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/* Wrapped around: no hole below "end". */
				return VM_MAP_ENTRY_NULL;
			}

			entry = entry->vme_prev;
		}

		/* Clamp the scan hint into this hole. */
		if (entry->vme_end < end) {
			end = entry->vme_end;
		}
	} else {
		if (map->max_offset <= end) {
			/* Hint beyond the map: scan from the very top. */
			entry = vm_map_to_entry(map);
			end = map->max_offset;
		} else if (_vm_map_store_lookup_entry(map, end - 1, &entry)) {
			/* Hint lands inside an entry: scan below it. */
			end = entry->vme_start;
		} else {
			/* Hint is in a gap; "entry" precedes it, use its successor. */
			entry = entry->vme_next;
		}
	}

	for (;;) {
		/*
		 * The "entry" follows the proposed new region.
		 */

		/* Compute an aligned candidate [start, end) below "end". */
		end = vm_map_trunc_page(end, map_mask);
		start = (end - size) & ~mask;
		start = vm_map_trunc_page(start, map_mask);
		end = start + size;
		start -= guard_offset;

		if (end < start || start < lowest_addr) {
			/*
			 * Fail: reached our scan lowest address limit,
			 * without finding a large enough hole.
			 */
			return VM_MAP_ENTRY_NULL;
		}

		if (use_holes) {
			if (entry->vme_start <= start) {
				/*
				 * Done: this hole is wide enough.
				 */
				vm_map_store_convert_hole_to_entry(map, start, &entry);
				break;
			}

			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Fail: wrapped around, no more holes
				 */
				return VM_MAP_ENTRY_NULL;
			}

			/* Try the next hole down. */
			entry = entry->vme_prev;
			end = entry->vme_end;
		} else {
			entry = entry->vme_prev;

			if (entry == vm_map_to_entry(map)) {
				/*
				 * Done: no more entries toward the start
				 * of the map, only a big enough void.
				 */
				break;
			}

			if (entry->vme_end <= start) {
				/*
				 * Done: the gap between the two consecutive
				 * entries is large enough.
				 */
				break;
			}

			/* Gap too small: retry below this entry. */
			end = entry->vme_start;
		}
	}

	*addr_out = start;
	return entry;
}
396
/*
 * vm_map_store_find_space_forward:
 *
 * Scan the map upward from "start" toward "highest_addr" for a free
 * gap able to hold "size" bytes preceded by "guard_offset" bytes of
 * guard space, aligned to "mask" and to the map's page mask.  Uses
 * the hole list when the map has one, otherwise starts from the
 * map's first_free hint and walks the entry list.
 *
 * On success, stores the chosen start address (guard included) in
 * *addr_out and returns a map entry anchoring the insertion point
 * for the caller.  Returns VM_MAP_ENTRY_NULL on failure.
 */
static struct vm_map_entry *
vm_map_store_find_space_forward(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t highest_addr,
	vm_map_offset_t guard_offset,
	vm_map_size_t   size,
	vm_map_offset_t mask,
	vm_map_offset_t *addr_out)
{
	const vm_map_offset_t map_mask = VM_MAP_PAGE_MASK(map);
	const bool use_holes = map->holelistenabled;
	vm_map_entry_t entry;

	/*
	 * Find the entry we will scan from that is the closest
	 * to our required scan hint "start".
	 */

	if (__improbable(map->disable_vmentry_reuse)) {
		/* Entry reuse disabled: always allocate above the watermark. */
		VM_MAP_HIGHEST_ENTRY(map, entry, start);
	} else if (use_holes) {
		/* Hole list is circular; start from the first hole. */
		entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		if (entry == VM_MAP_ENTRY_NULL) {
			/* No holes at all: the map is full. */
			return VM_MAP_ENTRY_NULL;
		}

		/* Skip holes that end at or below "start". */
		while (entry->vme_end <= start) {
			entry = entry->vme_next;

			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/* Wrapped around: no hole above "start". */
				return VM_MAP_ENTRY_NULL;
			}
		}

		/* Clamp the scan hint into this hole. */
		if (start < entry->vme_start) {
			start = entry->vme_start;
		}
	} else {
		vm_map_offset_t first_free_start;

		assert(first_free_is_valid(map));

		/* Derive the first free address from the first_free hint. */
		entry = map->first_free;
		if (entry == vm_map_to_entry(map)) {
			first_free_start = map->min_offset;
		} else {
			first_free_start = entry->vme_end;
		}

		if (start <= first_free_start) {
			start = first_free_start;
		} else if (_vm_map_store_lookup_entry(map, start, &entry)) {
			/* Hint lands inside an entry: scan above it. */
			start = entry->vme_end;
		}
	}

	for (;;) {
		vm_map_offset_t orig_start = start;
		vm_map_offset_t end, desired_empty_end;

		/*
		 * The "entry" precedes the proposed new region.
		 */

		/* Compute an aligned candidate [start, end) above "start". */
		start = (start + guard_offset + mask) & ~mask;
		start = vm_map_round_page(start, map_mask);
		end = start + size;
		start -= guard_offset;
		/*
		 * We want an entire page of empty space,
		 * but don't increase the allocation size.
		 */
		desired_empty_end = vm_map_round_page(end, map_mask);

		/* "start < orig_start" / "< start" also catch arithmetic wrap. */
		if (start < orig_start || desired_empty_end < start ||
		    highest_addr < desired_empty_end) {
			/*
			 * Fail: reached our scan highest address limit,
			 * without finding a large enough hole.
			 */
			return VM_MAP_ENTRY_NULL;
		}

		if (use_holes) {
			if (desired_empty_end <= entry->vme_end) {
				/*
				 * Done: this hole is wide enough.
				 */
				vm_map_store_convert_hole_to_entry(map, start, &entry);
				break;
			}

			/* Try the next hole up. */
			entry = entry->vme_next;

			if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Fail: wrapped around, no more holes
				 */
				return VM_MAP_ENTRY_NULL;
			}

			start = entry->vme_start;
		} else {
			vm_map_entry_t next = entry->vme_next;

			if (next == vm_map_to_entry(map)) {
				/*
				 * Done: no more entries toward the end
				 * of the map, only a big enough void.
				 */
				break;
			}

			if (desired_empty_end <= next->vme_start) {
				/*
				 * Done: the gap between the two consecutive
				 * entries is large enough.
				 */
				break;
			}

			/* Gap too small: retry above the next entry. */
			entry = next;
			start = entry->vme_end;
		}
	}

	*addr_out = start;
	return entry;
}
527
528 struct vm_map_entry *
vm_map_store_find_space(vm_map_t map,vm_map_offset_t hint,vm_map_offset_t limit,boolean_t backwards,vm_map_offset_t guard_offset,vm_map_size_t size,vm_map_offset_t mask,vm_map_offset_t * addr_out)529 vm_map_store_find_space(
530 vm_map_t map,
531 vm_map_offset_t hint,
532 vm_map_offset_t limit,
533 boolean_t backwards,
534 vm_map_offset_t guard_offset,
535 vm_map_size_t size,
536 vm_map_offset_t mask,
537 vm_map_offset_t *addr_out)
538 {
539 vm_map_entry_t entry;
540
541 #if defined VM_MAP_STORE_USE_RB
542 __builtin_assume((void*)map->hdr.rb_head_store.rbh_root !=
543 (void*)(int)SKIP_RB_TREE);
544 #endif
545
546 if (backwards) {
547 entry = vm_map_store_find_space_backwards(map, hint, limit,
548 guard_offset, size, mask, addr_out);
549 } else {
550 entry = vm_map_store_find_space_forward(map, hint, limit,
551 guard_offset, size, mask, addr_out);
552 }
553
554 return entry;
555 }
556