/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

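/*
 * Each vm_map_entry embeds a struct vm_map_store ("store") that serves as its
 * node in the per-map red-black tree.  RB_GENERATE instantiates the tree
 * operations for that node type, ordered by rb_node_compare below, and
 * VME_FOR_STORE recovers the containing entry from a tree node.
 */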
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

#define VME_FOR_STORE(ptr) __container_of(ptr, struct vm_map_entry, store)

void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}

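/*
 * Ordering function for the tree: returns -1 if "node" starts below "parent",
 * 1 if it starts at or beyond parent's end, and 0 if its start falls inside
 * parent's range, so a probe entry whose start lies within an existing
 * entry's range compares equal to that entry.
 */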
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start) {
		return -1;
	}
	if (vme_c->vme_start >= vme_p->vme_end) {
		return 1;
	}
	return 0;
}

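/*
 * Debugging aid: looks the entry up in the RB tree and panics unconditionally
 * (hence __dead2), either because the entry is missing from the tree or to
 * report the entry's tree neighborhood.
 */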
__dead2
void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store  *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t       cur = *vm_entry;

	rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store));
	if (rb_entry == NULL) {
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	} else {
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
	}
}


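/*
 * Look up the entry containing "address" by walking down the RB tree.
 * Returns TRUE with *vm_entry set to that entry, or FALSE with *vm_entry set
 * to the closest preceding entry (or the map header if there is none).
 */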
boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store  *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t       cur = vm_map_to_entry(map);
	vm_map_entry_t       prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}

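/*
 * Insert "entry" into the header's RB tree via its embedded store node.
 * "after_where" is unused here; RB_INSERT returning an existing node
 * (i.e. a colliding range) is fatal.
 */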
void
vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;
	if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}

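/*
 * Remove "entry" from the header's RB tree.  The entry is expected to be
 * present; a failed lookup panics.
 */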
void
vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND( rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE( rb_head, rbh, store );
}

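/*
 * Detach up to "nentries" entries of a vm_map_copy, starting at "entry", from
 * the copy's RB tree.  Only the tree linkage is undone; the entries themselves
 * and their list linkage are left untouched.
 */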
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE( rb_head, rbh, store );
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}

extern zone_t   vm_map_holes_zone;      /* zone for vm map holes (vm_map_links) structures */

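/*
 * Merge "hole_entry" with the hole immediately following it in the circular
 * hole list: the first hole absorbs the second one's range and the now-unused
 * middle link is freed back to vm_map_holes_zone.
 */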
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree(vm_map_holes_zone, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}


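/*
 * Unlink "hole_entry" from the map's circular hole list, updating the list
 * head and the hole hint as needed, and free it back to vm_map_holes_zone.
 */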
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}


/*
 * For Debugging.
 */

#if DEBUG
extern int vm_check_map_sanity;

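/*
 * Walk the map's entries and its hole list in tandem and panic if the gap
 * between two consecutive entries does not line up with the corresponding
 * hole.
 */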
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t  hole_entry, next_hole_entry;
	vm_map_entry_t  map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

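/*
 * An entry has been removed from the map, so the address range it covered
 * becomes a hole.  Starting from the hole hint, either grow an adjacent hole
 * (possibly merging two holes that the entry used to separate) or allocate
 * and link a brand new hole covering the entry's range, then update the hint.
 */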
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t          hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry     old_hole_entry;
#endif /* DEBUG */
	boolean_t               create_new_hole = TRUE;

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * The hinted hole ends right where our entry starts.
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * The hinted hole starts right where our entry ends.
				 * Step back to the previous hole, but only if this
				 * hole is not the head of the list, so we don't
				 * erroneously extend backwards by wrapping around.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links     *new_hole_entry = NULL;
		vm_map_entry_t          l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	if (vm_check_map_sanity) {
		check_map_sanity(map, &old_hole_entry);
	}
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}


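/*
 * A new entry has been linked into the map, so it must be carved out of the
 * hole that used to cover its range.  The loop below walks the hole list and
 * handles the cases spelled out at the top of the function (exact fit, split,
 * partial overlap); finding no matching hole is a fatal error.
 */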
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t                  hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry             old_hole_entry;
	vm_map_entry_t                  tmp_entry;
	boolean_t                       check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 *	   This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 *	   This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 *	   This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 *  if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *       if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *               next_hole_entry = vm_map_last_entry(map);
			 *               assert(next_hole_entry->vme_end >= map->max_offset);
			 *       }
			 *  }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}

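/*
 * Store-layer hook invoked after an entry has been linked into or unlinked
 * from the map.  When the map tracks free space with a hole list, dispatch to
 * the appropriate hole-update routine above.
 */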
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
	if (map->holelistenabled) {
		/*
		 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification of
		 * a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * The commpage is pinned beyond the map's max offset. That shouldn't affect the
		 * holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * Note:
		 *
		 * - A new entry has already been added to the map
		 * OR
		 * - An older entry has already been deleted from the map
		 *
		 * We are updating the hole list after the fact (except in one special case involving copy maps).
		 */

		if (new_entry_creation) {
			update_holes_on_entry_creation(map, entry);
		} else {
			update_holes_on_entry_deletion(map, entry);
		}
	}
}