/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

#define VME_FOR_STORE(ptr) __container_of(ptr, struct vm_map_entry, store)
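
/*
 * Note: VME_FOR_STORE recovers the vm_map_entry that embeds a given
 * vm_map_store node, so the tree can link entries without storing a
 * back-pointer. A minimal sketch of the container_of arithmetic it
 * relies on (illustrative only; __container_of is the real macro):
 *
 *	struct vm_map_entry *vme = (struct vm_map_entry *)
 *	    ((char *)store_ptr - offsetof(struct vm_map_entry, store));
 */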

void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}

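/*
 * Note: the comparator below reports "equal" (0) whenever node's start
 * falls inside [parent->vme_start, parent->vme_end). As a result,
 * RB_INSERT returns the existing element on any overlap (which
 * vm_map_store_entry_link_rb turns into a panic), and RB_FIND locates
 * the node owning a range rather than requiring pointer identity.
 */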
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start) {
		return -1;
	}
	if (vme_c->vme_start >= vme_p->vme_end) {
		return 1;
	}
	return 0;
}
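
/*
 * Diagnostic helper: both paths below end in panic() (hence __dead2),
 * reporting either a missing node or the node and its children around
 * the point of inconsistency.
 */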

__dead2
void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store  *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t       cur = *vm_entry;

	rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store));
	if (rb_entry == NULL) {
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	} else {
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
	}
}

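/*
 * Same contract as vm_map_lookup_entry(): on TRUE, *vm_entry is the
 * entry containing "address"; on FALSE, *vm_entry is the closest
 * preceding entry (or the map header if there is none). A hypothetical
 * caller, purely for illustration:
 *
 *	vm_map_entry_t entry;
 *	if (vm_map_store_lookup_entry_rb(map, addr, &entry)) {
 *		// addr is within [entry->vme_start, entry->vme_end)
 *	}
 */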
boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store  *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t       cur = vm_map_to_entry(map);
	vm_map_entry_t       prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}

void
vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;
	if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}

void
vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND( rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE( rb_head, rbh, store );
}
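
/*
 * Note: the reset below detaches up to "nentries" entries of a map copy
 * from the RB tree only; the entries themselves stay chained on the
 * copy's entry list via vme_next (the local "deleted" count is
 * maintained but not otherwise consumed here).
 */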

void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE( rb_head, rbh, store );
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}
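
/*
 * Hole management.
 *
 * When map->holelistenabled is set, the unallocated ranges ("holes") of
 * a map are tracked on a circular doubly-linked list of vm_map_links
 * headed by map->holes_list, alongside the entry list and the RB tree.
 * The helpers below keep that list coherent as entries come and go.
 *
 * vm_map_combine_hole() merges hole_entry with the hole immediately
 * following it: the first hole absorbs the second one's range, and the
 * second is unlinked and freed.
 */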

void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree_id(ZONE_ID_VM_MAP_HOLES, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}
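
/*
 * vm_map_delete_hole() unlinks hole_entry from the hole list, updating
 * map->holes_list and the hole hint as needed, and frees it back to the
 * ZONE_ID_VM_MAP_HOLES zone.
 */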

void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree_id(ZONE_ID_VM_MAP_HOLES, hole_entry);
}


/*
 * For Debugging.
 */

#if DEBUG
extern int vm_check_map_sanity;

static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t  hole_entry, next_hole_entry;
	vm_map_entry_t  map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */
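
/*
 * Deleting an entry turns its range back into a hole: the range is
 * either merged into an adjacent hole (possibly bridging two holes into
 * one via vm_map_combine_hole()) or linked in as a new hole. For
 * example, deleting an entry that sits exactly between two holes
 * collapses all three ranges into a single hole.
 */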

void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t          hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry     old_hole_entry;
#endif /* DEBUG */
	boolean_t               create_new_hole = TRUE;

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole right above our entry.
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole right below our entry, but
				 * make sure we don't erroneously extend backwards.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links     *new_hole_entry = NULL;
		vm_map_entry_t          l_next, l_prev;

		new_hole_entry = zalloc_id(ZONE_ID_VM_MAP_HOLES, Z_WAITOK | Z_NOFAIL);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	if (vm_check_map_sanity) {
		check_map_sanity(map, &old_hole_entry);
	}
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}


void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t                  hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry             old_hole_entry;
	vm_map_entry_t                  tmp_entry;
	boolean_t                       check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 *	   This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 *	   This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 *	   This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */
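
	/*
	 * Pictorially (addresses increase to the right; H = existing
	 * hole, E = new entry):
	 *
	 *	Case A:   [HHHHHH]     Case B:  [HHHHHHHHHH]
	 *	          [EEEEEE]                  [EEEE]
	 *
	 *	Case C1:      [HHHH]   Case C2: [HHHH]
	 *	          [EEEEEE]                 [EEEEEE]
	 */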

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 *  if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *       if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *               next_hole_entry = vm_map_last_entry(map);
			 *               assert(next_hole_entry->vme_end >= map->max_offset);
			 *       }
			 *  }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc_id(ZONE_ID_VM_MAP_HOLES, Z_WAITOK | Z_NOFAIL);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:%p, s:0x%llx, e:0x%llx",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}
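
/*
 * Store-layer hook invoked after an entry has been linked into or
 * unlinked from a map (new_entry_creation distinguishes the two), to
 * bring the hole list back in sync. A no-op unless the map uses the
 * hole list.
 */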

void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
	if (map->holelistenabled) {
		/*
		 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification of
		 * a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * The commpage is pinned beyond the map's max offset. That shouldn't affect the
		 * holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * Note:
		 *
		 * - A new entry has already been added to the map
		 * OR
		 * - An older entry has already been deleted from the map
		 *
		 * We are updating the hole list after the fact (except in one
		 * special case involving copy maps).
		 */

		if (new_entry_creation) {
			update_holes_on_entry_creation(map, entry);
		} else {
			update_holes_on_entry_deletion(map, entry);
		}
	}
}