xref: /xnu-12377.1.9/osfmk/vm/vm_map_store_ll.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2009 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <vm/vm_map_internal.h>
30 
31 bool
first_free_is_valid_ll(vm_map_t map)32 first_free_is_valid_ll(vm_map_t map)
33 {
34 	vm_map_offset_t map_page_mask = VM_MAP_PAGE_MASK(map);
35 	vm_map_entry_t  entry, next;
36 
37 	entry = vm_map_to_entry(map);
38 	next = entry->vme_next;
39 	while (vm_map_trunc_page(next->vme_start, map_page_mask) ==
40 	    vm_map_trunc_page(entry->vme_end, map_page_mask) ||
41 	    (vm_map_trunc_page(next->vme_start, map_page_mask) ==
42 	    vm_map_trunc_page(entry->vme_start, map_page_mask) &&
43 	    next != vm_map_to_entry(map))) {
44 		entry = next;
45 		next = entry->vme_next;
46 		if (entry == vm_map_to_entry(map)) {
47 			break;
48 		}
49 	}
50 	if (map->first_free != entry) {
51 		printf("Bad first_free for map %p: %p should be %p\n",
52 		    map, map->first_free, entry);
53 		return FALSE;
54 	}
55 	return TRUE;
56 }
57 
58 void
vm_map_store_init_ll(struct vm_map_header * hdr)59 vm_map_store_init_ll(struct vm_map_header *hdr)
60 {
61 	hdr->links.next = CAST_TO_VM_MAP_ENTRY(hdr);
62 	VMH_PREV_SET(hdr, CAST_TO_VM_MAP_ENTRY(hdr));
63 }
64 
65 void
vm_map_store_entry_link_ll(struct vm_map_header * hdr,vm_map_entry_t after_where,vm_map_entry_t entry)66 vm_map_store_entry_link_ll(
67 	struct vm_map_header   *hdr,
68 	vm_map_entry_t          after_where,
69 	vm_map_entry_t          entry)
70 {
71 	assert(VM_MAP_PAGE_ALIGNED(entry->vme_start,
72 	    VM_MAP_HDR_PAGE_MASK(hdr)));
73 	assert(VM_MAP_PAGE_ALIGNED(entry->vme_end,
74 	    VM_MAP_HDR_PAGE_MASK(hdr)));
75 	hdr->nentries++;
76 	VME_PREV_SET(entry, after_where);
77 	entry->vme_next = after_where->vme_next;
78 	VME_PREV(entry)->vme_next = entry;
79 	VME_PREV_SET(entry->vme_next, entry);
80 }
81 
82 void
vm_map_store_entry_unlink_ll(struct vm_map_header * hdr,vm_map_entry_t entry)83 vm_map_store_entry_unlink_ll(struct vm_map_header *hdr, vm_map_entry_t entry)
84 {
85 	hdr->nentries--;
86 	VME_PREV_SET(entry->vme_next, VME_PREV(entry));
87 	VME_PREV(entry)->vme_next = entry->vme_next;
88 }
89 
90 void
vm_map_store_copy_reset_ll(vm_map_copy_t copy,__unused vm_map_entry_t entry,__unused int nentries)91 vm_map_store_copy_reset_ll(
92 	vm_map_copy_t           copy,
93 	__unused vm_map_entry_t entry,
94 	__unused int            nentries)
95 {
96 	copy->cpy_hdr.nentries = 0;
97 	vm_map_copy_first_entry(copy) = vm_map_copy_to_entry(copy);
98 	VMH_PREV_SET(&copy->cpy_hdr, vm_map_copy_to_entry(copy));
99 }
100 
101 /*
102  *	UPDATE_FIRST_FREE:
103  *
104  *	Updates the map->first_free pointer to the
105  *	entry immediately before the first hole in the map.
106  *      The map should be locked.
107  */
void
update_first_free_ll(vm_map_t map, vm_map_entry_t new_first_free)
{
	vm_map_offset_t map_page_mask = VM_MAP_PAGE_MASK(map);
	vm_map_entry_t  next;

	/*
	 * first_free is not maintained when the hole list is in use
	 * or when vm_map entry reuse has been disabled for this map.
	 */
	if (map->holelistenabled || map->disable_vmentry_reuse) {
		return;
	}

	/*
	 * Walk forward from the candidate while there is no hole
	 * between it and its successor at page granularity: either
	 * the successor starts on the same page where the candidate
	 * ends, or both start on the same page (an entry that is
	 * empty at page granularity) and the successor is not the
	 * list sentinel.
	 */
	next = new_first_free->vme_next;
	while (vm_map_trunc_page(next->vme_start, map_page_mask) ==
	    vm_map_trunc_page(new_first_free->vme_end, map_page_mask) ||
	    (vm_map_trunc_page(next->vme_start, map_page_mask) ==
	    vm_map_trunc_page(new_first_free->vme_start, map_page_mask) &&
	    next != vm_map_to_entry(map))) {
		new_first_free = next;
		next = new_first_free->vme_next;
		/* Wrapped around to the sentinel: no hole in the map. */
		if (new_first_free == vm_map_to_entry(map)) {
			break;
		}
	}

	map->first_free = new_first_free;
	assert(first_free_is_valid(map));
}
134