/* /xnu-8020.140.41/osfmk/vm/vm_map_store_ll.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc) */
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_ll.h>

/*
 * Sanity-check that map->first_free points at the entry immediately
 * before the first page-rounded hole in the map.
 */
boolean_t
first_free_is_valid_ll( vm_map_t map )
{
	vm_map_entry_t  entry, next;
	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	/*
	 * Walk forward while there is no page-rounded gap between
	 * consecutive entries (or both entries start on the same page).
	 */
	while (vm_map_trunc_page(next->vme_start,
	    VM_MAP_PAGE_MASK(map)) ==
	    vm_map_trunc_page(entry->vme_end,
	    VM_MAP_PAGE_MASK(map)) ||
	    (vm_map_trunc_page(next->vme_start,
	    VM_MAP_PAGE_MASK(map)) ==
	    vm_map_trunc_page(entry->vme_start,
	    VM_MAP_PAGE_MASK(map)) &&
	    next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map)) {
			break;
		}
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map %p: %p should be %p\n",
		    map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}

/*
 *	UPDATE_FIRST_FREE_LL:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE_LL(map, new_first_free)                       \
	MACRO_BEGIN                                                     \
	if ((map)->disable_vmentry_reuse == FALSE) {                    \
	        vm_map_t	UFF_map;                                \
	        vm_map_entry_t	UFF_first_free;                         \
	        vm_map_entry_t	UFF_next_entry;                         \
	        UFF_map = (map);                                        \
	        UFF_first_free = (new_first_free);                      \
	        UFF_next_entry = UFF_first_free->vme_next;              \
	        while (vm_map_trunc_page(UFF_next_entry->vme_start,     \
	                                 VM_MAP_PAGE_MASK(UFF_map)) ==  \
	               vm_map_trunc_page(UFF_first_free->vme_end,       \
	                                 VM_MAP_PAGE_MASK(UFF_map)) ||  \
	               (vm_map_trunc_page(UFF_next_entry->vme_start,    \
	                                  VM_MAP_PAGE_MASK(UFF_map)) == \
	                vm_map_trunc_page(UFF_first_free->vme_start,    \
	                                  VM_MAP_PAGE_MASK(UFF_map)) && \
	                UFF_next_entry != vm_map_to_entry(UFF_map))) {  \
	                UFF_first_free = UFF_next_entry;                \
	                UFF_next_entry = UFF_first_free->vme_next;      \
	                if (UFF_first_free == vm_map_to_entry(UFF_map)) \
	                        break;                                  \
	        }                                                       \
	        UFF_map->first_free = UFF_first_free;                   \
	        assert(first_free_is_valid(UFF_map));                   \
	}                                                               \
	MACRO_END
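
#if 0
/*
 * A minimal userspace sketch of the scan above, assuming a simplified
 * entry type and a 4K page size; every name here is hypothetical and
 * not part of this file.  The scan stops at the entry just before the
 * first page-rounded gap:
 *
 *	head -> [0x1000,0x2000) -> [0x2000,0x3000) -> [0x5000,0x6000)
 *	                                  ^
 *	                 first free: the gap [0x3000,0x5000) follows
 */
struct toy_entry {
	struct toy_entry *next;         /* circular list, back to head */
	unsigned long     start, end;
};

#define TOY_TRUNC(a)    ((a) & ~0xFFFUL)

static struct toy_entry *
toy_first_free(struct toy_entry *head)
{
	struct toy_entry *entry = head->next, *next = entry->next;

	/*
	 * Advance while the next entry begins on the page where the
	 * current one ends (no hole), or both begin on the same page;
	 * unlike the macro above, stop before wrapping back to "head".
	 */
	while (next != head &&
	    (TOY_TRUNC(next->start) == TOY_TRUNC(entry->end) ||
	    TOY_TRUNC(next->start) == TOY_TRUNC(entry->start))) {
		entry = next;
		next = entry->next;
	}
	return entry;
}
#endif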

#define _vm_map_entry_link_ll(hdr, after_where, entry)                  \
	MACRO_BEGIN                                                     \
	if (entry->map_aligned) {                                       \
	        assert(VM_MAP_PAGE_ALIGNED((entry->vme_start),          \
	                                   VM_MAP_HDR_PAGE_MASK((hdr))));\
	        assert(VM_MAP_PAGE_ALIGNED((entry->vme_end),            \
	                                   VM_MAP_HDR_PAGE_MASK((hdr))));\
	}                                                               \
	(hdr)->nentries++;                                              \
	(entry)->vme_prev = (after_where);                              \
	(entry)->vme_next = (after_where)->vme_next;                    \
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END
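
/*
 * The combined store on the last line above unpacks into two
 * assignments, pointing both neighbors of the new entry at it:
 *
 *	(entry)->vme_next->vme_prev = (entry);	-- old successor points back
 *	(entry)->vme_prev->vme_next = (entry);	-- "after_where" points forward
 *
 * leaving "entry" spliced between "after_where" and its old successor.
 */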

#define _vm_map_entry_unlink_ll(hdr, entry)                             \
	MACRO_BEGIN                                                     \
	(hdr)->nentries--;                                              \
	(entry)->vme_next->vme_prev = (entry)->vme_prev;                \
	(entry)->vme_prev->vme_next = (entry)->vme_next;                \
	MACRO_END

/*
 *	Macro:		_vm_map_copy_insert_ll
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define _vm_map_copy_insert_ll(map, where, copy)                        \
MACRO_BEGIN                                                             \
	vm_map_t VMCI_map;                                              \
	vm_map_entry_t VMCI_where;                                      \
	vm_map_copy_t VMCI_copy;                                        \
	VMCI_map = (map);                                               \
	VMCI_where = (where);                                           \
	VMCI_copy = (copy);                                             \
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy)) \
	        ->vme_next = (VMCI_where->vme_next);                    \
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))   \
	        ->vme_prev = VMCI_where;                                \
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;          \
	update_first_free_ll(VMCI_map, VMCI_map->first_free);           \
MACRO_END
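
#if 0
/*
 * A sketch of the same splice on a plain doubly-linked list, assuming
 * a hypothetical node type (none of these names are from this file):
 * the whole chain [first..last] is linked in after "where" with four
 * pointer stores, exactly as the two compound statements above do.
 */
struct toy_node {
	struct toy_node *prev, *next;
};

static void
toy_splice_after(struct toy_node *where,
    struct toy_node *first, struct toy_node *last)
{
	last->next        = where->next;        /* chain tail -> old successor */
	where->next->prev = last;               /* old successor -> chain tail */
	where->next       = first;              /* where -> chain head */
	first->prev       = where;              /* chain head -> where */
}
#endif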

void
vm_map_store_init_ll( __unused struct vm_map_header *hdr)
{
	return;
}

/*
 *	vm_map_store_lookup_entry_ll:	[ internal use only ]
 *
 *	Use the linked list to find the map entry containing
 *	(or immediately preceding) the specified address in the
 *	given map; the entry is returned in the "entry" parameter.
 *	The boolean result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_store_lookup_entry_ll(
	vm_map_t                map,
	vm_map_offset_t         address,
	vm_map_entry_t          *entry)         /* OUT */
{
	vm_map_entry_t          cur;
	vm_map_entry_t          last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	cur = map->hint;

	if (cur == vm_map_to_entry(map)) {
		cur = cur->vme_next;
	}

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return TRUE;
		}
	} else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */

	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

				*entry = cur;
				SAVE_HINT_MAP_READ(map, cur);

				return TRUE;
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT_MAP_READ(map, *entry);

	return FALSE;
}
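
/*
 * Worked example (addresses and entries hypothetical, not from this
 * file): with entries [0x1000,0x2000) and [0x5000,0x6000) in "map",
 *
 *	vm_map_store_lookup_entry_ll(map, 0x1800, &e)
 *		returns TRUE,  *e = [0x1000,0x2000)       (contains it)
 *	vm_map_store_lookup_entry_ll(map, 0x3000, &e)
 *		returns FALSE, *e = [0x1000,0x2000)       (precedes it)
 *	vm_map_store_lookup_entry_ll(map, 0x0800, &e)
 *		returns FALSE, *e = vm_map_to_entry(map)  (list header)
 */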

void
vm_map_store_entry_link_ll( struct vm_map_header *mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	_vm_map_entry_link_ll( mapHdr, after_where, entry);
}

void
vm_map_store_entry_unlink_ll( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	_vm_map_entry_unlink_ll( mapHdr, entry);
}

void
vm_map_store_copy_reset_ll( vm_map_copy_t copy, __unused vm_map_entry_t entry, __unused int nentries)
{
	/*
	 * Reset the copy's list to empty: both the first and last
	 * entry pointers collapse back onto the header sentinel.
	 */
	copy->cpy_hdr.nentries = 0;
	vm_map_copy_first_entry(copy) =
	    vm_map_copy_last_entry(copy) =
	    vm_map_copy_to_entry(copy);
}

void
update_first_free_ll( vm_map_t map, vm_map_entry_t new_first_free)
{
	/*
	 * first_free is only maintained when free space is tracked via
	 * the linked list; with the hole list enabled there is nothing
	 * to update.
	 */
	if (map->holelistenabled) {
		return;
	}

	UPDATE_FIRST_FREE_LL( map, new_first_free);
}
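
/*
 * Hypothetical usage sketch (caller names assumed, not from this file):
 * after linking a new entry with the map locked, a caller refreshes
 * first_free starting from the entry just before the insertion point:
 *
 *	vm_map_store_entry_link_ll(&map->hdr, after_where, new_entry);
 *	update_first_free_ll(map, after_where);
 */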
260