xref: /xnu-8792.61.2/osfmk/vm/vm_map_store.h (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2009 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_MAP_STORE_H
30 #define _VM_VM_MAP_STORE_H
31 
32 /*
33  #ifndef VM_MAP_STORE_USE_LL
34  #define VM_MAP_STORE_USE_LL
35  #endif
36  */
37 #ifndef VM_MAP_STORE_USE_RB
38 #define VM_MAP_STORE_USE_RB
39 #endif
40 
41 #include <libkern/tree.h>
42 #include <mach/shared_region.h>
43 
44 struct _vm_map;
45 struct vm_map_entry;
46 struct vm_map_copy;
47 struct vm_map_header;
48 
/*
 * Per-entry linkage record for the map store.  When the red-black
 * tree backend is enabled, each map entry embeds one of these to
 * hold its RB tree linkage (see RB_ENTRY in <libkern/tree.h>).
 */
struct vm_map_store {
#ifdef VM_MAP_STORE_USE_RB
	RB_ENTRY(vm_map_store) entry;   /* RB tree node: parent/left/right links */
#endif
};
54 
55 #ifdef VM_MAP_STORE_USE_RB
56 RB_HEAD( rb_head, vm_map_store );
57 #endif
58 
59 #include <vm/vm_map.h>
60 #include <vm/vm_map_store_ll.h>
61 #include <vm/vm_map_store_rb.h>
62 
63 /*
64  * GuardMalloc support:-
65  * Some of these entries are created with MAP_FIXED.
66  * Some are created with a very high hint address.
67  * So we use aliases and address ranges to make sure
68  * that those special regions (nano, jit etc) don't
69  * result in our highest hint being set to near
70  * the end of the map and future alloctions getting
71  * KERN_NO_SPACE when running with guardmalloc.
72  */
#define UPDATE_HIGHEST_ENTRY_END(map, highest_entry)                    \
	MACRO_BEGIN                                                     \
	struct _vm_map*	UHEE_map;                                       \
	struct vm_map_entry*	UHEE_entry;                             \
	UHEE_map = (map);                                               \
	/* only meaningful when vm entry reuse is disabled (guardmalloc) */ \
	assert(UHEE_map->disable_vmentry_reuse);                        \
	assert(!UHEE_map->is_nested_map);                               \
	UHEE_entry = (highest_entry);                                   \
	int UHEE_alias = VME_ALIAS(UHEE_entry); \
	/* Skip malloc-zone and JIT entries, and anything whose start */ \
	/* lies inside the shared region, so those special high       */ \
	/* mappings don't drag highest_entry_end toward the end of    */ \
	/* the map (see the block comment above).                     */ \
	if(UHEE_alias != VM_MEMORY_MALLOC_NANO && \
	   UHEE_alias != VM_MEMORY_MALLOC_TINY && \
	   UHEE_alias != VM_MEMORY_MALLOC_SMALL && \
	   UHEE_alias != VM_MEMORY_MALLOC_MEDIUM && \
	   UHEE_alias != VM_MEMORY_MALLOC_LARGE && \
	   UHEE_alias != VM_MEMORY_MALLOC_HUGE && \
	   UHEE_entry->used_for_jit == 0 && \
	   (UHEE_entry->vme_start < SHARED_REGION_BASE || \
	   UHEE_entry->vme_start >= (SHARED_REGION_BASE + SHARED_REGION_SIZE)) && \
	   UHEE_map->highest_entry_end < UHEE_entry->vme_end) {        \
	        UHEE_map->highest_entry_end = UHEE_entry->vme_end;      \
	}                                                               \
	MACRO_END
95 
/*
 *	VM_MAP_HIGHEST_ENTRY:
 *
 *	Computes a candidate allocation address one page past the map's
 *	recorded highest_entry_end, then walks forward over any entries
 *	already mapped at that address.  Yields the last entry examined
 *	in "entry" and the first address not covered by an existing
 *	entry in "start".  Only valid when disable_vmentry_reuse is set.
 *	NOTE(review): relies on vm_map_lookup_entry() leaving a usable
 *	preceding entry in tmp_entry even when the lookup misses —
 *	confirm against the lookup implementation.
 */
#define VM_MAP_HIGHEST_ENTRY(map, entry, start)                         \
	MACRO_BEGIN                                                     \
	struct _vm_map* VMHE_map;                                       \
	struct vm_map_entry*	tmp_entry;                              \
	vm_map_offset_t VMHE_start;                                     \
	VMHE_map = (map);                                               \
	assert(VMHE_map->disable_vmentry_reuse);                        \
	assert(!VMHE_map->is_nested_map);                               \
	VMHE_start= VMHE_map->highest_entry_end + PAGE_SIZE_64;         \
	while(vm_map_lookup_entry(VMHE_map, VMHE_start, &tmp_entry)){   \
	        VMHE_start = tmp_entry->vme_end + PAGE_SIZE_64; \
	}                                                               \
	entry = tmp_entry;                                              \
	start = VMHE_start;                                             \
	MACRO_END
111 
112 /*
113  *	SAVE_HINT_MAP_READ:
114  *
115  *	Saves the specified entry as the hint for
116  *	future lookups.  only a read lock is held on map,
117  *      so make sure the store is atomic... OSCompareAndSwap
118  *	guarantees this... also, we don't care if we collide
119  *	and someone else wins and stores their 'hint'
120  */
121 #define SAVE_HINT_MAP_READ(map, value) \
122 	MACRO_BEGIN                                                     \
123 	OSCompareAndSwapPtr((map)->hint, value, &(map)->hint); \
124 	MACRO_END
125 
126 
127 /*
128  *	SAVE_HINT_MAP_WRITE:
129  *
130  *	Saves the specified entry as the hint for
131  *	future lookups.  write lock held on map,
132  *      so no one else can be writing or looking
133  *      until the lock is dropped, so it's safe
134  *      to just do an assignment
135  */
136 #define SAVE_HINT_MAP_WRITE(map, value) \
137 	MACRO_BEGIN                    \
138 	(map)->hint = (value);         \
139 	MACRO_END
140 
/*
 *	SAVE_HINT_HOLE_WRITE:
 *
 *	Saves the specified hole as the hole hint for future lookups.
 *	Plain (non-atomic) assignment, so callers presumably hold the
 *	map write lock — same contract as SAVE_HINT_MAP_WRITE; confirm
 *	at call sites.
 */
#define SAVE_HINT_HOLE_WRITE(map, value) \
	MACRO_BEGIN                    \
	(map)->hole_hint = (value);     \
	MACRO_END
145 
/* Sentinel value; presumably signals "skip the RB tree update" to the
 * store backend — confirm against the .c implementation. */
#define SKIP_RB_TREE            0xBAADC0D1

/* Operation codes passed to vm_map_store_update(). */
#define VM_MAP_ENTRY_CREATE     1
#define VM_MAP_ENTRY_DELETE     2

/* Initialize the store (RB tree and/or list) backing a map header. */
void vm_map_store_init( struct vm_map_header*  );
/* Look up the entry containing the given address in the map's store. */
boolean_t vm_map_store_lookup_entry( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**);
/* Notify the store of an entry create/delete (see codes above). */
void    vm_map_store_update( struct _vm_map*, struct vm_map_entry*, int);
/* Link an entry into a raw map header, after the given entry. */
void    _vm_map_store_entry_link( struct vm_map_header *, struct vm_map_entry*, struct vm_map_entry*);
/* Link an entry into a map, after the given entry. */
void    vm_map_store_entry_link( struct _vm_map*, struct vm_map_entry*, struct vm_map_entry*, vm_map_kernel_flags_t);
/* Unlink an entry from a raw map header. */
void    _vm_map_store_entry_unlink( struct vm_map_header *, struct vm_map_entry*, bool);
/* Unlink an entry from a map. */
void    vm_map_store_entry_unlink( struct _vm_map*, struct vm_map_entry*, bool);
/* Update the map's first-free hint after an entry create/delete. */
void    vm_map_store_update_first_free( struct _vm_map*, struct vm_map_entry*, boolean_t new_entry_creation);
/* Reset the store state of a map copy object. */
void    vm_map_store_copy_reset( struct vm_map_copy*, struct vm_map_entry*);
#if MACH_ASSERT
/* Debug-only sanity check of the first-free hint (MACH_ASSERT builds). */
boolean_t first_free_is_valid_store( struct _vm_map*);
#endif
/* Whether the given map header carries RB tree support. */
boolean_t vm_map_store_has_RB_support( struct vm_map_header *hdr );
164 
/*
 * Find free space in "map" for an allocation of "size" bytes (plus
 * "guard_offset") subject to alignment "mask", searching from "hint"
 * toward "limit" — in descending order when "backwards" is TRUE.
 * On success the chosen address is stored in *addr_out and the
 * relevant map entry is returned.
 * NOTE(review): the exact return convention on failure (NULL?) and
 * which entry is returned (preceding vs. containing) are defined by
 * the implementation — confirm there before relying on them.
 */
struct vm_map_entry *
vm_map_store_find_space(
	vm_map_t                map,
	vm_map_offset_t         hint,
	vm_map_offset_t         limit,
	boolean_t               backwards,
	vm_map_offset_t         guard_offset,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_offset_t        *addr_out);
175 
176 #endif /* _VM_VM_MAP_STORE_H */
177