xref: /xnu-8019.80.24/osfmk/vm/vm_map_store.h (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2009 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_MAP_STORE_H
30 #define _VM_VM_MAP_STORE_H
31 
32 /*
33  #ifndef VM_MAP_STORE_USE_LL
34  #define VM_MAP_STORE_USE_LL
35  #endif
36  */
37 #ifndef VM_MAP_STORE_USE_RB
38 #define VM_MAP_STORE_USE_RB
39 #endif
40 
41 #include <libkern/tree.h>
42 #include <mach/shared_region.h>
43 
44 struct _vm_map;
45 struct vm_map_entry;
46 struct vm_map_copy;
47 struct vm_map_header;
48 
/*
 * Per-entry linkage for the map's backing store.  With the red-black
 * tree store enabled (VM_MAP_STORE_USE_RB, the default above) this
 * holds the RB node (libkern/tree.h); with only the linked-list store
 * it is empty.  NOTE(review): presumably embedded in each
 * vm_map_entry -- confirm in vm_map.h.
 */
49 struct vm_map_store {
50 #ifdef VM_MAP_STORE_USE_RB
	/* node linkage in the per-map red-black tree */
51 	RB_ENTRY(vm_map_store) entry;
52 #endif
53 };
54 
55 #ifdef VM_MAP_STORE_USE_RB
/* root type for a red-black tree of vm_map_store nodes */
56 RB_HEAD( rb_head, vm_map_store );
57 #endif
58 
59 #include <vm/vm_map.h>
60 #include <vm/vm_map_store_ll.h>
61 #include <vm/vm_map_store_rb.h>
62 
63 /*
64  * GuardMalloc support:-
65  * Some of these entries are created with MAP_FIXED.
66  * Some are created with a very high hint address.
67  * So we use aliases and address ranges to make sure
68  * that those special regions (nano, jit etc) don't
69  * result in our highest hint being set to near
70  * the end of the map and future allocations getting
71  * KERN_NO_SPACE when running with guardmalloc.
 *
 * UPDATE_HIGHEST_ENTRY_END(map, highest_entry):
 *	Raises map->highest_entry_end to highest_entry->vme_end, unless
 *	the entry is a malloc-zone alias (NANO/TINY/SMALL/MEDIUM/LARGE/
 *	HUGE), is used for JIT, or starts inside the shared region
 *	[SHARED_REGION_BASE, SHARED_REGION_BASE + SHARED_REGION_SIZE).
 *	Caller must have set disable_vmentry_reuse and the map must not
 *	be a nested map (both asserted below).
72  */
73 #define UPDATE_HIGHEST_ENTRY_END(map, highest_entry)                    \
74 	MACRO_BEGIN                                                     \
75 	struct _vm_map*	UHEE_map;                                       \
76 	struct vm_map_entry*	UHEE_entry;                             \
77 	UHEE_map = (map);                                               \
78 	assert(UHEE_map->disable_vmentry_reuse);                        \
79 	assert(!UHEE_map->is_nested_map);                               \
80 	UHEE_entry = (highest_entry);                                   \
81 	int UHEE_alias = VME_ALIAS(UHEE_entry); \
82 	if(UHEE_alias != VM_MEMORY_MALLOC_NANO && \
83 	   UHEE_alias != VM_MEMORY_MALLOC_TINY && \
84 	   UHEE_alias != VM_MEMORY_MALLOC_SMALL && \
85 	   UHEE_alias != VM_MEMORY_MALLOC_MEDIUM && \
86 	   UHEE_alias != VM_MEMORY_MALLOC_LARGE && \
87 	   UHEE_alias != VM_MEMORY_MALLOC_HUGE && \
88 	   UHEE_entry->used_for_jit == 0 && \
89 	   (UHEE_entry->vme_start < SHARED_REGION_BASE || \
90 	   UHEE_entry->vme_start >= (SHARED_REGION_BASE + SHARED_REGION_SIZE)) && \
91 	   UHEE_map->highest_entry_end < UHEE_entry->vme_end) {        \
92 	        UHEE_map->highest_entry_end = UHEE_entry->vme_end;      \
93 	}                                                               \
94 	MACRO_END
95 
/*
 * VM_MAP_HIGHEST_ENTRY(map, entry, start):
 *	Starting one page past map->highest_entry_end, probe upward while
 *	each probed address is still covered by a mapping, advancing
 *	highest_entry_end past each entry found.  On exit, 'start' is the
 *	first probed address with no mapping and 'entry' is whatever the
 *	final (failed) lookup stored in tmp_entry.
 *	NOTE(review): relies on vm_map_lookup_entry() storing *tmp_entry
 *	even when it returns FALSE -- confirm against its implementation.
 *	Requires disable_vmentry_reuse; map must not be nested (asserted).
 */
96 #define VM_MAP_HIGHEST_ENTRY(map, entry, start)                         \
97 	MACRO_BEGIN                                                     \
98 	struct _vm_map* VMHE_map;                                       \
99 	struct vm_map_entry*	tmp_entry;                              \
100 	vm_map_offset_t VMHE_start;                                     \
101 	VMHE_map = (map);                                               \
102 	assert(VMHE_map->disable_vmentry_reuse);                        \
103 	assert(!VMHE_map->is_nested_map);                               \
104 	VMHE_start= VMHE_map->highest_entry_end + PAGE_SIZE_64;         \
105 	while(vm_map_lookup_entry(VMHE_map, VMHE_start, &tmp_entry)){   \
106 	        VMHE_map->highest_entry_end = tmp_entry->vme_end;       \
107 	        VMHE_start = VMHE_map->highest_entry_end + PAGE_SIZE_64; \
108 	}                                                               \
109 	entry = tmp_entry;                                              \
110 	start = VMHE_start;                                             \
111 	MACRO_END
112 
113 /*
114  *	SAVE_HINT_MAP_READ:
115  *
116  *	Saves the specified entry as the hint for
117  *	future lookups.  only a read lock is held on map,
118  *      so make sure the store is atomic... OSCompareAndSwap
119  *	guarantees this... also, we don't care if we collide
120  *	and someone else wins and stores their 'hint'
 *
 *	The CAS re-reads (map)->hint as its expected old value, so the
 *	store only takes effect if the hint is still what we just
 *	observed; a lost race simply leaves the other thread's hint
 *	in place, which is acceptable for a lookup hint.
121  */
122 #define SAVE_HINT_MAP_READ(map, value) \
123 	MACRO_BEGIN                                                     \
124 	OSCompareAndSwapPtr((map)->hint, value, &(map)->hint); \
125 	MACRO_END
126 
127 
128 /*
129  *	SAVE_HINT_MAP_WRITE:
130  *
131  *	Saves the specified entry as the hint for
132  *	future lookups.  write lock held on map,
133  *      so no one else can be writing or looking
134  *      until the lock is dropped, so it's safe
135  *      to just do an assignment
 *
 *	map:   the map whose hint is updated (write lock held by caller)
 *	value: the entry to cache as the new lookup hint
136  */
137 
138 #define SAVE_HINT_MAP_WRITE(map, value) \
139 	MACRO_BEGIN                    \
140 	(map)->hint = (value);         \
141 	MACRO_END
141 
/*
 *	SAVE_HINT_HOLE_WRITE:
 *
 *	Caches 'value' as the map's hole_hint via plain assignment.
 *	NOTE(review): presumably requires the map write lock, mirroring
 *	SAVE_HINT_MAP_WRITE -- confirm at call sites.
 */
142 #define SAVE_HINT_HOLE_WRITE(map, value) \
143 	MACRO_BEGIN                    \
144 	(map)->hole_hint = (value);     \
145 	MACRO_END
146 
/*
 * Sentinel checked by the store layer to bypass red-black-tree updates.
 * NOTE(review): meaning inferred from the name -- confirm in
 * vm_map_store.c / vm_map_store_rb.c before relying on it.
 */
147 #define SKIP_RB_TREE            0xBAADC0D1
148 
/*
 * Operation codes -- presumably the 'int' argument of
 * vm_map_store_update() below; verify at its call sites.
 */
149 #define VM_MAP_ENTRY_CREATE     1
150 #define VM_MAP_ENTRY_DELETE     2
151 
/*
 * Store-layer interface.  These dispatch to the linked-list and/or
 * red-black-tree backing implementations (vm_map_store_ll.h /
 * vm_map_store_rb.h) selected by the VM_MAP_STORE_USE_* options above.
 * The leading-underscore variants operate on a bare vm_map_header;
 * the others take the owning map.
 */
152 void vm_map_store_init( struct vm_map_header*  );
/* Address lookup -- NOTE(review): presumably returns TRUE when the
 * address falls inside an entry; confirm against the implementation. */
153 boolean_t vm_map_store_lookup_entry( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**);
/* 'int' is an operation code (see VM_MAP_ENTRY_CREATE/DELETE above). */
154 void    vm_map_store_update( struct _vm_map*, struct vm_map_entry*, int);
155 void    vm_map_store_find_last_free( struct _vm_map*, struct vm_map_entry**);
/* Insert/remove an entry in the store (header-level and map-level). */
156 void    _vm_map_store_entry_link( struct vm_map_header *, struct vm_map_entry*, struct vm_map_entry*);
157 void    vm_map_store_entry_link( struct _vm_map*, struct vm_map_entry*, struct vm_map_entry*, vm_map_kernel_flags_t);
158 void    _vm_map_store_entry_unlink( struct vm_map_header *, struct vm_map_entry*);
159 void    vm_map_store_entry_unlink( struct _vm_map*, struct vm_map_entry*);
160 void    vm_map_store_update_first_free( struct _vm_map*, struct vm_map_entry*, boolean_t new_entry_creation);
161 void    vm_map_store_copy_reset( struct vm_map_copy*, struct vm_map_entry*);
162 #if MACH_ASSERT
/* Debug-only consistency check for the map's first-free hint. */
163 boolean_t first_free_is_valid_store( struct _vm_map*);
164 #endif
165 boolean_t vm_map_store_has_RB_support( struct vm_map_header *hdr );
166 
167 #endif /* _VM_VM_MAP_STORE_H */
168