/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management definitions.
 */
65
66 #ifndef _VM_VM_KERN_H_
67 #define _VM_VM_KERN_H_
68
69 #ifdef XNU_KERNEL_PRIVATE
70 #include <kern/locks.h>
71 #endif /* XNU_KERNEL_PRIVATE */
72
73 #ifdef __cplusplus
74 extern "C" {
75 #endif
76
77 #include <mach/mach_types.h>
78 #include <mach/boolean.h>
79 #include <mach/kern_return.h>
80 #include <mach/vm_types.h>
81
82 #ifdef KERNEL_PRIVATE
83
84 #ifdef XNU_KERNEL_PRIVATE
85
86 struct vm_page;
87
88 __options_decl(kma_flags_t, uint32_t, {
89 KMA_NONE = 0x00000000,
90 KMA_NOPAGEWAIT = 0x00000002,
91 KMA_KOBJECT = 0x00000004,
92 KMA_LOMEM = 0x00000008,
93 KMA_GUARD_FIRST = 0x00000010,
94 KMA_GUARD_LAST = 0x00000020,
95 KMA_PERMANENT = 0x00000040,
96 KMA_NOENCRYPT = 0x00000080,
97 KMA_KSTACK = 0x00000100,
98 KMA_VAONLY = 0x00000200,
99 /*
100 * Pages belonging to the compressor are not on the paging queues,
101 * nor are they counted as wired.
102 */
103 KMA_COMPRESSOR = 0x00000400,
104 KMA_ATOMIC = 0x00000800,
105 KMA_ZERO = 0x00001000,
106 KMA_PAGEABLE = 0x00002000,
107 KMA_LAST_FREE = 0x00004000,
108 });
109
110 extern kern_return_t kernel_memory_allocate(
111 vm_map_t map,
112 vm_offset_t *addrp,
113 vm_size_t size,
114 vm_offset_t mask,
115 kma_flags_t flags,
116 vm_tag_t tag);
117
118 static inline kern_return_t
kmem_alloc(vm_map_t map,vm_offset_t * addrp,vm_size_t size,vm_tag_t tag)119 kmem_alloc(
120 vm_map_t map,
121 vm_offset_t *addrp,
122 vm_size_t size,
123 vm_tag_t tag)
124 {
125 return kernel_memory_allocate(map, addrp, size, 0, KMA_NONE, tag);
126 }
127
128 static inline kern_return_t
kmem_alloc_pageable(vm_map_t map,vm_offset_t * addrp,vm_size_t size,vm_tag_t tag)129 kmem_alloc_pageable(
130 vm_map_t map,
131 vm_offset_t *addrp,
132 vm_size_t size,
133 vm_tag_t tag)
134 {
135 return kernel_memory_allocate(map, addrp, size, 0, KMA_PAGEABLE, tag);
136 }
137
138 static inline kern_return_t
kmem_alloc_kobject(vm_map_t map,vm_offset_t * addrp,vm_size_t size,vm_tag_t tag)139 kmem_alloc_kobject(
140 vm_map_t map,
141 vm_offset_t *addrp,
142 vm_size_t size,
143 vm_tag_t tag)
144 {
145 return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
146 }
147
148 extern kern_return_t kmem_alloc_contig(
149 vm_map_t map,
150 vm_offset_t *addrp,
151 vm_size_t size,
152 vm_offset_t mask,
153 ppnum_t max_pnum,
154 ppnum_t pnum_mask,
155 kma_flags_t flags,
156 vm_tag_t tag);
157
158 extern kern_return_t kmem_realloc(
159 vm_map_t map,
160 vm_offset_t oldaddr,
161 vm_size_t oldsize,
162 vm_offset_t *newaddrp,
163 vm_size_t newsize,
164 vm_tag_t tag);
165
166 extern void kmem_free(
167 vm_map_t map,
168 vm_offset_t addr,
169 vm_size_t size);
170
171 extern kern_return_t kmem_suballoc(
172 vm_map_t parent,
173 vm_offset_t *addr,
174 vm_size_t size,
175 vm_map_create_options_t vmc_options,
176 int flags,
177 vm_map_kernel_flags_t vmk_flags,
178 vm_tag_t tag,
179 vm_map_t *new_map);
180
181 extern void kernel_memory_populate_with_pages(
182 vm_map_t map,
183 vm_offset_t addr,
184 vm_size_t size,
185 struct vm_page *page_list,
186 kma_flags_t flags,
187 vm_tag_t tag,
188 vm_prot_t prot);
189
190 extern kern_return_t kernel_memory_populate(
191 vm_map_t map,
192 vm_offset_t addr,
193 vm_size_t size,
194 kma_flags_t flags,
195 vm_tag_t tag);
196
197 extern void kernel_memory_depopulate(
198 vm_map_t map,
199 vm_offset_t addr,
200 vm_size_t size,
201 kma_flags_t flags,
202 vm_tag_t tag);
203
204 extern kern_return_t memory_object_iopl_request(
205 ipc_port_t port,
206 memory_object_offset_t offset,
207 upl_size_t *upl_size,
208 upl_t *upl_ptr,
209 upl_page_info_array_t user_page_list,
210 unsigned int *page_list_count,
211 upl_control_flags_t *flags,
212 vm_tag_t tag);
213
214 struct mach_memory_info;
215 extern kern_return_t vm_page_diagnose(
216 struct mach_memory_info *info,
217 unsigned int num_info,
218 uint64_t zones_collectable_bytes);
219
220 extern uint32_t vm_page_diagnose_estimate(void);
221
222 typedef enum {
223 PMAP_FEAT_UEXEC = 1
224 } pmap_feature_flags_t;
225
226 #if defined(__x86_64__)
227 extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);
228 #endif
229
230 #if DEBUG || DEVELOPMENT
231
232 extern kern_return_t mach_memory_info_check(void);
233
234 extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size);
235
236 #endif /* DEBUG || DEVELOPMENT */
237
238 #if HIBERNATION
239 extern void hibernate_rebuild_vm_structs(void);
240 #endif /* HIBERNATION */
241
242 extern vm_tag_t vm_tag_bt(void);
243
244 extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site);
245
246 extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP);
247
248 extern void vm_tag_update_size(vm_tag_t tag, int64_t size);
249
250 #if VM_TAG_SIZECLASSES
251
252 extern void vm_allocation_zones_init(void);
253 extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags);
254 extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta);
255
256 #endif /* VM_TAG_SIZECLASSES */
257
258 extern vm_tag_t vm_tag_bt_debug(void);
259
260 extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen);
261
262 extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);
263
264 extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);
265
266 #else /* XNU_KERNEL_PRIVATE */
267
268 extern kern_return_t kmem_alloc(
269 vm_map_t map,
270 vm_offset_t *addrp,
271 vm_size_t size);
272
273 extern kern_return_t kmem_alloc_pageable(
274 vm_map_t map,
275 vm_offset_t *addrp,
276 vm_size_t size);
277
278 extern kern_return_t kmem_alloc_kobject(
279 vm_map_t map,
280 vm_offset_t *addrp,
281 vm_size_t size);
282
283 extern void kmem_free(
284 vm_map_t map,
285 vm_offset_t addr,
286 vm_size_t size);
287
288 #endif /* !XNU_KERNEL_PRIVATE */
289
290
291 #ifdef XNU_KERNEL_PRIVATE
292 typedef struct vm_allocation_site kern_allocation_name;
293 typedef kern_allocation_name * kern_allocation_name_t;
294 #else /* XNU_KERNEL_PRIVATE */
295 struct kern_allocation_name;
296 typedef struct kern_allocation_name * kern_allocation_name_t;
297 #endif /* !XNU_KERNEL_PRIVATE */
298
299 extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs);
300 extern void kern_allocation_name_release(kern_allocation_name_t allocation);
301 extern const char * kern_allocation_get_name(kern_allocation_name_t allocation);
302 #ifdef XNU_KERNEL_PRIVATE
303 extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta);
304 extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta);
305 extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation);
306 #endif /* XNU_KERNEL_PRIVATE */
307
#ifdef MACH_KERNEL_PRIVATE

/* One-time initialization of the kernel map covering [start, end). */
extern void kmem_init(
	vm_offset_t start,
	vm_offset_t end);

/* Copy `length` bytes from `fromaddr` in `map` into kernel buffer `todata`. */
extern kern_return_t copyinmap(
	vm_map_t        map,
	vm_map_offset_t fromaddr,
	void            *todata,
	vm_size_t       length);

/* Copy `length` bytes from kernel buffer `fromdata` to `toaddr` in `map`. */
extern kern_return_t copyoutmap(
	vm_map_t        map,
	void            *fromdata,
	vm_map_offset_t toaddr,
	vm_size_t       length);

extern kern_return_t copyoutmap_atomic32(
	vm_map_t        map,
	uint32_t        value,
	vm_map_offset_t toaddr);

extern kern_return_t copyoutmap_atomic64(
	vm_map_t        map,
	uint64_t        value,
	vm_map_offset_t toaddr);

extern kern_return_t kmem_alloc_external(
	vm_map_t    map,
	vm_offset_t *addrp,
	vm_size_t   size);

extern kern_return_t kmem_alloc_kobject_external(
	vm_map_t    map,
	vm_offset_t *addrp,
	vm_size_t   size);

extern kern_return_t kmem_alloc_pageable_external(
	vm_map_t    map,
	vm_offset_t *addrp,
	vm_size_t   size);

#endif /* MACH_KERNEL_PRIVATE */
352
#ifdef XNU_KERNEL_PRIVATE

/*
 * Tagged, kernel-internal counterparts of the mach_vm_* / vm_map_*
 * user-visible interfaces.  Each takes a vm_tag_t for allocation
 * accounting in addition to the standard parameters.
 */
extern kern_return_t mach_vm_allocate_kernel(
	vm_map_t        map,
	mach_vm_offset_t *addr,
	mach_vm_size_t  size,
	int             flags,
	vm_tag_t        tag);

extern kern_return_t mach_vm_map_kernel(
	vm_map_t              target_map,
	mach_vm_offset_t      *address,
	mach_vm_size_t        initial_size,
	mach_vm_offset_t      mask,
	int                   flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t              tag,
	ipc_port_t            port,
	vm_object_offset_t    offset,
	boolean_t             copy,
	vm_prot_t             cur_protection,
	vm_prot_t             max_protection,
	vm_inherit_t          inheritance);


extern kern_return_t vm_map_kernel(
	vm_map_t              target_map,
	vm_offset_t           *address,
	vm_size_t             size,
	vm_offset_t           mask,
	int                   flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t              tag,
	ipc_port_t            port,
	vm_offset_t           offset,
	boolean_t             copy,
	vm_prot_t             cur_protection,
	vm_prot_t             max_protection,
	vm_inherit_t          inheritance);

extern kern_return_t mach_vm_remap_kernel(
	vm_map_t         target_map,
	mach_vm_offset_t *address,
	mach_vm_size_t   size,
	mach_vm_offset_t mask,
	int              flags,
	vm_tag_t         tag,
	vm_map_t         src_map,
	mach_vm_offset_t memory_address,
	boolean_t        copy,
	vm_prot_t        *cur_protection,   /* in/out */
	vm_prot_t        *max_protection,   /* in/out */
	vm_inherit_t     inheritance);

extern kern_return_t vm_remap_kernel(
	vm_map_t     target_map,
	vm_offset_t  *address,
	vm_size_t    size,
	vm_offset_t  mask,
	int          flags,
	vm_tag_t     tag,
	vm_map_t     src_map,
	vm_offset_t  memory_address,
	boolean_t    copy,
	vm_prot_t    *cur_protection,       /* in/out */
	vm_prot_t    *max_protection,       /* in/out */
	vm_inherit_t inheritance);

extern kern_return_t vm_map_64_kernel(
	vm_map_t              target_map,
	vm_offset_t           *address,
	vm_size_t             size,
	vm_offset_t           mask,
	int                   flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t              tag,
	ipc_port_t            port,
	vm_object_offset_t    offset,
	boolean_t             copy,
	vm_prot_t             cur_protection,
	vm_prot_t             max_protection,
	vm_inherit_t          inheritance);

extern kern_return_t mach_vm_wire_kernel(
	host_priv_t      host_priv,
	vm_map_t         map,
	mach_vm_offset_t start,
	mach_vm_size_t   size,
	vm_prot_t        access,
	vm_tag_t         tag);

extern kern_return_t vm_map_wire_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       caller_prot,
	vm_tag_t        tag,
	boolean_t       user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_prot_t       caller_prot,
	vm_tag_t        tag,
	boolean_t       user_wire,
	ppnum_t         *physpage_p);       /* out: physical page number */

#endif /* XNU_KERNEL_PRIVATE */
461
462 extern vm_map_t kernel_map;
463 extern vm_map_t kernel_pageable_map;
464 extern vm_map_t ipc_kernel_map;
465 extern vm_map_t g_kext_map;
466
467 #endif /* KERNEL_PRIVATE */
468
469 #ifdef KERNEL
470
471 __BEGIN_DECLS
472 #if MACH_KERNEL_PRIVATE
473 extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr)
474 __XNU_INTERNAL(vm_kernel_addrhash);
475 #else
476 extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
477 #endif
478 __END_DECLS
479
480 extern void vm_kernel_addrhide(
481 vm_offset_t addr,
482 vm_offset_t *hide_addr);
483
484 extern vm_offset_t vm_kernel_addrperm_ext;
485
486 extern void vm_kernel_addrperm_external(
487 vm_offset_t addr,
488 vm_offset_t *perm_addr);
489
490 extern void vm_kernel_unslide_or_perm_external(
491 vm_offset_t addr,
492 vm_offset_t *up_addr);
493
494 #if MACH_KERNEL_PRIVATE
495 extern uint64_t vm_kernel_addrhash_salt;
496 extern uint64_t vm_kernel_addrhash_salt_ext;
497
498 extern void vm_kernel_addrhash_external(
499 vm_offset_t addr,
500 vm_offset_t *perm_addr);
501 #endif /* MACH_KERNEL_PRIVATE */
502
503 extern void vm_init_before_launchd(void);
504
505 #endif /* KERNEL */
506
507 #ifdef __cplusplus
508 }
509 #endif
510
511 #endif /* _VM_VM_KERN_H_ */
512