/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management definitions.
 */

#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/vm_types.h>
#ifdef XNU_KERNEL_PRIVATE
#include <kern/locks.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef KERNEL_PRIVATE
extern vm_map_t kernel_map;
extern vm_map_t ipc_kernel_map;
extern vm_map_t g_kext_map;
#endif /* KERNEL_PRIVATE */

#pragma mark - the kmem subsystem
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*
 * "kmem" is a set of methods that provide interfaces suitable
 * to allocate memory from the VM in the kernel map or submaps.
 *
 * It provides leaner alternatives to some of the VM functions,
 * closer to a typical allocator.
 */

struct vm_page;
struct vm_map_entry;

/*!
 * @typedef
 *
 * @brief
 * Pair of a return code and size/address/... used by kmem interfaces.
 *
 * @discussion
 * Using a pair of integers allows the compiler to return everything
 * through registers, and doesn't need to use stack values to get results,
 * which yields significantly better codegen.
 *
 * If @c kmr_return is not @c KERN_SUCCESS, then the other field
 * of the union is always supposed to be 0.
 */
typedef struct {
    kern_return_t kmr_return;
    union {
        vm_address_t kmr_address;
        vm_size_t kmr_size;
        void *kmr_ptr;
        vm_map_t kmr_submap;
    };
} kmem_return_t;
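
/*
 * Illustrative sketch (not part of the original interface docs): consuming
 * a kmem_return_t from an interface such as kmem_suballoc(), declared
 * below. Only the union field matching the call is meaningful, and only
 * when kmr_return is KERN_SUCCESS (use_submap() is a hypothetical consumer):
 *
 *    kmem_return_t kmr = kmem_suballoc(...);
 *
 *    if (kmr.kmr_return != KERN_SUCCESS) {
 *        return kmr.kmr_return;
 *    }
 *    use_submap(kmr.kmr_submap);
 */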

/*!
 * @typedef
 *
 * @brief
 * Pair of a min/max address used to denote a memory region.
 */
typedef struct kmem_range {
    vm_offset_t min_address;
    vm_offset_t max_address;
} __attribute__((aligned(2 * sizeof(vm_offset_t)))) * kmem_range_t;

/*!
 * @typedef kmem_flags_t
 *
 * @brief
 * Sets of flags taken by several of the @c kmem_* family of functions.
 *
 * @discussion
 * This type is not used directly by any function, it is an underlying raw
 * type that is re-vended under different namespaces for each @c kmem_*
 * interface.
 *
 * - @c kmem_alloc uses @c kma_flags_t / @c KMA_* namespaced values.
 * - @c kmem_suballoc uses @c kms_flags_t / @c KMS_* namespaced values.
 *
 *
 * <h2>Call behavior</h2>
 *
 * @const KMEM_NONE (all)
 * Pass this when no special options are to be used.
 *
 * @const KMEM_NOFAIL (alloc, suballoc)
 * When this flag is passed, any allocation failure results in a panic().
 * Using this flag should really be limited to cases when failure is not
 * recoverable and possibly during early boot only.
 *
 * @const KMEM_NOPAGEWAIT (alloc)
 * Pass this flag if the system should not wait in VM_PAGE_WAIT().
 *
 *
 * <h2>How the entry is populated</h2>
 *
 * @const KMEM_VAONLY (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_VAONLY will cause the range to still be wired,
 * but no page is actually mapped.
 *
 * @const KMEM_PAGEABLE (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_PAGEABLE makes the entry non wired, and pages will be
 * added to the entry as it faults.
 *
 * @const KMEM_ZERO (alloc)
 * Any new page added is zeroed.
 *
 *
 * <h2>VM object to use for the entry</h2>
 *
 * @const KMEM_KOBJECT (alloc)
 * The entry will be made for the @c kernel_object.
 *
 * Note that the @c kernel_object is just a "collection of pages".
 * Pages in that object can't be remapped or present in several VM maps
 * like traditional objects.
 *
 * If neither @c KMEM_KOBJECT nor @c KMEM_COMPRESSOR is passed,
 * a fresh VM object will be made for this allocation.
 * This is expensive and should be limited to allocations that
 * need the features associated with a VM object.
 *
 * @const KMEM_COMPRESSOR (alloc)
 * The entry is allocated for the @c compressor_object.
 * Pages belonging to the compressor are not on the paging queues,
 * nor are they counted as wired.
 *
 * Only the VM Compressor subsystem should use this.
 *
 *
 * <h2>How to look for addresses</h2>
 *
 * @const KMEM_LOMEM (alloc)
 * The physical memory allocated must be in the first 4G of memory,
 * in order to support hardware controllers incapable of generating DMAs
 * with more than 32bits of physical address.
 *
 * @const KMEM_LAST_FREE (alloc, suballoc)
 * When looking for space in the specified map,
 * start scanning for addresses from the end of the map
 * rather than the start.
 *
 * @const KMEM_DATA (alloc, suballoc)
 * The memory must be allocated from the "Data" range.
 *
 * <h2>Entry properties</h2>
 *
 * @const KMEM_PERMANENT (alloc, suballoc)
 * The entry is made permanent.
 *
 * In the kernel maps, permanent entries can never be deleted.
 * Calling @c kmem_free() on such a range will panic.
 *
 * In user maps, permanent entries will only be deleted
 * when the map is terminated.
 *
 * @const KMEM_GUARD_FIRST (alloc)
 * @const KMEM_GUARD_LAST (alloc)
 * Asks @c kmem_* to put a guard page at the beginning (resp. end)
 * of the allocation.
 *
 * The allocation size will not be extended to accommodate the guards,
 * and the client of this interface must take them into account.
 * Typically if a usable range of 3 pages is needed with both guards,
 * then 5 pages must be requested.
 *
 * Alignment constraints take guards into account (the alignment applies
 * to the address right after the first guard page).
 *
 * The returned address for the allocation will point at the entry start,
 * which is the address of the left guard page if any.
 *
 * @const KMEM_KSTACK (alloc)
 * This flag must be passed when the allocation is for kernel stacks.
 * This only has an effect on Intel.
 *
 * @const KMEM_NOENCRYPT (alloc)
 * Obsolete, will be repurposed soon.
 */
__options_decl(kmem_flags_t, uint32_t, {
    KMEM_NONE = 0x00000000,

    /* Call behavior */
    KMEM_NOFAIL = 0x00000001,
    KMEM_NOPAGEWAIT = 0x00000002,

    /* How the entry is populated */
    KMEM_VAONLY = 0x00000010,
    KMEM_PAGEABLE = 0x00000020,
    KMEM_ZERO = 0x00000040,

    /* VM object to use for the entry */
    KMEM_KOBJECT = 0x00000100,
    KMEM_COMPRESSOR = 0x00000200,

    /* How to look for addresses */
    KMEM_LOMEM = 0x00001000,
    KMEM_LAST_FREE = 0x00002000,
    KMEM_DATA = 0x00008000,

    /* Entry properties */
    KMEM_PERMANENT = 0x00010000,
    KMEM_GUARD_FIRST = 0x00020000,
    KMEM_GUARD_LAST = 0x00040000,
    KMEM_KSTACK = 0x00080000,
    KMEM_NOENCRYPT = 0x00100000,
    KMEM_ATOMIC = 0x40000000, /* temporary */
});


#pragma mark kmem range methods

extern struct kmem_range kmem_ranges[KMEM_RANGE_COUNT];
extern struct kmem_range kmem_large_ranges[KMEM_RANGE_COUNT];

__attribute__((overloadable))
extern bool kmem_range_contains(
    const struct kmem_range *r,
    vm_offset_t addr);

__attribute__((overloadable))
extern bool kmem_range_contains(
    const struct kmem_range *r,
    vm_offset_t addr,
    vm_offset_t size);

extern vm_size_t kmem_range_size(
    const struct kmem_range *r);

extern bool kmem_range_id_contains(
    kmem_range_id_t range_id,
    vm_map_offset_t addr,
    vm_map_size_t size);

extern kmem_range_id_t kmem_addr_get_range(
    vm_map_offset_t addr,
    vm_map_size_t size);
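
/*
 * Usage sketch (illustrative only): test whether an address, or an
 * [addr, addr + size) span, falls within a kmem range.
 *
 *    if (kmem_range_id_contains(range_id, addr, size)) {
 *        ... addr through addr + size is backed by that range ...
 *    }
 *
 * The overloaded kmem_range_contains() answers the same question for a
 * struct kmem_range directly:
 *
 *    const struct kmem_range *r = &kmem_ranges[range_id];
 *    bool one = kmem_range_contains(r, addr);
 *    bool span = kmem_range_contains(r, addr, size);
 */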

/**
 * @enum kmem_claims_flags_t
 *
 * @abstract
 * Set of flags used in the processing of kmem_range claims
 *
 * @discussion
 * These flags are used by the kmem subsystem while processing kmem_range
 * claims and are not explicitly passed by the caller registering the claim.
 *
 * @const KC_NO_ENTRY
 * A vm map entry should not be created for the respective claim.
 *
 * @const KC_NO_MOVE
 * The range shouldn't be moved once it has been placed as it has constraints.
 */
__options_decl(kmem_claims_flags_t, uint32_t, {
    KC_NONE = 0x00000000,
    KC_NO_ENTRY = 0x00000001,
    KC_NO_MOVE = 0x00000002,
});

/*
 * Security config that creates the data split in kernel_map
 */
#if !defined(__LP64__)
# define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT OFF
#else
# define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT ON
#endif

/*
 * Security config that creates the additional splits in non data part of
 * kernel_map
 */
#if KASAN || (__arm64__ && !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR))
# define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT OFF
#else
# define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT ON
#endif

#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__OFF() 0
#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON() 1
#define ZSECURITY_CONFIG2(v) ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__##v()
#define ZSECURITY_CONFIG1(v) ZSECURITY_CONFIG2(v)
#define ZSECURITY_CONFIG(opt) ZSECURITY_CONFIG1(ZSECURITY_CONFIG_##opt)
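
/*
 * The double expansion above makes ZSECURITY_CONFIG(opt) a compile-time
 * 0/1 while rejecting misspelled or undefined settings: pasting any value
 * other than ON or OFF produces a call to an undefined
 * ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__* macro and the build fails.
 * A worked expansion, on LP64:
 *
 *    ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
 *    -> ZSECURITY_CONFIG1(ZSECURITY_CONFIG_KERNEL_DATA_SPLIT)
 *    -> ZSECURITY_CONFIG2(ON)
 *    -> ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON()
 *    -> 1
 */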

struct kmem_range_startup_spec {
    const char *kc_name;
    struct kmem_range *kc_range;
    vm_map_size_t kc_size;
    vm_map_size_t (^kc_calculate_sz)(void);
    kmem_claims_flags_t kc_flags;
};

extern void kmem_range_startup_init(
    struct kmem_range_startup_spec *sp);

/*!
 * @macro KMEM_RANGE_REGISTER_*
 *
 * @abstract
 * Register a claim for kmem range or submap.
 *
 * @discussion
 * Claims are shuffled during startup to randomize the layout of the kernel map.
 * Temporary entries are created in place of the claims, therefore the caller
 * must provide the start of the assigned range as a hint and
 * @c VM_FLAGS_FIXED_RANGE_SUBALLOC to kmem_suballoc to replace the mapping.
 *
 * Min/max constraints can be provided in the range when the claim is
 * registered.
 *
 * This macro comes in 2 flavors:
 * - STATIC : When the size of the range/submap is known at compile time
 * - DYNAMIC: When the size of the range/submap needs to be computed
 *
 * @param name          the name of the claim
 * @param range         the assigned range for the claim
 * @param size          the size of submap/range (if known at compile time)
 * @param calculate_sz  a block that returns the computed size of submap/range
 */
#define KMEM_RANGE_REGISTER_STATIC(name, range, size) \
    static __startup_data struct kmem_range_startup_spec \
    __startup_kmem_range_spec_ ## name = { #name, range, size, NULL, KC_NONE}; \
    STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
        &__startup_kmem_range_spec_ ## name)

#define KMEM_RANGE_REGISTER_DYNAMIC(name, range, calculate_sz) \
    static __startup_data struct kmem_range_startup_spec \
    __startup_kmem_range_spec_ ## name = { #name, range, 0, calculate_sz, \
        KC_NONE}; \
    STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
        &__startup_kmem_range_spec_ ## name)
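
/*
 * Example registration (a hypothetical claim, for illustration only):
 *
 *    static struct kmem_range fictional_range;
 *
 *    KMEM_RANGE_REGISTER_STATIC(fictional, &fictional_range, 32 << 20);
 *
 * Once startup has shuffled and placed the claim, fictional_range holds
 * the assigned addresses, whose start can be passed as the hint, together
 * with VM_FLAGS_FIXED_RANGE_SUBALLOC, to kmem_suballoc().
 */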

#if XNU_KERNEL_PRIVATE
#if ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
#define VM_FLAGS_FIXED_RANGE_SUBALLOC (VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE)
#else /* ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#define VM_FLAGS_FIXED_RANGE_SUBALLOC (VM_FLAGS_ANYWHERE)
#endif /* !ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#endif /* XNU_KERNEL_PRIVATE */

__startup_func
extern uint16_t kmem_get_random16(
    uint16_t upper_limit);

__startup_func
extern void kmem_shuffle(
    uint16_t *shuffle_buf,
    uint16_t count);


#pragma mark kmem allocations

/*!
 * @typedef kma_flags_t
 *
 * @brief
 * Flags used by the @c kmem_alloc* family of functions.
 */
__options_decl(kma_flags_t, uint32_t, {
    KMA_NONE = KMEM_NONE,

    /* Call behavior */
    KMA_NOFAIL = KMEM_NOFAIL,
    KMA_NOPAGEWAIT = KMEM_NOPAGEWAIT,

    /* How the entry is populated */
    KMA_VAONLY = KMEM_VAONLY,
    KMA_PAGEABLE = KMEM_PAGEABLE,
    KMA_ZERO = KMEM_ZERO,

    /* VM object to use for the entry */
    KMA_KOBJECT = KMEM_KOBJECT,
    KMA_COMPRESSOR = KMEM_COMPRESSOR,

    /* How to look for addresses */
    KMA_LOMEM = KMEM_LOMEM,
    KMA_LAST_FREE = KMEM_LAST_FREE,
    KMA_DATA = KMEM_DATA,

    /* Entry properties */
    KMA_PERMANENT = KMEM_PERMANENT,
    KMA_GUARD_FIRST = KMEM_GUARD_FIRST,
    KMA_GUARD_LAST = KMEM_GUARD_LAST,
    KMA_KSTACK = KMEM_KSTACK,
    KMA_NOENCRYPT = KMEM_NOENCRYPT,
    KMA_ATOMIC = KMEM_ATOMIC,
});

#define KMEM_ALLOC_CONTIG_FLAGS ( \
    /* Call behavior */ \
    KMA_NOPAGEWAIT | \
    \
    /* How the entry is populated */ \
    KMA_ZERO | \
    \
    /* VM object to use for the entry */ \
    KMA_KOBJECT | \
    \
    /* How to look for addresses */ \
    KMA_LOMEM | \
    KMA_DATA | \
    \
    /* Entry properties */ \
    KMA_PERMANENT | \
    \
    KMA_NONE)



extern kern_return_t kernel_memory_allocate(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size,
    vm_offset_t mask,
    kma_flags_t flags,
    vm_tag_t tag);

static inline kern_return_t
kmem_alloc(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size,
    kma_flags_t flags,
    vm_tag_t tag)
{
    return kernel_memory_allocate(map, addrp, size, 0, flags, tag);
}
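
/*
 * Minimal usage sketch (illustrative; any VM_KERN_MEMORY_* tag works,
 * VM_KERN_MEMORY_DIAG is just an example): allocate one wired,
 * zero-filled kernel-object page, then free it.
 *
 *    vm_offset_t addr;
 *    kern_return_t kr;
 *
 *    kr = kmem_alloc(kernel_map, &addr, PAGE_SIZE,
 *        KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);
 *    if (kr == KERN_SUCCESS) {
 *        ...
 *        kmem_free(kernel_map, addr, PAGE_SIZE);
 *    }
 */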

extern kern_return_t kmem_alloc_contig(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size,
    vm_offset_t mask,
    ppnum_t max_pnum,
    ppnum_t pnum_mask,
    kma_flags_t flags,
    vm_tag_t tag)
__attribute__((diagnose_if(flags & ~KMEM_ALLOC_CONTIG_FLAGS,
    "invalid alloc_contig flags passed", "error")));


/*!
 * @typedef kms_flags_t
 *
 * @brief
 * Flags used by @c kmem_suballoc.
 */
__options_decl(kms_flags_t, uint32_t, {
    KMS_NONE = KMEM_NONE,

    /* Call behavior */
    KMS_NOFAIL = KMEM_NOFAIL,

    /* How to look for addresses */
    KMS_LAST_FREE = KMEM_LAST_FREE,
    KMS_DATA = KMEM_DATA,

    /* Entry properties */
    KMS_PERMANENT = KMEM_PERMANENT,
});

/*!
 * @function kmem_suballoc()
 *
 * @brief
 * Create a kernel submap, in an atomic entry guarded with KMEM_GUARD_SUBMAP.
 *
 * @param parent        map to allocate into, must be a kernel map.
 * @param addr          (in/out) the address for the map (see vm_map_enter)
 * @param size          the size of the entry to allocate, must not be 0.
 * @param vmc_options   the map creation options
 * @param vm_flags      a set of @c VM_FLAGS_* flags
 * @param flags         a set of @c KMS_* flags, (@see @c kmem_flags_t)
 * @param tag           the tag for this submap's entry.
 */
extern kmem_return_t kmem_suballoc(
    vm_map_t parent,
    vm_offset_t *addr,
    vm_size_t size,
    vm_map_create_options_t vmc_options,
    int vm_flags,
    kms_flags_t flags,
    vm_tag_t tag);
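
/*
 * Usage sketch (illustrative; the flag and option choices are assumptions):
 * carve a permanent submap out of the "Data" range and check the paired
 * return described by kmem_return_t above.
 *
 *    vm_offset_t addr = 0;
 *    kmem_return_t kmr;
 *
 *    kmr = kmem_suballoc(kernel_map, &addr, size,
 *        VM_MAP_CREATE_DEFAULT, VM_FLAGS_ANYWHERE,
 *        KMS_PERMANENT | KMS_DATA, VM_KERN_MEMORY_DIAG);
 *    if (kmr.kmr_return == KERN_SUCCESS) {
 *        ... use kmr.kmr_submap ...
 *    }
 */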


#pragma mark kmem reallocation

extern kern_return_t kmem_realloc(
    vm_map_t map,
    vm_offset_t oldaddr,
    vm_size_t oldsize,
    vm_offset_t *newaddrp,
    vm_size_t newsize,
    vm_tag_t tag);

extern void kmem_realloc_down(
    vm_map_t map,
    vm_offset_t addr,
    vm_size_t oldsize,
    vm_size_t newsize);

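/*
 * Reallocation sketch (illustrative; behavior inferred from the prototypes
 * above, so treat the details as assumptions): kmem_realloc() may return a
 * different address through newaddrp, while kmem_realloc_down() shrinks an
 * allocation in place.
 *
 *    vm_offset_t newaddr;
 *    kern_return_t kr;
 *
 *    kr = kmem_realloc(map, oldaddr, oldsize, &newaddr, newsize, tag);
 *
 *    kmem_realloc_down(map, addr, oldsize, newsize);
 */
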
__exported
extern void kmem_free(
    vm_map_t map,
    vm_offset_t addr,
    vm_size_t size);

#pragma mark kmem population

extern void kernel_memory_populate_object_and_unlock(
    vm_object_t object, /* must be locked */
    vm_address_t addr,
    vm_offset_t offset,
    vm_size_t size,
    struct vm_page *page_list,
    kma_flags_t flags,
    vm_tag_t tag,
    vm_prot_t prot);

extern kern_return_t kernel_memory_populate(
    vm_offset_t addr,
    vm_size_t size,
    kma_flags_t flags,
    vm_tag_t tag);

extern void kernel_memory_depopulate(
    vm_offset_t addr,
    vm_size_t size,
    kma_flags_t flags,
    vm_tag_t tag);

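/*
 * Population sketch (an illustrative pairing, assuming addr was obtained
 * with kmem_alloc(..., KMA_KOBJECT | KMA_VAONLY, ...)): materialize pages
 * for a VA-only range, then remove them again.
 *
 *    kr = kernel_memory_populate(addr, size, KMA_KOBJECT, tag);
 *    ...
 *    kernel_memory_depopulate(addr, size, KMA_KOBJECT, tag);
 */
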
#pragma GCC visibility pop
#elif KERNEL_PRIVATE /* XNU_KERNEL_PRIVATE */

extern kern_return_t kmem_alloc(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size);

extern kern_return_t kmem_alloc_pageable(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size);

extern kern_return_t kmem_alloc_kobject(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size);

extern void kmem_free(
    vm_map_t map,
    vm_offset_t addr,
    vm_size_t size);

#endif /* KERNEL_PRIVATE */

#pragma mark - kernel address obfuscation / hashing for logging

extern vm_offset_t vm_kernel_addrperm_ext;

extern void vm_kernel_addrhide(
    vm_offset_t addr,
    vm_offset_t *hide_addr);

extern void vm_kernel_addrperm_external(
    vm_offset_t addr,
    vm_offset_t *perm_addr);

extern void vm_kernel_unslide_or_perm_external(
    vm_offset_t addr,
    vm_offset_t *up_addr);

#if !XNU_KERNEL_PRIVATE

extern vm_offset_t vm_kernel_addrhash(
    vm_offset_t addr);

#else /* XNU_KERNEL_PRIVATE */
#pragma GCC visibility push(hidden)

extern uint64_t vm_kernel_addrhash_salt;
extern uint64_t vm_kernel_addrhash_salt_ext;

extern vm_offset_t vm_kernel_addrhash_internal(
    vm_offset_t addr,
    uint64_t salt);

static inline vm_offset_t
vm_kernel_addrhash(vm_offset_t addr)
{
    return vm_kernel_addrhash_internal(addr, vm_kernel_addrhash_salt);
}

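/*
 * Logging sketch (illustrative): hash a kernel pointer before it is
 * emitted to any log visible outside the kernel, instead of printing
 * the raw address.
 *
 *    printf("object at 0x%lx\n", (unsigned long)vm_kernel_addrhash(addr));
 */
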
#pragma mark - kernel variants of the Mach VM interfaces

extern kern_return_t mach_vm_allocate_kernel(
    vm_map_t map,
    mach_vm_offset_t *addr,
    mach_vm_size_t size,
    int flags,
    vm_tag_t tag);

extern kern_return_t mach_vm_map_kernel(
    vm_map_t target_map,
    mach_vm_offset_t *address,
    mach_vm_size_t initial_size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);


extern kern_return_t vm_map_kernel(
    vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t mach_vm_remap_kernel(
    vm_map_t target_map,
    mach_vm_offset_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_tag_t tag,
    vm_map_t src_map,
    mach_vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_remap_kernel(
    vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    vm_tag_t tag,
    vm_map_t src_map,
    vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);

extern kern_return_t vm_map_64_kernel(
    vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    vm_map_kernel_flags_t vmk_flags,
    vm_tag_t tag,
    ipc_port_t port,
    vm_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

extern kern_return_t mach_vm_wire_kernel(
    host_priv_t host_priv,
    vm_map_t map,
    mach_vm_offset_t start,
    mach_vm_size_t size,
    vm_prot_t access,
    vm_tag_t tag);

extern kern_return_t vm_map_wire_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t caller_prot,
    vm_tag_t tag,
    boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
    vm_map_t map,
    vm_map_offset_t start,
    vm_prot_t caller_prot,
    vm_tag_t tag,
    boolean_t user_wire,
    ppnum_t *physpage_p);

extern kern_return_t memory_object_iopl_request(
    ipc_port_t port,
    memory_object_offset_t offset,
    upl_size_t *upl_size,
    upl_t *upl_ptr,
    upl_page_info_array_t user_page_list,
    unsigned int *page_list_count,
    upl_control_flags_t *flags,
    vm_tag_t tag);

#ifdef MACH_KERNEL_PRIVATE

extern kern_return_t copyinmap(
    vm_map_t map,
    vm_map_offset_t fromaddr,
    void *todata,
    vm_size_t length);

extern kern_return_t copyoutmap(
    vm_map_t map,
    void *fromdata,
    vm_map_offset_t toaddr,
    vm_size_t length);

extern kern_return_t copyoutmap_atomic32(
    vm_map_t map,
    uint32_t value,
    vm_map_offset_t toaddr);

extern kern_return_t copyoutmap_atomic64(
    vm_map_t map,
    uint64_t value,
    vm_map_offset_t toaddr);

#endif /* MACH_KERNEL_PRIVATE */
#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#pragma mark - unsorted interfaces

#ifdef XNU_KERNEL_PRIVATE
typedef struct vm_allocation_site kern_allocation_name;
typedef kern_allocation_name * kern_allocation_name_t;
#else /* XNU_KERNEL_PRIVATE */
struct kern_allocation_name;
typedef struct kern_allocation_name * kern_allocation_name_t;
#endif /* !XNU_KERNEL_PRIVATE */

extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs);
extern void kern_allocation_name_release(kern_allocation_name_t allocation);
extern const char * kern_allocation_get_name(kern_allocation_name_t allocation);

#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta);
extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta);
extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation);

struct mach_memory_info;
extern kern_return_t vm_page_diagnose(
    struct mach_memory_info *info,
    unsigned int num_info,
    uint64_t zones_collectable_bytes);

extern uint32_t vm_page_diagnose_estimate(void);

extern void vm_init_before_launchd(void);

typedef enum {
    PMAP_FEAT_UEXEC = 1
} pmap_feature_flags_t;

#if defined(__x86_64__)
extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);
#endif

#if DEBUG || DEVELOPMENT

extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size);

#endif /* DEBUG || DEVELOPMENT */

#if HIBERNATION
extern void hibernate_rebuild_vm_structs(void);
#endif /* HIBERNATION */

extern vm_tag_t vm_tag_bt(void);

extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site);

extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP);

extern void vm_tag_update_size(vm_tag_t tag, int64_t size);

#if VM_TAG_SIZECLASSES

extern void vm_allocation_zones_init(void);
extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags);
extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta);

#endif /* VM_TAG_SIZECLASSES */

extern vm_tag_t vm_tag_bt_debug(void);

extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen);

extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);

extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_KERN_H_ */