/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management definitions.
 */

#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/vm_types.h>
#ifdef XNU_KERNEL_PRIVATE
#include <kern/locks.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef KERNEL_PRIVATE
extern vm_map_t kernel_map;
extern vm_map_t ipc_kernel_map;
extern vm_map_t g_kext_map;
#endif /* KERNEL_PRIVATE */

#pragma mark - the kmem subsystem
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*
 * "kmem" is a set of methods that provide interfaces suitable
 * to allocate memory from the VM in the kernel map or submaps.
 *
 * It provides leaner alternatives to some of the VM functions,
 * closer to a typical allocator.
 */

struct vm_page;
struct vm_map_entry;

/*!
 * @typedef
 *
 * @brief
 * Pair of a return code and size/address/... used by kmem interfaces.
 *
 * @discussion
 * Returning a pair of integers lets the compiler pass everything back
 * through registers rather than spilling results to the stack,
 * which yields significantly better codegen.
 *
 * If @c kmr_return is not @c KERN_SUCCESS, then the other field
 * of the union is always supposed to be 0.
 */
typedef struct {
	kern_return_t kmr_return;
	union {
		vm_address_t kmr_address;
		vm_size_t kmr_size;
		void *kmr_ptr;
		vm_map_t kmr_submap;
	};
} kmem_return_t;
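
/*
 * Illustrative sketch (not part of this header): consuming a
 * kmem_return_t. Because the union field is guaranteed to be 0 on
 * failure, a caller can safely propagate either member:
 *
 *	kmem_return_t kmr = kmem_alloc_guard(map, size, 0, KMA_ZERO, guard);
 *	if (kmr.kmr_return != KERN_SUCCESS) {
 *		return kmr.kmr_return;           // kmr.kmr_address is 0 here
 *	}
 *	consume_buffer(kmr.kmr_address);         // hypothetical consumer
 */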
123
124 /*!
125 * @typedef kmem_guard_t
126 *
127 * @brief
128 * KMEM guards are used by the kmem_* subsystem to secure atomic allocations.
129 *
130 * @discussion
131 * This parameter is used to transmit the tag for the allocation.
132 *
133 * If @c kmg_atomic is set, then the other fields are also taken into account
134 * and will affect the allocation behavior for this allocation.
135 *
136 * @field kmg_tag The VM_KERN_MEMORY_* tag for this entry.
137 * @field kmg_type_hash Some hash related to the type of the allocation.
138 * @field kmg_atomic Whether the entry is atomic.
139 * @field kmg_submap Whether the entry is for a submap.
140 * @field kmg_context A use defined 30 bits that will be stored
141 * on the entry on allocation and checked
142 * on other operations.
143 */
144 typedef struct {
145 uint16_t kmg_tag;
146 uint16_t kmg_type_hash;
147 uint32_t kmg_atomic : 1;
148 uint32_t kmg_submap : 1;
149 uint32_t kmg_context : 30;
150 } kmem_guard_t;
151 #define KMEM_GUARD_NONE (kmem_guard_t){ }
152 #define KMEM_GUARD_SUBMAP (kmem_guard_t){ .kmg_atomic = 0, .kmg_submap = 1 }
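
/*
 * Illustrative sketch: building a guard for an atomic allocation.
 * The tag is a VM_KERN_MEMORY_* value; the context value here is
 * hypothetical and is simply re-checked on free/realloc:
 *
 *	kmem_guard_t guard = {
 *		.kmg_tag     = VM_KERN_MEMORY_KALLOC,
 *		.kmg_atomic  = 1,
 *		.kmg_context = 0x1234,
 *	};
 */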

/*!
 * @typedef
 *
 * @brief
 * Pair of a min/max address used to denote a memory region.
 */
typedef struct kmem_range {
	vm_offset_t min_address;
	vm_offset_t max_address;
} __attribute__((aligned(2 * sizeof(vm_offset_t)))) * kmem_range_t;

/*!
 * @typedef kmem_flags_t
 *
 * @brief
 * Sets of flags taken by several of the @c kmem_* family of functions.
 *
 * @discussion
 * This type is not used directly by any function; it is an underlying raw
 * type that is re-vended under different namespaces for each @c kmem_*
 * interface.
 *
 * - @c kmem_alloc    uses @c kma_flags_t / @c KMA_* namespaced values.
 * - @c kmem_suballoc uses @c kms_flags_t / @c KMS_* namespaced values.
 * - @c kmem_realloc  uses @c kmr_flags_t / @c KMR_* namespaced values.
 * - @c kmem_free     uses @c kmf_flags_t / @c KMF_* namespaced values.
 *
 *
 * <h2>Call behavior</h2>
 *
 * @const KMEM_NONE (all)
 * Pass this when no special options are to be used.
 *
 * @const KMEM_NOFAIL (alloc, suballoc)
 * When this flag is passed, any allocation failure results in a panic().
 * Using this flag should really be limited to cases when failure is not
 * recoverable, and possibly to early boot only.
 *
 * @const KMEM_NOPAGEWAIT (alloc, realloc)
 * Pass this flag if the system should not wait in VM_PAGE_WAIT().
 *
 * @const KMEM_FREEOLD (realloc)
 * Pass this flag if @c kmem_realloc should free the old mapping
 * (when the address changed) as part of the call.
 *
 * @const KMEM_REALLOCF (realloc)
 * Similar to @c Z_REALLOCF: if the call is failing,
 * then free the old allocation too.
 *
 *
 * <h2>How the entry is populated</h2>
 *
 * @const KMEM_VAONLY (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_VAONLY will cause the range to still be wired,
 * but no page is actually mapped.
 *
 * @const KMEM_PAGEABLE (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_PAGEABLE makes the entry non-wired, and pages will be
 * added to the entry as it faults.
 *
 * @const KMEM_ZERO (alloc, realloc)
 * Any new page added is zeroed.
 *
 *
 * <h2>VM object to use for the entry</h2>
 *
 * @const KMEM_KOBJECT (alloc, realloc)
 * The entry will be made for the @c kernel_object.
 *
 * Note that the @c kernel_object is just a "collection of pages".
 * Pages in that object can't be remapped or be present in several VM maps
 * like traditional objects.
 *
 * If neither @c KMEM_KOBJECT nor @c KMEM_COMPRESSOR is passed,
 * then a new, fresh VM object will be made for this allocation.
 * This is expensive and should be limited to allocations that
 * need the features associated with a VM object.
 *
 * @const KMEM_COMPRESSOR (alloc)
 * The entry is allocated for the @c compressor_object.
 * Pages belonging to the compressor are not on the paging queues,
 * nor are they counted as wired.
 *
 * Only the VM Compressor subsystem should use this.
 *
 *
 * <h2>How to look for addresses</h2>
 *
 * @const KMEM_LOMEM (alloc, realloc)
 * The physical memory allocated must be in the first 4G of memory,
 * in order to support hardware controllers incapable of generating DMAs
 * with more than 32 bits of physical address.
 *
 * @const KMEM_LAST_FREE (alloc, suballoc, realloc)
 * When looking for space in the specified map,
 * start scanning for addresses from the end of the map
 * rather than the start.
 *
 * @const KMEM_DATA (alloc, suballoc, realloc)
 * The memory must be allocated from the "Data" range.
 *
 * @const KMEM_GUESS_SIZE (free)
 * When freeing an atomic entry (requires a valid kmem guard),
 * then look up the entry size because the caller didn't
 * preserve it.
 *
 * This flag is only here in order to support kfree_data_addr(),
 * and shall not be used by any other clients.
 *
 *
 * <h2>Entry properties</h2>
 *
 * @const KMEM_PERMANENT (alloc, suballoc)
 * The entry is made permanent.
 *
 * In the kernel maps, permanent entries can never be deleted.
 * Calling @c kmem_free() on such a range will panic.
 *
 * In user maps, permanent entries will only be deleted
 * when the map is terminated.
 *
 * @const KMEM_GUARD_FIRST (alloc, realloc)
 * @const KMEM_GUARD_LAST (alloc, realloc)
 * Asks @c kmem_* to put a guard page at the beginning (resp. end)
 * of the allocation.
 *
 * The allocation size will not be extended to accommodate the guards,
 * and the client of this interface must take them into account.
 * Typically, if a usable range of 3 pages is needed with both guards,
 * then 5 pages must be requested.
 *
 * Alignment constraints take guards into account (the alignment applies
 * to the address right after the first guard page).
 *
 * The address returned for the allocation will point at the entry start,
 * which is the address of the left guard page if any.
 *
 * Note that if @c kmem_realloc* is called, the *exact* same
 * guard flags must be passed for this entry. The KMEM subsystem
 * is generally oblivious to guards, and passing inconsistent flags
 * will cause pages to be moved incorrectly.
 *
 * @const KMEM_KSTACK (alloc)
 * This flag must be passed when the allocation is for kernel stacks.
 * This only has an effect on Intel.
 *
 * @const KMEM_NOENCRYPT (alloc)
 * Obsolete, will be repurposed soon.
 */
__options_decl(kmem_flags_t, uint32_t, {
	KMEM_NONE        = 0x00000000,

	/* Call behavior */
	KMEM_NOFAIL      = 0x00000001,
	KMEM_NOPAGEWAIT  = 0x00000002,
	KMEM_FREEOLD     = 0x00000004,
	KMEM_REALLOCF    = 0x00000008,

	/* How the entry is populated */
	KMEM_VAONLY      = 0x00000010,
	KMEM_PAGEABLE    = 0x00000020,
	KMEM_ZERO        = 0x00000040,

	/* VM object to use for the entry */
	KMEM_KOBJECT     = 0x00000100,
	KMEM_COMPRESSOR  = 0x00000200,

	/* How to look for addresses */
	KMEM_LOMEM       = 0x00001000,
	KMEM_LAST_FREE   = 0x00002000,
	KMEM_GUESS_SIZE  = 0x00004000,
	KMEM_DATA        = 0x00008000,

	/* Entry properties */
	KMEM_PERMANENT   = 0x00010000,
	KMEM_GUARD_FIRST = 0x00020000,
	KMEM_GUARD_LAST  = 0x00040000,
	KMEM_KSTACK      = 0x00080000,
	KMEM_NOENCRYPT   = 0x00100000,
});
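
/*
 * Illustrative sketch: the raw flags are re-vended per interface, so a
 * caller of the kmem_alloc* functions combines the KMA_* spellings
 * defined further below, e.g. a wired, zero-filled kernel-object
 * allocation that may not fail:
 *
 *	kma_flags_t flags = KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO;
 */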


#pragma mark kmem range methods

extern struct kmem_range kmem_ranges[KMEM_RANGE_COUNT];
extern struct kmem_range kmem_large_ranges[KMEM_RANGE_COUNT];
#define KMEM_RANGE_MASK      0x3fff
#define KMEM_HASH_SET        0x4000
#define KMEM_DIRECTION_MASK  0x8000

__attribute__((overloadable))
extern bool kmem_range_contains(
	const struct kmem_range *r,
	vm_offset_t addr);

__attribute__((overloadable))
extern bool kmem_range_contains(
	const struct kmem_range *r,
	vm_offset_t addr,
	vm_offset_t size);

extern vm_size_t kmem_range_size(
	const struct kmem_range *r);

extern bool kmem_range_id_contains(
	kmem_range_id_t range_id,
	vm_map_offset_t addr,
	vm_map_size_t size);

extern kmem_range_id_t kmem_addr_get_range(
	vm_map_offset_t addr,
	vm_map_size_t size);

extern kmem_range_id_t kmem_adjust_range_id(
	uint32_t hash);
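
/*
 * Illustrative sketch: the range helpers above, assuming @c r points
 * into kmem_ranges[] and addr/size describe a candidate allocation.
 * The two-argument overload checks a single address, the three-argument
 * one checks the whole [addr, addr + size) span:
 *
 *	if (kmem_range_contains(r, addr, size)) {
 *		vm_size_t span = kmem_range_size(r);
 *		// ... addr lies entirely within the range of `span` bytes
 *	}
 */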


/**
 * @enum kmem_claims_flags_t
 *
 * @abstract
 * Set of flags used in the processing of kmem_range claims.
 *
 * @discussion
 * These flags are used by the kmem subsystem while processing kmem_range
 * claims and are not explicitly passed by the caller registering the claim.
 *
 * @const KC_NO_ENTRY
 * A vm map entry should not be created for the respective claim.
 *
 * @const KC_NO_MOVE
 * The range shouldn't be moved once it has been placed, as it has constraints.
 */
__options_decl(kmem_claims_flags_t, uint32_t, {
	KC_NONE     = 0x00000000,
	KC_NO_ENTRY = 0x00000001,
	KC_NO_MOVE  = 0x00000002,
});

/*
 * Security config that creates the data split in kernel_map
 */
#if !defined(__LP64__)
#   define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT  OFF
#else
#   define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT  ON
#endif

/*
 * Security config that creates the additional splits in the non-data part of
 * kernel_map
 */
#if KASAN || !defined(__LP64__) || (__arm64__ && !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR))
#   define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT   OFF
#else
#   define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT   ON
#endif

#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__OFF() 0
#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON()  1
#define ZSECURITY_CONFIG2(v)    ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__##v()
#define ZSECURITY_CONFIG1(v)    ZSECURITY_CONFIG2(v)
#define ZSECURITY_CONFIG(opt)   ZSECURITY_CONFIG1(ZSECURITY_CONFIG_##opt)
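
/*
 * Illustrative sketch: ZSECURITY_CONFIG() expands the ON/OFF settings
 * above to 1/0 at preprocessing time, so they can gate code with #if
 * (as VM_FLAGS_FIXED_RANGE_SUBALLOC does below):
 *
 *	#if ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
 *	// data allocations are split out of the rest of kernel_map
 *	#endif
 */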

struct kmem_range_startup_spec {
	const char *kc_name;
	struct kmem_range *kc_range;
	vm_map_size_t kc_size;
	vm_map_size_t (^kc_calculate_sz)(void);
	kmem_claims_flags_t kc_flags;
};

extern void kmem_range_startup_init(
	struct kmem_range_startup_spec *sp);

/*!
 * @macro KMEM_RANGE_REGISTER_*
 *
 * @abstract
 * Register a claim for a kmem range or submap.
 *
 * @discussion
 * Claims are shuffled during startup to randomize the layout of the kernel map.
 * Temporary entries are created in place of the claims, therefore the caller
 * must provide the start of the assigned range as a hint and
 * @c VM_FLAGS_FIXED_RANGE_SUBALLOC to kmem_suballoc to replace the mapping.
 *
 * Min/max constraints can be provided in the range when the claim is
 * registered.
 *
 * This macro comes in 2 flavors (a registration sketch follows the
 * definitions below):
 * - STATIC : When the size of the range/submap is known at compile time
 * - DYNAMIC: When the size of the range/submap needs to be computed
 *
 * @param name          the name of the claim
 * @param range         the assigned range for the claim
 * @param size          the size of submap/range (if known at compile time)
 * @param calculate_sz  a block that returns the computed size of submap/range
 */
#define KMEM_RANGE_REGISTER_STATIC(name, range, size) \
	static __startup_data struct kmem_range_startup_spec \
	__startup_kmem_range_spec_ ## name = { #name, range, size, NULL, KC_NONE}; \
	STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
	    &__startup_kmem_range_spec_ ## name)

#define KMEM_RANGE_REGISTER_DYNAMIC(name, range, calculate_sz) \
	static __startup_data struct kmem_range_startup_spec \
	__startup_kmem_range_spec_ ## name = { #name, range, 0, calculate_sz, \
	    KC_NONE}; \
	STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
	    &__startup_kmem_range_spec_ ## name)
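
/*
 * Illustrative sketch (hypothetical claim): registering a range whose
 * size is known at compile time. After startup shuffling, the caller
 * would use example_range.min_address as the hint, together with
 * VM_FLAGS_FIXED_RANGE_SUBALLOC, when calling kmem_suballoc():
 *
 *	static struct kmem_range example_range;   // hypothetical
 *	KMEM_RANGE_REGISTER_STATIC(example, &example_range, 32 << 20);
 */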

#if XNU_KERNEL_PRIVATE
#if ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
#define VM_FLAGS_FIXED_RANGE_SUBALLOC (VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE)
#else /* ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#define VM_FLAGS_FIXED_RANGE_SUBALLOC (VM_FLAGS_ANYWHERE)
#endif /* !ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#endif /* XNU_KERNEL_PRIVATE */

__startup_func
extern uint16_t kmem_get_random16(
	uint16_t upper_limit);

__startup_func
extern void kmem_shuffle(
	uint16_t *shuffle_buf,
	uint16_t count);


#pragma mark kmem entry parameters

/*!
 * @function kmem_entry_validate_guard()
 *
 * @brief
 * Validates that the entry matches the input parameters, and panics otherwise.
 *
 * @discussion
 * If the guard is zero (@c KMEM_GUARD_NONE),
 * then the entry must be non-atomic.
 *
 * The guard tag is not used for validation as the VM subsystems
 * (particularly in IOKit) might decide to substitute it in ways
 * that are difficult to predict for the programmer.
 *
 * @param entry         the entry to validate
 * @param addr          the supposed start address
 * @param size          the supposed size of the entry
 * @param guard         the guard to use to "authenticate" the allocation.
 */
extern void kmem_entry_validate_guard(
	vm_map_t map,
	struct vm_map_entry *entry,
	vm_offset_t addr,
	vm_size_t size,
	kmem_guard_t guard);

/*!
 * @function kmem_size_guard()
 *
 * @brief
 * Returns the size of an atomic allocation made in the specified map,
 * according to the guard.
 *
 * @param map           a kernel map to look the entry up in.
 * @param addr          the kernel address to look up.
 * @param guard         the guard to use to "authenticate" the allocation.
 */
extern vm_size_t kmem_size_guard(
	vm_map_t map,
	vm_offset_t addr,
	kmem_guard_t guard);

#pragma mark kmem allocations

/*!
 * @typedef kma_flags_t
 *
 * @brief
 * Flags used by the @c kmem_alloc* family of functions.
 */
__options_decl(kma_flags_t, uint32_t, {
	KMA_NONE        = KMEM_NONE,

	/* Call behavior */
	KMA_NOFAIL      = KMEM_NOFAIL,
	KMA_NOPAGEWAIT  = KMEM_NOPAGEWAIT,

	/* How the entry is populated */
	KMA_VAONLY      = KMEM_VAONLY,
	KMA_PAGEABLE    = KMEM_PAGEABLE,
	KMA_ZERO        = KMEM_ZERO,

	/* VM object to use for the entry */
	KMA_KOBJECT     = KMEM_KOBJECT,
	KMA_COMPRESSOR  = KMEM_COMPRESSOR,

	/* How to look for addresses */
	KMA_LOMEM       = KMEM_LOMEM,
	KMA_LAST_FREE   = KMEM_LAST_FREE,
	KMA_DATA        = KMEM_DATA,

	/* Entry properties */
	KMA_PERMANENT   = KMEM_PERMANENT,
	KMA_GUARD_FIRST = KMEM_GUARD_FIRST,
	KMA_GUARD_LAST  = KMEM_GUARD_LAST,
	KMA_KSTACK      = KMEM_KSTACK,
	KMA_NOENCRYPT   = KMEM_NOENCRYPT,
});

#define KMEM_ALLOC_CONTIG_FLAGS ( \
	/* Call behavior */ \
	KMA_NOPAGEWAIT | \
	\
	/* How the entry is populated */ \
	KMA_ZERO | \
	\
	/* VM object to use for the entry */ \
	KMA_KOBJECT | \
	\
	/* How to look for addresses */ \
	KMA_LOMEM | \
	KMA_DATA | \
	\
	/* Entry properties */ \
	KMA_PERMANENT | \
	\
	KMA_NONE)


/*!
 * @function kmem_alloc_guard()
 *
 * @brief
 * Master entry point for allocating kernel memory.
 *
 * @param map           map to allocate into, must be a kernel map.
 * @param size          the size of the entry to allocate, must not be 0.
 * @param mask          an alignment mask that the returned allocation
 *                      will be aligned to (ignoring guards, see @const
 *                      KMEM_GUARD_FIRST).
 * @param flags         a set of @c KMA_* flags (@see @c kmem_flags_t)
 * @param guard         how to guard the allocation.
 *
 * @returns
 * - the non-zero address of the allocation on success in @c kmr_address.
 * - @c KERN_NO_SPACE if the target map is out of address space.
 * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages.
 */
extern kmem_return_t kmem_alloc_guard(
	vm_map_t map,
	vm_size_t size,
	vm_offset_t mask,
	kma_flags_t flags,
	kmem_guard_t guard) __result_use_check;

static inline kern_return_t
kernel_memory_allocate(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size,
	vm_offset_t mask,
	kma_flags_t flags,
	vm_tag_t tag)
{
	kmem_guard_t guard = {
		.kmg_tag = tag,
	};
	kmem_return_t kmr;

	kmr = kmem_alloc_guard(map, size, mask, flags, guard);
	if (kmr.kmr_return == KERN_SUCCESS) {
		__builtin_assume(kmr.kmr_address != 0);
	} else {
		__builtin_assume(kmr.kmr_address == 0);
	}
	*addrp = kmr.kmr_address;
	return kmr.kmr_return;
}

static inline kern_return_t
kmem_alloc(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size,
	kma_flags_t flags,
	vm_tag_t tag)
{
	return kernel_memory_allocate(map, addrp, size, 0, flags, tag);
}
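
/*
 * Illustrative sketch: the classic allocate/free pairing through the
 * tag-based wrapper above (the size and tag choices are hypothetical):
 *
 *	vm_offset_t addr;
 *	kern_return_t kr;
 *
 *	kr = kmem_alloc(kernel_map, &addr, PAGE_SIZE,
 *	    KMA_ZERO | KMA_DATA, VM_KERN_MEMORY_DIAG);
 *	if (kr == KERN_SUCCESS) {
 *		// ... use the zero-filled page ...
 *		kmem_free(kernel_map, addr, PAGE_SIZE);
 *	}
 */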

extern kern_return_t kmem_alloc_contig(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size,
	vm_offset_t mask,
	ppnum_t max_pnum,
	ppnum_t pnum_mask,
	kma_flags_t flags,
	vm_tag_t tag)
__attribute__((diagnose_if(flags & ~KMEM_ALLOC_CONTIG_FLAGS,
    "invalid alloc_contig flags passed", "error")));


/*!
 * @typedef kms_flags_t
 *
 * @brief
 * Flags used by @c kmem_suballoc.
 */
__options_decl(kms_flags_t, uint32_t, {
	KMS_NONE      = KMEM_NONE,

	/* Call behavior */
	KMS_NOFAIL    = KMEM_NOFAIL,

	/* How to look for addresses */
	KMS_LAST_FREE = KMEM_LAST_FREE,
	KMS_DATA      = KMEM_DATA,

	/* Entry properties */
	KMS_PERMANENT = KMEM_PERMANENT,
});

/*!
 * @function kmem_suballoc()
 *
 * @brief
 * Create a kernel submap, in an atomic entry guarded with KMEM_GUARD_SUBMAP.
 *
 * @param parent        map to allocate into, must be a kernel map.
 * @param addr          (in/out) the address for the map (see vm_map_enter)
 * @param size          the size of the entry to allocate, must not be 0.
 * @param vmc_options   the map creation options
 * @param vm_flags      a set of @c VM_FLAGS_* flags
 * @param flags         a set of @c KMS_* flags (@see @c kmem_flags_t)
 * @param tag           the tag for this submap's entry.
 */
extern kmem_return_t kmem_suballoc(
	vm_map_t parent,
	vm_offset_t *addr,
	vm_size_t size,
	vm_map_create_options_t vmc_options,
	int vm_flags,
	kms_flags_t flags,
	vm_tag_t tag);
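
/*
 * Illustrative sketch: carving a submap out of kernel_map. The size,
 * creation options and tag are hypothetical choices for the example:
 *
 *	vm_offset_t submap_start = 0;
 *	kmem_return_t kmr;
 *
 *	kmr = kmem_suballoc(kernel_map, &submap_start, 64 << 20,
 *	    VM_MAP_CREATE_DEFAULT, VM_FLAGS_ANYWHERE,
 *	    KMS_PERMANENT | KMS_DATA, VM_KERN_MEMORY_IOKIT);
 *	if (kmr.kmr_return == KERN_SUCCESS) {
 *		vm_map_t submap = kmr.kmr_submap;
 *		// ... allocate from submap ...
 *	}
 */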


#pragma mark kmem reallocation

/*!
 * @typedef kmr_flags_t
 *
 * @brief
 * Flags used by the @c kmem_realloc* family of functions.
 */
__options_decl(kmr_flags_t, uint32_t, {
	KMR_NONE        = KMEM_NONE,

	/* Call behavior */
	KMR_NOPAGEWAIT  = KMEM_NOPAGEWAIT,
	KMR_FREEOLD     = KMEM_FREEOLD,
	KMR_REALLOCF    = KMEM_REALLOCF,

	/* How the entry is populated */
	KMR_ZERO        = KMEM_ZERO,

	/* VM object to use for the entry */
	KMR_KOBJECT     = KMEM_KOBJECT,

	/* How to look for addresses */
	KMR_LOMEM       = KMEM_LOMEM,
	KMR_LAST_FREE   = KMEM_LAST_FREE,
	KMR_DATA        = KMEM_DATA,

	/* Entry properties */
	KMR_GUARD_FIRST = KMEM_GUARD_FIRST,
	KMR_GUARD_LAST  = KMEM_GUARD_LAST,
});

#define KMEM_REALLOC_FLAGS_VALID(flags) \
	(((flags) & KMR_KOBJECT) == 0 || ((flags) & KMR_FREEOLD))

/*!
 * @function kmem_realloc_guard()
 *
 * @brief
 * Reallocates memory allocated with kmem_alloc_guard().
 *
 * @discussion
 * @c kmem_realloc_guard() either mandates a guard with atomicity set,
 * or must use KMR_DATA (this is not an implementation limitation
 * but a security policy).
 *
 * If kmem_realloc_guard() is called for the kernel object
 * (with @c KMR_KOBJECT), then the use of @c KMR_FREEOLD is mandatory.
 *
 * When @c KMR_FREEOLD isn't used, if the allocation was relocated
 * as opposed to being extended or truncated in place, the caller
 * must free its old mapping manually by calling @c kmem_free_guard().
 *
 * Note that if the entry is truncated, it will always be done in place.
 *
 *
 * @param map           map to allocate into, must be a kernel map.
 * @param oldaddr       the address to reallocate,
 *                      passing 0 means @c kmem_alloc_guard() will be called.
 * @param oldsize       the current size of the entry
 * @param newsize       the new size of the entry,
 *                      0 means kmem_free_guard() will be called.
 * @param flags         a set of @c KMR_* flags (@see @c kmem_flags_t);
 *                      the exact same set of @c KMR_GUARD_* flags must
 *                      be passed for all calls (@see kmem_flags_t).
 * @param guard         the allocation guard.
 *
 * @returns
 * - the newly allocated address on success in @c kmr_address
 *   (note that if newsize is 0, then address will be 0 too).
 * - @c KERN_NO_SPACE if the target map is out of address space.
 * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages.
 */
extern kmem_return_t kmem_realloc_guard(
	vm_map_t map,
	vm_offset_t oldaddr,
	vm_size_t oldsize,
	vm_size_t newsize,
	kmr_flags_t flags,
	kmem_guard_t guard) __result_use_check
__attribute__((diagnose_if(!KMEM_REALLOC_FLAGS_VALID(flags),
    "invalid realloc flags passed", "error")));

/*!
 * @function kmem_realloc_should_free()
 *
 * @brief
 * Returns whether the old address passed to a @c kmem_realloc_guard()
 * call without @c KMR_FREEOLD must be freed.
 *
 * @param oldaddr       the "oldaddr" passed to @c kmem_realloc_guard().
 * @param kmr           the result of that @c kmem_realloc_guard() call.
 */
static inline bool
kmem_realloc_should_free(
	vm_offset_t oldaddr,
	kmem_return_t kmr)
{
	return oldaddr && oldaddr != kmr.kmr_address;
}
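
/*
 * Illustrative sketch: growing an allocation without KMR_FREEOLD and
 * disposing of the old mapping only when the entry actually moved
 * (map, sizes and guard are assumed to come from the caller):
 *
 *	kmem_return_t kmr;
 *
 *	kmr = kmem_realloc_guard(map, oldaddr, oldsize, newsize,
 *	    KMR_ZERO | KMR_DATA, guard);
 *	if (kmr.kmr_return == KERN_SUCCESS &&
 *	    kmem_realloc_should_free(oldaddr, kmr)) {
 *		kmem_free_guard(map, oldaddr, oldsize, KMF_NONE, guard);
 *	}
 */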


#pragma mark kmem free

/*!
 * @typedef kmf_flags_t
 *
 * @brief
 * Flags used by the @c kmem_free* family of functions.
 */
__options_decl(kmf_flags_t, uint32_t, {
	KMF_NONE       = KMEM_NONE,

	/* Call behavior */

	/* How the entry is populated */

	/* How to look for addresses */
	KMF_GUESS_SIZE = KMEM_GUESS_SIZE,
});


/*!
 * @function kmem_free_guard()
 *
 * @brief
 * Frees memory allocated with @c kmem_alloc or @c kmem_realloc.
 *
 * @param map           map to free from, must be a kernel map.
 * @param addr          the address to free
 * @param size          the size of the memory to free
 * @param flags         a set of @c KMF_* flags (@see @c kmem_flags_t)
 * @param guard         the allocation guard.
 *
 * @returns             the size of the entry that was deleted.
 *                      (useful when @c KMF_GUESS_SIZE was used)
 */
extern vm_size_t kmem_free_guard(
	vm_map_t map,
	vm_offset_t addr,
	vm_size_t size,
	kmf_flags_t flags,
	kmem_guard_t guard);

static inline void
kmem_free(
	vm_map_t map,
	vm_offset_t addr,
	vm_size_t size)
{
	kmem_free_guard(map, addr, size, KMF_NONE, KMEM_GUARD_NONE);
}

#pragma mark kmem population

extern void kernel_memory_populate_object_and_unlock(
	vm_object_t object, /* must be locked */
	vm_address_t addr,
	vm_offset_t offset,
	vm_size_t size,
	struct vm_page *page_list,
	kma_flags_t flags,
	vm_tag_t tag,
	vm_prot_t prot);

extern kern_return_t kernel_memory_populate(
	vm_offset_t addr,
	vm_size_t size,
	kma_flags_t flags,
	vm_tag_t tag);

extern void kernel_memory_depopulate(
	vm_offset_t addr,
	vm_size_t size,
	kma_flags_t flags,
	vm_tag_t tag);

#pragma GCC visibility pop
#elif KERNEL_PRIVATE /* XNU_KERNEL_PRIVATE */

extern kern_return_t kmem_alloc(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size);

extern kern_return_t kmem_alloc_pageable(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size);

extern kern_return_t kmem_alloc_kobject(
	vm_map_t map,
	vm_offset_t *addrp,
	vm_size_t size);

extern void kmem_free(
	vm_map_t map,
	vm_offset_t addr,
	vm_size_t size);

#endif /* KERNEL_PRIVATE */

#pragma mark - kernel address obfuscation / hashing for logging

extern vm_offset_t vm_kernel_addrperm_ext;

extern void vm_kernel_addrhide(
	vm_offset_t addr,
	vm_offset_t *hide_addr);

extern void vm_kernel_addrperm_external(
	vm_offset_t addr,
	vm_offset_t *perm_addr);

extern void vm_kernel_unslide_or_perm_external(
	vm_offset_t addr,
	vm_offset_t *up_addr);
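
/*
 * Illustrative sketch: obfuscating a kernel pointer before exposing it
 * in a log line (the object being logged is hypothetical):
 *
 *	vm_offset_t perm_addr;
 *
 *	vm_kernel_addrperm_external((vm_offset_t)obj, &perm_addr);
 *	printf("object at %p\n", (void *)perm_addr);
 */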

#if !XNU_KERNEL_PRIVATE

extern vm_offset_t vm_kernel_addrhash(
	vm_offset_t addr);

#else /* XNU_KERNEL_PRIVATE */
#pragma GCC visibility push(hidden)

extern uint64_t vm_kernel_addrhash_salt;
extern uint64_t vm_kernel_addrhash_salt_ext;

extern vm_offset_t vm_kernel_addrhash_internal(
	vm_offset_t addr,
	uint64_t salt);

static inline vm_offset_t
vm_kernel_addrhash(vm_offset_t addr)
{
	return vm_kernel_addrhash_internal(addr, vm_kernel_addrhash_salt);
}

#pragma mark - kernel variants of the Mach VM interfaces

extern kern_return_t mach_vm_allocate_kernel(
	vm_map_t map,
	mach_vm_offset_t *addr,
	mach_vm_size_t size,
	int flags,
	vm_tag_t tag);

extern kern_return_t mach_vm_map_kernel(
	vm_map_t target_map,
	mach_vm_offset_t *address,
	mach_vm_size_t initial_size,
	mach_vm_offset_t mask,
	int flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t tag,
	ipc_port_t port,
	vm_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance);


extern kern_return_t vm_map_kernel(
	vm_map_t target_map,
	vm_offset_t *address,
	vm_size_t size,
	vm_offset_t mask,
	int flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t tag,
	ipc_port_t port,
	vm_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance);

extern kern_return_t mach_vm_remap_kernel(
	vm_map_t target_map,
	mach_vm_offset_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	vm_tag_t tag,
	vm_map_t src_map,
	mach_vm_offset_t memory_address,
	boolean_t copy,
	vm_prot_t *cur_protection,
	vm_prot_t *max_protection,
	vm_inherit_t inheritance);

extern kern_return_t mach_vm_remap_new_kernel(
	vm_map_t target_map,
	mach_vm_offset_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	vm_tag_t tag,
	vm_map_t src_map,
	mach_vm_offset_t memory_address,
	boolean_t copy,
	vm_prot_t *cur_protection,
	vm_prot_t *max_protection,
	vm_inherit_t inheritance);

extern kern_return_t vm_remap_kernel(
	vm_map_t target_map,
	vm_offset_t *address,
	vm_size_t size,
	vm_offset_t mask,
	int flags,
	vm_tag_t tag,
	vm_map_t src_map,
	vm_offset_t memory_address,
	boolean_t copy,
	vm_prot_t *cur_protection,
	vm_prot_t *max_protection,
	vm_inherit_t inheritance);

extern kern_return_t vm_map_64_kernel(
	vm_map_t target_map,
	vm_offset_t *address,
	vm_size_t size,
	vm_offset_t mask,
	int flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t tag,
	ipc_port_t port,
	vm_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance);

extern kern_return_t mach_vm_wire_kernel(
	host_priv_t host_priv,
	vm_map_t map,
	mach_vm_offset_t start,
	mach_vm_size_t size,
	vm_prot_t access,
	vm_tag_t tag);

extern kern_return_t vm_map_wire_kernel(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t caller_prot,
	vm_tag_t tag,
	boolean_t user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t map,
	vm_map_offset_t start,
	vm_prot_t caller_prot,
	vm_tag_t tag,
	boolean_t user_wire,
	ppnum_t *physpage_p);

extern kern_return_t memory_object_iopl_request(
	ipc_port_t port,
	memory_object_offset_t offset,
	upl_size_t *upl_size,
	upl_t *upl_ptr,
	upl_page_info_array_t user_page_list,
	unsigned int *page_list_count,
	upl_control_flags_t *flags,
	vm_tag_t tag);

#ifdef MACH_KERNEL_PRIVATE

extern kern_return_t copyinmap(
	vm_map_t map,
	vm_map_offset_t fromaddr,
	void *todata,
	vm_size_t length);

extern kern_return_t copyoutmap(
	vm_map_t map,
	void *fromdata,
	vm_map_offset_t toaddr,
	vm_size_t length);

extern kern_return_t copyoutmap_atomic32(
	vm_map_t map,
	uint32_t value,
	vm_map_offset_t toaddr);

extern kern_return_t copyoutmap_atomic64(
	vm_map_t map,
	uint64_t value,
	vm_map_offset_t toaddr);

#endif /* MACH_KERNEL_PRIVATE */
#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#pragma mark - unsorted interfaces

#ifdef XNU_KERNEL_PRIVATE
typedef struct vm_allocation_site kern_allocation_name;
typedef kern_allocation_name * kern_allocation_name_t;
#else /* XNU_KERNEL_PRIVATE */
struct kern_allocation_name;
typedef struct kern_allocation_name * kern_allocation_name_t;
#endif /* !XNU_KERNEL_PRIVATE */

extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs);
extern void kern_allocation_name_release(kern_allocation_name_t allocation);
extern const char * kern_allocation_get_name(kern_allocation_name_t allocation);

#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta);
extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta);
extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation);

struct mach_memory_info;
extern kern_return_t vm_page_diagnose(
	struct mach_memory_info *info,
	unsigned int num_info,
	uint64_t zones_collectable_bytes);

extern uint32_t vm_page_diagnose_estimate(void);

extern void vm_init_before_launchd(void);

typedef enum {
	PMAP_FEAT_UEXEC = 1
} pmap_feature_flags_t;

#if defined(__x86_64__)
extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);
#endif

#if DEBUG || DEVELOPMENT

extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size);

#endif /* DEBUG || DEVELOPMENT */

#if HIBERNATION
extern void hibernate_rebuild_vm_structs(void);
#endif /* HIBERNATION */

extern vm_tag_t vm_tag_bt(void);

extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site);

extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP);

extern void vm_tag_update_size(vm_tag_t tag, int64_t size);

#if VM_TAG_SIZECLASSES

extern void vm_allocation_zones_init(void);
extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags);
extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta);

#endif /* VM_TAG_SIZECLASSES */

extern vm_tag_t vm_tag_bt_debug(void);

extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen);

extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);

extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_KERN_H_ */