/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management definitions.
 */

#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/vm_types.h>
#ifdef XNU_KERNEL_PRIVATE
#include <kern/locks.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef KERNEL_PRIVATE
extern vm_map_t kernel_map;
extern vm_map_t ipc_kernel_map;
extern vm_map_t g_kext_map;
#endif /* KERNEL_PRIVATE */

#pragma mark - the kmem subsystem
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*
 * "kmem" is a set of methods that provide interfaces suitable
 * for allocating memory from the VM in the kernel map or submaps.
 *
 * It provides leaner alternatives to some of the VM functions,
 * closer to a typical allocator.
 */

struct vm_page;
struct vm_map_entry;
/*!
 * @typedef kmem_return_t
 *
 * @brief
 * Pair of a return code and size/address/... used by kmem interfaces.
 *
 * @discussion
 * Using a pair of integers allows the compiler to return everything
 * through registers rather than through values on the stack,
 * which yields significantly better codegen.
 *
 * If @c kmr_return is not @c KERN_SUCCESS, then the other field
 * of the union is always supposed to be 0.
 */
typedef struct {
    kern_return_t           kmr_return;
    union {
        vm_address_t        kmr_address;
        vm_size_t           kmr_size;
        void               *kmr_ptr;
        vm_map_t            kmr_submap;
    };
} kmem_return_t;

/*!
 * @typedef kmem_guard_t
 *
 * @brief
 * KMEM guards are used by the kmem_* subsystem to secure atomic allocations.
 *
 * @discussion
 * This parameter is used to transmit the tag for the allocation.
 *
 * If @c kmg_atomic is set, then the other fields are also taken into account
 * and will affect the allocation behavior for this allocation.
 *
 * @field kmg_tag           The VM_KERN_MEMORY_* tag for this entry.
 * @field kmg_type_hash     Some hash related to the type of the allocation.
 * @field kmg_atomic        Whether the entry is atomic.
 * @field kmg_submap        Whether the entry is for a submap.
 * @field kmg_context       A user-defined 30-bit value that will be stored
 *                          on the entry on allocation and checked
 *                          on other operations.
 */
typedef struct {
    uint16_t                kmg_tag;
    uint16_t                kmg_type_hash;
    uint32_t                kmg_atomic : 1;
    uint32_t                kmg_submap : 1;
    uint32_t                kmg_context : 30;
} kmem_guard_t;
#define KMEM_GUARD_NONE         (kmem_guard_t){ }
#define KMEM_GUARD_SUBMAP       (kmem_guard_t){ .kmg_atomic = 0, .kmg_submap = 1 }
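
/*
 * Illustrative sketch (not part of the original header): how a client
 * might build an atomic guard.  The tag and context values below are
 * assumptions chosen for the example only.
 *
 *	kmem_guard_t guard = {
 *		.kmg_tag     = VM_KERN_MEMORY_KALLOC,
 *		.kmg_atomic  = 1,
 *		.kmg_context = 0x2a,    // re-checked on free/realloc
 *	};
 *
 * Passing the same guard to kmem_alloc_guard() and later to
 * kmem_free_guard() lets the entry be "authenticated" on each operation.
 */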


/*!
 * @typedef kmem_flags_t
 *
 * @brief
 * Sets of flags taken by several of the @c kmem_* family of functions.
 *
 * @discussion
 * This type is not used directly by any function, it is an underlying raw
 * type that is re-vended under different namespaces for each @c kmem_*
 * interface.
 *
 * - @c kmem_alloc    uses @c kma_flags_t / @c KMA_* namespaced values.
 * - @c kmem_suballoc uses @c kms_flags_t / @c KMS_* namespaced values.
 * - @c kmem_realloc  uses @c kmr_flags_t / @c KMR_* namespaced values.
 * - @c kmem_free     uses @c kmf_flags_t / @c KMF_* namespaced values.
 *
 *
 * <h2>Call behavior</h2>
 *
 * @const KMEM_NONE (all)
 * Pass this when no special options are to be used.
 *
 * @const KMEM_NOFAIL (alloc, suballoc)
 * When this flag is passed, any allocation failure results in a panic().
 * Using this flag should really be limited to cases when failure is not
 * recoverable and possibly during early boot only.
 *
 * @const KMEM_NOPAGEWAIT (alloc, realloc)
 * Pass this flag if the system should not wait in VM_PAGE_WAIT().
 *
 * @const KMEM_FREEOLD (realloc)
 * Pass this flag if @c kmem_realloc should free the old mapping
 * (when the address changed) as part of the call.
 *
 * @const KMEM_REALLOCF (realloc)
 * Similar to @c Z_REALLOCF: if the call is failing,
 * then free the old allocation too.
 *
 *
 * <h2>How the entry is populated</h2>
 *
 * @const KMEM_VAONLY (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_VAONLY will cause the range to still be wired,
 * but no page is actually mapped.
 *
 * @const KMEM_PAGEABLE (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_PAGEABLE makes the entry non wired, and pages will be
 * added to the entry as it faults.
 *
 * @const KMEM_ZERO (alloc, realloc)
 * Any new page added is zeroed.
 *
 *
 * <h2>VM object to use for the entry</h2>
 *
 * @const KMEM_KOBJECT (alloc, realloc)
 * The entry will be made for the @c kernel_object.
 *
 * Note that the @c kernel_object is just a "collection of pages".
 * Pages in that object can't be remapped or present in several VM maps
 * like traditional objects.
 *
 * If neither @c KMEM_KOBJECT nor @c KMEM_COMPRESSOR is passed,
 * then a fresh new VM object will be made for this allocation.
 * This is expensive and should be limited to allocations that
 * need the features associated with a VM object.
 *
 * @const KMEM_COMPRESSOR (alloc)
 * The entry is allocated for the @c compressor_object.
 * Pages belonging to the compressor are not on the paging queues,
 * nor are they counted as wired.
 *
 * Only the VM Compressor subsystem should use this.
 *
 *
 * <h2>How to look for addresses</h2>
 *
 * @const KMEM_LOMEM (alloc, realloc)
 * The physical memory allocated must be in the first 4G of memory,
 * in order to support hardware controllers incapable of generating DMAs
 * with more than 32 bits of physical address.
 *
 * @const KMEM_LAST_FREE (alloc, suballoc, realloc)
 * When looking for space in the specified map,
 * start scanning for addresses from the end of the map
 * rather than the start.
 *
 * @const KMEM_DATA (alloc, suballoc, realloc)
 * The memory must be allocated from the "Data" range.
 *
 * @const KMEM_GUESS_SIZE (free)
 * When freeing an atomic entry (requires a valid kmem guard),
 * then look up the entry size because the caller didn't
 * preserve it.
 *
 * This flag is only here in order to support kfree_data_addr(),
 * and shall not be used by any other clients.
 *
 * <h2>Entry properties</h2>
 *
 * @const KMEM_PERMANENT (alloc, suballoc)
 * The entry is made permanent.
 *
 * In the kernel maps, permanent entries can never be deleted.
 * Calling @c kmem_free() on such a range will panic.
 *
 * In user maps, permanent entries will only be deleted
 * when the map is terminated.
 *
 * @const KMEM_GUARD_FIRST (alloc, realloc)
 * @const KMEM_GUARD_LAST (alloc, realloc)
 * Asks @c kmem_* to put a guard page at the beginning (resp. end)
 * of the allocation.
 *
 * The allocation size will not be extended to accommodate the guards,
 * and the client of this interface must take them into account.
 * Typically if a usable range of 3 pages is needed with both guards,
 * then 5 pages must be requested.
 *
 * Alignment constraints take guards into account (the alignment applies
 * to the address right after the first guard page).
 *
 * The returned address for the allocation will point at the entry start,
 * which is the address of the left guard page if any.
 *
 * Note that if @c kmem_realloc* is called, the *exact* same
 * guard flags must be passed for this entry. The KMEM subsystem
 * is generally oblivious to guards, and passing inconsistent flags
 * will cause pages to be moved incorrectly.
 *
 * @const KMEM_KSTACK (alloc)
 * This flag must be passed when the allocation is for kernel stacks.
 * This only has an effect on Intel.
 *
 * @const KMEM_NOENCRYPT (alloc)
 * Obsolete, will be repurposed soon.
 */
__options_decl(kmem_flags_t, uint32_t, {
    KMEM_NONE        = 0x00000000,

    /* Call behavior */
    KMEM_NOFAIL      = 0x00000001,
    KMEM_NOPAGEWAIT  = 0x00000002,
    KMEM_FREEOLD     = 0x00000004,
    KMEM_REALLOCF    = 0x00000008,

    /* How the entry is populated */
    KMEM_VAONLY      = 0x00000010,
    KMEM_PAGEABLE    = 0x00000020,
    KMEM_ZERO        = 0x00000040,

    /* VM object to use for the entry */
    KMEM_KOBJECT     = 0x00000100,
    KMEM_COMPRESSOR  = 0x00000200,

    /* How to look for addresses */
    KMEM_LOMEM       = 0x00001000,
    KMEM_LAST_FREE   = 0x00002000,
    KMEM_GUESS_SIZE  = 0x00004000,
    KMEM_DATA        = 0x00008000,

    /* Entry properties */
    KMEM_PERMANENT   = 0x00010000,
    KMEM_GUARD_FIRST = 0x00020000,
    KMEM_GUARD_LAST  = 0x00040000,
    KMEM_KSTACK      = 0x00080000,
    KMEM_NOENCRYPT   = 0x00100000,
});


#pragma mark kmem range methods

extern struct mach_vm_range kmem_ranges[KMEM_RANGE_COUNT];
extern struct mach_vm_range kmem_large_ranges[KMEM_RANGE_COUNT];
#define KMEM_RANGE_MASK         0x3fff
#define KMEM_HASH_SET           0x4000
#define KMEM_DIRECTION_MASK     0x8000

__stateful_pure
extern mach_vm_size_t mach_vm_range_size(
    const struct mach_vm_range *r);

__attribute__((overloadable, pure))
extern bool mach_vm_range_contains(
    const struct mach_vm_range *r,
    mach_vm_offset_t        addr);

__attribute__((overloadable, pure))
extern bool mach_vm_range_contains(
    const struct mach_vm_range *r,
    mach_vm_offset_t        addr,
    mach_vm_offset_t        size);

__attribute__((overloadable, pure))
extern bool mach_vm_range_intersects(
    const struct mach_vm_range *r1,
    const struct mach_vm_range *r2);

__attribute__((overloadable, pure))
extern bool mach_vm_range_intersects(
    const struct mach_vm_range *r1,
    mach_vm_offset_t        addr,
    mach_vm_offset_t        size);

/*
 * @function kmem_range_id_contains
 *
 * @abstract Return whether the region of `[addr, addr + size)` is completely
 * within the memory range.
 */
__pure2
extern bool kmem_range_id_contains(
    kmem_range_id_t         range_id,
    vm_map_offset_t         addr,
    vm_map_size_t           size);

/*
 * @function kmem_range_id_size
 *
 * @abstract Return the addressable size of the memory range.
 */
__pure2
extern vm_map_size_t kmem_range_id_size(
    kmem_range_id_t         range_id);

__pure2
extern kmem_range_id_t kmem_addr_get_range(
    vm_map_offset_t         addr,
    vm_map_size_t           size);

extern kmem_range_id_t kmem_adjust_range_id(
    uint32_t                hash);


/**
 * @enum kmem_claims_flags_t
 *
 * @abstract
 * Set of flags used in the processing of kmem_range claims.
 *
 * @discussion
 * These flags are used by the kmem subsystem while processing kmem_range
 * claims and are not explicitly passed by the caller registering the claim.
 *
 * @const KC_NO_ENTRY
 * A vm map entry should not be created for the respective claim.
 *
 * @const KC_NO_MOVE
 * The range shouldn't be moved once it has been placed as it has constraints.
 */
__options_decl(kmem_claims_flags_t, uint32_t, {
    KC_NONE     = 0x00000000,
    KC_NO_ENTRY = 0x00000001,
    KC_NO_MOVE  = 0x00000002,
});

/*
 * Security config that creates the data split in kernel_map
 */
#if !defined(__LP64__)
#   define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT      OFF
#else
#   define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT      ON
#endif

/*
 * Security config that creates the additional splits in the non-data part of
 * kernel_map
 */
#if KASAN || (__arm64__ && !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR))
#   define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT       OFF
#else
#   define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT       ON
#endif

#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__OFF() 0
#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON()  1
#define ZSECURITY_CONFIG2(v)    ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__##v()
#define ZSECURITY_CONFIG1(v)    ZSECURITY_CONFIG2(v)
#define ZSECURITY_CONFIG(opt)   ZSECURITY_CONFIG1(ZSECURITY_CONFIG_##opt)
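
/*
 * Expansion sketch (illustrative, not part of the original header):
 * the two-level expansion above turns the ON/OFF setting into a 0/1
 * constant usable in #if, while misspelled option names fail to expand
 * and produce a compile-time error.  For example, on LP64:
 *
 *	ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
 *	-> ZSECURITY_CONFIG1(ZSECURITY_CONFIG_KERNEL_DATA_SPLIT)
 *	-> ZSECURITY_CONFIG2(ON)        // after argument expansion
 *	-> ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON()
 *	-> 1
 */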

struct kmem_range_startup_spec {
    const char             *kc_name;
    struct mach_vm_range   *kc_range;
    vm_map_size_t           kc_size;
    vm_map_size_t         (^kc_calculate_sz)(void);
    kmem_claims_flags_t     kc_flags;
};

extern void kmem_range_startup_init(
    struct kmem_range_startup_spec *sp);

/*!
 * @macro KMEM_RANGE_REGISTER_*
 *
 * @abstract
 * Register a claim for a kmem range or submap.
 *
 * @discussion
 * Claims are shuffled during startup to randomize the layout of the kernel map.
 * Temporary entries are created in place of the claims, therefore the caller
 * must provide the start of the assigned range as a hint and
 * @c VM_FLAGS_FIXED_RANGE_SUBALLOC to kmem_suballoc to replace the mapping.
 *
 * Min/max constraints can be provided in the range when the claim is
 * registered.
 *
 * This macro comes in 2 flavors (see the usage sketch below):
 * - STATIC : When the size of the range/submap is known at compile time
 * - DYNAMIC: When the size of the range/submap needs to be computed
 *
 * @param name          the name of the claim
 * @param range         the assigned range for the claim
 * @param size          the size of submap/range (if known at compile time)
 * @param calculate_sz  a block that returns the computed size of submap/range
 */
#define KMEM_RANGE_REGISTER_STATIC(name, range, size)                   \
	static __startup_data struct kmem_range_startup_spec            \
	__startup_kmem_range_spec_ ## name = { #name, range, size, NULL, KC_NONE}; \
	STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
	    &__startup_kmem_range_spec_ ## name)

#define KMEM_RANGE_REGISTER_DYNAMIC(name, range, calculate_sz)          \
	static __startup_data struct kmem_range_startup_spec            \
	__startup_kmem_range_spec_ ## name = { #name, range, 0, calculate_sz, \
	    KC_NONE};                                                    \
	STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
	    &__startup_kmem_range_spec_ ## name)
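
/*
 * Usage sketch (illustrative only; the range variable and sizes are
 * assumptions for the example, not claims registered by xnu).
 * A claim uses one flavor or the other, not both:
 *
 *	static struct mach_vm_range example_range;
 *
 *	// Size known at compile time:
 *	KMEM_RANGE_REGISTER_STATIC(example, &example_range, 32ull << 20);
 *
 *	// Size computed at startup:
 *	KMEM_RANGE_REGISTER_DYNAMIC(example, &example_range, ^{
 *	        return (vm_map_size_t)(16ull << 20);
 *	});
 */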

#if XNU_KERNEL_PRIVATE
#if ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
#define VM_FLAGS_FIXED_RANGE_SUBALLOC   (VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE)
#else /* ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#define VM_FLAGS_FIXED_RANGE_SUBALLOC   (VM_FLAGS_ANYWHERE)
#endif /* !ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#endif /* XNU_KERNEL_PRIVATE */

__startup_func
extern uint16_t kmem_get_random16(
    uint16_t                upper_limit);

__startup_func
extern void kmem_shuffle(
    uint16_t               *shuffle_buf,
    uint16_t                count);


#pragma mark kmem entry parameters

/*!
 * @function kmem_entry_validate_guard()
 *
 * @brief
 * Validates that the entry matches the input parameters, and panics otherwise.
 *
 * @discussion
 * If the guard has a zero @c kmg_atomic value,
 * then the entry must be non-atomic.
 *
 * The guard tag is not used for validation as the VM subsystems
 * (particularly in IOKit) might decide to substitute it in ways
 * that are difficult to predict for the programmer.
 *
 * @param entry         the entry to validate
 * @param addr          the supposed start address
 * @param size          the supposed size of the entry
 * @param guard         the guard to use to "authenticate" the allocation.
 */
extern void kmem_entry_validate_guard(
    vm_map_t                map,
    struct vm_map_entry    *entry,
    vm_offset_t             addr,
    vm_size_t               size,
    kmem_guard_t            guard);

/*!
 * @function kmem_size_guard()
 *
 * @brief
 * Returns the size of an atomic allocation made in the specified map,
 * according to the guard.
 *
 * @param map           a kernel map to look the entry up in.
 * @param addr          the kernel address to look up.
 * @param guard         the guard to use to "authenticate" the allocation.
 */
extern vm_size_t kmem_size_guard(
    vm_map_t                map,
    vm_offset_t             addr,
    kmem_guard_t            guard);

#pragma mark kmem allocations

/*!
 * @typedef kma_flags_t
 *
 * @brief
 * Flags used by the @c kmem_alloc* family of functions.
 */
__options_decl(kma_flags_t, uint32_t, {
    KMA_NONE        = KMEM_NONE,

    /* Call behavior */
    KMA_NOFAIL      = KMEM_NOFAIL,
    KMA_NOPAGEWAIT  = KMEM_NOPAGEWAIT,

    /* How the entry is populated */
    KMA_VAONLY      = KMEM_VAONLY,
    KMA_PAGEABLE    = KMEM_PAGEABLE,
    KMA_ZERO        = KMEM_ZERO,

    /* VM object to use for the entry */
    KMA_KOBJECT     = KMEM_KOBJECT,
    KMA_COMPRESSOR  = KMEM_COMPRESSOR,

    /* How to look for addresses */
    KMA_LOMEM       = KMEM_LOMEM,
    KMA_LAST_FREE   = KMEM_LAST_FREE,
    KMA_DATA        = KMEM_DATA,

    /* Entry properties */
    KMA_PERMANENT   = KMEM_PERMANENT,
    KMA_GUARD_FIRST = KMEM_GUARD_FIRST,
    KMA_GUARD_LAST  = KMEM_GUARD_LAST,
    KMA_KSTACK      = KMEM_KSTACK,
    KMA_NOENCRYPT   = KMEM_NOENCRYPT,
});

#define KMEM_ALLOC_CONTIG_FLAGS ( \
	/* Call behavior */ \
	KMA_NOPAGEWAIT | \
	\
	/* How the entry is populated */ \
	KMA_ZERO | \
	\
	/* VM object to use for the entry */ \
	KMA_KOBJECT | \
	\
	/* How to look for addresses */ \
	KMA_LOMEM | \
	KMA_DATA | \
	\
	/* Entry properties */ \
	KMA_PERMANENT | \
	\
	KMA_NONE)


/*!
 * @function kmem_alloc_guard()
 *
 * @brief
 * Master entry point for allocating kernel memory.
 *
 * @param map           map to allocate into, must be a kernel map.
 * @param size          the size of the entry to allocate, must not be 0.
 * @param mask          an alignment mask that the returned allocation
 *                      will be aligned to (ignoring guards, see
 *                      @c KMEM_GUARD_FIRST).
 * @param flags         a set of @c KMA_* flags (@see @c kmem_flags_t)
 * @param guard         how to guard the allocation.
 *
 * @returns
 * - the non-zero address of the allocation on success in @c kmr_address.
 * - @c KERN_NO_SPACE if the target map is out of address space.
 * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages.
 */
extern kmem_return_t kmem_alloc_guard(
    vm_map_t                map,
    vm_size_t               size,
    vm_offset_t             mask,
    kma_flags_t             flags,
    kmem_guard_t            guard) __result_use_check;
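
/*
 * Usage sketch (illustrative only; the tag and flag choices below are
 * assumptions for the example): allocate three wired, zeroed pages in
 * the kernel object, then free them with the same guard.
 *
 *	kmem_guard_t guard = {
 *		.kmg_tag = VM_KERN_MEMORY_KALLOC,
 *	};
 *	kmem_return_t kmr;
 *
 *	kmr = kmem_alloc_guard(kernel_map, 3 * PAGE_SIZE, 0,
 *	    KMA_KOBJECT | KMA_ZERO, guard);
 *	if (kmr.kmr_return == KERN_SUCCESS) {
 *		// ... use (void *)kmr.kmr_address ...
 *		kmem_free_guard(kernel_map, kmr.kmr_address, 3 * PAGE_SIZE,
 *		    KMF_NONE, guard);
 *	}
 */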

static inline kern_return_t
kernel_memory_allocate(
    vm_map_t                map,
    vm_offset_t            *addrp,
    vm_size_t               size,
    vm_offset_t             mask,
    kma_flags_t             flags,
    vm_tag_t                tag)
{
    kmem_guard_t guard = {
        .kmg_tag = tag,
    };
    kmem_return_t kmr;

    kmr = kmem_alloc_guard(map, size, mask, flags, guard);
    if (kmr.kmr_return == KERN_SUCCESS) {
        __builtin_assume(kmr.kmr_address != 0);
    } else {
        __builtin_assume(kmr.kmr_address == 0);
    }
    *addrp = kmr.kmr_address;
    return kmr.kmr_return;
}

static inline kern_return_t
kmem_alloc(
    vm_map_t                map,
    vm_offset_t            *addrp,
    vm_size_t               size,
    kma_flags_t             flags,
    vm_tag_t                tag)
{
    return kernel_memory_allocate(map, addrp, size, 0, flags, tag);
}

extern kern_return_t kmem_alloc_contig(
    vm_map_t                map,
    vm_offset_t            *addrp,
    vm_size_t               size,
    vm_offset_t             mask,
    ppnum_t                 max_pnum,
    ppnum_t                 pnum_mask,
    kma_flags_t             flags,
    vm_tag_t                tag)
__attribute__((diagnose_if(flags & ~KMEM_ALLOC_CONTIG_FLAGS,
    "invalid alloc_contig flags passed", "error")));
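
/*
 * Usage sketch (illustrative only; the size, physical-page constraint and
 * tag are assumptions for the example): allocate a physically contiguous
 * buffer whose pages all live below 4GB, e.g. for a 32-bit DMA engine.
 *
 *	vm_offset_t addr;
 *	kern_return_t kr;
 *
 *	kr = kmem_alloc_contig(kernel_map, &addr, 64 * 1024,
 *	    0,                                  // no extra alignment mask
 *	    (ppnum_t)(atop(1ULL << 32) - 1),    // highest acceptable page
 *	                                        // number (assumed inclusive)
 *	    0,                                  // no page-number mask
 *	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_IOKIT);
 */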


/*!
 * @typedef kms_flags_t
 *
 * @brief
 * Flags used by @c kmem_suballoc.
 */
__options_decl(kms_flags_t, uint32_t, {
    KMS_NONE        = KMEM_NONE,

    /* Call behavior */
    KMS_NOFAIL      = KMEM_NOFAIL,

    /* How to look for addresses */
    KMS_LAST_FREE   = KMEM_LAST_FREE,
    KMS_DATA        = KMEM_DATA,

    /* Entry properties */
    KMS_PERMANENT   = KMEM_PERMANENT,
});

/*!
 * @function kmem_suballoc()
 *
 * @brief
 * Create a kernel submap, in an atomic entry guarded with KMEM_GUARD_SUBMAP.
 *
 * @param parent        map to allocate into, must be a kernel map.
 * @param addr          (in/out) the address for the map (see vm_map_enter)
 * @param size          the size of the entry to allocate, must not be 0.
 * @param vmc_options   the map creation options
 * @param vm_flags      a set of @c VM_FLAGS_* flags
 * @param flags         a set of @c KMS_* flags (@see @c kmem_flags_t)
 * @param tag           the tag for this submap's entry.
 */
extern kmem_return_t kmem_suballoc(
    vm_map_t                parent,
    mach_vm_offset_t       *addr,
    vm_size_t               size,
    vm_map_create_options_t vmc_options,
    int                     vm_flags,
    kms_flags_t             flags,
    vm_tag_t                tag);
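
/*
 * Usage sketch (illustrative only; the range variable, size, flag and tag
 * choices are assumptions for the example): back a claim registered with
 * KMEM_RANGE_REGISTER_* (example_range from the earlier sketch) by an
 * actual submap, using the claim start as the hint.
 *
 *	kmem_return_t kmr;
 *	mach_vm_offset_t addr = example_range.min_address;
 *
 *	kmr = kmem_suballoc(kernel_map, &addr, 32ull << 20,
 *	    VM_MAP_CREATE_DEFAULT, VM_FLAGS_FIXED_RANGE_SUBALLOC,
 *	    KMS_PERMANENT | KMS_NOFAIL, VM_KERN_MEMORY_KEXT);
 *	// kmr.kmr_submap is the new submap on success
 */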


#pragma mark kmem reallocation

/*!
 * @typedef kmr_flags_t
 *
 * @brief
 * Flags used by the @c kmem_realloc* family of functions.
 */
__options_decl(kmr_flags_t, uint32_t, {
    KMR_NONE        = KMEM_NONE,

    /* Call behavior */
    KMR_NOPAGEWAIT  = KMEM_NOPAGEWAIT,
    KMR_FREEOLD     = KMEM_FREEOLD,
    KMR_REALLOCF    = KMEM_REALLOCF,

    /* How the entry is populated */
    KMR_ZERO        = KMEM_ZERO,

    /* VM object to use for the entry */
    KMR_KOBJECT     = KMEM_KOBJECT,

    /* How to look for addresses */
    KMR_LOMEM       = KMEM_LOMEM,
    KMR_LAST_FREE   = KMEM_LAST_FREE,
    KMR_DATA        = KMEM_DATA,

    /* Entry properties */
    KMR_GUARD_FIRST = KMEM_GUARD_FIRST,
    KMR_GUARD_LAST  = KMEM_GUARD_LAST,
});

#define KMEM_REALLOC_FLAGS_VALID(flags) \
	(((flags) & KMR_KOBJECT) == 0 || ((flags) & KMR_FREEOLD))

/*!
 * @function kmem_realloc_guard()
 *
 * @brief
 * Reallocates memory allocated with kmem_alloc_guard().
 *
 * @discussion
 * @c kmem_realloc_guard() either mandates a guard with atomicity set,
 * or must use KMR_DATA (this is not an implementation limitation but
 * a security policy).
 *
 * If kmem_realloc_guard() is called for the kernel object
 * (with @c KMR_KOBJECT), then the use of @c KMR_FREEOLD is mandatory.
 *
 * When @c KMR_FREEOLD isn't used, if the allocation was relocated
 * as opposed to being extended or truncated in place, the caller
 * must free its old mapping manually by calling @c kmem_free_guard().
 *
 * Note that if the entry is truncated, it will always be done in place.
 *
 *
 * @param map           map to allocate into, must be a kernel map.
 * @param oldaddr       the address to reallocate,
 *                      passing 0 means @c kmem_alloc_guard() will be called.
 * @param oldsize       the current size of the entry
 * @param newsize       the new size of the entry,
 *                      0 means kmem_free_guard() will be called.
 * @param flags         a set of @c KMR_* flags (@see @c kmem_flags_t);
 *                      the exact same set of @c KMR_GUARD_* flags must
 *                      be passed for all calls (@see kmem_flags_t).
 * @param guard         the allocation guard.
 *
 * @returns
 * - the newly allocated address on success in @c kmr_address
 *   (note that if newsize is 0, then address will be 0 too).
 * - @c KERN_NO_SPACE if the target map is out of address space.
 * - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages.
 */
extern kmem_return_t kmem_realloc_guard(
    vm_map_t                map,
    vm_offset_t             oldaddr,
    vm_size_t               oldsize,
    vm_size_t               newsize,
    kmr_flags_t             flags,
    kmem_guard_t            guard) __result_use_check
__attribute__((diagnose_if(!KMEM_REALLOC_FLAGS_VALID(flags),
    "invalid realloc flags passed", "error")));

/*!
 * @function kmem_realloc_should_free()
 *
 * @brief
 * Returns whether the old address passed to a @c kmem_realloc_guard()
 * call without @c KMR_FREEOLD must be freed.
 *
 * @param oldaddr       the "oldaddr" passed to @c kmem_realloc_guard().
 * @param kmr           the result of that @c kmem_realloc_guard() call.
 */
static inline bool
kmem_realloc_should_free(
    vm_offset_t             oldaddr,
    kmem_return_t           kmr)
{
    return oldaddr && oldaddr != kmr.kmr_address;
}
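
/*
 * Usage sketch (illustrative only; assumes caller-owned oldaddr/oldsize/
 * newsize, and flag/guard choices made for the example): grow a KMR_DATA
 * allocation without KMR_FREEOLD, then free the old mapping if it moved.
 *
 *	kmem_return_t kmr;
 *
 *	kmr = kmem_realloc_guard(kernel_map, oldaddr, oldsize, newsize,
 *	    KMR_DATA | KMR_ZERO, KMEM_GUARD_NONE);
 *	if (kmr.kmr_return == KERN_SUCCESS &&
 *	    kmem_realloc_should_free(oldaddr, kmr)) {
 *		kmem_free_guard(kernel_map, oldaddr, oldsize,
 *		    KMF_NONE, KMEM_GUARD_NONE);
 *	}
 */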


#pragma mark kmem free

/*!
 * @typedef kmf_flags_t
 *
 * @brief
 * Flags used by the @c kmem_free* family of functions.
 */
__options_decl(kmf_flags_t, uint32_t, {
    KMF_NONE        = KMEM_NONE,

    /* Call behavior */

    /* How the entry is populated */

    /* How to look for addresses */
    KMF_GUESS_SIZE  = KMEM_GUESS_SIZE,
});


/*!
 * @function kmem_free_guard()
 *
 * @brief
 * Frees memory allocated with @c kmem_alloc or @c kmem_realloc.
 *
 * @param map           map to free from, must be a kernel map.
 * @param addr          the address to free
 * @param size          the size of the memory to free
 * @param flags         a set of @c KMF_* flags (@see @c kmem_flags_t)
 * @param guard         the allocation guard.
 *
 * @returns             the size of the entry that was deleted
 *                      (useful when @c KMF_GUESS_SIZE was used).
 */
extern vm_size_t kmem_free_guard(
    vm_map_t                map,
    vm_offset_t             addr,
    vm_size_t               size,
    kmf_flags_t             flags,
    kmem_guard_t            guard);

static inline void
kmem_free(
    vm_map_t                map,
    vm_offset_t             addr,
    vm_size_t               size)
{
    kmem_free_guard(map, addr, size, KMF_NONE, KMEM_GUARD_NONE);
}

#pragma mark kmem population

extern void kernel_memory_populate_object_and_unlock(
    vm_object_t             object, /* must be locked */
    vm_address_t            addr,
    vm_offset_t             offset,
    vm_size_t               size,
    struct vm_page         *page_list,
    kma_flags_t             flags,
    vm_tag_t                tag,
    vm_prot_t               prot);

extern kern_return_t kernel_memory_populate(
    vm_offset_t             addr,
    vm_size_t               size,
    kma_flags_t             flags,
    vm_tag_t                tag);

extern void kernel_memory_depopulate(
    vm_offset_t             addr,
    vm_size_t               size,
    kma_flags_t             flags,
    vm_tag_t                tag);

#pragma GCC visibility pop
#elif KERNEL_PRIVATE /* XNU_KERNEL_PRIVATE */

extern kern_return_t kmem_alloc(
    vm_map_t                map,
    vm_offset_t            *addrp,
    vm_size_t               size);

extern kern_return_t kmem_alloc_pageable(
    vm_map_t                map,
    vm_offset_t            *addrp,
    vm_size_t               size);

extern kern_return_t kmem_alloc_kobject(
    vm_map_t                map,
    vm_offset_t            *addrp,
    vm_size_t               size);

extern void kmem_free(
    vm_map_t                map,
    vm_offset_t             addr,
    vm_size_t               size);

#endif /* KERNEL_PRIVATE */

#pragma mark - kernel address obfuscation / hashing for logging

extern vm_offset_t vm_kernel_addrperm_ext;

extern void vm_kernel_addrhide(
    vm_offset_t             addr,
    vm_offset_t            *hide_addr);

extern void vm_kernel_addrperm_external(
    vm_offset_t             addr,
    vm_offset_t            *perm_addr);

extern void vm_kernel_unslide_or_perm_external(
    vm_offset_t             addr,
    vm_offset_t            *up_addr);

#if !XNU_KERNEL_PRIVATE

extern vm_offset_t vm_kernel_addrhash(
    vm_offset_t             addr);

#else /* XNU_KERNEL_PRIVATE */
#pragma GCC visibility push(hidden)

extern uint64_t vm_kernel_addrhash_salt;
extern uint64_t vm_kernel_addrhash_salt_ext;

extern vm_offset_t vm_kernel_addrhash_internal(
    vm_offset_t             addr,
    uint64_t                salt);

static inline vm_offset_t
vm_kernel_addrhash(vm_offset_t addr)
{
    return vm_kernel_addrhash_internal(addr, vm_kernel_addrhash_salt);
}
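
/*
 * Usage sketch (illustrative only): log a kernel pointer without
 * leaking its actual value, by hashing it first.
 *
 *	vm_offset_t hashed = vm_kernel_addrhash((vm_offset_t)ptr);
 *	printf("object at %p\n", (void *)hashed);
 *
 * The hash is salted (vm_kernel_addrhash_salt), so the logged value is
 * stable within a boot but does not reveal the address itself.
 */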

#pragma mark - kernel variants of the Mach VM interfaces

extern kern_return_t mach_vm_allocate_kernel(
    vm_map_t                map,
    mach_vm_offset_t       *addr,
    mach_vm_size_t          size,
    int                     flags,
    vm_tag_t                tag);

extern kern_return_t mach_vm_map_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t       *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);


extern kern_return_t vm_map_kernel(
    vm_map_t                target_map,
    vm_offset_t            *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

extern kern_return_t mach_vm_remap_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t       *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t              *cur_protection,
    vm_prot_t              *max_protection,
    vm_inherit_t            inheritance);

extern kern_return_t mach_vm_remap_new_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t       *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t              *cur_protection,
    vm_prot_t              *max_protection,
    vm_inherit_t            inheritance);

extern kern_return_t vm_remap_kernel(
    vm_map_t                target_map,
    vm_offset_t            *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t              *cur_protection,
    vm_prot_t              *max_protection,
    vm_inherit_t            inheritance);

extern kern_return_t vm_map_64_kernel(
    vm_map_t                target_map,
    vm_offset_t            *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance);

extern kern_return_t mach_vm_wire_kernel(
    host_priv_t             host_priv,
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_prot_t               access,
    vm_tag_t                tag);

extern kern_return_t vm_map_wire_kernel(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_prot_t               caller_prot,
    vm_tag_t                tag,
    boolean_t               user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_prot_t               caller_prot,
    vm_tag_t                tag,
    boolean_t               user_wire,
    ppnum_t                *physpage_p);

extern kern_return_t memory_object_iopl_request(
    ipc_port_t              port,
    memory_object_offset_t  offset,
    upl_size_t             *upl_size,
    upl_t                  *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int           *page_list_count,
    upl_control_flags_t    *flags,
    vm_tag_t                tag);

#ifdef MACH_KERNEL_PRIVATE

extern kern_return_t copyinmap(
    vm_map_t                map,
    vm_map_offset_t         fromaddr,
    void                   *todata,
    vm_size_t               length);

extern kern_return_t copyoutmap(
    vm_map_t                map,
    void                   *fromdata,
    vm_map_offset_t         toaddr,
    vm_size_t               length);

extern kern_return_t copyoutmap_atomic32(
    vm_map_t                map,
    uint32_t                value,
    vm_map_offset_t         toaddr);

extern kern_return_t copyoutmap_atomic64(
    vm_map_t                map,
    uint64_t                value,
    vm_map_offset_t         toaddr);

#endif /* MACH_KERNEL_PRIVATE */
#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#pragma mark - unsorted interfaces

#ifdef XNU_KERNEL_PRIVATE
typedef struct vm_allocation_site kern_allocation_name;
typedef kern_allocation_name * kern_allocation_name_t;
#else /* XNU_KERNEL_PRIVATE */
struct kern_allocation_name;
typedef struct kern_allocation_name * kern_allocation_name_t;
#endif /* !XNU_KERNEL_PRIVATE */

extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs);
extern void kern_allocation_name_release(kern_allocation_name_t allocation);
extern const char * kern_allocation_get_name(kern_allocation_name_t allocation);

#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta);
extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta);
extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation);

struct mach_memory_info;
extern kern_return_t vm_page_diagnose(
    struct mach_memory_info *info,
    unsigned int            num_info,
    uint64_t                zones_collectable_bytes);

extern uint32_t vm_page_diagnose_estimate(void);

extern void vm_init_before_launchd(void);

typedef enum {
    PMAP_FEAT_UEXEC = 1
} pmap_feature_flags_t;

#if defined(__x86_64__)
extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);
#endif

#if DEBUG || DEVELOPMENT

extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size);

#endif /* DEBUG || DEVELOPMENT */

#if HIBERNATION
extern void hibernate_rebuild_vm_structs(void);
#endif /* HIBERNATION */

extern vm_tag_t vm_tag_bt(void);

extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site);

extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP);

extern void vm_tag_update_size(vm_tag_t tag, int64_t size);

extern uint64_t vm_tag_get_size(vm_tag_t tag);

#if VM_TAG_SIZECLASSES

extern void vm_allocation_zones_init(void);
extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags);
extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta);

#endif /* VM_TAG_SIZECLASSES */

extern vm_tag_t vm_tag_bt_debug(void);

extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen);

extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);

extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_KERN_H_ */