/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management definitions.
 */

#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/vm_types.h>
#ifdef XNU_KERNEL_PRIVATE
#include <kern/locks.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef KERNEL_PRIVATE
extern vm_map_t kernel_map;
extern vm_map_t ipc_kernel_map;
extern vm_map_t g_kext_map;
#endif /* KERNEL_PRIVATE */

#pragma mark - the kmem subsystem
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*
 * "kmem" is a set of methods that provide interfaces suitable
 * to allocate memory from the VM in the kernel map or submaps.
 *
 * It provides leaner alternatives to some of the VM functions,
 * closer to a typical allocator.
 */

struct vm_page;
struct vm_map_entry;

/*!
 * @typedef
 *
 * @brief
 * Pair of a return code and size/address/... used by kmem interfaces.
 *
 * @discussion
 * Using a pair of integers allows the compiler to return everything
 * through registers, without going through the stack to pass results
 * back, which yields significantly better codegen.
 *
 * If @c kmr_return is not @c KERN_SUCCESS, then the other field
 * of the union is always supposed to be 0.
 */
typedef struct {
	kern_return_t           kmr_return;
	union {
		vm_address_t    kmr_address;
		vm_size_t       kmr_size;
		void           *kmr_ptr;
		vm_map_t        kmr_submap;
	};
} kmem_return_t;
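
/*
 * Illustrative sketch (not part of this interface): a typical caller
 * unpacks the pair by checking @c kmr_return before trusting the union;
 * @c consume_buffer() and @c guard are hypothetical stand-ins.
 *
 *	kmem_return_t kmr = kmem_alloc_guard(kernel_map, size, 0,
 *	    KMA_KOBJECT | KMA_ZERO, guard);
 *	if (kmr.kmr_return != KERN_SUCCESS) {
 *		return kmr.kmr_return;            // kmr_address is 0 here
 *	}
 *	consume_buffer((void *)kmr.kmr_address, size);
 */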

/*!
 * @typedef kmem_guard_t
 *
 * @brief
 * KMEM guards are used by the kmem_* subsystem to secure atomic allocations.
 *
 * @discussion
 * This parameter is used to transmit the tag for the allocation.
 *
 * If @c kmg_atomic is set, then the other fields are also taken into account
 * and will affect the allocation behavior for this allocation.
 *
 * @field kmg_tag          The VM_KERN_MEMORY_* tag for this entry.
 * @field kmg_type_hash    Some hash related to the type of the allocation.
 * @field kmg_atomic       Whether the entry is atomic.
 * @field kmg_submap       Whether the entry is for a submap.
 * @field kmg_context      A user-defined 30-bit value that will be stored
 *                         on the entry on allocation and checked
 *                         on other operations.
 */
typedef struct {
	uint16_t                kmg_tag;
	uint16_t                kmg_type_hash;
	uint32_t                kmg_atomic : 1;
	uint32_t                kmg_submap : 1;
	uint32_t                kmg_context : 30;
} kmem_guard_t;
#define KMEM_GUARD_NONE     (kmem_guard_t){ }
#define KMEM_GUARD_SUBMAP   (kmem_guard_t){ .kmg_atomic = 0, .kmg_submap = 1 }
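
/*
 * Illustrative sketch (an assumption, not a canonical recipe): an atomic
 * allocation that wants its identity re-checked on free might build its
 * guard as below; @c my_ctx stands for a caller-chosen 30-bit value.
 *
 *	kmem_guard_t guard = {
 *		.kmg_tag     = VM_KERN_MEMORY_KEXT,
 *		.kmg_atomic  = 1,
 *		.kmg_context = my_ctx,
 *	};
 */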


/*!
 * @typedef kmem_flags_t
 *
 * @brief
 * Sets of flags taken by several of the @c kmem_* family of functions.
 *
 * @discussion
 * This type is not used directly by any function; it is an underlying raw
 * type that is re-vended under different namespaces for each @c kmem_*
 * interface.
 *
 * - @c kmem_alloc    uses @c kma_flags_t / @c KMA_* namespaced values.
 * - @c kmem_suballoc uses @c kms_flags_t / @c KMS_* namespaced values.
 * - @c kmem_realloc  uses @c kmr_flags_t / @c KMR_* namespaced values.
 * - @c kmem_free     uses @c kmf_flags_t / @c KMF_* namespaced values.
 *
 *
 * <h2>Call behavior</h2>
 *
 * @const KMEM_NONE (all)
 * Pass this when no special options are to be used.
 *
 * @const KMEM_NOFAIL (alloc, suballoc)
 * When this flag is passed, any allocation failure results in a panic().
 * Using this flag should really be limited to cases when failure is not
 * recoverable and possibly during early boot only.
 *
 * @const KMEM_NOPAGEWAIT (alloc, realloc)
 * Pass this flag if the system should not wait in VM_PAGE_WAIT().
 *
 * @const KMEM_FREEOLD (realloc)
 * Pass this flag if @c kmem_realloc should free the old mapping
 * (when the address changed) as part of the call.
 *
 * @const KMEM_REALLOCF (realloc)
 * Similar to @c Z_REALLOCF: if the call fails,
 * then free the old allocation too.
 *
 *
 * <h2>How the entry is populated</h2>
 *
 * @const KMEM_VAONLY (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_VAONLY will cause the range to still be wired,
 * but no page is actually mapped.
 *
 * @const KMEM_PAGEABLE (alloc)
 * By default memory allocated by the kmem subsystem is wired and mapped.
 * Passing @c KMEM_PAGEABLE makes the entry non-wired, and pages will be
 * added to the entry as it faults.
 *
 * @const KMEM_ZERO (alloc, realloc)
 * Any new page added is zeroed.
 *
 *
 * <h2>VM object to use for the entry</h2>
 *
 * @const KMEM_KOBJECT (alloc, realloc)
 * The entry will be made for the @c kernel_object.
 *
 * Note that the @c kernel_object is just a "collection of pages".
 * Pages in that object can't be remapped or be present in several VM maps
 * like traditional objects.
 *
 * If neither @c KMEM_KOBJECT nor @c KMEM_COMPRESSOR is passed,
 * then a new fresh VM object will be made for this allocation.
 * This is expensive and should be limited to allocations that
 * need the features associated with a VM object.
 *
 * @const KMEM_COMPRESSOR (alloc)
 * The entry is allocated for the @c compressor_object.
 * Pages belonging to the compressor are not on the paging queues,
 * nor are they counted as wired.
 *
 * Only the VM Compressor subsystem should use this.
 *
 *
 * <h2>How to look for addresses</h2>
 *
 * @const KMEM_LOMEM (alloc, realloc)
 * The physical memory allocated must be in the first 4G of memory,
 * in order to support hardware controllers incapable of generating DMAs
 * with more than 32 bits of physical address.
 *
 * @const KMEM_LAST_FREE (alloc, suballoc, realloc)
 * When looking for space in the specified map,
 * start scanning for addresses from the end of the map
 * rather than the start.
 *
 * @const KMEM_DATA (alloc, suballoc, realloc)
 * The memory must be allocated from the "Data" range.
 *
 * @const KMEM_SPRAYQTN (alloc, realloc)
 * The memory must be allocated from the "spray quarantine" range. For more
 * details on what allocations qualify to use this flag see
 * @c KMEM_RANGE_ID_SPRAYQTN.
 *
 * @const KMEM_GUESS_SIZE (free)
 * When freeing an atomic entry (requires a valid kmem guard),
 * look up the entry size because the caller didn't preserve it.
 *
 * This flag is only here in order to support kfree_data_addr(),
 * and shall not be used by any other clients.
 *
 * <h2>Entry properties</h2>
 *
 * @const KMEM_PERMANENT (alloc, suballoc)
 * The entry is made permanent.
 *
 * In the kernel maps, permanent entries can never be deleted.
 * Calling @c kmem_free() on such a range will panic.
 *
 * In user maps, permanent entries will only be deleted
 * when the map is terminated.
 *
 * @const KMEM_GUARD_FIRST (alloc, realloc)
 * @const KMEM_GUARD_LAST (alloc, realloc)
 * Asks @c kmem_* to put a guard page at the beginning (resp. end)
 * of the allocation.
 *
 * The allocation size will not be extended to accommodate the guards,
 * and the client of this interface must take them into account.
 * Typically if a usable range of 3 pages is needed with both guards,
 * then 5 pages must be requested (see the illustrative sketch after
 * the flag declaration below).
 *
 * Alignment constraints take guards into account (the alignment applies
 * to the address right after the first guard page).
 *
 * The address returned by the allocation will point at the entry start,
 * which is the address of the left guard page, if any.
 *
 * Note that if @c kmem_realloc* is called, the *exact* same
 * guard flags must be passed for this entry. The KMEM subsystem
 * is generally oblivious to guards, and passing inconsistent flags
 * will cause pages to be moved incorrectly.
 *
 * @const KMEM_KSTACK (alloc)
 * This flag must be passed when the allocation is for kernel stacks.
 * This only has an effect on Intel.
 *
 * @const KMEM_NOENCRYPT (alloc)
 * Obsolete, will be repurposed soon.
 */
__options_decl(kmem_flags_t, uint32_t, {
	KMEM_NONE           = 0x00000000,

	/* Call behavior */
	KMEM_NOFAIL         = 0x00000001,
	KMEM_NOPAGEWAIT     = 0x00000002,
	KMEM_FREEOLD        = 0x00000004,
	KMEM_REALLOCF       = 0x00000008,

	/* How the entry is populated */
	KMEM_VAONLY         = 0x00000010,
	KMEM_PAGEABLE       = 0x00000020,
	KMEM_ZERO           = 0x00000040,

	/* VM object to use for the entry */
	KMEM_KOBJECT        = 0x00000100,
	KMEM_COMPRESSOR     = 0x00000200,

	/* How to look for addresses */
	KMEM_LOMEM          = 0x00001000,
	KMEM_LAST_FREE      = 0x00002000,
	KMEM_GUESS_SIZE     = 0x00004000,
	KMEM_DATA           = 0x00008000,
	KMEM_SPRAYQTN       = 0x00010000,

	/* Entry properties */
	KMEM_PERMANENT      = 0x00100000,
	KMEM_GUARD_FIRST    = 0x00200000,
	KMEM_GUARD_LAST     = 0x00400000,
	KMEM_KSTACK         = 0x00800000,
	KMEM_NOENCRYPT      = 0x01000000,
});
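
/*
 * Illustrative sketch of the guard-page sizing rule described above
 * (an assumption, not lifted from the implementation): to get 3 usable
 * pages protected by both guard pages, 5 pages are requested, and the
 * usable range starts one page past the returned address.
 *
 *	kmem_return_t kmr = kmem_alloc_guard(kernel_map, 5 * PAGE_SIZE, 0,
 *	    KMA_KOBJECT | KMA_ZERO | KMA_GUARD_FIRST | KMA_GUARD_LAST, guard);
 *	vm_offset_t usable = kmr.kmr_address + PAGE_SIZE;   // 3 usable pages
 */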


#pragma mark kmem range methods

extern struct mach_vm_range kmem_ranges[KMEM_RANGE_COUNT];
extern struct mach_vm_range kmem_large_ranges[KMEM_RANGE_COUNT];
#define KMEM_RANGE_MASK       0x3fff
#define KMEM_HASH_SET         0x4000
#define KMEM_DIRECTION_MASK   0x8000

__stateful_pure
extern mach_vm_size_t mach_vm_range_size(
	const struct mach_vm_range *r);

__attribute__((overloadable, pure))
extern bool mach_vm_range_contains(
	const struct mach_vm_range *r,
	mach_vm_offset_t        addr);

__attribute__((overloadable, pure))
extern bool mach_vm_range_contains(
	const struct mach_vm_range *r,
	mach_vm_offset_t        addr,
	mach_vm_offset_t        size);

__attribute__((overloadable, pure))
extern bool mach_vm_range_intersects(
	const struct mach_vm_range *r1,
	const struct mach_vm_range *r2);

__attribute__((overloadable, pure))
extern bool mach_vm_range_intersects(
	const struct mach_vm_range *r1,
	mach_vm_offset_t        addr,
	mach_vm_offset_t        size);

/*
 * @function kmem_range_id_contains
 *
 * @abstract Return whether the region of `[addr, addr + size)` is completely
 * within the memory range.
 */
__pure2
extern bool kmem_range_id_contains(
	kmem_range_id_t         range_id,
	vm_map_offset_t         addr,
	vm_map_size_t           size);

/*
 * @function kmem_range_id_size
 *
 * @abstract Return the addressable size of the memory range.
 */
__pure2
extern vm_map_size_t kmem_range_id_size(
	kmem_range_id_t         range_id);

__pure2
extern kmem_range_id_t kmem_addr_get_range(
	vm_map_offset_t         addr,
	vm_map_size_t           size);

extern kmem_range_id_t kmem_adjust_range_id(
	uint32_t                hash);


/**
 * @enum kmem_claims_flags_t
 *
 * @abstract
 * Set of flags used in the processing of kmem_range claims
 *
 * @discussion
 * These flags are used by the kmem subsystem while processing kmem_range
 * claims and are not explicitly passed by the caller registering the claim.
 *
 * @const KC_NO_ENTRY
 * A vm map entry should not be created for the respective claim.
 *
 * @const KC_NO_MOVE
 * The range shouldn't be moved once it has been placed as it has constraints.
 */
__options_decl(kmem_claims_flags_t, uint32_t, {
	KC_NONE       = 0x00000000,
	KC_NO_ENTRY   = 0x00000001,
	KC_NO_MOVE    = 0x00000002,
});

/*
 * Security config that creates the data split in kernel_map
 */
#if !defined(__LP64__)
# define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT     OFF
#else
# define ZSECURITY_CONFIG_KERNEL_DATA_SPLIT     ON
#endif

/*
 * Security config that creates the additional splits in non data part of
 * kernel_map
 */
#if KASAN || (__arm64__ && !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR))
# define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT      OFF
#else
# define ZSECURITY_CONFIG_KERNEL_PTR_SPLIT      ON
#endif

#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__OFF() 0
#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON()  1
#define ZSECURITY_CONFIG2(v)    ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__##v()
#define ZSECURITY_CONFIG1(v)    ZSECURITY_CONFIG2(v)
#define ZSECURITY_CONFIG(opt)   ZSECURITY_CONFIG1(ZSECURITY_CONFIG_##opt)
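
/*
 * The ZSECURITY_CONFIG() wrapper expands the ON/OFF settings above into
 * 1/0 so they can be tested with a regular #if, and fails to preprocess
 * if the option name is misspelled. For example (as used further below):
 *
 *	#if ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
 *	// code only built when the kernel_map data split is enabled
 *	#endif
 */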

struct kmem_range_startup_spec {
	const char             *kc_name;
	struct mach_vm_range   *kc_range;
	vm_map_size_t           kc_size;
	vm_map_size_t         (^kc_calculate_sz)(void);
	kmem_claims_flags_t     kc_flags;
};

extern void kmem_range_startup_init(
	struct kmem_range_startup_spec *sp);

/*!
 * @macro KMEM_RANGE_REGISTER_*
 *
 * @abstract
 * Register a claim for kmem range or submap.
 *
 * @discussion
 * Claims are shuffled during startup to randomize the layout of the kernel map.
 * Temporary entries are created in place of the claims; therefore the caller
 * must provide the start of the assigned range as a hint and pass
 * @c VM_FLAGS_FIXED_RANGE_SUBALLOC to kmem_suballoc to replace the mapping.
 *
 * Min/max constraints can be provided in the range when the claim is
 * registered.
 *
 * This macro comes in 2 flavors:
 * - STATIC : When the size of the range/submap is known at compile time
 * - DYNAMIC: When the size of the range/submap needs to be computed
 *
 * @param name          the name of the claim
 * @param range         the assigned range for the claim
 * @param size          the size of submap/range (if known at compile time)
 * @param calculate_sz  a block that returns the computed size of submap/range
 */
#define KMEM_RANGE_REGISTER_STATIC(name, range, size)                   \
	static __startup_data struct kmem_range_startup_spec            \
	__startup_kmem_range_spec_ ## name = { #name, range, size, NULL, KC_NONE}; \
	STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
	    &__startup_kmem_range_spec_ ## name)

#define KMEM_RANGE_REGISTER_DYNAMIC(name, range, calculate_sz)          \
	static __startup_data struct kmem_range_startup_spec            \
	__startup_kmem_range_spec_ ## name = { #name, range, 0, calculate_sz, \
	    KC_NONE};                                                    \
	STARTUP_ARG(KMEM, STARTUP_RANK_SECOND, kmem_range_startup_init, \
	    &__startup_kmem_range_spec_ ## name)
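
/*
 * Illustrative sketch (the claim name, range variable and size are made
 * up): registering a compile-time sized claim for a hypothetical submap.
 *
 *	static struct mach_vm_range example_range;
 *	KMEM_RANGE_REGISTER_STATIC(example_submap, &example_range, 32 << 20);
 *
 * A claim whose size is only known at boot would use the DYNAMIC flavor
 * instead, passing a block that computes the size (the helper below is
 * hypothetical):
 *
 *	KMEM_RANGE_REGISTER_DYNAMIC(example_submap, &example_range, ^{
 *		return example_submap_size();
 *	});
 */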

#if XNU_KERNEL_PRIVATE
#if ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
#define VM_FLAGS_FIXED_RANGE_SUBALLOC   (VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE)
#else /* ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#define VM_FLAGS_FIXED_RANGE_SUBALLOC   (VM_FLAGS_ANYWHERE)
#endif /* !ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
#endif /* XNU_KERNEL_PRIVATE */

__startup_func
extern uint16_t kmem_get_random16(
	uint16_t                upper_limit);

__startup_func
extern void kmem_shuffle(
	uint16_t               *shuffle_buf,
	uint16_t                count);


#pragma mark kmem entry parameters

/*!
 * @function kmem_entry_validate_guard()
 *
 * @brief
 * Validates that the entry matches the input parameters, panics otherwise.
 *
 * @discussion
 * If the guard is zero (@c KMEM_GUARD_NONE),
 * then the entry must be non-atomic.
 *
 * The guard tag is not used for validation as the VM subsystems
 * (particularly in IOKit) might decide to substitute it in ways
 * that are difficult to predict for the programmer.
 *
 * @param map           the map the entry belongs to
 * @param entry         the entry to validate
 * @param addr          the supposed start address
 * @param size          the supposed size of the entry
 * @param guard         the guard to use to "authenticate" the allocation.
 */
extern void kmem_entry_validate_guard(
	vm_map_t                map,
	struct vm_map_entry    *entry,
	vm_offset_t             addr,
	vm_size_t               size,
	kmem_guard_t            guard);

/*!
 * @function kmem_size_guard()
 *
 * @brief
 * Returns the size of an atomic allocation made in the specified map,
 * according to the guard.
 *
 * @param map           the kernel map in which to look up the entry.
 * @param addr          the kernel address to look up.
 * @param guard         the guard to use to "authenticate" the allocation.
 */
extern vm_size_t kmem_size_guard(
	vm_map_t                map,
	vm_offset_t             addr,
	kmem_guard_t            guard);

#pragma mark kmem allocations

/*!
 * @typedef kma_flags_t
 *
 * @brief
 * Flags used by the @c kmem_alloc* family of functions.
 */
__options_decl(kma_flags_t, uint32_t, {
	KMA_NONE        = KMEM_NONE,

	/* Call behavior */
	KMA_NOFAIL      = KMEM_NOFAIL,
	KMA_NOPAGEWAIT  = KMEM_NOPAGEWAIT,

	/* How the entry is populated */
	KMA_VAONLY      = KMEM_VAONLY,
	KMA_PAGEABLE    = KMEM_PAGEABLE,
	KMA_ZERO        = KMEM_ZERO,

	/* VM object to use for the entry */
	KMA_KOBJECT     = KMEM_KOBJECT,
	KMA_COMPRESSOR  = KMEM_COMPRESSOR,

	/* How to look for addresses */
	KMA_LOMEM       = KMEM_LOMEM,
	KMA_LAST_FREE   = KMEM_LAST_FREE,
	KMA_DATA        = KMEM_DATA,
	KMA_SPRAYQTN    = KMEM_SPRAYQTN,

	/* Entry properties */
	KMA_PERMANENT   = KMEM_PERMANENT,
	KMA_GUARD_FIRST = KMEM_GUARD_FIRST,
	KMA_GUARD_LAST  = KMEM_GUARD_LAST,
	KMA_KSTACK      = KMEM_KSTACK,
	KMA_NOENCRYPT   = KMEM_NOENCRYPT,
});

#define KMEM_ALLOC_CONTIG_FLAGS ( \
	/* Call behavior */ \
	KMA_NOPAGEWAIT | \
	\
	/* How the entry is populated */ \
	KMA_ZERO | \
	\
	/* VM object to use for the entry */ \
	KMA_KOBJECT | \
	\
	/* How to look for addresses */ \
	KMA_LOMEM | \
	KMA_DATA | \
	\
	/* Entry properties */ \
	KMA_PERMANENT | \
	\
	KMA_NONE)


/*!
 * @function kmem_alloc_guard()
 *
 * @brief
 * Master entry point for allocating kernel memory.
 *
 * @param map           map to allocate into, must be a kernel map.
 * @param size          the size of the entry to allocate, must not be 0.
 * @param mask          an alignment mask that the returned allocation
 *                      will be aligned to (ignoring guards, see
 *                      @c KMEM_GUARD_FIRST).
 * @param flags         a set of @c KMA_* flags, (@see @c kmem_flags_t)
 * @param guard         how to guard the allocation.
 *
 * @returns
 *     - the non-zero address of the allocation on success in @c kmr_address.
 *     - @c KERN_NO_SPACE if the target map is out of address space.
 *     - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages.
 */
extern kmem_return_t kmem_alloc_guard(
	vm_map_t                map,
	vm_size_t               size,
	vm_offset_t             mask,
	kma_flags_t             flags,
	kmem_guard_t            guard) __result_use_check;

static inline kern_return_t
kernel_memory_allocate(
	vm_map_t                map,
	vm_offset_t            *addrp,
	vm_size_t               size,
	vm_offset_t             mask,
	kma_flags_t             flags,
	vm_tag_t                tag)
{
	kmem_guard_t guard = {
		.kmg_tag = tag,
	};
	kmem_return_t kmr;

	kmr = kmem_alloc_guard(map, size, mask, flags, guard);
	if (kmr.kmr_return == KERN_SUCCESS) {
		__builtin_assume(kmr.kmr_address != 0);
	} else {
		__builtin_assume(kmr.kmr_address == 0);
	}
	*addrp = kmr.kmr_address;
	return kmr.kmr_return;
}

static inline kern_return_t
kmem_alloc(
	vm_map_t                map,
	vm_offset_t            *addrp,
	vm_size_t               size,
	kma_flags_t             flags,
	vm_tag_t                tag)
{
	return kernel_memory_allocate(map, addrp, size, 0, flags, tag);
}

extern kern_return_t kmem_alloc_contig(
	vm_map_t                map,
	vm_offset_t            *addrp,
	vm_size_t               size,
	vm_offset_t             mask,
	ppnum_t                 max_pnum,
	ppnum_t                 pnum_mask,
	kma_flags_t             flags,
	vm_tag_t                tag)
__attribute__((diagnose_if(flags & ~KMEM_ALLOC_CONTIG_FLAGS,
    "invalid alloc_contig flags passed", "error")));


/*!
 * @typedef kms_flags_t
 *
 * @brief
 * Flags used by @c kmem_suballoc.
 */
__options_decl(kms_flags_t, uint32_t, {
	KMS_NONE        = KMEM_NONE,

	/* Call behavior */
	KMS_NOFAIL      = KMEM_NOFAIL,

	/* How to look for addresses */
	KMS_LAST_FREE   = KMEM_LAST_FREE,
	KMS_DATA        = KMEM_DATA,

	/* Entry properties */
	KMS_PERMANENT   = KMEM_PERMANENT,
});

/*!
 * @function kmem_suballoc()
 *
 * @brief
 * Create a kernel submap, in an atomic entry guarded with KMEM_GUARD_SUBMAP.
 *
 * @param parent        map to allocate into, must be a kernel map.
 * @param addr          (in/out) the address for the map (see vm_map_enter)
 * @param size          the size of the entry to allocate, must not be 0.
 * @param vmc_options   the map creation options
 * @param vm_flags      a set of @c VM_FLAGS_* flags
 * @param flags         a set of @c KMS_* flags, (@see @c kmem_flags_t)
 * @param tag           the tag for this submap's entry.
 */
extern kmem_return_t kmem_suballoc(
	vm_map_t                parent,
	mach_vm_offset_t       *addr,
	vm_size_t               size,
	vm_map_create_options_t vmc_options,
	int                     vm_flags,
	kms_flags_t             flags,
	vm_tag_t                tag);
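
/*
 * Illustrative sketch (names, size, creation option and tag are
 * assumptions): carving a submap out of a range claimed with
 * KMEM_RANGE_REGISTER_*, using the claimed start address as the hint,
 * as described above.
 *
 *	mach_vm_offset_t addr = example_range_start;   // start of the claim
 *	kmem_return_t kmr = kmem_suballoc(kernel_map, &addr, 32 << 20,
 *	    VM_MAP_CREATE_PAGEABLE, VM_FLAGS_FIXED_RANGE_SUBALLOC,
 *	    KMS_PERMANENT | KMS_NOFAIL, VM_KERN_MEMORY_IOKIT);
 *	vm_map_t submap = kmr.kmr_submap;
 */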


#pragma mark kmem reallocation

/*!
 * @typedef kmr_flags_t
 *
 * @brief
 * Flags used by the @c kmem_realloc* family of functions.
 */
__options_decl(kmr_flags_t, uint32_t, {
	KMR_NONE        = KMEM_NONE,

	/* Call behavior */
	KMR_NOPAGEWAIT  = KMEM_NOPAGEWAIT,
	KMR_FREEOLD     = KMEM_FREEOLD,
	KMR_REALLOCF    = KMEM_REALLOCF,

	/* How the entry is populated */
	KMR_ZERO        = KMEM_ZERO,

	/* VM object to use for the entry */
	KMR_KOBJECT     = KMEM_KOBJECT,

	/* How to look for addresses */
	KMR_LOMEM       = KMEM_LOMEM,
	KMR_LAST_FREE   = KMEM_LAST_FREE,
	KMR_DATA        = KMEM_DATA,
	KMR_SPRAYQTN    = KMEM_SPRAYQTN,

	/* Entry properties */
	KMR_GUARD_FIRST = KMEM_GUARD_FIRST,
	KMR_GUARD_LAST  = KMEM_GUARD_LAST,
});

#define KMEM_REALLOC_FLAGS_VALID(flags) \
	(((flags) & KMR_KOBJECT) == 0 || ((flags) & KMR_FREEOLD))

/*!
 * @function kmem_realloc_guard()
 *
 * @brief
 * Reallocates memory allocated with kmem_alloc_guard()
 *
 * @discussion
 * @c kmem_realloc_guard() either mandates a guard with atomicity set,
 * or must use KMR_DATA (this is not an implementation limitation
 * but a security policy).
 *
 * If kmem_realloc_guard() is called for the kernel object
 * (with @c KMR_KOBJECT), then the use of @c KMR_FREEOLD is mandatory.
 *
 * When @c KMR_FREEOLD isn't used, if the allocation was relocated
 * as opposed to being extended or truncated in place, the caller
 * must free its old mapping manually by calling @c kmem_free_guard().
 *
 * Note that if the entry is truncated, it will always be done in place.
 *
 *
 * @param map           map to allocate into, must be a kernel map.
 * @param oldaddr       the address to reallocate,
 *                      passing 0 means @c kmem_alloc_guard() will be called.
 * @param oldsize       the current size of the entry
 * @param newsize       the new size of the entry,
 *                      0 means kmem_free_guard() will be called.
 * @param flags         a set of @c KMR_* flags, (@see @c kmem_flags_t)
 *                      the exact same set of @c KMR_GUARD_* flags must
 *                      be passed for all calls (@see kmem_flags_t).
 * @param guard         the allocation guard.
 *
 * @returns
 *     - the newly allocated address on success in @c kmr_address
 *       (note that if newsize is 0, then address will be 0 too).
 *     - @c KERN_NO_SPACE if the target map is out of address space.
 *     - @c KERN_RESOURCE_SHORTAGE if the kernel is out of pages.
 */
extern kmem_return_t kmem_realloc_guard(
	vm_map_t                map,
	vm_offset_t             oldaddr,
	vm_size_t               oldsize,
	vm_size_t               newsize,
	kmr_flags_t             flags,
	kmem_guard_t            guard) __result_use_check
__attribute__((diagnose_if(!KMEM_REALLOC_FLAGS_VALID(flags),
    "invalid realloc flags passed", "error")));

/*!
 * @function kmem_realloc_should_free()
 *
 * @brief
 * Returns whether the old address passed to a @c kmem_realloc_guard()
 * call without @c KMR_FREEOLD must be freed.
 *
 * @param oldaddr       the "oldaddr" passed to @c kmem_realloc_guard().
 * @param kmr           the result of that @c kmem_realloc_guard() call.
 */
static inline bool
kmem_realloc_should_free(
	vm_offset_t             oldaddr,
	kmem_return_t           kmr)
{
	return oldaddr && oldaddr != kmr.kmr_address;
}
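
/*
 * Illustrative sketch (an assumption) of the no-KMR_FREEOLD pattern
 * described above: grow a "data" allocation, then free the old mapping
 * only if the reallocation actually moved it.
 *
 *	kmem_return_t kmr = kmem_realloc_guard(kernel_map, oldaddr, oldsize,
 *	    newsize, KMR_DATA | KMR_ZERO, guard);
 *	if (kmr.kmr_return != KERN_SUCCESS) {
 *		return kmr.kmr_return;
 *	}
 *	if (kmem_realloc_should_free(oldaddr, kmr)) {
 *		kmem_free_guard(kernel_map, oldaddr, oldsize, KMF_NONE, guard);
 *	}
 *	oldaddr = kmr.kmr_address;
 */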


#pragma mark kmem free

/*!
 * @typedef kmf_flags_t
 *
 * @brief
 * Flags used by the @c kmem_free* family of functions.
 */
__options_decl(kmf_flags_t, uint32_t, {
	KMF_NONE        = KMEM_NONE,

	/* Call behavior */

	/* How the entry is populated */

	/* How to look for addresses */
	KMF_GUESS_SIZE  = KMEM_GUESS_SIZE,
});


/*!
 * @function kmem_free_guard()
 *
 * @brief
 * Frees memory allocated with @c kmem_alloc or @c kmem_realloc.
 *
 * @param map           map to free from, must be a kernel map.
 * @param addr          the address to free
 * @param size          the size of the memory to free
 * @param flags         a set of @c KMF_* flags, (@see @c kmem_flags_t)
 * @param guard         the allocation guard.
 *
 * @returns             the size of the entry that was deleted.
 *                      (useful when @c KMF_GUESS_SIZE was used)
 */
extern vm_size_t kmem_free_guard(
	vm_map_t                map,
	vm_offset_t             addr,
	vm_size_t               size,
	kmf_flags_t             flags,
	kmem_guard_t            guard);

static inline void
kmem_free(
	vm_map_t                map,
	vm_offset_t             addr,
	vm_size_t               size)
{
	kmem_free_guard(map, addr, size, KMF_NONE, KMEM_GUARD_NONE);
}
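
/*
 * Illustrative sketch (an assumption): the non-guarded convenience
 * wrappers pair up as a plain allocate/free cycle for wired, zeroed
 * "data" memory; the tag is an arbitrary VM_KERN_MEMORY_* value.
 *
 *	vm_offset_t addr;
 *	kern_return_t kr = kmem_alloc(kernel_map, &addr, size,
 *	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
 *	if (kr == KERN_SUCCESS) {
 *		// ... use the buffer ...
 *		kmem_free(kernel_map, addr, size);
 *	}
 */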

#pragma mark kmem population

extern void kernel_memory_populate_object_and_unlock(
	vm_object_t             object, /* must be locked */
	vm_address_t            addr,
	vm_offset_t             offset,
	vm_size_t               size,
	struct vm_page         *page_list,
	kma_flags_t             flags,
	vm_tag_t                tag,
	vm_prot_t               prot);

extern kern_return_t kernel_memory_populate(
	vm_offset_t             addr,
	vm_size_t               size,
	kma_flags_t             flags,
	vm_tag_t                tag);

extern void kernel_memory_depopulate(
	vm_offset_t             addr,
	vm_size_t               size,
	kma_flags_t             flags,
	vm_tag_t                tag);

#pragma GCC visibility pop
#elif KERNEL_PRIVATE /* XNU_KERNEL_PRIVATE */

extern kern_return_t kmem_alloc(
	vm_map_t                map,
	vm_offset_t            *addrp,
	vm_size_t               size);

extern kern_return_t kmem_alloc_pageable(
	vm_map_t                map,
	vm_offset_t            *addrp,
	vm_size_t               size);

extern kern_return_t kmem_alloc_kobject(
	vm_map_t                map,
	vm_offset_t            *addrp,
	vm_size_t               size);

extern void kmem_free(
	vm_map_t                map,
	vm_offset_t             addr,
	vm_size_t               size);

#endif /* KERNEL_PRIVATE */

#pragma mark - kernel address obfuscation / hashing for logging

extern vm_offset_t vm_kernel_addrperm_ext;

extern void vm_kernel_addrhide(
	vm_offset_t             addr,
	vm_offset_t            *hide_addr);

extern void vm_kernel_addrperm_external(
	vm_offset_t             addr,
	vm_offset_t            *perm_addr);

extern void vm_kernel_unslide_or_perm_external(
	vm_offset_t             addr,
	vm_offset_t            *up_addr);
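
/*
 * Illustrative sketch (an assumption): obfuscating a kernel pointer with
 * the permutation before exposing it in a log line.
 *
 *	vm_offset_t perm_addr;
 *	vm_kernel_addrperm_external((vm_offset_t)object_ptr, &perm_addr);
 *	printf("object at %p\n", (void *)perm_addr);
 */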

#if !XNU_KERNEL_PRIVATE

extern vm_offset_t vm_kernel_addrhash(
	vm_offset_t             addr);

#else /* XNU_KERNEL_PRIVATE */
#pragma GCC visibility push(hidden)

extern uint64_t vm_kernel_addrhash_salt;
extern uint64_t vm_kernel_addrhash_salt_ext;

extern vm_offset_t vm_kernel_addrhash_internal(
	vm_offset_t             addr,
	uint64_t                salt);

static inline vm_offset_t
vm_kernel_addrhash(vm_offset_t addr)
{
	return vm_kernel_addrhash_internal(addr, vm_kernel_addrhash_salt);
}

#pragma mark - kernel variants of the Mach VM interfaces

extern kern_return_t mach_vm_allocate_kernel(
	vm_map_t                map,
	mach_vm_offset_t       *addr,
	mach_vm_size_t          size,
	int                     flags,
	vm_tag_t                tag);

extern kern_return_t mach_vm_map_kernel(
	vm_map_t                target_map,
	mach_vm_offset_t       *address,
	mach_vm_size_t          initial_size,
	mach_vm_offset_t        mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);


extern kern_return_t vm_map_kernel(
	vm_map_t                target_map,
	vm_offset_t            *address,
	vm_size_t               size,
	vm_offset_t             mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_offset_t             offset,
	boolean_t               copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t mach_vm_remap_kernel(
	vm_map_t                target_map,
	mach_vm_offset_t       *address,
	mach_vm_size_t          size,
	mach_vm_offset_t        mask,
	int                     flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	mach_vm_offset_t        memory_address,
	boolean_t               copy,
	vm_prot_t              *cur_protection,
	vm_prot_t              *max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t mach_vm_remap_new_kernel(
	vm_map_t                target_map,
	mach_vm_offset_t       *address,
	mach_vm_size_t          size,
	mach_vm_offset_t        mask,
	int                     flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	mach_vm_offset_t        memory_address,
	boolean_t               copy,
	vm_prot_t              *cur_protection,
	vm_prot_t              *max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t vm_remap_kernel(
	vm_map_t                target_map,
	vm_offset_t            *address,
	vm_size_t               size,
	vm_offset_t             mask,
	int                     flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_offset_t             memory_address,
	boolean_t               copy,
	vm_prot_t              *cur_protection,
	vm_prot_t              *max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t vm_map_64_kernel(
	vm_map_t                target_map,
	vm_offset_t            *address,
	vm_size_t               size,
	vm_offset_t             mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t mach_vm_wire_kernel(
	host_priv_t             host_priv,
	vm_map_t                map,
	mach_vm_offset_t        start,
	mach_vm_size_t          size,
	vm_prot_t               access,
	vm_tag_t                tag);

extern kern_return_t vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               caller_prot,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               caller_prot,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                *physpage_p);

extern kern_return_t memory_object_iopl_request(
	ipc_port_t              port,
	memory_object_offset_t  offset,
	upl_size_t             *upl_size,
	upl_t                  *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int           *page_list_count,
	upl_control_flags_t    *flags,
	vm_tag_t                tag);

#ifdef MACH_KERNEL_PRIVATE

extern kern_return_t copyinmap(
	vm_map_t                map,
	vm_map_offset_t         fromaddr,
	void                   *todata,
	vm_size_t               length);

extern kern_return_t copyoutmap(
	vm_map_t                map,
	void                   *fromdata,
	vm_map_offset_t         toaddr,
	vm_size_t               length);

extern kern_return_t copyoutmap_atomic32(
	vm_map_t                map,
	uint32_t                value,
	vm_map_offset_t         toaddr);

extern kern_return_t copyoutmap_atomic64(
	vm_map_t                map,
	uint64_t                value,
	vm_map_offset_t         toaddr);

#endif /* MACH_KERNEL_PRIVATE */
#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#pragma mark - unsorted interfaces

#ifdef XNU_KERNEL_PRIVATE
typedef struct vm_allocation_site kern_allocation_name;
typedef kern_allocation_name * kern_allocation_name_t;
#else /* XNU_KERNEL_PRIVATE */
struct kern_allocation_name;
typedef struct kern_allocation_name * kern_allocation_name_t;
#endif /* !XNU_KERNEL_PRIVATE */

extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs);
extern void kern_allocation_name_release(kern_allocation_name_t allocation);
extern const char * kern_allocation_get_name(kern_allocation_name_t allocation);
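
/*
 * Illustrative sketch (an assumption): accounting a driver's allocations
 * under a named bucket; the name string is made up.
 *
 *	kern_allocation_name_t site = kern_allocation_name_allocate("mydrv", 0);
 *	// ... pass the associated tag to allocations, see
 *	// kern_allocation_name_get_vm_tag() below ...
 *	kern_allocation_name_release(site);
 */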

#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta);
extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta);
extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation);

struct mach_memory_info;
extern kern_return_t vm_page_diagnose(
	struct mach_memory_info *info,
	unsigned int            num_info,
	uint64_t                zones_collectable_bytes);

extern uint32_t vm_page_diagnose_estimate(void);

extern void vm_init_before_launchd(void);

typedef enum {
	PMAP_FEAT_UEXEC = 1
} pmap_feature_flags_t;

#if defined(__x86_64__)
extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);
#endif

#if DEBUG || DEVELOPMENT

extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size);

#endif /* DEBUG || DEVELOPMENT */

#if HIBERNATION
extern void hibernate_rebuild_vm_structs(void);
#endif /* HIBERNATION */

extern vm_tag_t vm_tag_bt(void);

extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site);

extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP);

extern void vm_tag_update_size(vm_tag_t tag, int64_t size);

extern uint64_t vm_tag_get_size(vm_tag_t tag);

#if VM_TAG_SIZECLASSES

extern void vm_allocation_zones_init(void);
extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags);
extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta);

#endif /* VM_TAG_SIZECLASSES */

extern vm_tag_t vm_tag_bt_debug(void);

extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen);

extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);

extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* _VM_VM_KERN_H_ */