1 /*
2 * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #ifdef KERNEL_PRIVATE
58
59 #ifndef _KERN_KALLOC_H_
60 #define _KERN_KALLOC_H_
61
62 #include <mach/machine/vm_types.h>
63 #include <mach/boolean.h>
64 #include <mach/vm_types.h>
65 #include <kern/zalloc.h>
66 #include <libkern/section_keywords.h>
67 #include <os/alloc_util.h>
68 #if XNU_KERNEL_PRIVATE
69 #include <kern/counter.h>
70 #endif /* XNU_KERNEL_PRIVATE */
71
72 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
73
74 /*!
75 * @const KALLOC_SAFE_ALLOC_SIZE
76 *
77 * @brief
78 * The maximum allocation size that is safe to allocate with Z_NOFAIL in kalloc.
79 */
80 #if __LP64__
81 #define KALLOC_SAFE_ALLOC_SIZE (16u * 1024u)
82 #else
83 #define KALLOC_SAFE_ALLOC_SIZE (8u * 1024u)
84 #endif
85
86 #if XNU_KERNEL_PRIVATE
87 /*!
88 * @typedef kalloc_heap_t
89 *
90 * @abstract
91 * A kalloc heap view represents a sub-accounting context
92 * for a given kalloc heap.
93 */
typedef struct kalloc_heap {
	struct kheap_zones *kh_zones;    /* backing set of kalloc zones for this heap */
	zone_stats_t kh_stats;           /* accounting stats for this view (see KALLOC_HEAP_DEFINE) */
	const char *kh_name __unsafe_indexable;  /* human-readable view name */
	struct kalloc_heap *kh_next;     /* NOTE(review): presumably links registered views — confirm in kheap_startup_init */
	zone_kheap_id_t kh_heap_id;      /* KHEAP_ID_* constant identifying the base heap */
	vm_tag_t kh_tag;                 /* VM tag applied to allocations made through this view */
} *kalloc_heap_t;
102
103 /*!
104 * @macro KALLOC_HEAP_DECLARE
105 *
106 * @abstract
107 * (optionally) declare a kalloc heap view in a header.
108 *
109 * @discussion
110 * Unlike kernel zones, new full blown heaps cannot be instantiated.
111 * However new accounting views of the base heaps can be made.
112 */
113 #define KALLOC_HEAP_DECLARE(var) \
114 extern struct kalloc_heap var[1]
115
116 /**
117 * @const KHEAP_ANY
118 *
119 * @brief
120 * A value that represents either the default or kext heap for codepaths that
121 * need to allow @c kheap_free() to either one.
122 *
123 * @discussion
124 * When the memory provenance is not known, this value can be used to free
125 * memory indiscriminately.
126 *
127 * Note: code using this constant can likely be used as a gadget to free
128 * arbitrary memory and its use is strongly discouraged.
129 */
130 #define KHEAP_ANY ((struct kalloc_heap *)NULL)
131
132 /**
133 * @const KHEAP_DATA_BUFFERS
134 *
135 * @brief
136 * The builtin heap for bags of pure bytes.
137 *
138 * @discussion
139 * This set of kalloc zones should contain pure bags of bytes with no pointers
140 * or length/offset fields.
141 *
142 * The zones forming the heap aren't sequestered from each other, however the
143 * entire heap lives in a different submap from any other kernel allocation.
144 *
145 * The main motivation behind this separation is due to the fact that a lot of
146 * these objects have been used by attackers to spray the heap to make it more
147 * predictable while exploiting use-after-frees or overflows.
148 *
149 * Common attributes that make these objects useful for spraying includes
150 * control of:
151 * - Data in allocation
152 * - Time of alloc and free (lifetime)
153 * - Size of allocation
154 */
155 KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS);
156
157 /**
158 * @const KHEAP_DEFAULT
159 *
160 * @brief
161 * The builtin default core kernel kalloc heap.
162 *
163 * @discussion
164 * This set of kalloc zones should contain other objects that don't have their
165 * own security mitigations. The individual zones are themselves sequestered.
166 */
167 KALLOC_HEAP_DECLARE(KHEAP_DEFAULT);
168
169 /**
170 * @const KHEAP_KT_VAR
171 *
172 * @brief
173 * Temporary heap for variable sized kalloc type allocations
174 *
175 * @discussion
176 * This heap will be removed when logic for kalloc_type_var_views is added
177 *
178 */
179 KALLOC_HEAP_DECLARE(KHEAP_KT_VAR);
180
181 /*!
182 * @macro KALLOC_HEAP_DEFINE
183 *
184 * @abstract
185 * Defines a given kalloc heap view and what it points to.
186 *
187 * @discussion
188 * Kalloc heaps are views over one of the pre-defined builtin heaps
189 * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating
190 * a new one allows for accounting of allocations through this view.
191 *
192 * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase,
193 * as the last rank. If views on zones are created, these must have been
194 * created before this stage.
195 *
196 * @param var the name for the zone view.
197 * @param name a string describing the zone view.
198 * @param heap_id a @c KHEAP_ID_* constant.
199 */
200 #define KALLOC_HEAP_DEFINE(var, name, heap_id) \
201 SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \
202 .kh_name = name, \
203 .kh_heap_id = heap_id, \
204 } }; \
205 STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, kheap_startup_init, var)
206
207
208 /*
209 * Allocations of type SO_NAME are known to not have pointers for
210 * most platforms -- for macOS this is not guaranteed
211 */
212 #if XNU_TARGET_OS_OSX
213 #define KHEAP_SONAME KHEAP_DEFAULT
214 #else /* XNU_TARGET_OS_OSX */
215 #define KHEAP_SONAME KHEAP_DATA_BUFFERS
216 #endif /* XNU_TARGET_OS_OSX */
217
218 #endif /* XNU_KERNEL_PRIVATE */
219
220 /*!
221 * @enum kalloc_type_flags_t
222 *
223 * @brief
224 * Flags that can be passed to @c KALLOC_TYPE_DEFINE
225 *
226 * @discussion
227 * These flags can be used to request for a specific accounting
228 * behavior.
229 *
230 * @const KT_DEFAULT
231 * Passing this flag will provide default accounting behavior
232 * i.e shared accounting unless toggled with KT_OPTIONS_ACCT is
233 * set in kt boot-arg.
234 *
235 * @const KT_PRIV_ACCT
236 * Passing this flag will provide individual stats for your
237 * @c kalloc_type_view that is defined.
238 *
239 * @const KT_SHARED_ACCT
240 * Passing this flag will accumulate stats as a part of the
241 * zone that your @c kalloc_type_view points to.
242 *
243 * @const KT_DATA_ONLY
244 * Represents that the type is "data-only". Adopters should not
245 * set this flag manually, it is meant for the compiler to set
246 * automatically when KALLOC_TYPE_CHECK(DATA) passes.
247 *
248 * @const KT_VM
249 * Represents that the type is large enough to use the VM. Adopters
250 * should not set this flag manually, it is meant for the compiler
251 * to set automatically when KALLOC_TYPE_VM_SIZE_CHECK passes.
252 *
253 * @const KT_PTR_ARRAY
254 * Represents that the type is an array of pointers. Adopters should not
255 * set this flag manually, it is meant for the compiler to set
256 * automatically when KALLOC_TYPE_CHECK(PTR) passes.
257 *
258 * @const KT_CHANGED*
259 * Represents a change in the version of the kalloc_type_view. This
 * is required in order to decouple requiring kexts to be rebuilt to
 * use the new definitions right away. This flag should not be used
 * manually at a callsite, it is meant for internal use only. Future
 * changes to kalloc_type_view definition should toggle this flag.
264 *
265 #if XNU_KERNEL_PRIVATE
266 *
267 * @const KT_SLID
268 * To indicate that strings in the view were slid during early boot.
269 *
270 * @const KT_PROCESSED
 * This flag is set once the view is parsed during early boot. Views
272 * that are not in BootKC on macOS aren't parsed and therefore will
273 * not have this flag set. The runtime can use this as an indication
274 * to appropriately redirect the call.
275 *
276 * @const KT_VM_TAG_MASK
277 * Represents bits in which a vm_tag_t for the allocation can be passed.
278 * (used for the zone tagging debugging feature).
279 #endif
280 */
__options_decl(kalloc_type_flags_t, uint32_t, {
	KT_DEFAULT         = 0x0001,  /* default accounting (shared unless KT_OPTIONS_ACCT boot-arg) */
	KT_PRIV_ACCT       = 0x0002,  /* individual stats for this view */
	KT_SHARED_ACCT     = 0x0004,  /* stats accumulated in the view's backing zone */
	KT_DATA_ONLY       = 0x0008,  /* compiler-set: type is data-only (KALLOC_TYPE_CHECK(DATA)) */
	KT_VM              = 0x0010,  /* compiler-set: type is large enough to use the VM */
	KT_CHANGED         = 0x0020,  /* internal: view definition version marker */
	KT_CHANGED2        = 0x0040,  /* internal: view definition version marker */
	KT_PTR_ARRAY       = 0x0080,  /* compiler-set: type is an array of pointers (KALLOC_TYPE_CHECK(PTR)) */
#if XNU_KERNEL_PRIVATE
	KT_SLID            = 0x4000,  /* strings in the view were slid during early boot */
	KT_PROCESSED       = 0x8000,  /* view was parsed during early boot */
	/** used to propagate vm tags for -zt */
	KT_VM_TAG_MASK     = 0xffff0000,
#endif
});
297
298 /*!
299 * @typedef kalloc_type_view_t
300 *
301 * @abstract
302 * A kalloc type view is a structure used to redirect callers
303 * of @c kalloc_type to a particular zone based on the signature of
304 * their type.
305 *
306 * @discussion
307 * These structures are automatically created under the hood for every
308 * @c kalloc_type and @c kfree_type callsite. They are ingested during startup
309 * and are assigned zones based on the security policy for their signature.
310 *
311 * These structs are protected by the kernel lockdown and can't be initialized
312 * dynamically. They must be created using @c KALLOC_TYPE_DEFINE() or
313 * @c kalloc_type or @c kfree_type.
314 *
315 */
struct kalloc_type_view {
	struct zone_view kt_zv;       /* the zone view this type is redirected to */
	const char *kt_signature __unsafe_indexable;  /* type signature string (see kt_granule_t) */
	kalloc_type_flags_t kt_flags; /* KT_* flags; high bits may carry a vm_tag (KT_VM_TAG_MASK) */
	uint32_t kt_size;             /* NOTE(review): presumably sizeof() of the type — confirm */
	void *unused1;                /* reserved */
	void *unused2;                /* reserved */
};
324
325 typedef struct kalloc_type_view *kalloc_type_view_t;
326
327 /*
328 * "Heaps" or sets of zones, used for variable size kalloc_type allocations
329 * are defined by the constants below.
330 *
331 * KHEAP_START_SIZE: Size of the first sequential zone.
332 * KHEAP_MAX_SIZE : Size of the last sequential zone.
333 * KHEAP_STEP_WIDTH: Number of zones created at every step (power of 2).
334 * KHEAP_STEP_START: Size of the first step.
335 * We also create some extra initial zones that don't follow the sequence
336 * for sizes 8 (on armv7 only), 16 and 32.
337 *
338 * idx step_increment zone_elem_size
339 * 0 - 16
340 * 1 - 32
341 * 2 16 48
342 * 3 16 64
343 * 4 32 96
344 * 5 32 128
345 * 6 64 192
346 * 7 64 256
347 * 8 128 384
348 * 9 128 512
349 * 10 256 768
350 * 11 256 1024
351 * 12 512 1536
352 * 13 512 2048
353 * 14 1024 3072
354 * 15 1024 4096
355 * 16 2048 6144
356 * 17 2048 8192
357 * 18 4096 12288
358 * 19 4096 16384
359 * 20 8192 24576
360 * 21 8192 32768
361 */
362 #define kalloc_log2down(mask) (31 - __builtin_clz(mask))
363 #define KHEAP_START_SIZE 32
364 #if !defined(__LP64__)
365 #define KHEAP_MAX_SIZE 8 * 1024
366 #define KHEAP_EXTRA_ZONES 3
367 #elif __x86_64__
368 #define KHEAP_MAX_SIZE 16 * 1024
369 #define KHEAP_EXTRA_ZONES 2
370 #else
371 #define KHEAP_MAX_SIZE 32 * 1024
372 #define KHEAP_EXTRA_ZONES 2
373 #endif
374 #define KHEAP_STEP_WIDTH 2
375 #define KHEAP_STEP_START 16
376 #define KHEAP_START_IDX kalloc_log2down(KHEAP_START_SIZE)
377 #define KHEAP_NUM_STEPS (kalloc_log2down(KHEAP_MAX_SIZE) - \
378 kalloc_log2down(KHEAP_START_SIZE))
379 #define KHEAP_NUM_ZONES (KHEAP_NUM_STEPS * KHEAP_STEP_WIDTH + \
380 KHEAP_EXTRA_ZONES)
381
382 /*!
383 * @enum kalloc_type_version_t
384 *
385 * @brief
386 * Enum that holds versioning information for @c kalloc_type_var_view
387 *
388 * @const KT_V1
389 * Version 1
390 *
391 */
392 __options_decl(kalloc_type_version_t, uint16_t, {
393 KT_V1 = 0x0001,
394 });
395
396 /*!
397 * @typedef kalloc_type_var_view_t
398 *
399 * @abstract
 * This structure is analogous to @c kalloc_type_view but handles
401 * @c kalloc_type callsites that are variable in size.
402 *
403 * @discussion
404 * These structures are automatically created under the hood for every
405 * variable sized @c kalloc_type and @c kfree_type callsite. They are ingested
406 * during startup and are assigned zones based on the security policy for
407 * their signature.
408 *
409 * These structs are protected by the kernel lockdown and can't be initialized
410 * dynamically. They must be created using @c KALLOC_TYPE_VAR_DEFINE() or
411 * @c kalloc_type or @c kfree_type.
412 *
413 */
struct kalloc_type_var_view {
	kalloc_type_version_t kt_version;  /* view ABI version (KT_V1) */
	uint16_t kt_size_hdr;              /* NOTE(review): presumably sizeof() of the header type — confirm */
	/*
	 * Temporary: Needs to be 32bits cause we have many structs that use
	 * IONew/Delete that are larger than 32K.
	 */
	uint32_t kt_size_type;             /* size of the repeating element type */
	zone_stats_t kt_stats;             /* accounting stats for this view */
	const char *kt_name __unsafe_indexable;  /* human-readable view name */
	zone_view_t kt_next;               /* NOTE(review): presumably links registered views — confirm */
	zone_id_t kt_heap_start;           /* first zone id of the heap backing this view */
	uint8_t kt_zones[KHEAP_NUM_ZONES]; /* per size-class zone selection table */
	const char *kt_sig_hdr __unsafe_indexable;   /* signature of the header type */
	const char *kt_sig_type __unsafe_indexable;  /* signature of the element type */
	kalloc_type_flags_t kt_flags;      /* KT_* flags */
};
431
432 typedef struct kalloc_type_var_view *kalloc_type_var_view_t;
433
434 /*!
435 * @macro KALLOC_TYPE_DECLARE
436 *
437 * @abstract
438 * (optionally) declares a kalloc type view (in a header).
439 *
440 * @param var the name for the kalloc type view.
441 */
442 #define KALLOC_TYPE_DECLARE(var) \
443 extern struct kalloc_type_view var[1]
444
445 /*!
446 * @macro KALLOC_TYPE_DEFINE
447 *
448 * @abstract
 * Defines a given kalloc type view with preferred accounting
450 *
451 * @discussion
452 * This macro allows you to define a kalloc type with private
453 * accounting. The defined kalloc_type_view can be used with
454 * kalloc_type_impl/kfree_type_impl to allocate/free memory.
455 * zalloc/zfree can also be used from inside xnu. However doing
456 * so doesn't handle freeing a NULL pointer or the use of tags.
457 *
458 * @param var the name for the kalloc type view.
459 * @param type the type of your allocation.
460 * @param flags a @c KT_* flag.
461 */
462 #define KALLOC_TYPE_DEFINE(var, type, flags) \
463 _KALLOC_TYPE_DEFINE(var, type, flags)
464
465 /*!
466 * @macro KALLOC_TYPE_VAR_DEFINE
467 *
468 * @abstract
 * Defines a given kalloc type view with preferred accounting for
470 * variable sized typed allocations.
471 *
472 * @discussion
473 * As the views aren't yet being ingested, individual stats aren't
474 * available. The defined kalloc_type_var_view should be used with
475 * kalloc_type_var_impl/kfree_type_var_impl to allocate/free memory.
476 *
477 * This macro comes in 2 variants:
478 *
479 * 1. @c KALLOC_TYPE_VAR_DEFINE(var, e_ty, flags)
480 * 2. @c KALLOC_TYPE_VAR_DEFINE(var, h_ty, e_ty, flags)
481 *
482 * @param var the name for the kalloc type var view.
483 * @param h_ty the type of header in the allocation.
484 * @param e_ty the type of repeating part in the allocation.
485 * @param flags a @c KT_* flag.
486 */
487 #define KALLOC_TYPE_VAR_DEFINE(...) KALLOC_DISPATCH(KALLOC_TYPE_VAR_DEFINE, ##__VA_ARGS__)
488
489 #ifdef XNU_KERNEL_PRIVATE
490
491 /*
492 * These versions allow specifying the kalloc heap to allocate memory
493 * from
494 */
495 #define kheap_alloc_tag(kalloc_heap, size, flags, itag) \
496 __kheap_alloc(kalloc_heap, size, __zone_flags_mix_tag(flags, itag), NULL)
497 #define kheap_alloc(kalloc_heap, size, flags) \
498 kheap_alloc_tag(kalloc_heap, size, flags, VM_ALLOC_SITE_TAG())
499
500 /*
501 * These versions should be used for allocating pure data bytes that
502 * do not contain any pointers
503 */
504 #define kalloc_data_tag(size, flags, itag) \
505 kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, flags, itag)
506 #define kalloc_data(size, flags) \
507 kheap_alloc(KHEAP_DATA_BUFFERS, size, flags)
508
509 #define krealloc_data_tag(elem, old_size, new_size, flags, itag) \
510 __kheap_realloc(KHEAP_DATA_BUFFERS, elem, old_size, new_size, \
511 __zone_flags_mix_tag(flags, itag), NULL)
512 #define krealloc_data(elem, old_size, new_size, flags) \
513 krealloc_data_tag(elem, old_size, new_size, flags, \
514 VM_ALLOC_SITE_TAG())
515
516 #define kfree_data(elem, size) \
517 kheap_free(KHEAP_DATA_BUFFERS, elem, size);
518
519 #define kfree_data_addr(elem) \
520 kheap_free_addr(KHEAP_DATA_BUFFERS, elem);
521
522 extern void
523 kheap_free_bounded(
524 kalloc_heap_t heap,
525 void *addr __unsafe_indexable,
526 vm_size_t min_sz,
527 vm_size_t max_sz);
528
529 extern void
530 kalloc_data_require(
531 void *data __unsafe_indexable,
532 vm_size_t size);
533
534 extern void
535 kalloc_non_data_require(
536 void *data __unsafe_indexable,
537 vm_size_t size);
538
539 #else /* XNU_KERNEL_PRIVATE */
540
541 extern void *__sized_by(size)
542 kalloc(
543 vm_size_t size) __attribute__((malloc, alloc_size(1)));
544
545 extern void *__sized_by(size)
546 kalloc_data(
547 vm_size_t size,
548 zalloc_flags_t flags) __attribute__((malloc, alloc_size(1)));
549
550 extern void *__sized_by(new_size)
551 krealloc_data(
552 void *ptr __unsafe_indexable,
553 vm_size_t old_size,
554 vm_size_t new_size,
555 zalloc_flags_t flags) __attribute__((malloc, alloc_size(3)));
556
557 extern void
558 kfree(
559 void *data __unsafe_indexable,
560 vm_size_t size);
561
562 extern void
563 kfree_data(
564 void *ptr __unsafe_indexable,
565 vm_size_t size);
566
567 extern void
568 kfree_data_addr(
569 void *ptr __unsafe_indexable);
570
571 #endif /* !XNU_KERNEL_PRIVATE */
572
573 /*!
574 * @macro kalloc_type
575 *
576 * @abstract
577 * Allocates element of a particular type
578 *
579 * @discussion
580 * This family of allocators segregate kalloc allocations based on their type.
581 *
582 * This macro comes in 3 variants:
583 *
584 * 1. @c kalloc_type(type, flags)
585 * Use this macro for fixed sized allocation of a particular type.
586 *
587 * 2. @c kalloc_type(e_type, count, flags)
588 * Use this macro for variable sized allocations that form an array,
589 * do note that @c kalloc_type(e_type, 1, flags) is not equivalent to
590 * @c kalloc_type(e_type, flags).
591 *
592 * 3. @c kalloc_type(hdr_type, e_type, count, flags)
593 * Use this macro for variable sized allocations formed with
594 * a header of type @c hdr_type followed by a variable sized array
595 * with elements of type @c e_type, equivalent to this:
596 *
597 * <code>
598 * struct {
599 * hdr_type hdr;
600 * e_type arr[];
601 * }
602 * </code>
603 *
604 * @param flags @c zalloc_flags_t that get passed to zalloc_internal
605 */
606 #define kalloc_type(...) KALLOC_DISPATCH(kalloc_type, ##__VA_ARGS__)
607
608 /*!
609 * @macro kfree_type
610 *
611 * @abstract
 * Frees an element of a particular type
613 *
614 * @discussion
615 * This pairs with the @c kalloc_type() that was made to allocate this element.
616 * Arguments passed to @c kfree_type() must match the one passed at allocation
617 * time precisely.
618 *
619 * This macro comes in the same 3 variants kalloc_type() does:
620 *
621 * 1. @c kfree_type(type, elem)
622 * 2. @c kfree_type(e_type, count, elem)
623 * 3. @c kfree_type(hdr_type, e_type, count, elem)
624 *
625 * @param elem The address of the element to free
626 */
627 #define kfree_type(...) KALLOC_DISPATCH(kfree_type, ##__VA_ARGS__)
628
629 #ifdef XNU_KERNEL_PRIVATE
630 #define kalloc_type_tag(...) KALLOC_DISPATCH(kalloc_type_tag, ##__VA_ARGS__)
631 #define krealloc_type_tag(...) KALLOC_DISPATCH(krealloc_type_tag, ##__VA_ARGS__)
632 #define krealloc_type(...) KALLOC_DISPATCH(krealloc_type, ##__VA_ARGS__)
633
634 /*
635 * kalloc_type_require can't be made available to kexts as the
636 * kalloc_type_view's zone could be NULL in the following cases:
637 * - Size greater than KALLOC_SAFE_ALLOC_SIZE
638 * - On macOS, if call is not in BootKC
639 * - All allocations in kext for armv7
640 */
641 #define kalloc_type_require(type, value) ({ \
642 static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
643 zone_require(kt_view_var->kt_zv, value); \
644 })
645
646 #endif
647
648 /*!
649 * @enum kt_granule_t
650 *
651 * @brief
652 * Granule encodings used by the compiler for the type signature.
653 *
654 * @discussion
655 * Given a type, the XNU signature type system (__builtin_xnu_type_signature)
656 * produces a signature by analyzing its memory layout, in chunks of 8 bytes,
657 * which we call granules. The encoding produced for each granule is the
658 * bitwise or of the encodings of all the types of the members included
659 * in that granule.
660 *
661 * @const KT_GRANULE_PADDING
662 * Represents padding inside a record type.
663 *
664 * @const KT_GRANULE_POINTER
665 * Represents a pointer type.
666 *
667 * @const KT_GRANULE_DATA
668 * Represents a scalar type that is not a pointer.
669 *
670 * @const KT_GRANULE_DUAL
671 * Currently unused.
672 *
673 * @const KT_GRANULE_PAC
674 * Represents a pointer which is subject to PAC.
675 */
__options_decl(kt_granule_t, uint32_t, {
	KT_GRANULE_PADDING = 0,  /* padding inside a record type */
	KT_GRANULE_POINTER = 1,  /* a pointer type */
	KT_GRANULE_DATA    = 2,  /* a scalar type that is not a pointer */
	KT_GRANULE_DUAL    = 4,  /* currently unused */
	KT_GRANULE_PAC     = 8   /* a pointer subject to PAC */
});
683
684 #define KT_GRANULE_MAX \
685 (KT_GRANULE_PADDING | KT_GRANULE_POINTER | KT_GRANULE_DATA | \
686 KT_GRANULE_DUAL | KT_GRANULE_PAC)
687
688 /*
689 * Convert a granule encoding to the index of the bit that
690 * represents such granule in the type summary.
691 *
692 * The XNU type summary (__builtin_xnu_type_summary) produces a 32-bit
693 * summary of the type signature of a given type. If the bit at index
694 * (1 << G) is set in the summary, that means that the type contains
695 * one or more granules with encoding G.
696 */
697 #define KT_SUMMARY_GRANULE_TO_IDX(g) (1UL << g)
698
699 #define KT_SUMMARY_MASK_TYPE_BITS (0xffff)
700
701 #define KT_SUMMARY_MASK_DATA \
702 (KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) | \
703 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA))
704
705 #define KT_SUMMARY_MASK_PTR \
706 (KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) | \
707 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) | \
708 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
709
710 #define KT_SUMMARY_MASK_ALL_GRANULES \
711 (KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) | \
712 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) | \
713 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA) | \
714 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DUAL) | \
715 KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
716
717 /*!
718 * @macro KT_SUMMARY_GRANULES
719 *
720 * @abstract
721 * Return the granule type summary for a given type
722 *
723 * @discussion
724 * This macro computes the type summary of a type, and it then extracts the
725 * bits which carry information about the granules in the memory layout.
726 *
727 * Note: you should never have to use __builtin_xnu_type_summary
728 * directly, as we reserve the right to use the remaining bits with
729 * different semantics.
730 *
731 * @param type The type to analyze
732 */
733 #define KT_SUMMARY_GRANULES(type) \
734 (__builtin_xnu_type_summary(type) & KT_SUMMARY_MASK_TYPE_BITS)
735
736 /*!
737 * @macro KALLOC_TYPE_IS_DATA_ONLY
738 *
739 * @abstract
740 * Return whether a given type is considered a data-only type.
741 *
742 * @param type The type to analyze
743 */
744 #define KALLOC_TYPE_IS_DATA_ONLY(type) \
745 ((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_DATA) == 0)
746
747 /*!
748 * @macro KALLOC_TYPE_SIG_CHECK
749 *
750 * @abstract
751 * Return whether a given type is only made up of granules specified in mask
752 *
753 * @param mask Granules to check for
754 * @param type The type to analyze
755 */
756 #define KALLOC_TYPE_SIG_CHECK(mask, type) \
757 ((KT_SUMMARY_GRANULES(type) & ~(mask)) == 0)
758
759 /*!
760 * @macro KALLOC_TYPE_HAS_OVERLAPS
761 *
762 * @abstract
763 * Return whether a given type has overlapping granules.
764 *
765 * @discussion
766 * This macro returns whether the memory layout for a given type contains
767 * overlapping granules. An overlapping granule is a granule which includes
768 * members with types that have different encodings under the XNU signature
769 * type system.
770 *
771 * @param type The type to analyze
772 */
773 #define KALLOC_TYPE_HAS_OVERLAPS(type) \
774 ((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_ALL_GRANULES) != 0)
775
776 /*!
777 * @macro KALLOC_TYPE_IS_COMPATIBLE_PTR
778 *
779 * @abstract
780 * Return whether pointer is compatible with a given type, in the XNU
781 * signature type system.
782 *
783 * @discussion
784 * This macro returns whether type pointed to by @c ptr is either the same
785 * type as @c type, or it has the same signature. The implementation relies
786 * on the @c __builtin_xnu_types_compatible builtin, and the value returned
787 * can be evaluated at compile time in both C and C++.
788 *
789 * Note: void pointers are treated as wildcards, and are thus compatible
790 * with any given type.
791 *
792 * @param ptr the pointer whose type needs to be checked.
793 * @param type the type which the pointer will be checked against.
794 */
795 #define KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type) \
796 _Pragma("clang diagnostic push") \
797 _Pragma("clang diagnostic ignored \"-Wvoid-ptr-dereference\"") \
798 (__builtin_xnu_types_compatible(__typeof__(*ptr), type) || \
799 __builtin_xnu_types_compatible(__typeof__(*ptr), void)) \
800 _Pragma("clang diagnostic pop")
801
802 #define KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type) \
803 _Static_assert(KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type), \
804 "Pointer type is not compatible with specified type")
805
806 #pragma mark implementation details
807
808
809 static inline void *
kt_mangle_var_view(kalloc_type_var_view_t kt_view)810 kt_mangle_var_view(kalloc_type_var_view_t kt_view)
811 {
812 return (void *)((uintptr_t)kt_view | 1ul);
813 }
814
815 static inline kalloc_type_var_view_t
kt_demangle_var_view(void * ptr)816 kt_demangle_var_view(void *ptr)
817 {
818 return (kalloc_type_var_view_t)((uintptr_t)ptr & ~1ul);
819 }
820
821 #define kt_is_var_view(ptr) ((uintptr_t)(ptr) & 1)
822
823 static inline vm_size_t
kt_size(vm_size_t s1,vm_size_t s2,vm_size_t c2)824 kt_size(vm_size_t s1, vm_size_t s2, vm_size_t c2)
825 {
826 /* kalloc_large() will reject this size before even asking the VM */
827 const vm_size_t limit = 1ull << (8 * sizeof(vm_size_t) - 1);
828
829 if (os_mul_and_add_overflow(s2, c2, s1, &s1) || (s1 & limit)) {
830 return limit;
831 }
832 return s1;
833 }
834
835 #define kalloc_type_2(type, flags) ({ \
836 static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
837 __unsafe_forge_single(type *, kalloc_type_impl(kt_view_var, flags)); \
838 })
839
840 #define kfree_type_2(type, elem) ({ \
841 KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type); \
842 static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
843 kfree_type_impl(kt_view_var, os_ptr_load_and_erase(elem)); \
844 })
845
846 #define kfree_type_3(type, count, elem) ({ \
847 KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type); \
848 static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
849 __auto_type __kfree_count = (count); \
850 kfree_type_var_impl(kt_view_var, os_ptr_load_and_erase(elem), \
851 kt_size(0, sizeof(type), __kfree_count)); \
852 })
853
854 #define kfree_type_4(hdr_ty, e_ty, count, elem) ({ \
855 KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty); \
856 static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
857 KT_SHARED_ACCT); \
858 __auto_type __kfree_count = (count); \
859 kfree_type_var_impl(kt_view_var, \
860 os_ptr_load_and_erase(elem), \
861 kt_size(sizeof(hdr_ty), sizeof(e_ty), __kfree_count)); \
862 })
863
864 #ifdef XNU_KERNEL_PRIVATE
865 #define kalloc_type_tag_3(type, flags, tag) ({ \
866 static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT); \
867 __unsafe_forge_single(type *, zalloc_flags(kt_view_var, \
868 Z_VM_TAG(flags, tag))); \
869 })
870
871 #define kalloc_type_tag_4(type, count, flags, tag) ({ \
872 static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
873 (type *)kalloc_type_var_impl(kt_view_var, \
874 kt_size(0, sizeof(type), count), \
875 __zone_flags_mix_tag(flags, tag), NULL); \
876 })
877 #define kalloc_type_3(type, count, flags) \
878 kalloc_type_tag_4(type, count, flags, VM_ALLOC_SITE_TAG())
879
880 #define kalloc_type_tag_5(hdr_ty, e_ty, count, flags, tag) ({ \
881 static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
882 KT_SHARED_ACCT); \
883 (hdr_ty *)kalloc_type_var_impl(kt_view_var, \
884 kt_size(sizeof(hdr_ty), sizeof(e_ty), count), \
885 __zone_flags_mix_tag(flags, tag), NULL); \
886 })
887 #define kalloc_type_4(hdr_ty, e_ty, count, flags) \
888 kalloc_type_tag_5(hdr_ty, e_ty, count, flags, VM_ALLOC_SITE_TAG())
889
890 #define krealloc_type_tag_6(type, old_count, new_count, elem, flags, tag) ({ \
891 static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
892 KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type); \
893 (type *)__krealloc_type(kt_view_var, elem, \
894 kt_size(0, sizeof(type), old_count), \
895 kt_size(0, sizeof(type), new_count), \
896 __zone_flags_mix_tag(flags, tag), NULL); \
897 })
898 #define krealloc_type_5(type, old_count, new_count, elem, flags) \
899 krealloc_type_tag_6(type, old_count, new_count, elem, flags, \
900 VM_ALLOC_SITE_TAG())
901
902 #define krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, \
903 flags, tag) ({ \
904 static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
905 KT_SHARED_ACCT); \
906 KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty); \
907 (hdr_ty *)__krealloc_type(kt_view_var, elem, \
908 kt_size(sizeof(hdr_ty), sizeof(e_ty), old_count), \
909 kt_size(sizeof(hdr_ty), sizeof(e_ty), new_count), \
910 __zone_flags_mix_tag(flags, tag), NULL); \
911 })
912 #define krealloc_type_6(hdr_ty, e_ty, old_count, new_count, elem, flags) \
913 krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, flags, \
914 VM_ALLOC_SITE_TAG())
915
916 #else /* XNU_KERNEL_PRIVATE */
917
918 #define kalloc_type_3(type, count, flags) ({ \
919 _Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK"); \
920 static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT); \
921 (type *)kalloc_type_var_impl(kt_view_var, \
922 kt_size(0, sizeof(type), count), flags, NULL); \
923 })
924
925 #define kalloc_type_4(hdr_ty, e_ty, count, flags) ({ \
926 _Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK"); \
927 static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty, \
928 KT_SHARED_ACCT); \
929 (hdr_ty *)kalloc_type_var_impl(kt_view_var, kt_size(sizeof(hdr_ty), \
930 sizeof(e_ty), count), flags, NULL); \
931 })
932
933 #endif /* !XNU_KERNEL_PRIVATE */
934
935 /*
936 * All k*free macros set "elem" to NULL on free.
937 *
938 * Note: all values passed to k*free() might be in the element to be freed,
939 * temporaries must be taken, and the resetting to be done prior to free.
940 */
941 #ifdef XNU_KERNEL_PRIVATE
942
/*
 * Free a kheap allocation of known size; @c elem is loaded and reset to
 * NULL before the free (see the note above about values living inside the
 * element being freed).
 */
#define kheap_free(heap, elem, size) ({ \
	kalloc_heap_t __kfree_heap = (heap); \
	__auto_type __kfree_size = (size); \
	__builtin_assume(!kt_is_var_view(__kfree_heap)); \
	kfree_ext((void *)__kfree_heap, \
	    (void *)os_ptr_load_and_erase(elem), __kfree_size); \
})

/*
 * Free a kheap allocation whose size is not known by the caller;
 * @c elem is loaded and reset to NULL before the free.
 */
#define kheap_free_addr(heap, elem) ({ \
	kalloc_heap_t __kfree_heap = (heap); \
	kfree_addr_ext(__kfree_heap, (void *)os_ptr_load_and_erase(elem)); \
})

/*
 * Free a kheap allocation whose size is only known to lie within
 * [min_sz, max_sz]; max_sz must be a compile-time constant no larger than
 * KALLOC_SAFE_ALLOC_SIZE. @c elem is loaded and reset to NULL before the
 * free.
 */
#define kheap_free_bounded(heap, elem, min_sz, max_sz) ({ \
	static_assert(max_sz <= KALLOC_SAFE_ALLOC_SIZE); \
	kalloc_heap_t __kfree_heap = (heap); \
	__auto_type __kfree_min_sz = (min_sz); \
	__auto_type __kfree_max_sz = (max_sz); \
	(kheap_free_bounded)(__kfree_heap, \
	    (void *)os_ptr_load_and_erase(elem), \
	    __kfree_min_sz, __kfree_max_sz); \
})
965
966 #else /* XNU_KERNEL_PRIVATE */
967
/*
 * kfree_data (kext build): free a sized data allocation; `elem` is loaded
 * and reset to NULL before the underlying (kfree_data) function runs.
 * The size is captured in a temporary first, in case it lives inside the
 * element being freed.
 */
#define kfree_data(elem, size) ({ \
	__auto_type __kfree_size = (size); \
	(kfree_data)((void *)os_ptr_load_and_erase(elem), __kfree_size); \
})

/* kfree_data_addr (kext build): free a data allocation by address only. */
#define kfree_data_addr(elem) \
	(kfree_data_addr)((void *)os_ptr_load_and_erase(elem))
975
976 #endif /* !XNU_KERNEL_PRIVATE */
977
/*
 * __kalloc_no_kasan: suppress AddressSanitizer instrumentation on the
 * kalloc_type view definitions (they live in special linker sections).
 */
#if __has_feature(address_sanitizer)
# define __kalloc_no_kasan __attribute__((no_sanitize("address")))
#else
# define __kalloc_no_kasan
#endif

/* Token pasting helper (two-level expansion via __CONCAT). */
#define KALLOC_CONCAT(x, y) __CONCAT(x,y)

/*
 * Argument-count dispatch machinery: KALLOC_COUNT_ARGS(...) expands to
 * _0 .. _9 depending on how many arguments were passed, and
 * KALLOC_DISPATCH(base, ...) calls base##_N(...) accordingly.
 * The _R variants exist so a dispatched macro can itself dispatch again
 * (the preprocessor will not recursively expand the same macro name).
 */
#define KALLOC_COUNT_ARGS1(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, N, ...) N
#define KALLOC_COUNT_ARGS(...) \
	KALLOC_COUNT_ARGS1(, ##__VA_ARGS__, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0)
#define KALLOC_DISPATCH1(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH(base, ...) \
	KALLOC_DISPATCH1(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
#define KALLOC_DISPATCH1_R(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH_R(base, ...) \
	KALLOC_DISPATCH1_R(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)

/* Per-call-site unique identifier for static kalloc_type views. */
#define kt_view_var \
	KALLOC_CONCAT(kalloc_type_view_, __LINE__)

/* Section for kalloc_type views: read-only data on LP64 kernels. */
#if __LP64__
#define KALLOC_TYPE_SEGMENT "__DATA_CONST"
#else
#define KALLOC_TYPE_SEGMENT "__DATA"
#endif
1004
1005 /*
1006 * When kalloc_type_impl is called from xnu, it calls zalloc_flags
1007 * directly and doesn't redirect zone-less sites to kheap_alloc.
1008 * Passing a size larger than KHEAP_MAX_SIZE for these allocations will
1009 * lead to a panic as the zone is null. Therefore assert that size
1010 * is less than KALLOC_SAFE_ALLOC_SIZE.
1011 */
#ifdef XNU_KERNEL_PRIVATE
/*
 * Compile-time guard (see comment above): reject types too large for the
 * zone allocator. `size` is parenthesized for macro hygiene so compound
 * expressions cannot misparse against `<=`.
 */
#define KALLOC_TYPE_SIZE_CHECK(size) \
	_Static_assert((size) <= KALLOC_SAFE_ALLOC_SIZE, \
	"type is too large");
#else
/* Kexts go through kalloc_type_impl_external, which handles large sizes. */
#define KALLOC_TYPE_SIZE_CHECK(size)
#endif
1019
/*
 * KALLOC_TYPE_CHECK(check, type...): true when every listed type matches
 * the given signature summary `check` (dispatched on argument count).
 */
#define KALLOC_TYPE_CHECK_2(check, type) \
	(KALLOC_TYPE_SIG_CHECK(check, type))

#define KALLOC_TYPE_CHECK_3(check, type1, type2) \
	(KALLOC_TYPE_SIG_CHECK(check, type1) && \
	KALLOC_TYPE_SIG_CHECK(check, type2))

#define KALLOC_TYPE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_CHECK, ##__VA_ARGS__)

/*
 * KALLOC_TYPE_VM_SIZE_CHECK(type...): true when the combined size of the
 * listed types exceeds KHEAP_MAX_SIZE, i.e. the allocation must use the VM
 * rather than a zone.
 */
#define KALLOC_TYPE_VM_SIZE_CHECK_1(type) \
	(sizeof(type) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK_2(type1, type2) \
	(sizeof(type1) + sizeof(type2) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_VM_SIZE_CHECK, ##__VA_ARGS__)
1038
/* Cast to kalloc_type_flags_t; C++ forbids the implicit int->enum cast. */
#ifdef __cplusplus
#define KALLOC_TYPE_CAST_FLAGS(flags) static_cast<kalloc_type_flags_t>(flags)
#else
#define KALLOC_TYPE_CAST_FLAGS(flags) (kalloc_type_flags_t)(flags)
#endif
1044
1045 /*
1046 * Don't emit signature if type is "data-only" or is large enough that it
1047 * uses the VM.
1048 *
1049 * Note: sig_type is the type you want to emit signature for. The variable
1050 * args can be used to provide other types in the allocation, to make the
1051 * decision of whether to emit the signature.
1052 */
/*
 * See comment above: emits "" instead of a real signature for data-only
 * or VM-sized types. The whole expansion is now parenthesized so the
 * conditional expression cannot misparse at a use site (macro hygiene);
 * the emitted values are unchanged.
 */
#define KALLOC_TYPE_EMIT_SIG(sig_type, ...) \
	((KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, sig_type, ##__VA_ARGS__) || \
	KALLOC_TYPE_VM_SIZE_CHECK(sig_type, ##__VA_ARGS__)) ? \
	"" : __builtin_xnu_type_signature(sig_type))
1057
1058 /*
1059 * Kalloc type flags are adjusted to indicate if the type is "data-only" or
1060 * will use the VM or is a pointer array.
1061 */
/*
 * See comment above: ORs in KT_DATA_ONLY / KT_PTR_ARRAY / KT_VM based on
 * the type's signature summary and size. `flags` is now parenthesized for
 * macro hygiene (a conditional-expression argument would previously have
 * misparsed against `|`); the computed value is unchanged for all existing
 * callers.
 */
#define KALLOC_TYPE_ADJUST_FLAGS(flags, ...) \
	KALLOC_TYPE_CAST_FLAGS(((flags) | KT_CHANGED | KT_CHANGED2 | \
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, __VA_ARGS__)? KT_DATA_ONLY: 0) |\
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_PTR, __VA_ARGS__)? KT_PTR_ARRAY: 0) | \
	(KALLOC_TYPE_VM_SIZE_CHECK(__VA_ARGS__)? KT_VM : 0)))
1067
/*
 * _KALLOC_TYPE_DEFINE: define a fixed-size kalloc_type view for `type`,
 * placed in the kalloc_type linker section so the boot-time segregation
 * code can find it. Records the site name, adjusted flags, size and
 * (possibly empty) type signature.
 */
#define _KALLOC_TYPE_DEFINE(var, type, flags) \
	__kalloc_no_kasan \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_type") \
	struct kalloc_type_view var[1] = { { \
	    .kt_zv.zv_name = "site." #type, \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type), \
	    .kt_size = sizeof(type), \
	    .kt_signature = KALLOC_TYPE_EMIT_SIG(type), \
	} }; \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1078
/*
 * KALLOC_TYPE_VAR_DEFINE_3: define a variable-size kalloc_type view for
 * an array of `type` (no header), placed in the __kalloc_var section.
 */
#define KALLOC_TYPE_VAR_DEFINE_3(var, type, flags) \
	__kalloc_no_kasan \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var") \
	struct kalloc_type_var_view var[1] = { { \
	    .kt_version = KT_V1, \
	    .kt_name = "site." #type, \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type), \
	    .kt_size_type = sizeof(type), \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type), \
	} }; \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));

/*
 * KALLOC_TYPE_VAR_DEFINE_4: variable-size view for a `hdr` followed by an
 * array of `type`. Each signature is emitted considering both types, so a
 * data-only decision accounts for the whole allocation.
 */
#define KALLOC_TYPE_VAR_DEFINE_4(var, hdr, type, flags) \
	__kalloc_no_kasan \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var") \
	struct kalloc_type_var_view var[1] = { { \
	    .kt_version = KT_V1, \
	    .kt_name = "site." #hdr "." #type, \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, hdr, type), \
	    .kt_size_hdr = sizeof(hdr), \
	    .kt_size_type = sizeof(type), \
	    .kt_sig_hdr = KALLOC_TYPE_EMIT_SIG(hdr, type), \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type, hdr), \
	} }; \
	KALLOC_TYPE_SIZE_CHECK(sizeof(hdr)); \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1105
1106 #ifndef XNU_KERNEL_PRIVATE
1107 /*
1108 * This macro is currently used by AppleImage4
1109 */
/* Defines `var` as a static, per-site fixed-size kalloc_type view. */
#define KALLOC_TYPE_DEFINE_SITE(var, type, flags) \
	static _KALLOC_TYPE_DEFINE(var, type, flags)
1112
1113 #endif /* !XNU_KERNEL_PRIVATE */
1114
1115 #ifdef XNU_KERNEL_PRIVATE
1116
/*
 * In-kernel kalloc_type_impl goes straight to zalloc_flags (no kheap
 * redirection) — see KALLOC_TYPE_SIZE_CHECK comment above.
 */
#define kalloc_type_impl(kt_view, flags) \
	zalloc_flags(kt_view, flags)
1119
1120 static inline void
kfree_type_impl(kalloc_type_view_t kt_view,void * __unsafe_indexable ptr)1121 kfree_type_impl(kalloc_type_view_t kt_view, void *__unsafe_indexable ptr)
1122 {
1123 if (NULL == ptr) {
1124 return;
1125 }
1126 zfree(kt_view, ptr);
1127 }
1128
/*
 * Variable-size typed alloc/free: the var view is mangled into the
 * kheap_or_kt_view argument understood by kalloc_ext()/kfree_ext().
 */
#define kalloc_type_var_impl(kt_view, size, flags, site) \
	kalloc_ext(kt_mangle_var_view(kt_view), size, flags, site).addr

#define kfree_type_var_impl(kt_view, ptr, size) \
	kfree_ext(kt_mangle_var_view(kt_view), ptr, size)
1134
1135 #else /* XNU_KERNEL_PRIVATE */
1136
/* Typed allocation from a fixed-size view (kext build: real function). */
extern void *__unsafe_indexable
kalloc_type_impl(
	kalloc_type_view_t      kt_view,
	zalloc_flags_t          flags);

/* Typed free for a fixed-size view. */
extern void
kfree_type_impl(
	kalloc_type_view_t      kt_view,
	void                   *ptr __unsafe_indexable);

/* Typed allocation of `size` bytes through a variable-size view. */
__attribute__((malloc, alloc_size(2)))
extern void *__sized_by(size)
kalloc_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);

/* Typed free of a `size`-byte variable-size-view allocation. */
extern void
kfree_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr __unsafe_indexable,
	vm_size_t               size);
1160
1161 #endif /* !XNU_KERNEL_PRIVATE */
1162
/* Entry points exported for external (kext) callers of the typed API. */
void *
kalloc_type_impl_external(
	kalloc_type_view_t      kt_view,
	zalloc_flags_t          flags);

void
kfree_type_impl_external(
	kalloc_type_view_t      kt_view,
	void                   *ptr __unsafe_indexable);

/* Typed operator new/delete backing for OSObject (size in bytes). */
extern void *
OSObject_typed_operator_new(
	kalloc_type_view_t      ktv,
	vm_size_t               size);

extern void
OSObject_typed_operator_delete(
	kalloc_type_view_t      ktv,
	void                   *mem __unsafe_indexable,
	vm_size_t               size);
1183
1184 #ifdef XNU_KERNEL_PRIVATE
1185 #pragma GCC visibility push(hidden)
1186
/*
 * kt_size packing: a 32-bit word carrying the element size in the low
 * 24 bits and a zone index in the top 8 bits.
 */
#define KALLOC_TYPE_SIZE_MASK  0xffffff
#define KALLOC_TYPE_IDX_SHIFT  24
#define KALLOC_TYPE_IDX_MASK   0xff

/* Extract the zone index stored in the top byte of a packed kt_size. */
static inline uint16_t
kalloc_type_get_idx(uint32_t kt_size)
{
	uint32_t idx = kt_size >> KALLOC_TYPE_IDX_SHIFT;
	return (uint16_t)idx;
}

/* Pack a zone index into the top byte of kt_size. */
static inline uint32_t
kalloc_type_set_idx(uint32_t kt_size, uint16_t idx)
{
	uint32_t packed_idx = (uint32_t)idx << KALLOC_TYPE_IDX_SHIFT;
	return packed_idx | kt_size;
}

/* Strip the index byte, leaving just the element size. */
static inline uint32_t
kalloc_type_get_size(uint32_t kt_size)
{
	return kt_size & KALLOC_TYPE_SIZE_MASK;
}
1208
/* True when allocations for this view come from the VM rather than a zone. */
bool
IOMallocType_from_vm(
	kalloc_type_view_t      ktv);

/* Used by kern_os_* and operator new */
KALLOC_HEAP_DECLARE(KERN_OS_MALLOC);

/* Startup-time initialization of a kalloc heap. */
extern void
kheap_startup_init(
	kalloc_heap_t           heap);

/*
 * Core allocation entry point: takes either a kalloc heap or a (mangled)
 * kalloc_type var view, returns a kalloc_result (address + size).
 */
extern struct kalloc_result
kalloc_ext(
	void                   *kheap_or_kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);
1226
1227 __attribute__((malloc, alloc_size(2)))
1228 static inline void *
__sized_by(size)1229 __sized_by(size)
1230 __kheap_alloc(
1231 kalloc_heap_t kheap,
1232 vm_size_t size,
1233 zalloc_flags_t flags,
1234 void *site)
1235 {
1236 struct kalloc_result kr;
1237 __builtin_assume(!kt_is_var_view(kheap));
1238 kr = kalloc_ext(kheap, size, flags, site);
1239 return __unsafe_forge_bidi_indexable(void *, kr.addr, size);
1240 }
1241
/*
 * Core reallocation entry point: takes either a kalloc heap or a (mangled)
 * kalloc_type var view; `old_size` must match the original allocation size.
 */
extern struct kalloc_result
krealloc_ext(
	void                   *kheap_or_kt_view,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	void                   *site);
1250
1251 __attribute__((malloc, alloc_size(4)))
1252 static inline void *
__sized_by(new_size)1253 __sized_by(new_size)
1254 __kheap_realloc(
1255 kalloc_heap_t kheap,
1256 void *addr __unsafe_indexable,
1257 vm_size_t old_size,
1258 vm_size_t new_size,
1259 zalloc_flags_t flags,
1260 void *site)
1261 {
1262 struct kalloc_result kr;
1263 __builtin_assume(!kt_is_var_view(kheap));
1264 kr = krealloc_ext(kheap, addr, old_size, new_size, flags, site);
1265 return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1266 }
1267
1268 __attribute__((malloc, alloc_size(4)))
1269 static inline void *
__sized_by(new_size)1270 __sized_by(new_size)
1271 __krealloc_type(
1272 kalloc_type_var_view_t kt_view,
1273 void *addr __unsafe_indexable,
1274 vm_size_t old_size,
1275 vm_size_t new_size,
1276 zalloc_flags_t flags,
1277 void *site)
1278 {
1279 struct kalloc_result kr;
1280 kr = krealloc_ext(kt_mangle_var_view(kt_view), addr,
1281 old_size, new_size, flags, site);
1282 return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1283 }
1284
/* Free by address only (size recovered internally). */
extern void
kfree_addr_ext(
	kalloc_heap_t           kheap,
	void                   *addr __unsafe_indexable);

/* Free `size` bytes allocated from a heap or (mangled) var view. */
extern void
kfree_ext(
	void                   *kheap_or_kt_view,
	void                   *addr __unsafe_indexable,
	vm_size_t               size);

/* Zone that would back an allocation of `size` bytes from `heap`. */
extern zone_t
kalloc_heap_zone_for_size(
	kalloc_heap_t           heap,
	vm_size_t               size);

/* Threshold above which allocations go to the VM, plus usage counters. */
extern vm_size_t kalloc_large_max;
SCALABLE_COUNTER_DECLARE(kalloc_large_count);
SCALABLE_COUNTER_DECLARE(kalloc_large_total);

/* Typed free used by the kern_os_* (libkern) allocator glue. */
extern void
kern_os_typed_free(
	kalloc_type_view_t      ktv,
	void                   *addr __unsafe_indexable,
	vm_size_t               esize);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
1313
/* Free a zone element on behalf of the kern_os allocator glue. */
extern void
kern_os_zfree(
	zone_t                  zone,
	void                   *addr __unsafe_indexable,
	vm_size_t               size);
1319
1320 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1321
1322 #endif /* _KERN_KALLOC_H_ */
1323
1324 #endif /* KERNEL_PRIVATE */
1325