xref: /xnu-8792.41.9/osfmk/kern/kalloc.h (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifdef  KERNEL_PRIVATE
58 
59 #ifndef _KERN_KALLOC_H_
60 #define _KERN_KALLOC_H_
61 
62 #include <mach/machine/vm_types.h>
63 #include <mach/boolean.h>
64 #include <mach/vm_types.h>
65 #include <kern/zalloc.h>
66 #include <libkern/section_keywords.h>
67 #include <os/alloc_util.h>
68 #if XNU_KERNEL_PRIVATE
69 #include <kern/counter.h>
70 #endif /* XNU_KERNEL_PRIVATE */
71 
72 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
73 
74 /*!
75  * @const KALLOC_SAFE_ALLOC_SIZE
76  *
77  * @brief
78  * The maximum allocation size that is safe to allocate with Z_NOFAIL in kalloc.
79  */
#if __LP64__
#define KALLOC_SAFE_ALLOC_SIZE  (16u * 1024u)  /* 16K on 64-bit platforms */
#else
#define KALLOC_SAFE_ALLOC_SIZE  (8u * 1024u)   /* 8K on 32-bit platforms */
#endif
85 
86 #if XNU_KERNEL_PRIVATE
87 /*!
88  * @typedef kalloc_heap_t
89  *
90  * @abstract
91  * A kalloc heap view represents a sub-accounting context
92  * for a given kalloc heap.
93  */
typedef struct kalloc_heap {
	struct kheap_zones *kh_zones;    /* set of kalloc zones backing this heap */
	zone_stats_t        kh_stats;    /* per-view accounting statistics */
	const char         *__unsafe_indexable kh_name;  /* string describing the view */
	struct kalloc_heap *kh_next;     /* next heap view — presumably a registration list; TODO confirm */
	zone_kheap_id_t     kh_heap_id;  /* KHEAP_ID_* constant identifying the base heap */
	vm_tag_t            kh_tag;      /* VM tag used for allocation accounting */
	uint16_t            kh_type_hash; /* NOTE(review): likely signature hash (cf. KT_HASH) — confirm */
} *kalloc_heap_t;
103 
104 /*!
105  * @macro KALLOC_HEAP_DECLARE
106  *
107  * @abstract
108  * (optionally) declare a kalloc heap view in a header.
109  *
110  * @discussion
111  * Unlike kernel zones, new full blown heaps cannot be instantiated.
112  * However new accounting views of the base heaps can be made.
113  */
/* Declared as a 1-element array so that `var` decays to a pointer at use sites. */
#define KALLOC_HEAP_DECLARE(var) \
	extern struct kalloc_heap var[1]
116 
117 /**
118  * @const KHEAP_ANY
119  *
120  * @brief
121  * A value that represents either the default or kext heap for codepaths that
122  * need to allow @c kheap_free() to either one.
123  *
124  * @discussion
125  * When the memory provenance is not known, this value can be used to free
126  * memory indiscriminately.
127  *
128  * Note: code using this constant can likely be used as a gadget to free
129  * arbitrary memory and its use is strongly discouraged.
130  */
131 #define KHEAP_ANY  ((struct kalloc_heap *)NULL)
132 
133 /**
134  * @const KHEAP_DATA_BUFFERS
135  *
136  * @brief
137  * The builtin heap for bags of pure bytes.
138  *
139  * @discussion
140  * This set of kalloc zones should contain pure bags of bytes with no pointers
141  * or length/offset fields.
142  *
143  * The zones forming the heap aren't sequestered from each other, however the
144  * entire heap lives in a different submap from any other kernel allocation.
145  *
146  * The main motivation behind this separation is due to the fact that a lot of
147  * these objects have been used by attackers to spray the heap to make it more
148  * predictable while exploiting use-after-frees or overflows.
149  *
 * Common attributes that make these objects useful for spraying include
 * control of:
152  * - Data in allocation
153  * - Time of alloc and free (lifetime)
154  * - Size of allocation
155  */
156 KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS);
157 
158 /**
159  * @const KHEAP_DEFAULT
160  *
161  * @brief
162  * The builtin default core kernel kalloc heap.
163  *
164  * @discussion
165  * This set of kalloc zones should contain other objects that don't have their
166  * own security mitigations. The individual zones are themselves sequestered.
167  */
168 KALLOC_HEAP_DECLARE(KHEAP_DEFAULT);
169 
170 /**
171  * @const KHEAP_KT_VAR
172  *
173  * @brief
174  * Temporary heap for variable sized kalloc type allocations
175  *
176  * @discussion
177  * This heap will be removed when logic for kalloc_type_var_views is added
178  *
179  */
180 KALLOC_HEAP_DECLARE(KHEAP_KT_VAR);
181 
182 /*!
183  * @macro KALLOC_HEAP_DEFINE
184  *
185  * @abstract
186  * Defines a given kalloc heap view and what it points to.
187  *
188  * @discussion
189  * Kalloc heaps are views over one of the pre-defined builtin heaps
190  * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating
191  * a new one allows for accounting of allocations through this view.
192  *
193  * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase,
194  * as the last rank. If views on zones are created, these must have been
195  * created before this stage.
196  *
197  * @param var           the name for the zone view.
198  * @param name          a string describing the zone view.
199  * @param heap_id       a @c KHEAP_ID_* constant.
200  */
/*
 * The view is a 1-element array so `var` can be used like a pointer.
 * Only the name and heap id are set statically; the remaining fields are
 * filled in by kheap_startup_init, registered to run in the ZALLOC startup
 * subsystem at the last rank (see STARTUP_ARG below).
 */
#define KALLOC_HEAP_DEFINE(var, name, heap_id) \
	SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \
	    .kh_name = name, \
	    .kh_heap_id = heap_id, \
	} }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, kheap_startup_init, var)
207 
208 
209 /*
210  * Allocations of type SO_NAME are known to not have pointers for
211  * most platforms -- for macOS this is not guaranteed
212  */
#if XNU_TARGET_OS_OSX
/* On macOS SO_NAME allocations may contain pointers, so use the default heap. */
#define KHEAP_SONAME KHEAP_DEFAULT
#else /* XNU_TARGET_OS_OSX */
#define KHEAP_SONAME KHEAP_DATA_BUFFERS
#endif /* XNU_TARGET_OS_OSX */
218 
219 #endif /* XNU_KERNEL_PRIVATE */
220 
221 /*!
222  * @enum kalloc_type_flags_t
223  *
224  * @brief
225  * Flags that can be passed to @c KALLOC_TYPE_DEFINE
226  *
227  * @discussion
228  * These flags can be used to request for a specific accounting
229  * behavior.
230  *
231  * @const KT_DEFAULT
 * Passing this flag will provide the default accounting behavior,
 * i.e. shared accounting, unless KT_OPTIONS_ACCT is set in the kt
 * boot-arg.
235  *
236  * @const KT_PRIV_ACCT
237  * Passing this flag will provide individual stats for your
238  * @c kalloc_type_view that is defined.
239  *
240  * @const KT_SHARED_ACCT
241  * Passing this flag will accumulate stats as a part of the
242  * zone that your @c kalloc_type_view points to.
243  *
244  * @const KT_DATA_ONLY
245  * Represents that the type is "data-only". Adopters should not
246  * set this flag manually, it is meant for the compiler to set
247  * automatically when KALLOC_TYPE_CHECK(DATA) passes.
248  *
249  * @const KT_VM
250  * Represents that the type is large enough to use the VM. Adopters
251  * should not set this flag manually, it is meant for the compiler
252  * to set automatically when KALLOC_TYPE_VM_SIZE_CHECK passes.
253  *
254  * @const KT_PTR_ARRAY
255  * Represents that the type is an array of pointers. Adopters should not
256  * set this flag manually, it is meant for the compiler to set
257  * automatically when KALLOC_TYPE_CHECK(PTR) passes.
258  *
259  * @const KT_CHANGED*
 * Represents a change in the version of the kalloc_type_view. This
 * is required in order to avoid forcing kexts to be rebuilt right
 * away to use the new definitions. This flag should not be used
 * manually at a callsite; it is meant for internal use only. Future
 * changes to the kalloc_type_view definition should toggle this flag.
265  *
266  #if XNU_KERNEL_PRIVATE
267  *
268  * @const KT_SLID
269  * To indicate that strings in the view were slid during early boot.
270  *
271  * @const KT_PROCESSED
 * This flag is set once the view is parsed during early boot. Views
273  * that are not in BootKC on macOS aren't parsed and therefore will
274  * not have this flag set. The runtime can use this as an indication
275  * to appropriately redirect the call.
276  *
277  * @const KT_HASH
278  * Hash of signature used by kmem_*_guard to determine range and
279  * direction for allocation
280  #endif
281  */
__options_decl(kalloc_type_flags_t, uint32_t, {
	KT_DEFAULT        = 0x0001, /* default (shared) accounting behavior */
	KT_PRIV_ACCT      = 0x0002, /* individual stats for this view */
	KT_SHARED_ACCT    = 0x0004, /* stats accumulated in the backing zone */
	KT_DATA_ONLY      = 0x0008, /* compiler-set: type is pure data (no pointers) */
	KT_VM             = 0x0010, /* compiler-set: large enough to use the VM */
	KT_CHANGED        = 0x0020, /* view layout version bump (internal use only) */
	KT_CHANGED2       = 0x0040, /* second view layout version bump (internal use only) */
	KT_PTR_ARRAY      = 0x0080, /* compiler-set: type is an array of pointers */
#if XNU_KERNEL_PRIVATE
	KT_SLID           = 0x4000, /* strings in the view were slid during early boot */
	KT_PROCESSED      = 0x8000, /* view was parsed during early boot */
	KT_HASH           = 0xffff0000, /* signature hash used by kmem_*_guard */
#endif
});
297 
298 /*!
299  * @typedef kalloc_type_view_t
300  *
301  * @abstract
302  * A kalloc type view is a structure used to redirect callers
303  * of @c kalloc_type to a particular zone based on the signature of
304  * their type.
305  *
306  * @discussion
307  * These structures are automatically created under the hood for every
308  * @c kalloc_type and @c kfree_type callsite. They are ingested during startup
309  * and are assigned zones based on the security policy for their signature.
310  *
311  * These structs are protected by the kernel lockdown and can't be initialized
312  * dynamically. They must be created using @c KALLOC_TYPE_DEFINE() or
313  * @c kalloc_type or @c kfree_type.
314  *
315  */
struct kalloc_type_view {
	struct zone_view        kt_zv;        /* zone view the callsite is redirected to */
	const char             *kt_signature __unsafe_indexable; /* type signature string */
	kalloc_type_flags_t     kt_flags;     /* KT_* flags (see kalloc_type_flags_t) */
	uint32_t                kt_size;      /* size of the type — presumably sizeof(type); confirm in _KALLOC_TYPE_DEFINE */
	void                   *unused1;      /* reserved */
	void                   *unused2;      /* reserved */
};

typedef struct kalloc_type_view *kalloc_type_view_t;
326 
327 /*
328  * "Heaps" or sets of zones, used for variable size kalloc_type allocations
329  * are defined by the constants below.
330  *
331  * KHEAP_START_SIZE: Size of the first sequential zone.
332  * KHEAP_MAX_SIZE  : Size of the last sequential zone.
333  * KHEAP_STEP_WIDTH: Number of zones created at every step (power of 2).
334  * KHEAP_STEP_START: Size of the first step.
335  * We also create some extra initial zones that don't follow the sequence
336  * for sizes 8 (on armv7 only), 16 and 32.
337  *
338  * idx step_increment   zone_elem_size
339  * 0       -                  16
340  * 1       -                  32
341  * 2       16                 48
342  * 3       16                 64
343  * 4       32                 96
344  * 5       32                 128
345  * 6       64                 192
346  * 7       64                 256
347  * 8       128                384
348  * 9       128                512
349  * 10      256                768
350  * 11      256                1024
351  * 12      512                1536
352  * 13      512                2048
353  * 14      1024               3072
354  * 15      1024               4096
355  * 16      2048               6144
356  * 17      2048               8192
357  * 18      4096               12288
358  * 19      4096               16384
359  * 20      8192               24576
360  * 21      8192               32768
361  */
/* floor(log2(mask)) for nonzero 32-bit values, via count-leading-zeros */
#define kalloc_log2down(mask)   (31 - __builtin_clz(mask))
#define KHEAP_START_SIZE        32
#if !defined(__LP64__)
#define KHEAP_MAX_SIZE          (8 * 1024)
#define KHEAP_EXTRA_ZONES       3   /* non-sequence zones for sizes 8, 16, 32 */
#elif  __x86_64__
#define KHEAP_MAX_SIZE          (16 * 1024)
#define KHEAP_EXTRA_ZONES       2   /* non-sequence zones for sizes 16 and 32 */
#else
#define KHEAP_MAX_SIZE          (32 * 1024)
#define KHEAP_EXTRA_ZONES       2   /* non-sequence zones for sizes 16 and 32 */
#endif
#define KHEAP_STEP_WIDTH        2
#define KHEAP_STEP_START        16
#define KHEAP_START_IDX         kalloc_log2down(KHEAP_START_SIZE)
/* number of size doublings between the first and last sequential zone */
#define KHEAP_NUM_STEPS         (kalloc_log2down(KHEAP_MAX_SIZE) - \
	                                kalloc_log2down(KHEAP_START_SIZE))
#define KHEAP_NUM_ZONES         (KHEAP_NUM_STEPS * KHEAP_STEP_WIDTH + \
	                                KHEAP_EXTRA_ZONES)
381 
382 /*!
383  * @enum kalloc_type_version_t
384  *
385  * @brief
386  * Enum that holds versioning information for @c kalloc_type_var_view
387  *
388  * @const KT_V1
389  * Version 1
390  *
391  */
__options_decl(kalloc_type_version_t, uint16_t, {
	KT_V1             = 0x0001, /* version 1 of kalloc_type_var_view */
});
395 
396 /*!
397  * @typedef kalloc_type_var_view_t
398  *
399  * @abstract
 * This structure is analogous to @c kalloc_type_view but handles
401  * @c kalloc_type callsites that are variable in size.
402  *
403  * @discussion
404  * These structures are automatically created under the hood for every
405  * variable sized @c kalloc_type and @c kfree_type callsite. They are ingested
406  * during startup and are assigned zones based on the security policy for
407  * their signature.
408  *
409  * These structs are protected by the kernel lockdown and can't be initialized
410  * dynamically. They must be created using @c KALLOC_TYPE_VAR_DEFINE() or
411  * @c kalloc_type or @c kfree_type.
412  *
413  */
struct kalloc_type_var_view {
	kalloc_type_version_t   kt_version;   /* layout version of this view (KT_V1) */
	uint16_t                kt_size_hdr;  /* size of the fixed header part of the allocation */
	/*
	 * Temporary: needs to be 32 bits because we have many structs that
	 * use IONew/Delete that are larger than 32K.
	 */
	uint32_t                kt_size_type; /* size of the repeating element part */
	zone_stats_t            kt_stats;     /* per-view accounting statistics */
	const char             *__unsafe_indexable kt_name;  /* name of the view */
	zone_view_t             kt_next;      /* next view — presumably a registration list; TODO confirm */
	zone_id_t               kt_heap_start; /* first zone id of the backing heap */
	uint8_t                 kt_zones[KHEAP_NUM_ZONES]; /* zone assignment per size class */
	const char             * __unsafe_indexable kt_sig_hdr;  /* signature of the header type */
	const char             * __unsafe_indexable kt_sig_type; /* signature of the element type */
	kalloc_type_flags_t     kt_flags;     /* KT_* flags (see kalloc_type_flags_t) */
};

typedef struct kalloc_type_var_view *kalloc_type_var_view_t;
433 
434 /*!
435  * @macro KALLOC_TYPE_DECLARE
436  *
437  * @abstract
438  * (optionally) declares a kalloc type view (in a header).
439  *
440  * @param var           the name for the kalloc type view.
441  */
442 #define KALLOC_TYPE_DECLARE(var) \
443 	extern struct kalloc_type_view var[1]
444 
445 /*!
446  * @macro KALLOC_TYPE_DEFINE
447  *
448  * @abstract
 * Defines a given kalloc type view with preferred accounting
450  *
451  * @discussion
452  * This macro allows you to define a kalloc type with private
453  * accounting. The defined kalloc_type_view can be used with
454  * kalloc_type_impl/kfree_type_impl to allocate/free memory.
455  * zalloc/zfree can also be used from inside xnu. However doing
456  * so doesn't handle freeing a NULL pointer or the use of tags.
457  *
458  * @param var           the name for the kalloc type view.
459  * @param type          the type of your allocation.
460  * @param flags         a @c KT_* flag.
461  */
462 #define KALLOC_TYPE_DEFINE(var, type, flags) \
463 	_KALLOC_TYPE_DEFINE(var, type, flags)
464 
465 /*!
466  * @macro KALLOC_TYPE_VAR_DECLARE
467  *
468  * @abstract
469  * (optionally) declares a kalloc type var view (in a header).
470  *
471  * @param var           the name for the kalloc type var view.
472  */
473 #define KALLOC_TYPE_VAR_DECLARE(var) \
474 	extern struct kalloc_type_var_view var[1]
475 
476 /*!
477  * @macro KALLOC_TYPE_VAR_DEFINE
478  *
479  * @abstract
 * Defines a given kalloc type view with preferred accounting for
481  * variable sized typed allocations.
482  *
483  * @discussion
484  * As the views aren't yet being ingested, individual stats aren't
485  * available. The defined kalloc_type_var_view should be used with
486  * kalloc_type_var_impl/kfree_type_var_impl to allocate/free memory.
487  *
488  * This macro comes in 2 variants:
489  *
490  * 1. @c KALLOC_TYPE_VAR_DEFINE(var, e_ty, flags)
491  * 2. @c KALLOC_TYPE_VAR_DEFINE(var, h_ty, e_ty, flags)
492  *
493  * @param var           the name for the kalloc type var view.
494  * @param h_ty          the type of header in the allocation.
495  * @param e_ty          the type of repeating part in the allocation.
496  * @param flags         a @c KT_* flag.
497  */
498 #define KALLOC_TYPE_VAR_DEFINE(...) KALLOC_DISPATCH(KALLOC_TYPE_VAR_DEFINE, ##__VA_ARGS__)
499 
500 #ifdef XNU_KERNEL_PRIVATE
501 
502 /*
503  * These versions allow specifying the kalloc heap to allocate memory
504  * from
505  */
#define kheap_alloc_tag(kalloc_heap, size, flags, itag) \
	__kheap_alloc(kalloc_heap, size, __zone_flags_mix_tag(flags, itag), NULL)
#define kheap_alloc(kalloc_heap, size, flags) \
	kheap_alloc_tag(kalloc_heap, size, flags, VM_ALLOC_SITE_TAG())

/*
 * These versions should be used for allocating pure data bytes that
 * do not contain any pointers
 */
#define kalloc_data_tag(size, flags, itag) \
	kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, flags, itag)
#define kalloc_data(size, flags) \
	kheap_alloc(KHEAP_DATA_BUFFERS, size, flags)

#define krealloc_data_tag(elem, old_size, new_size, flags, itag) \
	__kheap_realloc(KHEAP_DATA_BUFFERS, elem, old_size, new_size, \
	    __zone_flags_mix_tag(flags, itag), NULL)
#define krealloc_data(elem, old_size, new_size, flags) \
	krealloc_data_tag(elem, old_size, new_size, flags, \
	    VM_ALLOC_SITE_TAG())

/*
 * NB: the expansions below deliberately have no trailing semicolon.
 * Callers write `kfree_data(e, s);` themselves; a semicolon inside the
 * macro would expand to two statements and break unbraced if/else.
 */
#define kfree_data(elem, size) \
	kheap_free(KHEAP_DATA_BUFFERS, elem, size)

#define kfree_data_addr(elem) \
	kheap_free_addr(KHEAP_DATA_BUFFERS, elem)
532 
/* Frees `addr` when only bounds on its allocation size are known:
 * the true size lies within [min_sz, max_sz]. */
extern void
kheap_free_bounded(
	kalloc_heap_t heap,
	void         *addr __unsafe_indexable,
	vm_size_t     min_sz,
	vm_size_t     max_sz);

/* Validates that [data, data + size) is a data-only kalloc allocation —
 * NOTE(review): inferred from the name; confirm against the definition. */
extern void
kalloc_data_require(
	void         *data __unsafe_indexable,
	vm_size_t     size);

/* Counterpart of kalloc_data_require for non-data allocations —
 * NOTE(review): inferred from the name; confirm against the definition. */
extern void
kalloc_non_data_require(
	void         *data __unsafe_indexable,
	vm_size_t     size);
549 
550 #else /* XNU_KERNEL_PRIVATE */
551 
/* Kext-facing allocation entry points (XNU uses the kheap_* macros above). */
extern void *__sized_by(size)
kalloc(
	vm_size_t           size) __attribute__((malloc, alloc_size(1)));

extern void *__unsafe_indexable
kalloc_data(
	vm_size_t           size,
	zalloc_flags_t      flags);

/*
 * Inline wrapper around kalloc_data: when the caller passes Z_NOFAIL,
 * teach the compiler that the result cannot be NULL so callers can skip
 * redundant NULL checks.
 */
__attribute__((malloc, alloc_size(1)))
static inline void *
__sized_by(size)
__kalloc_data(vm_size_t size, zalloc_flags_t flags)
{
	/* parenthesized call invokes the function, not the macro below */
	void *addr = (kalloc_data)(size, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define kalloc_data(size, fl) __kalloc_data(size, fl)
574 
extern void *__unsafe_indexable
krealloc_data(
	void               *ptr __unsafe_indexable,
	vm_size_t           old_size,
	vm_size_t           new_size,
	zalloc_flags_t      flags);

/*
 * Inline wrapper around krealloc_data: when the caller passes Z_NOFAIL,
 * teach the compiler that the result cannot be NULL so callers can skip
 * redundant NULL checks.
 */
__attribute__((malloc, alloc_size(3)))
static inline void *
__sized_by(new_size)
__krealloc_data(
	void               *ptr __sized_by(old_size),
	vm_size_t           old_size,
	vm_size_t           new_size,
	zalloc_flags_t      flags)
{
	/* parenthesized call invokes the function, not the macro below */
	void *addr = (krealloc_data)(ptr, old_size, new_size, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define krealloc_data(ptr, old_size, new_size, fl) \
	__krealloc_data(ptr, old_size, new_size, fl)
600 
/* Frees an allocation made with kalloc(); `size` must match the
 * size passed at allocation time. */
extern void
kfree(
	void               *data __unsafe_indexable,
	vm_size_t           size);

/* Kext-facing counterpart of the XNU-private kfree_data macro above. */
extern void
kfree_data(
	void               *ptr __unsafe_indexable,
	vm_size_t           size);

/* Like kfree_data, but without requiring the caller to know the size. */
extern void
kfree_data_addr(
	void               *ptr __unsafe_indexable);
616 
617 /*!
618  * @macro kalloc_type
619  *
620  * @abstract
621  * Allocates element of a particular type
622  *
623  * @discussion
624  * This family of allocators segregate kalloc allocations based on their type.
625  *
626  * This macro comes in 3 variants:
627  *
628  * 1. @c kalloc_type(type, flags)
629  *    Use this macro for fixed sized allocation of a particular type.
630  *
631  * 2. @c kalloc_type(e_type, count, flags)
632  *    Use this macro for variable sized allocations that form an array,
633  *    do note that @c kalloc_type(e_type, 1, flags) is not equivalent to
634  *    @c kalloc_type(e_type, flags).
635  *
636  * 3. @c kalloc_type(hdr_type, e_type, count, flags)
637  *    Use this macro for variable sized allocations formed with
638  *    a header of type @c hdr_type followed by a variable sized array
639  *    with elements of type @c e_type, equivalent to this:
640  *
641  *    <code>
642  *    struct {
643  *        hdr_type hdr;
644  *        e_type   arr[];
645  *    }
646  *    </code>
647  *
648  * @param flags         @c zalloc_flags_t that get passed to zalloc_internal
649  */
650 #define kalloc_type(...)  KALLOC_DISPATCH(kalloc_type, ##__VA_ARGS__)
651 
652 /*!
653  * @macro kfree_type
654  *
655  * @abstract
656  * Allocates element of a particular type
657  *
658  * @discussion
659  * This pairs with the @c kalloc_type() that was made to allocate this element.
660  * Arguments passed to @c kfree_type() must match the one passed at allocation
661  * time precisely.
662  *
663  * This macro comes in the same 3 variants kalloc_type() does:
664  *
665  * 1. @c kfree_type(type, elem)
666  * 2. @c kfree_type(e_type, count, elem)
667  * 3. @c kfree_type(hdr_type, e_type, count, elem)
668  *
669  * @param elem          The address of the element to free
670  */
671 #define kfree_type(...)  KALLOC_DISPATCH(kfree_type, ##__VA_ARGS__)
672 
673 #ifdef XNU_KERNEL_PRIVATE
674 #define kalloc_type_tag(...)     KALLOC_DISPATCH(kalloc_type_tag, ##__VA_ARGS__)
675 #define krealloc_type_tag(...)   KALLOC_DISPATCH(krealloc_type_tag, ##__VA_ARGS__)
676 #define krealloc_type(...)       KALLOC_DISPATCH(krealloc_type, ##__VA_ARGS__)
677 
678 /*
679  * kalloc_type_require can't be made available to kexts as the
680  * kalloc_type_view's zone could be NULL in the following cases:
681  * - Size greater than KALLOC_SAFE_ALLOC_SIZE
682  * - On macOS, if call is not in BootKC
683  * - All allocations in kext for armv7
684  */
/* Asserts that `value` belongs to the zone backing `type` (see the caveats
 * above about views whose zone may be NULL). */
#define kalloc_type_require(type, value) ({                                    \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	zone_require(kt_view_var->kt_zv.zv_zone, value);                       \
})
689 
690 #endif
691 
692 /*!
693  * @enum kt_granule_t
694  *
695  * @brief
696  * Granule encodings used by the compiler for the type signature.
697  *
698  * @discussion
699  * Given a type, the XNU signature type system (__builtin_xnu_type_signature)
700  * produces a signature by analyzing its memory layout, in chunks of 8 bytes,
701  * which we call granules. The encoding produced for each granule is the
702  * bitwise or of the encodings of all the types of the members included
703  * in that granule.
704  *
705  * @const KT_GRANULE_PADDING
706  * Represents padding inside a record type.
707  *
708  * @const KT_GRANULE_POINTER
709  * Represents a pointer type.
710  *
711  * @const KT_GRANULE_DATA
712  * Represents a scalar type that is not a pointer.
713  *
714  * @const KT_GRANULE_DUAL
715  * Currently unused.
716  *
717  * @const KT_GRANULE_PAC
718  * Represents a pointer which is subject to PAC.
719  */
__options_decl(kt_granule_t, uint32_t, {
	KT_GRANULE_PADDING = 0, /* padding inside a record type */
	KT_GRANULE_POINTER = 1, /* pointer type */
	KT_GRANULE_DATA    = 2, /* scalar type that is not a pointer */
	KT_GRANULE_DUAL    = 4, /* currently unused */
	KT_GRANULE_PAC     = 8  /* pointer subject to PAC */
});
727 
#define KT_GRANULE_MAX                                                \
	(KT_GRANULE_PADDING | KT_GRANULE_POINTER | KT_GRANULE_DATA |  \
	    KT_GRANULE_DUAL | KT_GRANULE_PAC)

/*
 * Convert a granule encoding to the index of the bit that
 * represents such granule in the type summary.
 *
 * The XNU type summary (__builtin_xnu_type_summary) produces a 32-bit
 * summary of the type signature of a given type. If the bit at index
 * (1 << G) is set in the summary, that means that the type contains
 * one or more granules with encoding G.
 *
 * The argument is parenthesized so that compound encodings such as
 * `a | b` shift correctly: `<<` binds tighter than `|`.
 */
#define KT_SUMMARY_GRANULE_TO_IDX(g)  (1UL << (g))

/* low 16 bits of the summary carry the granule information */
#define KT_SUMMARY_MASK_TYPE_BITS  (0xffff)

/* summary bits permitted in a "data-only" type */
#define KT_SUMMARY_MASK_DATA                             \
	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |  \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA))

/* summary bits permitted in a pointer-only type */
#define KT_SUMMARY_MASK_PTR                              \
	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))

/* every summary bit corresponding to a valid single-granule encoding */
#define KT_SUMMARY_MASK_ALL_GRANULES                        \
	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DUAL) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
760 
761 /*!
762  * @macro KT_SUMMARY_GRANULES
763  *
764  * @abstract
765  * Return the granule type summary for a given type
766  *
767  * @discussion
768  * This macro computes the type summary of a type, and it then extracts the
769  * bits which carry information about the granules in the memory layout.
770  *
771  * Note: you should never have to use __builtin_xnu_type_summary
772  * directly, as we reserve the right to use the remaining bits with
773  * different semantics.
774  *
775  * @param type          The type to analyze
776  */
777 #define KT_SUMMARY_GRANULES(type) \
778 	(__builtin_xnu_type_summary(type) & KT_SUMMARY_MASK_TYPE_BITS)
779 
780 /*!
781  * @macro KALLOC_TYPE_SIG_CHECK
782  *
783  * @abstract
784  * Return whether a given type is only made up of granules specified in mask
785  *
786  * @param mask          Granules to check for
787  * @param type          The type to analyze
788  */
789 #define KALLOC_TYPE_SIG_CHECK(mask, type) \
790 	((KT_SUMMARY_GRANULES(type) & ~(mask)) == 0)
791 
792 /*!
793  * @macro KALLOC_TYPE_IS_DATA_ONLY
794  *
795  * @abstract
796  * Return whether a given type is considered a data-only type.
797  *
798  * @param type          The type to analyze
799  */
800 #define KALLOC_TYPE_IS_DATA_ONLY(type) \
801 	KALLOC_TYPE_SIG_CHECK(KT_SUMMARY_MASK_DATA, type)
802 
803 /*!
804  * @macro KALLOC_TYPE_HAS_OVERLAPS
805  *
806  * @abstract
807  * Return whether a given type has overlapping granules.
808  *
809  * @discussion
810  * This macro returns whether the memory layout for a given type contains
811  * overlapping granules. An overlapping granule is a granule which includes
812  * members with types that have different encodings under the XNU signature
813  * type system.
814  *
815  * @param type          The type to analyze
816  */
817 #define KALLOC_TYPE_HAS_OVERLAPS(type) \
818 	((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_ALL_GRANULES) != 0)
819 
820 /*!
821  * @macro KALLOC_TYPE_IS_COMPATIBLE_PTR
822  *
823  * @abstract
824  * Return whether pointer is compatible with a given type, in the XNU
825  * signature type system.
826  *
827  * @discussion
828  * This macro returns whether type pointed to by @c ptr is either the same
829  * type as @c type, or it has the same signature. The implementation relies
830  * on the @c __builtin_xnu_types_compatible builtin, and the value returned
831  * can be evaluated at compile time in both C and C++.
832  *
833  * Note: void pointers are treated as wildcards, and are thus compatible
834  * with any given type.
835  *
836  * @param ptr           the pointer whose type needs to be checked.
837  * @param type          the type which the pointer will be checked against.
838  */
#define KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type)                   \
	_Pragma("clang diagnostic push")                               \
	_Pragma("clang diagnostic ignored \"-Wvoid-ptr-dereference\"") \
	(__builtin_xnu_types_compatible(__typeof__(*ptr), type) ||     \
	    __builtin_xnu_types_compatible(__typeof__(*ptr), void))    \
	_Pragma("clang diagnostic pop")

/* Compile-time assertion that `ptr` may point to an object of `type`. */
#define KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type) \
	_Static_assert(KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type), \
	    "Pointer type is not compatible with specified type")
849 
850 
851 /*!
852  * @const KALLOC_ARRAY_SIZE_MAX
853  *
854  * @brief
855  * The maximum size that can be allocated with the @c KALLOC_ARRAY interface.
856  *
857  * @discussion
858  * This size is:
859  * - ~256M on 4k or PAC systems with 16k pages
860  * - ~1G on other 16k systems.
861  */
862 #if __arm64e__
863 #define KALLOC_ARRAY_SIZE_MAX   (PAGE_MASK << PAGE_SHIFT)
864 #else
865 #define KALLOC_ARRAY_SIZE_MAX   (UINT16_MAX << PAGE_SHIFT)
866 #endif
867 
868 /*!
869  * @macro KALLOC_ARRAY_TYPE_DECL
870  *
871  * @brief
872  * Declares a type used as a packed kalloc array type.
873  *
874  * @discussion
875  * This macro comes in two variants
876  *
877  * - KALLOC_ARRAY_TYPE_DECL(name, e_ty)
878  * - KALLOC_ARRAY_TYPE_DECL(name, h_ty, e_ty)
879  *
880  * The first one defines an array of elements of type @c e_ty,
881  * and the second a header of type @c h_ty followed by
882  * an array of elements of type @c e_ty.
883  *
884  * Those macros will then define the type @c ${name}_t as a typedef
885  * to a non existent structure type, in order to avoid accidental
886  * dereference of those pointers.
887  *
888  * kalloc array pointers are actually pointers that in addition to encoding
889  * the array base pointer, also encode the allocation size (only sizes
890  * up to @c KALLOC_ARRAY_SIZE_MAX bytes).
891  *
892  * Such pointers can be signed with data PAC properly, which will provide
893  * integrity of both the base pointer, and its size.
894  *
895  * kalloc arrays are useful to use instead of embedding the length
896  * of the allocation inside of itself, which tends to be driven by:
897  *
898  * - a desire to not grow the outer structure holding the pointer
899  *   to this array with an extra "length" field for optional arrays,
900  *   in order to save memory (see the @c ip_requests field in ports),
901  *
902  * - a need to be able to atomically consult the size of an allocation
903  *   with respect to loading its pointer (where address dependencies
904  *   traditionally gives this property) for lockless algorithms
905  *   (see the IPC space table).
906  *
907  * Using a kalloc array is preferable for two reasons:
908  *
909  * - embedding lengths inside the allocation is self-referential
910  *   and an appetizing target for post-exploitation strategies,
911  *
 * - having a dependent load to get to the length loses out-of-order
 *   opportunities for the CPU and is prone to back-to-back cache misses.
914  *
915  * Holding information such as a level of usage of this array
916  * within itself is fine provided those quantities are validated
917  * against the "count" (number of elements) or "size" (allocation
918  * size in bytes) of the array before use.
919  *
920  *
921  * This macro will define a series of functions:
922  *
923  * - ${name}_count_to_size() and ${name}_size_to_count()
924  *   to convert between memory sizes and array element counts
925  *   (taking the header size into account when it exists);
926  *
927  *   Note that those functions assume the count/size are corresponding
928  *   to a valid allocation size within [0, KALLOC_ARRAY_SIZE_MAX].
929  *
930  * - ${name}_next_size() to build good allocation growth policies;
931  *
932  * - ${name}_base() returning a (bound-checked indexable) pointer
933  *   to the header of the array (or its first element when there is
934  *   no header);
935  *
936  * - ${name}_begin() returning a (bound-checked indexable)
 *   pointer to the first element of the array;
938  *
939  * - ${name}_contains() to check if an element index is within
940  *   the valid range of this allocation;
941  *
942  * - ${name}_next_elem() to get the next element of an array.
943  *
944  * - ${name}_get() and ${name}_get_nocheck() to return a pointer
945  *   to a given cell of the array with (resp. without) a bound
946  *   check against the array size. The bound-checked variant
947  *   returns NULL for invalid indexes.
948  *
949  * - ${name}_alloc_by_count() and ${name}_alloc_by_size()
950  *   to allocate a new array able to hold at least that many elements
951  *   (resp. bytes).
952  *
953  * - ${name}_realloc_by_count() and ${name}_realloc_by_size()
954  *   to re-allocate a new array able to hold at least that many elements
955  *   (resp. bytes).
956  *
957  * - ${name}_free() and ${name}_free_noclear() to free such an array
958  *   (resp. without nil-ing the pointer). The non-clearing variant
959  *   is to be used only when nil-ing out the pointer is otherwise
960  *   not allowed by C (const value, unable to take address of, ...),
961  *   otherwise the normal ${name}_free() must be used.
962  */
/* Dispatches on argument count to KALLOC_ARRAY_TYPE_DECL_2 or _3 below. */
#define KALLOC_ARRAY_TYPE_DECL(...) \
	KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DECL, ##__VA_ARGS__)
965 
966 #if XNU_KERNEL_PRIVATE
967 
/*
 * Internal worker behind KALLOC_ARRAY_TYPE_DECL().
 *
 *   name      prefix for every generated identifier (type is ${name}_t)
 *   h_type_t  header type (the element type itself when there is no header)
 *   h_sz      header size in bytes (0 when there is no header)
 *   e_type_t  array element type
 *   e_sz      size of one element in bytes
 *
 * See the KALLOC_ARRAY_TYPE_DECL() documentation above for the contract
 * of each generated function.
 */
#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
	static_assert(!KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA,                  \
	    h_type_t, e_type_t), "data only not supported yet");                \
	KALLOC_TYPE_VAR_DECLARE(name ## _kt_view);                              \
	typedef struct name * __unsafe_indexable name ## _t;                    \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _count_to_size(uint32_t count)                                  \
	{                                                                       \
	        return (uint32_t)((h_sz) + (e_sz) * count);                     \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _size_to_count(vm_size_t size)                                  \
	{                                                                       \
	        return (uint32_t)((size - (h_sz)) / (e_sz));                    \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _size(name ## _t array)                                         \
	{                                                                       \
	        return __kalloc_array_size((vm_address_t)array);                \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _next_size(                                                     \
	        uint32_t                min_count,                              \
	        vm_size_t               cur_size,                               \
	        uint32_t                vm_period)                              \
	{                                                                       \
	        vm_size_t size;                                                 \
                                                                                \
	        if (cur_size) {                                                 \
	                size = cur_size + (e_sz);                               \
	        } else {                                                        \
	                size = kt_size(h_sz, e_sz, min_count) - 1;              \
	        }                                                               \
	        size = kalloc_next_good_size(size, vm_period);                  \
	        if (size <= KALLOC_ARRAY_SIZE_MAX) {                            \
	               return (uint32_t)size;                                   \
	        }                                                               \
	        return 2 * KALLOC_ARRAY_SIZE_MAX; /* will fail */               \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _count(name ## _t array)                                        \
	{                                                                       \
	        return name ## _size_to_count(name ## _size(array));            \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline h_type_t *__header_bidi_indexable                         \
	name ## _base(name ## _t array)                                         \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        uint32_t     size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        (void)size;                                                     \
	        return __unsafe_forge_bidi_indexable(h_type_t *, base, size);   \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t *__header_bidi_indexable                         \
	name ## _begin(name ## _t array)                                        \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        uint32_t     size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        (void)size;                                                     \
	        return __unsafe_forge_bidi_indexable(e_type_t *, base, size);   \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t *                                                \
	name ## _next_elem(name ## _t array, e_type_t *e)                       \
	{                                                                       \
	        vm_address_t end = __kalloc_array_end((vm_address_t)array);     \
	        vm_address_t ptr = (vm_address_t)e + sizeof(e_type_t);          \
                                                                                \
	        if (ptr + sizeof(e_type_t) <= end) {                            \
	                return __unsafe_forge_single(e_type_t *, ptr);          \
	        }                                                               \
	        return NULL;                                                    \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline bool                                                      \
	name ## _contains(name ## _t array, vm_size_t i)                        \
	{                                                                       \
	        vm_size_t offs = (e_sz) + (h_sz);                               \
	        vm_size_t s;                                                    \
                                                                                \
	        if (__improbable(os_mul_and_add_overflow(i, e_sz, offs, &s))) { \
	                return false;                                           \
	        }                                                               \
	        if (__improbable(s > name ## _size(array))) {                   \
	                return false;                                           \
	        }                                                               \
	        return true;                                                    \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t * __single                                       \
	name ## _get_nocheck(name ## _t array, vm_size_t i)                     \
	{                                                                       \
	        return name ## _begin(array) + i;                               \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t * __single                                       \
	name ## _get(name ## _t array, vm_size_t i)                             \
	{                                                                       \
	        if (__probable(name ## _contains(array, i))) {                  \
	            return name ## _get_nocheck(array, i);                      \
	        }                                                               \
	        return NULL;                                                    \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _alloc_by_size(vm_size_t size, zalloc_flags_t fl)               \
	{                                                                       \
	        fl |= Z_KALLOC_ARRAY;                                           \
	        fl = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG());             \
	        return (name ## _t)kalloc_type_var_impl(name ## _kt_view,       \
	                        size, fl, NULL);                                \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _alloc_by_count(uint32_t count, zalloc_flags_t fl)              \
	{                                                                       \
	        return name ## _alloc_by_size(kt_size(h_sz, e_sz, count), fl);  \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _realloc_by_size(                                               \
	        name ## _t              array,                                  \
	        vm_size_t               new_size,                               \
	        zalloc_flags_t          fl)                                     \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        vm_size_t    size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        fl |= Z_KALLOC_ARRAY;                                           \
	        fl = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG());             \
	        return (name ## _t)(krealloc_ext)(name ## _kt_view,             \
	                        (void *)base, size, new_size, fl, NULL).addr;   \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _realloc_by_count(                                              \
	        name ## _t              array,                                  \
	        uint32_t                new_count,                              \
	        zalloc_flags_t          fl)                                     \
	{                                                                       \
	        vm_size_t new_size = kt_size(h_sz, e_sz, new_count);            \
                                                                                \
	        return name ## _realloc_by_size(array, new_size, fl);           \
	}                                                                       \
                                                                                \
	static inline void                                                      \
	name ## _free_noclear(name ## _t array)                                 \
	{                                                                       \
	        kfree_type_var_impl(name ## _kt_view,                           \
	            name ## _base(array), name ## _size(array));                \
	}                                                                       \
                                                                                \
	static inline void                                                      \
	name ## _free(name ## _t *arrayp)                                       \
	{                                                                       \
	        name ## _t array = *arrayp;                                     \
                                                                                \
	        *arrayp = NULL;                                                 \
	        kfree_type_var_impl(name ## _kt_view,                           \
	            name ## _base(array), name ## _size(array));                \
	}
1148 
1149 
1150 /*!
1151  * @macro KALLOC_ARRAY_TYPE_DEFINE()
1152  *
1153  * @description
1154  * Defines the data structures required to pair with a KALLOC_ARRAY_TYPE_DECL()
1155  * kalloc array declaration.
1156  *
1157  * @discussion
1158  * This macro comes in two variants
1159  *
1160  * - KALLOC_ARRAY_TYPE_DEFINE(name, e_ty, flags)
1161  * - KALLOC_ARRAY_TYPE_DEFINE(name, h_ty, e_ty, flags)
1162  *
1163  * Those must pair with the KALLOC_ARRAY_TYPE_DECL() form being used.
1164  * The flags must be valid @c kalloc_type_flags_t flags.
1165  */
/* Dispatches on argument count to KALLOC_ARRAY_TYPE_DEFINE_3 or _4 below. */
#define KALLOC_ARRAY_TYPE_DEFINE(...) \
	KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DEFINE, ##__VA_ARGS__)
1168 
/*!
 * @function kalloc_next_good_size()
 *
 * @brief
 * Allows implementing "allocation growth policies" that work well
 * with the allocator.
 *
 * @discussion
 * Note that if the caller tracks a number of elements for an array,
 * where the elements are of size S, and the current count is C,
 * then it is possible for kalloc_next_good_size(C * S, ..) to hit
 * a fixed point; clients must therefore call with a size of at least
 * ((C + 1) * S).
 *
 * @param size         the current "size" of the allocation (in bytes).
 * @param period       the "period" (power of 2) for the allocation growth
 *                     policy once hitting the VM sized allocations.
 */
extern vm_size_t kalloc_next_good_size(
	vm_size_t               size,
	uint32_t                period);
1189 
1190 #pragma mark kalloc_array implementation details
1191 
/* No-header variant: header type degenerates to the element type, size 0. */
#define KALLOC_ARRAY_TYPE_DECL_2(name, e_type_t) \
	KALLOC_ARRAY_TYPE_DECL_(name, e_type_t, 0, e_type_t, sizeof(e_type_t))
1194 
/*
 * Headered variant: a @c h_type_t header followed by @c e_type_t elements.
 *
 * Fix: this previously expanded identically to the 2-argument form
 * (passing @c e_type_t and a header size of 0), which silently dropped
 * the header from every generated size/count/base computation.  The
 * documented contract (see KALLOC_ARRAY_TYPE_DECL above) requires the
 * header type and its size to be forwarded.
 */
#define KALLOC_ARRAY_TYPE_DECL_3(name, h_type_t, e_type_t) \
	KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, sizeof(h_type_t), \
	    e_type_t, sizeof(e_type_t))
1197 
/* Defines the variable-size kalloc_type view backing a no-header array type. */
#define KALLOC_ARRAY_TYPE_DEFINE_3(name, e_type_t, flags) \
	KALLOC_TYPE_VAR_DEFINE_3(name ## _kt_view, e_type_t, flags)

/* Defines the variable-size kalloc_type view backing a headered array type. */
#define KALLOC_ARRAY_TYPE_DEFINE_4(name, h_type_t, e_type_t, flags) \
	KALLOC_TYPE_VAR_DEFINE_4(name ## _kt_view, h_type_t, e_type_t, flags)

/* Unpacks a kalloc array pointer into its { address, size } pair. */
extern struct kalloc_result __kalloc_array_decode(
	vm_address_t            array) __pure2;
1206 
1207 __pure2
1208 static inline uint32_t
__kalloc_array_size(vm_address_t array)1209 __kalloc_array_size(vm_address_t array)
1210 {
1211 	vm_address_t size = __kalloc_array_decode(array).size;
1212 
1213 	__builtin_assume(size <= KALLOC_ARRAY_SIZE_MAX);
1214 	return (uint32_t)size;
1215 }
1216 
1217 __pure2
1218 static inline vm_address_t
__kalloc_array_base(vm_address_t array)1219 __kalloc_array_base(vm_address_t array)
1220 {
1221 	return (vm_address_t)__kalloc_array_decode(array).addr;
1222 }
1223 
1224 __pure2
1225 static inline vm_address_t
__kalloc_array_begin(vm_address_t array,vm_size_t hdr_size)1226 __kalloc_array_begin(vm_address_t array, vm_size_t hdr_size)
1227 {
1228 	return (vm_address_t)__kalloc_array_decode(array).addr + hdr_size;
1229 }
1230 
1231 __pure2
1232 static inline vm_address_t
__kalloc_array_end(vm_address_t array)1233 __kalloc_array_end(vm_address_t array)
1234 {
1235 	struct kalloc_result kr = __kalloc_array_decode(array);
1236 
1237 	return (vm_address_t)kr.addr + kr.size;
1238 }
1239 
#else /* !XNU_KERNEL_PRIVATE */

/* Outside of xnu proper, only the opaque array pointer type is exposed. */
#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
	typedef struct name * __unsafe_indexable name ## _t

#endif /* !XNU_KERNEL_PRIVATE */
1246 #pragma mark implementation details
1247 
1248 
1249 static inline void *__unsafe_indexable
kt_mangle_var_view(kalloc_type_var_view_t kt_view)1250 kt_mangle_var_view(kalloc_type_var_view_t kt_view)
1251 {
1252 	return (void *__unsafe_indexable)((uintptr_t)kt_view | 1ul);
1253 }
1254 
1255 static inline kalloc_type_var_view_t __unsafe_indexable
kt_demangle_var_view(void * ptr)1256 kt_demangle_var_view(void *ptr)
1257 {
1258 	return (kalloc_type_var_view_t __unsafe_indexable)((uintptr_t)ptr & ~1ul);
1259 }
1260 
/* True when @c ptr is a mangled kalloc_type_var_view_t (low bit is tagged). */
#define kt_is_var_view(ptr)  ((uintptr_t)(ptr) & 1)
1262 
1263 static inline vm_size_t
kt_size(vm_size_t s1,vm_size_t s2,vm_size_t c2)1264 kt_size(vm_size_t s1, vm_size_t s2, vm_size_t c2)
1265 {
1266 	/* kalloc_large() will reject this size before even asking the VM  */
1267 	const vm_size_t limit = 1ull << (8 * sizeof(vm_size_t) - 1);
1268 
1269 	if (os_mul_and_add_overflow(s2, c2, s1, &s1) || (s1 & limit)) {
1270 		return limit;
1271 	}
1272 	return s1;
1273 }
1274 
#ifndef __ZONE_DECLARE_TYPE
/* Stub for build configurations that do not provide this hook. */
#define __ZONE_DECLARE_TYPE(var, type_t)  ((void)0)
#endif

/*
 * kalloc_type(type, flags): allocates one element of @c type through a
 * kalloc_type view defined statically at the call site.
 */
#define kalloc_type_2(type, flags) ({                                          \
	__ZONE_DECLARE_TYPE(kt_view_var, type);                                \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	__unsafe_forge_single(type *, kalloc_type_impl(kt_view_var, flags));   \
})

/* kfree_type(type, elem): frees one element and nils out @c elem. */
#define kfree_type_2(type, elem) ({                                            \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	kfree_type_impl(kt_view_var, os_ptr_load_and_erase(elem));             \
})

/* kfree_type(type, count, elem): frees an array of @c count elements. */
#define kfree_type_3(type, count, elem) ({                                     \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var, os_ptr_load_and_erase(elem),          \
	    kt_size(0, sizeof(type), __kfree_count));                          \
})

/* kfree_type(hdr_ty, e_ty, count, elem): frees a header + array allocation. */
#define kfree_type_4(hdr_ty, e_ty, count, elem) ({                             \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                   \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var,                                       \
	    os_ptr_load_and_erase(elem),                                       \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), __kfree_count));             \
})
1308 
1309 #ifdef XNU_KERNEL_PRIVATE
/* Single-element allocation with an explicit VM tag. */
#define kalloc_type_tag_3(type, flags, tag) ({                                 \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	__unsafe_forge_single(type *, zalloc_flags(kt_view_var,                \
	    Z_VM_TAG(flags, tag)));                                            \
})

/* Array allocation (count elements) with an explicit VM tag. */
#define kalloc_type_tag_4(type, count, flags, tag) ({                          \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count),                                   \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define kalloc_type_3(type, count, flags)  \
	kalloc_type_tag_4(type, count, flags, VM_ALLOC_SITE_TAG())

/* Header + array allocation with an explicit VM tag. */
#define kalloc_type_tag_5(hdr_ty, e_ty, count, flags, tag) ({                  \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var,                            \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), count),                      \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define kalloc_type_4(hdr_ty, e_ty, count, flags) \
	kalloc_type_tag_5(hdr_ty, e_ty, count, flags, VM_ALLOC_SITE_TAG())

/* Array reallocation (old_count -> new_count elements) with a VM tag. */
#define krealloc_type_tag_6(type, old_count, new_count, elem, flags, tag) ({   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	(type *)__krealloc_type(kt_view_var, elem,                             \
	    kt_size(0, sizeof(type), old_count),                               \
	    kt_size(0, sizeof(type), new_count),                               \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define krealloc_type_5(type, old_count, new_count, elem, flags) \
	krealloc_type_tag_6(type, old_count, new_count, elem, flags, \
	    VM_ALLOC_SITE_TAG())

/* Header + array reallocation with a VM tag. */
#define krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem,          \
	    flags, tag) ({                                                     \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                   \
	(hdr_ty *)__krealloc_type(kt_view_var, elem,                           \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), old_count),                  \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), new_count),                  \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define krealloc_type_6(hdr_ty, e_ty, old_count, new_count, elem, flags) \
	krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, flags,   \
	    VM_ALLOC_SITE_TAG())
1360 
1361 #else /* XNU_KERNEL_PRIVATE */
1362 
/* Kext variant: only Z_WAITOK is permitted, enforced at compile time. */
#define kalloc_type_3(type, count, flags) ({                                   \
	_Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK");   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count), flags, NULL);                     \
})

/* Kext variant of the header + array form; only Z_WAITOK is permitted. */
#define kalloc_type_4(hdr_ty, e_ty, count, flags) ({                           \
	_Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK");   \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var, kt_size(sizeof(hdr_ty),    \
	    sizeof(e_ty), count), flags, NULL);                                \
})
1377 
1378 #endif /* !XNU_KERNEL_PRIVATE */
1379 
1380 /*
1381  * All k*free macros set "elem" to NULL on free.
1382  *
1383  * Note: all values passed to k*free() might be in the element to be freed,
1384  *       temporaries must be taken, and the resetting to be done prior to free.
1385  */
1386 #ifdef XNU_KERNEL_PRIVATE
1387 
/*
 * Frees a sized heap allocation and nils out @c elem.
 * The size is captured before the pointer is erased, since it may live
 * inside the allocation being freed (see the note above).
 */
#define kheap_free(heap, elem, size) ({                                        \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	__auto_type __kfree_size = (size);                                     \
	__builtin_assume(!kt_is_var_view(__kfree_heap));                       \
	kfree_ext((void *)__kfree_heap,                                        \
	    (void *)os_ptr_load_and_erase(elem), __kfree_size);                \
})

/* Frees a heap allocation by address only, and nils out @c elem. */
#define kheap_free_addr(heap, elem) ({                                         \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	kfree_addr_ext(__kfree_heap, (void *)os_ptr_load_and_erase(elem));     \
})

/*
 * Frees an allocation whose size is only known to lie in [min_sz, max_sz],
 * and nils out @c elem; max_sz is bounded at compile time.
 */
#define kheap_free_bounded(heap, elem, min_sz, max_sz) ({                      \
	static_assert(max_sz <= KALLOC_SAFE_ALLOC_SIZE);                       \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	__auto_type __kfree_min_sz = (min_sz);                                 \
	__auto_type __kfree_max_sz = (max_sz);                                 \
	(kheap_free_bounded)(__kfree_heap,                                     \
	    (void *)os_ptr_load_and_erase(elem),                               \
	    __kfree_min_sz, __kfree_max_sz);                                   \
})
1410 
1411 #else /* XNU_KERNEL_PRIVATE */
1412 
/* Frees a sized data allocation and nils out @c elem (size read first). */
#define kfree_data(elem, size) ({                                              \
	__auto_type __kfree_size = (size);                                     \
	(kfree_data)((void *)os_ptr_load_and_erase(elem), __kfree_size);       \
})

/* Frees a data allocation by address only, and nils out @c elem. */
#define kfree_data_addr(elem) \
	(kfree_data_addr)((void *)os_ptr_load_and_erase(elem))
1420 
1421 #endif /* !XNU_KERNEL_PRIVATE */
1422 
/* Opts a function out of ASan instrumentation when ASan is enabled. */
#if __has_feature(address_sanitizer)
# define __kalloc_no_kasan __attribute__((no_sanitize("address")))
#else
# define __kalloc_no_kasan
#endif

#define KALLOC_CONCAT(x, y) __CONCAT(x,y)

/*
 * Argument-counting machinery: KALLOC_COUNT_ARGS(...) expands to a
 * suffix _0 .. _9 matching the number of arguments, and KALLOC_DISPATCH
 * pastes that suffix onto @c base to select the right macro overload.
 */
#define KALLOC_COUNT_ARGS1(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, N, ...) N
#define KALLOC_COUNT_ARGS(...) \
	KALLOC_COUNT_ARGS1(, ##__VA_ARGS__, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0)
#define KALLOC_DISPATCH1(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH(base, ...) \
	KALLOC_DISPATCH1(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
#define KALLOC_DISPATCH1_R(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH_R(base, ...) \
	KALLOC_DISPATCH1_R(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)

/* Per-line unique identifier for the static view each call site defines. */
#define kt_view_var \
	KALLOC_CONCAT(kalloc_type_view_, __LINE__)

/* Segment where kalloc_type views are placed (read-only data on LP64). */
#if __LP64__
#define KALLOC_TYPE_SEGMENT "__DATA_CONST"
#else
#define KALLOC_TYPE_SEGMENT "__DATA"
#endif
1449 
1450 /*
1451  * When kalloc_type_impl is called from xnu, it calls zalloc_flags
1452  * directly and doesn't redirect zone-less sites to kheap_alloc.
1453  * Passing a size larger than KHEAP_MAX_SIZE for these allocations will
1454  * lead to a panic as the zone is null. Therefore assert that size
1455  * is less than KALLOC_SAFE_ALLOC_SIZE.
1456  */
#ifdef XNU_KERNEL_PRIVATE
#define KALLOC_TYPE_SIZE_CHECK(size)                           \
	_Static_assert(size <= KALLOC_SAFE_ALLOC_SIZE,             \
	"type is too large");
#else
/* Kexts: no compile-time size ceiling is enforced (see comment above). */
#define KALLOC_TYPE_SIZE_CHECK(size)
#endif
1464 
/* Signature-summary check for a single type. */
#define KALLOC_TYPE_CHECK_2(check, type) \
	(KALLOC_TYPE_SIG_CHECK(check, type))

/* Signature-summary check that must hold for both types. */
#define KALLOC_TYPE_CHECK_3(check, type1, type2) \
	(KALLOC_TYPE_SIG_CHECK(check, type1) && \
	    KALLOC_TYPE_SIG_CHECK(check, type2))

#define KALLOC_TYPE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_CHECK, ##__VA_ARGS__)

/* True when a single-type allocation is large enough to use the VM. */
#define KALLOC_TYPE_VM_SIZE_CHECK_1(type) \
	(sizeof(type) > KHEAP_MAX_SIZE)

/* True when a header + element allocation is large enough to use the VM. */
#define KALLOC_TYPE_VM_SIZE_CHECK_2(type1, type2) \
	(sizeof(type1) + sizeof(type2) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_VM_SIZE_CHECK, ##__VA_ARGS__)
1483 
/*
 * Permit a trailing elem_ty array after hdr_ty only when the pair is
 * uniform: either the header is data-only too, or the elements aren't.
 */
#define KALLOC_TYPE_TRAILING_DATA_CHECK(hdr_ty, elem_ty)     \
	_Static_assert((KALLOC_TYPE_IS_DATA_ONLY(hdr_ty) ||  \
	    !KALLOC_TYPE_IS_DATA_ONLY(elem_ty)),             \
	"cannot allocate data-only array of " #elem_ty       \
	" contiguously to " #hdr_ty)
1489 
/* C++ requires an explicit static_cast to build a kalloc_type_flags_t
 * from an integer expression; C accepts a plain cast. */
#ifdef __cplusplus
#define KALLOC_TYPE_CAST_FLAGS(flags) static_cast<kalloc_type_flags_t>(flags)
#else
#define KALLOC_TYPE_CAST_FLAGS(flags) (kalloc_type_flags_t)(flags)
#endif
1495 
1496 /*
1497  * Don't emit signature if type is "data-only" or is large enough that it
1498  * uses the VM.
1499  *
1500  * Note: sig_type is the type you want to emit signature for. The variable
1501  * args can be used to provide other types in the allocation, to make the
1502  * decision of whether to emit the signature.
1503  */
1504 #define KALLOC_TYPE_EMIT_SIG(sig_type, ...)                              \
1505 	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, sig_type, ##__VA_ARGS__) || \
1506 	KALLOC_TYPE_VM_SIZE_CHECK(sig_type, ##__VA_ARGS__))?                 \
1507 	"" : __builtin_xnu_type_signature(sig_type)
1508 
1509 /*
1510  * Kalloc type flags are adjusted to indicate if the type is "data-only" or
1511  * will use the VM or is a pointer array.
1512  */
1513 #define KALLOC_TYPE_ADJUST_FLAGS(flags, ...)                                 \
1514 	KALLOC_TYPE_CAST_FLAGS((flags | KT_CHANGED | KT_CHANGED2 |               \
1515 	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, __VA_ARGS__)? KT_DATA_ONLY: 0) |\
1516 	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_PTR, __VA_ARGS__)? KT_PTR_ARRAY: 0) | \
1517 	(KALLOC_TYPE_VM_SIZE_CHECK(__VA_ARGS__)? KT_VM : 0)))
1518 
/*
 * Backing definition for fixed-size kalloc_type views: emits a
 * KASan-exempt kalloc_type_view record into the __kalloc_type section
 * (const on LP64, see KALLOC_TYPE_SEGMENT), with flags/signature
 * derived from the type, plus a compile-time size check.
 */
#define _KALLOC_TYPE_DEFINE(var, type, flags)                       \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_type, "      \
	    "regular, live_support")                                    \
	struct kalloc_type_view var[1] = { {                            \
	    .kt_zv.zv_name = "site." #type,                             \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size = sizeof(type),                                    \
	    .kt_signature = KALLOC_TYPE_EMIT_SIG(type),                 \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1530 
/*
 * Variable-size counterpart of _KALLOC_TYPE_DEFINE with a single
 * element type: emits a kalloc_type_var_view record into __kalloc_var.
 */
#define KALLOC_TYPE_VAR_DEFINE_3(var, type, flags)                  \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, "       \
	    "regular, live_support")                                    \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #type,                                   \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type),                  \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1543 
/*
 * Variable-size view for a fixed header followed by a variable array
 * of elements. Each of hdr and type gets its own size/signature slot;
 * the trailing-data check rejects a data-only array behind a
 * pointer-bearing header.
 */
#define KALLOC_TYPE_VAR_DEFINE_4(var, hdr, type, flags)             \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, "       \
	    "regular, live_support")                                    \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #hdr "." #type,                          \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, hdr, type),     \
	    .kt_size_hdr = sizeof(hdr),                                 \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_hdr = KALLOC_TYPE_EMIT_SIG(hdr, type),              \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type, hdr),             \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(hdr));                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));                           \
	KALLOC_TYPE_TRAILING_DATA_CHECK(hdr, type);
1560 
#ifndef XNU_KERNEL_PRIVATE
/*
 * This macro is currently used by AppleImage4
 * (file-scope, static variant of _KALLOC_TYPE_DEFINE).
 */
#define KALLOC_TYPE_DEFINE_SITE(var, type, flags)       \
	static _KALLOC_TYPE_DEFINE(var, type, flags)

#endif /* !XNU_KERNEL_PRIVATE */
1569 
1570 #ifdef XNU_KERNEL_PRIVATE
1571 
/*
 * In-kernel implementation: allocate straight from the typed view's
 * zone; no kheap fallback (see the size-check comment above).
 */
#define kalloc_type_impl(kt_view, flags) \
	zalloc_flags(kt_view, flags)
1574 
1575 static inline void
kfree_type_impl(kalloc_type_view_t kt_view,void * __unsafe_indexable ptr)1576 kfree_type_impl(kalloc_type_view_t kt_view, void *__unsafe_indexable ptr)
1577 {
1578 	if (NULL == ptr) {
1579 		return;
1580 	}
1581 	zfree(kt_view, ptr);
1582 }
1583 
/*
 * Variable-size typed alloc/free for XNU: the var view is mangled
 * (kt_mangle_var_view) before reaching the common kalloc_ext /
 * kfree_ext entry points, presumably so they can tell a typed var
 * view apart from a plain kalloc_heap_t — confirm in kalloc.c.
 */
// rdar://87559422
#define kalloc_type_var_impl(kt_view, size, flags, site) ({ \
	struct kalloc_result kalloc_type_var_tmp__ = kalloc_ext(kt_mangle_var_view(kt_view), size, flags, site); \
	kalloc_type_var_tmp__.addr; \
})

#define kfree_type_var_impl(kt_view, ptr, size) \
	kfree_ext(kt_mangle_var_view(kt_view), ptr, size)
1592 
1593 #else /* XNU_KERNEL_PRIVATE */
1594 
/* Kext-facing typed allocation entry point (implemented in xnu). */
extern void *__unsafe_indexable
kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);
1599 
1600 static inline void *__unsafe_indexable
__kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)1601 __kalloc_type_impl(
1602 	kalloc_type_view_t  kt_view,
1603 	zalloc_flags_t      flags)
1604 {
1605 	void *addr = (kalloc_type_impl)(kt_view, flags);
1606 	if (flags & Z_NOFAIL) {
1607 		__builtin_assume(addr != NULL);
1608 	}
1609 	return addr;
1610 }
1611 
1612 #define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)
1613 
/* Free an element allocated through the typed view. */
extern void
kfree_type_impl(
	kalloc_type_view_t  kt_view,
	void                *ptr __unsafe_indexable);

/* Kext-facing variable-size typed allocation entry point. */
extern void *__unsafe_indexable
kalloc_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);
1625 
1626 __attribute__((malloc, alloc_size(2)))
1627 static inline void *
__sized_by(size)1628 __sized_by(size)
1629 __kalloc_type_var_impl(
1630 	kalloc_type_var_view_t  kt_view,
1631 	vm_size_t               size,
1632 	zalloc_flags_t          flags,
1633 	void                   *site)
1634 {
1635 	void *addr = (kalloc_type_var_impl)(kt_view, size, flags, site);
1636 	if (flags & Z_NOFAIL) {
1637 		__builtin_assume(addr != NULL);
1638 	}
1639 	return addr;
1640 }
1641 
/* Route call sites through the Z_NOFAIL-aware inline wrapper above. */
#define kalloc_type_var_impl(ktv, size, fl, site) \
	__kalloc_type_var_impl(ktv, size, fl, site)

/* Free a variable-size typed allocation (pairs with kalloc_type_var_impl). */
extern void
kfree_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr __unsafe_indexable,
	vm_size_t               size);
1650 
1651 #endif /* !XNU_KERNEL_PRIVATE */
1652 
/* Out-of-line entry points backing the interfaces above. */
void *
kalloc_type_impl_external(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

void
kfree_type_impl_external(
	kalloc_type_view_t  kt_view,
	void               *ptr __unsafe_indexable);

/* Typed operator new/delete support for OSObject (IOKit). */
extern void *
OSObject_typed_operator_new(
	kalloc_type_view_t  ktv,
	vm_size_t           size);

extern void
OSObject_typed_operator_delete(
	kalloc_type_view_t  ktv,
	void               *mem __unsafe_indexable,
	vm_size_t           size);
1673 
1674 #ifdef XNU_KERNEL_PRIVATE
1675 #pragma GCC visibility push(hidden)
1676 
/* kt_size packs the element size in the low 24 bits, with a small
 * index in the top byte (see KALLOC_TYPE_IDX_SHIFT/MASK). */
#define KALLOC_TYPE_SIZE_MASK  0xffffff
#define KALLOC_TYPE_IDX_SHIFT  24
#define KALLOC_TYPE_IDX_MASK   0xff

/* Extract the element size from a packed kt_size word. */
static inline uint32_t
kalloc_type_get_size(uint32_t kt_size)
{
	uint32_t decoded_size = kt_size & KALLOC_TYPE_SIZE_MASK;
	return decoded_size;
}
1686 
/* NOTE(review): presumably reports whether the view's allocations are
 * VM-backed (KT_VM) rather than zone-backed — confirm in kalloc.c. */
bool
IOMallocType_from_vm(
	kalloc_type_view_t ktv);

/* Used by kern_os_* and operator new */
KALLOC_HEAP_DECLARE(KERN_OS_MALLOC);

extern void
kheap_startup_init(
	kalloc_heap_t heap);

/*
 * Core allocation entry point. The first argument is either a
 * kalloc_heap_t or a mangled kalloc_type_var_view_t (kt_mangle_var_view).
 */
extern struct kalloc_result
kalloc_ext(
	void                   *kheap_or_kt_view __unsafe_indexable,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);
1704 
1705 static inline struct kalloc_result
__kalloc_ext(void * kheap_or_kt_view __unsafe_indexable,vm_size_t size,zalloc_flags_t flags,void * site)1706 __kalloc_ext(
1707 	void                   *kheap_or_kt_view __unsafe_indexable,
1708 	vm_size_t               size,
1709 	zalloc_flags_t          flags,
1710 	void                   *site)
1711 {
1712 	struct kalloc_result kr = (kalloc_ext)(kheap_or_kt_view, size, flags, site);
1713 	if (flags & Z_NOFAIL) {
1714 		__builtin_assume(kr.addr != NULL);
1715 	}
1716 	return kr;
1717 }
1718 
1719 #define kalloc_ext(hov, size, fl, site) __kalloc_ext(hov, size, fl, site)
1720 
1721 __attribute__((malloc, alloc_size(2)))
1722 static inline void *
__sized_by(size)1723 __sized_by(size)
1724 __kheap_alloc(
1725 	kalloc_heap_t           kheap,
1726 	vm_size_t               size,
1727 	zalloc_flags_t          flags,
1728 	void                   *site)
1729 {
1730 	struct kalloc_result kr;
1731 	__builtin_assume(!kt_is_var_view(kheap));
1732 	kr = kalloc_ext(kheap, size, flags, site);
1733 	return __unsafe_forge_bidi_indexable(void *, kr.addr, size);
1734 }
1735 
/*
 * Core reallocation entry point; like kalloc_ext(), the first argument
 * is a kalloc_heap_t or a mangled kalloc_type_var_view_t.
 */
extern struct kalloc_result
krealloc_ext(
	void                   *kheap_or_kt_view __unsafe_indexable,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	void                   *site);
1744 
1745 static inline struct kalloc_result
__krealloc_ext(void * kheap_or_kt_view __unsafe_indexable,void * addr __sized_by (old_size),vm_size_t old_size,vm_size_t new_size,zalloc_flags_t flags,void * site)1746 __krealloc_ext(
1747 	void                   *kheap_or_kt_view __unsafe_indexable,
1748 	void                   *addr __sized_by(old_size),
1749 	vm_size_t               old_size,
1750 	vm_size_t               new_size,
1751 	zalloc_flags_t          flags,
1752 	void                   *site)
1753 {
1754 	struct kalloc_result kr = (krealloc_ext)(kheap_or_kt_view, addr, old_size,
1755 	    new_size, flags, site);
1756 	if (flags & Z_NOFAIL) {
1757 		__builtin_assume(kr.addr != NULL);
1758 	}
1759 	return kr;
1760 }
1761 
/* Route call sites through the Z_NOFAIL-aware inline wrapper above. */
#define krealloc_ext(hov, addr, old_size, new_size, fl, site) \
	__krealloc_ext(hov, addr, old_size, new_size, fl, site)
1764 
1765 __attribute__((malloc, alloc_size(4)))
1766 static inline void *
__sized_by(new_size)1767 __sized_by(new_size)
1768 __kheap_realloc(
1769 	kalloc_heap_t           kheap,
1770 	void                   *addr __sized_by(old_size),
1771 	vm_size_t               old_size,
1772 	vm_size_t               new_size,
1773 	zalloc_flags_t          flags,
1774 	void                   *site)
1775 {
1776 	struct kalloc_result kr;
1777 	__builtin_assume(!kt_is_var_view(kheap));
1778 	kr = krealloc_ext(kheap, addr, old_size, new_size, flags, site);
1779 	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1780 }
1781 
1782 __attribute__((malloc, alloc_size(4)))
1783 static inline void *
__sized_by(new_size)1784 __sized_by(new_size)
1785 __krealloc_type(
1786 	kalloc_type_var_view_t  kt_view,
1787 	void                   *addr __sized_by(old_size),
1788 	vm_size_t               old_size,
1789 	vm_size_t               new_size,
1790 	zalloc_flags_t          flags,
1791 	void                   *site)
1792 {
1793 	struct kalloc_result kr;
1794 	kr = krealloc_ext(kt_mangle_var_view(kt_view), addr,
1795 	    old_size, new_size, flags, site);
1796 	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1797 }
1798 
/* Free by address only (size looked up by the allocator). */
extern void
kfree_addr_ext(
	kalloc_heap_t           kheap,
	void                   *addr __unsafe_indexable);

/* Free with explicit size; first argument as for kalloc_ext(). */
extern void
kfree_ext(
	void                   *kheap_or_kt_view __unsafe_indexable,
	void                   *addr __unsafe_indexable,
	vm_size_t               size);

/* Map (heap, size) to the zone that would serve the allocation. */
extern zone_t
kalloc_heap_zone_for_size(
	kalloc_heap_t         heap,
	vm_size_t             size);

/* Large-allocation threshold and statistics counters. */
extern vm_size_t kalloc_large_max;
SCALABLE_COUNTER_DECLARE(kalloc_large_count);
SCALABLE_COUNTER_DECLARE(kalloc_large_total);

/* Typed free used by the kern_os allocator (see KERN_OS_MALLOC). */
extern void
kern_os_typed_free(
	kalloc_type_view_t    ktv,
	void                 *addr __unsafe_indexable,
	vm_size_t             esize);
1824 
1825 #pragma GCC visibility pop
#endif  /* XNU_KERNEL_PRIVATE */
1827 
/* Return kern_os-allocated memory of `size` bytes to its zone. */
extern void
kern_os_zfree(
	zone_t        zone,
	void         *addr __unsafe_indexable,
	vm_size_t     size);
1833 
1834 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1835 
1836 #endif  /* _KERN_KALLOC_H_ */
1837 
1838 #endif  /* KERNEL_PRIVATE */
1839