xref: /xnu-8792.81.2/osfmk/kern/kalloc.h (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90) !
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifdef  KERNEL_PRIVATE
58 
59 #ifndef _KERN_KALLOC_H_
60 #define _KERN_KALLOC_H_
61 
62 #include <mach/machine/vm_types.h>
63 #include <mach/boolean.h>
64 #include <mach/vm_types.h>
65 #include <kern/zalloc.h>
66 #include <libkern/section_keywords.h>
67 #include <os/alloc_util.h>
68 #if XNU_KERNEL_PRIVATE
69 #include <kern/counter.h>
70 #endif /* XNU_KERNEL_PRIVATE */
71 
72 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
73 
74 /*!
75  * @const KALLOC_SAFE_ALLOC_SIZE
76  *
77  * @brief
78  * The maximum allocation size that is safe to allocate with Z_NOFAIL in kalloc.
79  */
80 #define KALLOC_SAFE_ALLOC_SIZE  (16u * 1024u)
81 
82 #if XNU_KERNEL_PRIVATE
83 /*!
84  * @typedef kalloc_heap_t
85  *
86  * @abstract
87  * A kalloc heap view represents a sub-accounting context
88  * for a given kalloc heap.
89  */
90 typedef struct kalloc_heap {
91 	zone_stats_t        kh_stats;      /* per-view allocation statistics (sub-accounting) */
92 	const char         *__unsafe_indexable kh_name;   /* string describing this heap view */
93 	zone_kheap_id_t     kh_heap_id;    /* KHEAP_ID_* constant selecting the base heap */
94 	vm_tag_t            kh_tag;        /* NOTE(review): presumably the VM tag applied to allocations — confirm */
95 	uint16_t            kh_type_hash;  /* NOTE(review): presumably a signature hash fed to kmem guards — confirm */
96 	zone_id_t           kh_zstart;     /* NOTE(review): presumably first zone id of the heap's zone run — confirm */
97 	struct kalloc_heap *kh_views;      /* NOTE(review): presumably links sibling views of the same heap — confirm */
98 } *kalloc_heap_t;
99 
100 /*!
101  * @macro KALLOC_HEAP_DECLARE
102  *
103  * @abstract
104  * (optionally) declare a kalloc heap view in a header.
105  *
106  * @discussion
107  * Unlike kernel zones, new full blown heaps cannot be instantiated.
108  * However new accounting views of the base heaps can be made.
109  */
110 #define KALLOC_HEAP_DECLARE(var) \
111 	extern struct kalloc_heap var[1]
112 
113 /**
114  * @const KHEAP_ANY
115  *
116  * @brief
117  * A value that represents either the default or kext heap for codepaths that
118  * need to allow @c kheap_free() to either one.
119  *
120  * @discussion
121  * When the memory provenance is not known, this value can be used to free
122  * memory indiscriminately.
123  *
124  * Note: code using this constant can likely be used as a gadget to free
125  * arbitrary memory and its use is strongly discouraged.
126  */
127 #define KHEAP_ANY  ((struct kalloc_heap *)NULL)
128 
129 /**
130  * @const KHEAP_DATA_BUFFERS
131  *
132  * @brief
133  * The builtin heap for bags of pure bytes.
134  *
135  * @discussion
136  * This set of kalloc zones should contain pure bags of bytes with no pointers
137  * or length/offset fields.
138  *
139  * The zones forming the heap aren't sequestered from each other, however the
140  * entire heap lives in a different submap from any other kernel allocation.
141  *
142  * The main motivation behind this separation is due to the fact that a lot of
143  * these objects have been used by attackers to spray the heap to make it more
144  * predictable while exploiting use-after-frees or overflows.
145  *
146  * Common attributes that make these objects useful for spraying includes
147  * control of:
148  * - Data in allocation
149  * - Time of alloc and free (lifetime)
150  * - Size of allocation
151  */
152 KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS);
153 
154 /**
155  * @const KHEAP_DEFAULT
156  *
157  * @brief
158  * The builtin default core kernel kalloc heap.
159  *
160  * @discussion
161  * This set of kalloc zones should contain other objects that don't have their
162  * own security mitigations. The individual zones are themselves sequestered.
163  */
164 KALLOC_HEAP_DECLARE(KHEAP_DEFAULT);
165 
166 /**
167  * @const KHEAP_KT_VAR
168  *
169  * @brief
170  * Temporary heap for variable sized kalloc type allocations
171  *
172  * @discussion
173  * This heap will be removed when logic for kalloc_type_var_views is added
174  *
175  */
176 KALLOC_HEAP_DECLARE(KHEAP_KT_VAR);
177 
178 /*!
179  * @macro KALLOC_HEAP_DEFINE
180  *
181  * @abstract
182  * Defines a given kalloc heap view and what it points to.
183  *
184  * @discussion
185  * Kalloc heaps are views over one of the pre-defined builtin heaps
186  * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating
187  * a new one allows for accounting of allocations through this view.
188  *
189  * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase,
190  * as the last rank. If views on zones are created, these must have been
191  * created before this stage.
192  *
193  * @param var           the name for the zone view.
194  * @param name          a string describing the zone view.
195  * @param heap_id       a @c KHEAP_ID_* constant.
196  */
197 #define KALLOC_HEAP_DEFINE(var, name, heap_id) \
198 	SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \
199 	    .kh_name = name, \
200 	    .kh_heap_id = heap_id, \
201 	} }; \
202 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, kheap_startup_init, var)
203 
204 
205 /*
206  * Allocations of type SO_NAME are known to not have pointers for
207  * most platforms -- for macOS this is not guaranteed
208  */
209 #if XNU_TARGET_OS_OSX
210 #define KHEAP_SONAME KHEAP_DEFAULT
211 #else /* XNU_TARGET_OS_OSX */
212 #define KHEAP_SONAME KHEAP_DATA_BUFFERS
213 #endif /* XNU_TARGET_OS_OSX */
214 
215 #endif /* XNU_KERNEL_PRIVATE */
216 
217 /*!
218  * @enum kalloc_type_flags_t
219  *
220  * @brief
221  * Flags that can be passed to @c KALLOC_TYPE_DEFINE
222  *
223  * @discussion
224  * These flags can be used to request for a specific accounting
225  * behavior.
226  *
227  * @const KT_DEFAULT
228  * Passing this flag will provide default accounting behavior
229  * i.e shared accounting unless toggled with KT_OPTIONS_ACCT is
230  * set in kt boot-arg.
231  *
232  * @const KT_PRIV_ACCT
233  * Passing this flag will provide individual stats for your
234  * @c kalloc_type_view that is defined.
235  *
236  * @const KT_SHARED_ACCT
237  * Passing this flag will accumulate stats as a part of the
238  * zone that your @c kalloc_type_view points to.
239  *
240  * @const KT_DATA_ONLY
241  * Represents that the type is "data-only". Adopters should not
242  * set this flag manually, it is meant for the compiler to set
243  * automatically when KALLOC_TYPE_CHECK(DATA) passes.
244  *
245  * @const KT_VM
246  * Represents that the type is large enough to use the VM. Adopters
247  * should not set this flag manually, it is meant for the compiler
248  * to set automatically when KALLOC_TYPE_VM_SIZE_CHECK passes.
249  *
250  * @const KT_PTR_ARRAY
251  * Represents that the type is an array of pointers. Adopters should not
252  * set this flag manually, it is meant for the compiler to set
253  * automatically when KALLOC_TYPE_CHECK(PTR) passes.
254  *
255  * @const KT_CHANGED*
256  * Represents a change in the version of the kalloc_type_view. This
 257  * is required in order to decouple requiring kexts to be rebuilt to
 258  * use the new definitions right away. These flags should not be used
 259  * manually at a callsite; they are meant for internal use only. Future
 260  * changes to the kalloc_type_view definition should toggle this flag.
261  *
262  #if XNU_KERNEL_PRIVATE
263  *
264  * @const KT_SLID
265  * To indicate that strings in the view were slid during early boot.
266  *
267  * @const KT_PROCESSED
 268  * This flag is set once the view is parsed during early boot. Views
269  * that are not in BootKC on macOS aren't parsed and therefore will
270  * not have this flag set. The runtime can use this as an indication
271  * to appropriately redirect the call.
272  *
273  * @const KT_HASH
274  * Hash of signature used by kmem_*_guard to determine range and
275  * direction for allocation
276  #endif
277  */
278 __options_decl(kalloc_type_flags_t, uint32_t, {
279 	KT_DEFAULT        = 0x0001,  /* default accounting (shared unless kt boot-arg toggles it) */
280 	KT_PRIV_ACCT      = 0x0002,  /* individual stats for this kalloc_type_view */
281 	KT_SHARED_ACCT    = 0x0004,  /* stats accumulated in the zone the view points to */
282 	KT_DATA_ONLY      = 0x0008,  /* compiler-set: type is "data-only" (KALLOC_TYPE_CHECK(DATA)) */
283 	KT_VM             = 0x0010,  /* compiler-set: large enough to use the VM */
284 	KT_CHANGED        = 0x0020,  /* internal: view-version change marker */
285 	KT_CHANGED2       = 0x0040,  /* internal: view-version change marker */
286 	KT_PTR_ARRAY      = 0x0080,  /* compiler-set: type is an array of pointers (KALLOC_TYPE_CHECK(PTR)) */
287 #if XNU_KERNEL_PRIVATE
288 	KT_SLID           = 0x4000,  /* strings in the view were slid during early boot */
289 	KT_PROCESSED      = 0x8000,  /* view was parsed during early boot */
290 	KT_HASH           = 0xffff0000,  /* signature hash used by kmem_*_guard */
291 #endif
292 });
293 
294 /*!
295  * @typedef kalloc_type_view_t
296  *
297  * @abstract
298  * A kalloc type view is a structure used to redirect callers
299  * of @c kalloc_type to a particular zone based on the signature of
300  * their type.
301  *
302  * @discussion
303  * These structures are automatically created under the hood for every
304  * @c kalloc_type and @c kfree_type callsite. They are ingested during startup
305  * and are assigned zones based on the security policy for their signature.
306  *
307  * These structs are protected by the kernel lockdown and can't be initialized
308  * dynamically. They must be created using @c KALLOC_TYPE_DEFINE() or
309  * @c kalloc_type or @c kfree_type.
310  *
311  */
312 struct kalloc_type_view {
313 	struct zone_view        kt_zv;        /* underlying zone view (zone assigned at startup) */
314 	const char             *kt_signature __unsafe_indexable; /* type signature used for zone assignment */
315 	kalloc_type_flags_t     kt_flags;     /* KT_* flags, see kalloc_type_flags_t */
316 	uint32_t                kt_size;      /* NOTE(review): presumably sizeof() of the allocated type — confirm */
317 	void                   *unused1;      /* reserved */
318 	void                   *unused2;      /* reserved */
319 };
320 
321 typedef struct kalloc_type_view *kalloc_type_view_t;
322 
323 /*
324  * The set of zones used by all kalloc heaps are defined by the constants
325  * below.
326  *
327  * KHEAP_START_SIZE: Size of the first sequential zone.
328  * KHEAP_MAX_SIZE  : Size of the last sequential zone.
329  * KHEAP_STEP_WIDTH: Number of zones created at every step (power of 2).
330  * KHEAP_STEP_START: Size of the first step.
331  * We also create some extra initial zones that don't follow the sequence
332  * for sizes 8 (on armv7 only), 16 and 32.
333  *
334  * idx step_increment   zone_elem_size
335  * 0       -                  16
336  * 1       -                  32
337  * 2       16                 48
338  * 3       16                 64
339  * 4       32                 96
340  * 5       32                 128
341  * 6       64                 192
342  * 7       64                 256
343  * 8       128                384
344  * 9       128                512
345  * 10      256                768
346  * 11      256                1024
347  * 12      512                1536
348  * 13      512                2048
349  * 14      1024               3072
350  * 15      1024               4096
351  * 16      2048               6144
352  * 17      2048               8192
353  * 18      4096               12288
354  * 19      4096               16384
355  * 20      8192               24576
356  * 21      8192               32768
357  */
358 #define kalloc_log2down(mask)   (31 - __builtin_clz(mask))
359 #define KHEAP_START_SIZE        32
360 #if  __x86_64__
361 #define KHEAP_MAX_SIZE          (16 * 1024)
362 #define KHEAP_EXTRA_ZONES       2
363 #else
364 #define KHEAP_MAX_SIZE          (32 * 1024)
365 #define KHEAP_EXTRA_ZONES       2
366 #endif
367 #define KHEAP_STEP_WIDTH        2
368 #define KHEAP_STEP_START        16
369 #define KHEAP_START_IDX         kalloc_log2down(KHEAP_START_SIZE)
370 #define KHEAP_NUM_STEPS         (kalloc_log2down(KHEAP_MAX_SIZE) - \
371 	                                kalloc_log2down(KHEAP_START_SIZE))
372 #define KHEAP_NUM_ZONES         (KHEAP_NUM_STEPS * KHEAP_STEP_WIDTH + \
373 	                                KHEAP_EXTRA_ZONES)
374 
375 /*!
376  * @enum kalloc_type_version_t
377  *
378  * @brief
379  * Enum that holds versioning information for @c kalloc_type_var_view
380  *
381  * @const KT_V1
382  * Version 1
383  *
384  */
385 __options_decl(kalloc_type_version_t, uint16_t, {
386 	KT_V1             = 0x0001,
387 });
388 
389 /*!
390  * @typedef kalloc_type_var_view_t
391  *
392  * @abstract
 393  * This structure is analogous to @c kalloc_type_view but handles
394  * @c kalloc_type callsites that are variable in size.
395  *
396  * @discussion
397  * These structures are automatically created under the hood for every
398  * variable sized @c kalloc_type and @c kfree_type callsite. They are ingested
399  * during startup and are assigned zones based on the security policy for
400  * their signature.
401  *
402  * These structs are protected by the kernel lockdown and can't be initialized
403  * dynamically. They must be created using @c KALLOC_TYPE_VAR_DEFINE() or
404  * @c kalloc_type or @c kfree_type.
405  *
406  */
407 struct kalloc_type_var_view {
408 	kalloc_type_version_t   kt_version;   /* structure version (KT_V1) */
409 	uint16_t                kt_size_hdr;  /* size of the header type (h_ty), 0 when absent */
410 	/*
411 	 * Temporary: Needs to be 32bits cause we have many structs that use
412 	 * IONew/Delete that are larger than 32K.
413 	 */
414 	uint32_t                kt_size_type; /* size of the repeating element type (e_ty) */
415 	zone_stats_t            kt_stats;     /* per-view statistics (not yet ingested, see discussion above) */
416 	const char             *__unsafe_indexable kt_name;   /* human-readable view name */
417 	zone_view_t             kt_next;      /* NOTE(review): presumably links registered views — confirm */
418 	zone_id_t               kt_heap_start; /* NOTE(review): presumably first zone id of backing heap — confirm */
419 	uint8_t                 kt_zones[KHEAP_NUM_ZONES]; /* per-size-class zone mapping — TODO confirm */
420 	const char             * __unsafe_indexable kt_sig_hdr;  /* signature of the header type */
421 	const char             * __unsafe_indexable kt_sig_type; /* signature of the element type */
422 	kalloc_type_flags_t     kt_flags;     /* KT_* flags, see kalloc_type_flags_t */
423 };
424 
425 typedef struct kalloc_type_var_view *kalloc_type_var_view_t;
426 
427 /*!
428  * @macro KALLOC_TYPE_DECLARE
429  *
430  * @abstract
431  * (optionally) declares a kalloc type view (in a header).
432  *
433  * @param var           the name for the kalloc type view.
434  */
435 #define KALLOC_TYPE_DECLARE(var) \
436 	extern struct kalloc_type_view var[1]
437 
438 /*!
439  * @macro KALLOC_TYPE_DEFINE
440  *
441  * @abstract
 442  * Defines a given kalloc type view with preferred accounting
443  *
444  * @discussion
445  * This macro allows you to define a kalloc type with private
446  * accounting. The defined kalloc_type_view can be used with
447  * kalloc_type_impl/kfree_type_impl to allocate/free memory.
448  * zalloc/zfree can also be used from inside xnu. However doing
449  * so doesn't handle freeing a NULL pointer or the use of tags.
450  *
451  * @param var           the name for the kalloc type view.
452  * @param type          the type of your allocation.
453  * @param flags         a @c KT_* flag.
454  */
455 #define KALLOC_TYPE_DEFINE(var, type, flags) \
456 	_KALLOC_TYPE_DEFINE(var, type, flags)
457 
458 /*!
459  * @macro KALLOC_TYPE_VAR_DECLARE
460  *
461  * @abstract
462  * (optionally) declares a kalloc type var view (in a header).
463  *
464  * @param var           the name for the kalloc type var view.
465  */
466 #define KALLOC_TYPE_VAR_DECLARE(var) \
467 	extern struct kalloc_type_var_view var[1]
468 
469 /*!
470  * @macro KALLOC_TYPE_VAR_DEFINE
471  *
472  * @abstract
 473  * Defines a given kalloc type view with preferred accounting for
474  * variable sized typed allocations.
475  *
476  * @discussion
477  * As the views aren't yet being ingested, individual stats aren't
478  * available. The defined kalloc_type_var_view should be used with
479  * kalloc_type_var_impl/kfree_type_var_impl to allocate/free memory.
480  *
481  * This macro comes in 2 variants:
482  *
483  * 1. @c KALLOC_TYPE_VAR_DEFINE(var, e_ty, flags)
484  * 2. @c KALLOC_TYPE_VAR_DEFINE(var, h_ty, e_ty, flags)
485  *
486  * @param var           the name for the kalloc type var view.
487  * @param h_ty          the type of header in the allocation.
488  * @param e_ty          the type of repeating part in the allocation.
489  * @param flags         a @c KT_* flag.
490  */
491 #define KALLOC_TYPE_VAR_DEFINE(...) KALLOC_DISPATCH(KALLOC_TYPE_VAR_DEFINE, ##__VA_ARGS__)
492 
493 #ifdef XNU_KERNEL_PRIVATE
494 
495 /*
496  * These versions allow specifying the kalloc heap to allocate memory
497  * from
498  */
499 #define kheap_alloc_tag(kalloc_heap, size, flags, itag) \
500 	__kheap_alloc(kalloc_heap, size, __zone_flags_mix_tag(flags, itag), NULL)
501 #define kheap_alloc(kalloc_heap, size, flags) \
502 	kheap_alloc_tag(kalloc_heap, size, flags, VM_ALLOC_SITE_TAG())
503 
504 /*
505  * These versions should be used for allocating pure data bytes that
506  * do not contain any pointers
507  */
508 #define kalloc_data_tag(size, flags, itag) \
509 	kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, flags, itag)
510 #define kalloc_data(size, flags) \
511 	kheap_alloc(KHEAP_DATA_BUFFERS, size, flags)
512 
513 #define krealloc_data_tag(elem, old_size, new_size, flags, itag) \
514 	__kheap_realloc(KHEAP_DATA_BUFFERS, elem, old_size, new_size, \
515 	    __zone_flags_mix_tag(flags, itag), NULL)
516 #define krealloc_data(elem, old_size, new_size, flags) \
517 	krealloc_data_tag(elem, old_size, new_size, flags, \
518 	    VM_ALLOC_SITE_TAG())
519 
520 #define kfree_data(elem, size) \
521 	kheap_free(KHEAP_DATA_BUFFERS, elem, size);
522 
523 #define kfree_data_addr(elem) \
524 	kheap_free_addr(KHEAP_DATA_BUFFERS, elem);
525 
526 extern void
527 kheap_free_bounded(
528 	kalloc_heap_t heap,
529 	void         *addr __unsafe_indexable,
530 	vm_size_t     min_sz,
531 	vm_size_t     max_sz);
532 
533 extern void
534 kalloc_data_require(
535 	void         *data __unsafe_indexable,
536 	vm_size_t     size);
537 
538 extern void
539 kalloc_non_data_require(
540 	void         *data __unsafe_indexable,
541 	vm_size_t     size);
542 
543 #else /* XNU_KERNEL_PRIVATE */
544 
545 extern void *__sized_by(size)
546 kalloc(
547 	vm_size_t           size) __attribute__((malloc, alloc_size(1)));
548 
549 extern void *__unsafe_indexable
550 kalloc_data(
551 	vm_size_t           size,
552 	zalloc_flags_t      flags);
553 
554 __attribute__((malloc, alloc_size(1)))
555 static inline void *
__sized_by(size)556 __sized_by(size)
557 __kalloc_data(vm_size_t size, zalloc_flags_t flags)
558 {
559 	void *addr = (kalloc_data)(size, flags);
560 	if (flags & Z_NOFAIL) {
561 		__builtin_assume(addr != NULL);
562 	}
563 	return addr;
564 }
565 
566 #define kalloc_data(size, fl) __kalloc_data(size, fl)
567 
568 extern void *__unsafe_indexable
569 krealloc_data(
570 	void               *ptr __unsafe_indexable,
571 	vm_size_t           old_size,
572 	vm_size_t           new_size,
573 	zalloc_flags_t      flags);
574 
575 __attribute__((malloc, alloc_size(3)))
576 static inline void *
__sized_by(new_size)577 __sized_by(new_size)
578 __krealloc_data(
579 	void               *ptr __sized_by(old_size),
580 	vm_size_t           old_size,
581 	vm_size_t           new_size,
582 	zalloc_flags_t      flags)
583 {
584 	void *addr = (krealloc_data)(ptr, old_size, new_size, flags);
585 	if (flags & Z_NOFAIL) {
586 		__builtin_assume(addr != NULL);
587 	}
588 	return addr;
589 }
590 
591 #define krealloc_data(ptr, old_size, new_size, fl) \
592 	__krealloc_data(ptr, old_size, new_size, fl)
593 
594 extern void
595 kfree(
596 	void               *data __unsafe_indexable,
597 	vm_size_t           size);
598 
599 extern void
600 kfree_data(
601 	void               *ptr __unsafe_indexable,
602 	vm_size_t           size);
603 
604 extern void
605 kfree_data_addr(
606 	void               *ptr __unsafe_indexable);
607 
608 #endif /* !XNU_KERNEL_PRIVATE */
609 
610 /*!
611  * @macro kalloc_type
612  *
613  * @abstract
614  * Allocates element of a particular type
615  *
616  * @discussion
617  * This family of allocators segregate kalloc allocations based on their type.
618  *
619  * This macro comes in 3 variants:
620  *
621  * 1. @c kalloc_type(type, flags)
622  *    Use this macro for fixed sized allocation of a particular type.
623  *
624  * 2. @c kalloc_type(e_type, count, flags)
625  *    Use this macro for variable sized allocations that form an array,
626  *    do note that @c kalloc_type(e_type, 1, flags) is not equivalent to
627  *    @c kalloc_type(e_type, flags).
628  *
629  * 3. @c kalloc_type(hdr_type, e_type, count, flags)
630  *    Use this macro for variable sized allocations formed with
631  *    a header of type @c hdr_type followed by a variable sized array
632  *    with elements of type @c e_type, equivalent to this:
633  *
634  *    <code>
635  *    struct {
636  *        hdr_type hdr;
637  *        e_type   arr[];
638  *    }
639  *    </code>
640  *
641  * @param flags         @c zalloc_flags_t that get passed to zalloc_internal
642  */
643 #define kalloc_type(...)  KALLOC_DISPATCH(kalloc_type, ##__VA_ARGS__)
644 
645 /*!
646  * @macro kfree_type
647  *
648  * @abstract
 649  * Frees an element of a particular type
650  *
651  * @discussion
652  * This pairs with the @c kalloc_type() that was made to allocate this element.
653  * Arguments passed to @c kfree_type() must match the one passed at allocation
654  * time precisely.
655  *
656  * This macro comes in the same 3 variants kalloc_type() does:
657  *
658  * 1. @c kfree_type(type, elem)
659  * 2. @c kfree_type(e_type, count, elem)
660  * 3. @c kfree_type(hdr_type, e_type, count, elem)
661  *
662  * @param elem          The address of the element to free
663  */
664 #define kfree_type(...)  KALLOC_DISPATCH(kfree_type, ##__VA_ARGS__)
665 
666 #ifdef XNU_KERNEL_PRIVATE
667 #define kalloc_type_tag(...)     KALLOC_DISPATCH(kalloc_type_tag, ##__VA_ARGS__)
668 #define krealloc_type_tag(...)   KALLOC_DISPATCH(krealloc_type_tag, ##__VA_ARGS__)
669 #define krealloc_type(...)       KALLOC_DISPATCH(krealloc_type, ##__VA_ARGS__)
670 
671 /*
672  * kalloc_type_require can't be made available to kexts as the
673  * kalloc_type_view's zone could be NULL in the following cases:
674  * - Size greater than KALLOC_SAFE_ALLOC_SIZE
675  * - On macOS, if call is not in BootKC
676  * - All allocations in kext for armv7
677  */
678 #define kalloc_type_require(type, value) ({                                    \
679 	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
680 	zone_require(kt_view_var->kt_zv.zv_zone, value);                       \
681 })
682 
683 #endif
684 
685 /*!
686  * @enum kt_granule_t
687  *
688  * @brief
689  * Granule encodings used by the compiler for the type signature.
690  *
691  * @discussion
692  * Given a type, the XNU signature type system (__builtin_xnu_type_signature)
693  * produces a signature by analyzing its memory layout, in chunks of 8 bytes,
694  * which we call granules. The encoding produced for each granule is the
695  * bitwise or of the encodings of all the types of the members included
696  * in that granule.
697  *
698  * @const KT_GRANULE_PADDING
699  * Represents padding inside a record type.
700  *
701  * @const KT_GRANULE_POINTER
702  * Represents a pointer type.
703  *
704  * @const KT_GRANULE_DATA
705  * Represents a scalar type that is not a pointer.
706  *
707  * @const KT_GRANULE_DUAL
708  * Currently unused.
709  *
710  * @const KT_GRANULE_PAC
711  * Represents a pointer which is subject to PAC.
712  */
713 __options_decl(kt_granule_t, uint32_t, {
714 	KT_GRANULE_PADDING = 0,  /* padding inside a record type */
715 	KT_GRANULE_POINTER = 1,  /* pointer type */
716 	KT_GRANULE_DATA    = 2,  /* scalar type that is not a pointer */
717 	KT_GRANULE_DUAL    = 4,  /* currently unused */
718 	KT_GRANULE_PAC     = 8   /* pointer subject to PAC */
719 });
720 
721 #define KT_GRANULE_MAX                                                \
722 	(KT_GRANULE_PADDING | KT_GRANULE_POINTER | KT_GRANULE_DATA |  \
723 	    KT_GRANULE_DUAL | KT_GRANULE_PAC)
724 
725 /*
726  * Convert a granule encoding to the index of the bit that
727  * represents such granule in the type summary.
728  *
729  * The XNU type summary (__builtin_xnu_type_summary) produces a 32-bit
730  * summary of the type signature of a given type. If the bit at index
731  * (1 << G) is set in the summary, that means that the type contains
732  * one or more granules with encoding G.
733  */
734 #define KT_SUMMARY_GRANULE_TO_IDX(g)  (1UL << g)
735 
736 #define KT_SUMMARY_MASK_TYPE_BITS  (0xffff)
737 
738 #define KT_SUMMARY_MASK_DATA                             \
739 	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |  \
740 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA))
741 
742 #define KT_SUMMARY_MASK_PTR                              \
743 	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
744 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
745 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
746 
747 #define KT_SUMMARY_MASK_ALL_GRANULES                        \
748 	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
749 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
750 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA) |     \
751 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DUAL) |     \
752 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
753 
754 /*!
755  * @macro KT_SUMMARY_GRANULES
756  *
757  * @abstract
758  * Return the granule type summary for a given type
759  *
760  * @discussion
761  * This macro computes the type summary of a type, and it then extracts the
762  * bits which carry information about the granules in the memory layout.
763  *
764  * Note: you should never have to use __builtin_xnu_type_summary
765  * directly, as we reserve the right to use the remaining bits with
766  * different semantics.
767  *
768  * @param type          The type to analyze
769  */
770 #define KT_SUMMARY_GRANULES(type) \
771 	(__builtin_xnu_type_summary(type) & KT_SUMMARY_MASK_TYPE_BITS)
772 
773 /*!
774  * @macro KALLOC_TYPE_SIG_CHECK
775  *
776  * @abstract
777  * Return whether a given type is only made up of granules specified in mask
778  *
779  * @param mask          Granules to check for
780  * @param type          The type to analyze
781  */
782 #define KALLOC_TYPE_SIG_CHECK(mask, type) \
783 	((KT_SUMMARY_GRANULES(type) & ~(mask)) == 0)
784 
785 /*!
786  * @macro KALLOC_TYPE_IS_DATA_ONLY
787  *
788  * @abstract
789  * Return whether a given type is considered a data-only type.
790  *
791  * @param type          The type to analyze
792  */
793 #define KALLOC_TYPE_IS_DATA_ONLY(type) \
794 	KALLOC_TYPE_SIG_CHECK(KT_SUMMARY_MASK_DATA, type)
795 
796 /*!
797  * @macro KALLOC_TYPE_HAS_OVERLAPS
798  *
799  * @abstract
800  * Return whether a given type has overlapping granules.
801  *
802  * @discussion
803  * This macro returns whether the memory layout for a given type contains
804  * overlapping granules. An overlapping granule is a granule which includes
805  * members with types that have different encodings under the XNU signature
806  * type system.
807  *
808  * @param type          The type to analyze
809  */
810 #define KALLOC_TYPE_HAS_OVERLAPS(type) \
811 	((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_ALL_GRANULES) != 0)
812 
813 /*!
814  * @macro KALLOC_TYPE_IS_COMPATIBLE_PTR
815  *
816  * @abstract
817  * Return whether pointer is compatible with a given type, in the XNU
818  * signature type system.
819  *
820  * @discussion
821  * This macro returns whether type pointed to by @c ptr is either the same
822  * type as @c type, or it has the same signature. The implementation relies
823  * on the @c __builtin_xnu_types_compatible builtin, and the value returned
824  * can be evaluated at compile time in both C and C++.
825  *
826  * Note: void pointers are treated as wildcards, and are thus compatible
827  * with any given type.
828  *
829  * @param ptr           the pointer whose type needs to be checked.
830  * @param type          the type which the pointer will be checked against.
831  */
832 #define KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type)                   \
833 	_Pragma("clang diagnostic push")                               \
834 	_Pragma("clang diagnostic ignored \"-Wvoid-ptr-dereference\"") \
835 	(__builtin_xnu_types_compatible(__typeof__(*ptr), type) ||     \
836 	    __builtin_xnu_types_compatible(__typeof__(*ptr), void))    \
837 	_Pragma("clang diagnostic pop")
838 
839 #define KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type) \
840 	_Static_assert(KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type), \
841 	    "Pointer type is not compatible with specified type")
842 
843 
844 /*!
845  * @const KALLOC_ARRAY_SIZE_MAX
846  *
847  * @brief
848  * The maximum size that can be allocated with the @c KALLOC_ARRAY interface.
849  *
850  * @discussion
851  * This size is:
852  * - ~256M on 4k or PAC systems with 16k pages
853  * - ~1G on other 16k systems.
854  */
855 #if __arm64e__
856 #define KALLOC_ARRAY_SIZE_MAX   ((uint32_t)PAGE_MASK << PAGE_SHIFT)
857 #define KALLOC_ARRAY_GRANULE    32ul
858 #else
859 #define KALLOC_ARRAY_SIZE_MAX   ((uint32_t)UINT16_MAX << PAGE_SHIFT)
860 #define KALLOC_ARRAY_GRANULE    16ul
861 #endif
862 
863 /*!
864  * @macro KALLOC_ARRAY_TYPE_DECL
865  *
866  * @brief
867  * Declares a type used as a packed kalloc array type.
868  *
869  * @discussion
870  * This macro comes in two variants
871  *
872  * - KALLOC_ARRAY_TYPE_DECL(name, e_ty)
873  * - KALLOC_ARRAY_TYPE_DECL(name, h_ty, e_ty)
874  *
875  * The first one defines an array of elements of type @c e_ty,
876  * and the second a header of type @c h_ty followed by
877  * an array of elements of type @c e_ty.
878  *
879  * Those macros will then define the type @c ${name}_t as a typedef
880  * to a non existent structure type, in order to avoid accidental
881  * dereference of those pointers.
882  *
883  * kalloc array pointers are actually pointers that in addition to encoding
884  * the array base pointer, also encode the allocation size (only sizes
885  * up to @c KALLOC_ARRAY_SIZE_MAX bytes).
886  *
887  * Such pointers can be signed with data PAC properly, which will provide
888  * integrity of both the base pointer, and its size.
889  *
890  * kalloc arrays are useful to use instead of embedding the length
891  * of the allocation inside of itself, which tends to be driven by:
892  *
893  * - a desire to not grow the outer structure holding the pointer
894  *   to this array with an extra "length" field for optional arrays,
895  *   in order to save memory (see the @c ip_requests field in ports),
896  *
897  * - a need to be able to atomically consult the size of an allocation
898  *   with respect to loading its pointer (where address dependencies
899  *   traditionally gives this property) for lockless algorithms
900  *   (see the IPC space table).
901  *
902  * Using a kalloc array is preferable for two reasons:
903  *
904  * - embedding lengths inside the allocation is self-referential
905  *   and an appetizing target for post-exploitation strategies,
906  *
907  * - having a dependent load to get to the length loses out-of-order
908  *   opportunities for the CPU and prone to back-to-back cache misses.
909  *
910  * Holding information such as a level of usage of this array
911  * within itself is fine provided those quantities are validated
912  * against the "count" (number of elements) or "size" (allocation
913  * size in bytes) of the array before use.
914  *
915  *
916  * This macro will define a series of functions:
917  *
918  * - ${name}_count_to_size() and ${name}_size_to_count()
919  *   to convert between memory sizes and array element counts
920  *   (taking the header size into account when it exists);
921  *
922  *   Note that those functions assume the count/size are corresponding
923  *   to a valid allocation size within [0, KALLOC_ARRAY_SIZE_MAX].
924  *
925  * - ${name}_next_size() to build good allocation growth policies;
926  *
927  * - ${name}_base() returning a (bound-checked indexable) pointer
928  *   to the header of the array (or its first element when there is
929  *   no header);
930  *
931  * - ${name}_begin() returning a (bound-checked indexable)
932  *   pointer to the first element of the the array;
933  *
934  * - ${name}_contains() to check if an element index is within
935  *   the valid range of this allocation;
936  *
937  * - ${name}_next_elem() to get the next element of an array.
938  *
939  * - ${name}_get() and ${name}_get_nocheck() to return a pointer
940  *   to a given cell of the array with (resp. without) a bound
941  *   check against the array size. The bound-checked variant
942  *   returns NULL for invalid indexes.
943  *
944  * - ${name}_alloc_by_count() and ${name}_alloc_by_size()
945  *   to allocate a new array able to hold at least that many elements
946  *   (resp. bytes).
947  *
948  * - ${name}_realloc_by_count() and ${name}_realloc_by_size()
949  *   to re-allocate a new array able to hold at least that many elements
950  *   (resp. bytes).
951  *
952  * - ${name}_free() and ${name}_free_noclear() to free such an array
953  *   (resp. without nil-ing the pointer). The non-clearing variant
954  *   is to be used only when nil-ing out the pointer is otherwise
955  *   not allowed by C (const value, unable to take address of, ...),
956  *   otherwise the normal ${name}_free() must be used.
957  */
/* dispatches to KALLOC_ARRAY_TYPE_DECL_{2,3} based on argument count */
#define KALLOC_ARRAY_TYPE_DECL(...) \
	KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DECL, ##__VA_ARGS__)
960 
961 #if XNU_KERNEL_PRIVATE
962 
/*
 * Worker macro for KALLOC_ARRAY_TYPE_DECL(): generates the opaque pointer
 * typedef and the whole family of ${name}_* inline helpers documented above.
 * @c h_sz / @c e_sz are the header/element sizes in bytes (h_sz is 0 for
 * headerless arrays).
 */
#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
	KALLOC_TYPE_VAR_DECLARE(name ## _kt_view);                              \
	typedef struct name * __unsafe_indexable name ## _t;                    \
                                                                                \
	/* size <-> count conversions (header size included when present) */    \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _count_to_size(uint32_t count)                                  \
	{                                                                       \
	        return (uint32_t)((h_sz) + (e_sz) * count);                     \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _size_to_count(vm_size_t size)                                  \
	{                                                                       \
	        return (uint32_t)((size - (h_sz)) / (e_sz));                    \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _size(name ## _t array)                                         \
	{                                                                       \
	        return __kalloc_array_size((vm_address_t)array);                \
	}                                                                       \
                                                                                \
	/* growth policy: next "good" allocation size, granule rounded */       \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _next_size(                                                     \
	        uint32_t                min_count,                              \
	        vm_size_t               cur_size,                               \
	        uint32_t                vm_period)                              \
	{                                                                       \
	        vm_size_t size;                                                 \
                                                                                \
	        if (cur_size) {                                                 \
	                size = cur_size + (e_sz) - 1;                           \
	        } else {                                                        \
	                size = kt_size(h_sz, e_sz, min_count) - 1;              \
	        }                                                               \
	        size &= -KALLOC_ARRAY_GRANULE;                                  \
	        size += KALLOC_ARRAY_GRANULE - 1;                               \
	        size  = kalloc_next_good_size(size, vm_period);                 \
	        if (size <= KALLOC_ARRAY_SIZE_MAX) {                            \
	               return (uint32_t)size;                                   \
	        }                                                               \
	        return 2 * KALLOC_ARRAY_SIZE_MAX; /* will fail */               \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _count(name ## _t array)                                        \
	{                                                                       \
	        return name ## _size_to_count(name ## _size(array));            \
	}                                                                       \
                                                                                \
	/* bound-checked accessors into the decoded allocation */               \
	__pure2                                                                 \
	static inline h_type_t *__header_bidi_indexable                         \
	name ## _base(name ## _t array)                                         \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        uint32_t     size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        (void)size;                                                     \
	        return __unsafe_forge_bidi_indexable(h_type_t *, base, size);   \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t *__header_bidi_indexable                         \
	name ## _begin(name ## _t array)                                        \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        uint32_t     size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        (void)size;                                                     \
	        return __unsafe_forge_bidi_indexable(e_type_t *, base, size);   \
	}                                                                       \
                                                                                \
	/* returns NULL once the next element would not fit entirely */         \
	__pure2                                                                 \
	static inline e_type_t *                                                \
	name ## _next_elem(name ## _t array, e_type_t *e)                       \
	{                                                                       \
	        vm_address_t end = __kalloc_array_end((vm_address_t)array);     \
	        vm_address_t ptr = (vm_address_t)e + sizeof(e_type_t);          \
                                                                                \
	        if (ptr + sizeof(e_type_t) <= end) {                            \
	                return __unsafe_forge_single(e_type_t *, ptr);          \
	        }                                                               \
	        return NULL;                                                    \
	}                                                                       \
                                                                                \
	/* overflow-safe check that element index @c i is in bounds */          \
	__pure2                                                                 \
	static inline bool                                                      \
	name ## _contains(name ## _t array, vm_size_t i)                        \
	{                                                                       \
	        vm_size_t offs = (e_sz) + (h_sz);                               \
	        vm_size_t s;                                                    \
                                                                                \
	        if (__improbable(os_mul_and_add_overflow(i, e_sz, offs, &s))) { \
	                return false;                                           \
	        }                                                               \
	        if (__improbable(s > name ## _size(array))) {                   \
	                return false;                                           \
	        }                                                               \
	        return true;                                                    \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t * __single                                       \
	name ## _get_nocheck(name ## _t array, vm_size_t i)                     \
	{                                                                       \
	        return name ## _begin(array) + i;                               \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t * __single                                       \
	name ## _get(name ## _t array, vm_size_t i)                             \
	{                                                                       \
	        if (__probable(name ## _contains(array, i))) {                  \
	            return name ## _get_nocheck(array, i);                      \
	        }                                                               \
	        return NULL;                                                    \
	}                                                                       \
                                                                                \
	/* allocation / reallocation / free entry points */                     \
	static inline name ## _t                                                \
	name ## _alloc_by_size(vm_size_t size, zalloc_flags_t fl)               \
	{                                                                       \
	        fl |= Z_KALLOC_ARRAY;                                           \
	        fl = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG());             \
	        return (name ## _t)kalloc_type_var_impl(name ## _kt_view,       \
	                        size, fl, NULL);                                \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _alloc_by_count(uint32_t count, zalloc_flags_t fl)              \
	{                                                                       \
	        return name ## _alloc_by_size(kt_size(h_sz, e_sz, count), fl);  \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _realloc_by_size(                                               \
	        name ## _t              array,                                  \
	        vm_size_t               new_size,                               \
	        zalloc_flags_t          fl)                                     \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        vm_size_t    size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        fl |= Z_KALLOC_ARRAY;                                           \
	        fl = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG());             \
	        return (name ## _t)(krealloc_ext)(name ## _kt_view,             \
	                        (void *)base, size, new_size, fl, NULL).addr;   \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _realloc_by_count(                                              \
	        name ## _t              array,                                  \
	        uint32_t                new_count,                              \
	        zalloc_flags_t          fl)                                     \
	{                                                                       \
	        vm_size_t new_size = kt_size(h_sz, e_sz, new_count);            \
                                                                                \
	        return name ## _realloc_by_size(array, new_size, fl);           \
	}                                                                       \
                                                                                \
	static inline void                                                      \
	name ## _free_noclear(name ## _t array)                                 \
	{                                                                       \
	        kfree_type_var_impl(name ## _kt_view,                           \
	            name ## _base(array), name ## _size(array));                \
	}                                                                       \
                                                                                \
	/* preferred free: also nils out the caller's pointer */                \
	static inline void                                                      \
	name ## _free(name ## _t *arrayp)                                       \
	{                                                                       \
	        name ## _t array = *arrayp;                                     \
                                                                                \
	        *arrayp = NULL;                                                 \
	        kfree_type_var_impl(name ## _kt_view,                           \
	            name ## _base(array), name ## _size(array));                \
	}
1143 
1144 
1145 /*!
1146  * @macro KALLOC_ARRAY_TYPE_DEFINE()
1147  *
1148  * @description
1149  * Defines the data structures required to pair with a KALLOC_ARRAY_TYPE_DECL()
1150  * kalloc array declaration.
1151  *
1152  * @discussion
1153  * This macro comes in two variants
1154  *
1155  * - KALLOC_ARRAY_TYPE_DEFINE(name, e_ty, flags)
1156  * - KALLOC_ARRAY_TYPE_DEFINE(name, h_ty, e_ty, flags)
1157  *
1158  * Those must pair with the KALLOC_ARRAY_TYPE_DECL() form being used.
1159  * The flags must be valid @c kalloc_type_flags_t flags.
1160  */
/* dispatches to KALLOC_ARRAY_TYPE_DEFINE_{3,4} based on argument count */
#define KALLOC_ARRAY_TYPE_DEFINE(...) \
	KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DEFINE, ##__VA_ARGS__)
1163 
1164 /*!
1165  * @function kalloc_next_good_size()
1166  *
1167  * @brief
1168  * Allows to implement "allocation growth policies" that work well
1169  * with the allocator.
1170  *
1171  * @discussion
1172  * Note that if the caller tracks a number of elements for an array,
1173  * where the elements are of size S, and the current count is C,
1174  * then it is possible for kalloc_next_good_size(C * S, ..) to hit
1175  * a fixed point, clients must call with a size at least of ((C + 1) * S).
1176  *
1177  * @param size         the current "size" of the allocation (in bytes).
1178  * @param period       the "period" (power of 2) for the allocation growth
1179  *                     policy once hitting the VM sized allocations.
1180  */
1181 extern vm_size_t kalloc_next_good_size(
1182 	vm_size_t               size,
1183 	uint32_t                period);
1184 
1185 #pragma mark kalloc_array implementation details
1186 
/* headerless variant: the "header" type is the element type, header size 0 */
#define KALLOC_ARRAY_TYPE_DECL_2(name, e_type_t) \
	KALLOC_ARRAY_TYPE_DECL_(name, e_type_t, 0, e_type_t, sizeof(e_type_t))
1189 
/*
 * Headered variant: the allocation starts with an @c h_type_t header
 * followed by an array of @c e_type_t elements.
 *
 * The header type and its size must be forwarded to the worker macro so
 * that ${name}_count_to_size()/_size_to_count() account for the header and
 * ${name}_base() returns an @c h_type_t pointer (the previous expansion
 * dropped @c h_type_t and passed a 0 header size, which mismatched the
 * paired KALLOC_ARRAY_TYPE_DEFINE_4() accounting).
 */
#define KALLOC_ARRAY_TYPE_DECL_3(name, h_type_t, e_type_t) \
	KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, sizeof(h_type_t),              \
	    e_type_t, sizeof(e_type_t))
1192 
/* headerless definition: pairs with KALLOC_ARRAY_TYPE_DECL_2() */
#define KALLOC_ARRAY_TYPE_DEFINE_3(name, e_type_t, flags) \
	KALLOC_TYPE_VAR_DEFINE_3(name ## _kt_view, e_type_t, flags)

/* headered definition: pairs with KALLOC_ARRAY_TYPE_DECL_3() */
#define KALLOC_ARRAY_TYPE_DEFINE_4(name, h_type_t, e_type_t, flags) \
	KALLOC_TYPE_VAR_DEFINE_4(name ## _kt_view, h_type_t, e_type_t, flags)
1198 
1199 extern struct kalloc_result __kalloc_array_decode(
1200 	vm_address_t            array) __pure2;
1201 
1202 __pure2
1203 static inline uint32_t
__kalloc_array_size(vm_address_t array)1204 __kalloc_array_size(vm_address_t array)
1205 {
1206 	vm_address_t size = __kalloc_array_decode(array).size;
1207 
1208 	__builtin_assume(size <= KALLOC_ARRAY_SIZE_MAX);
1209 	return (uint32_t)size;
1210 }
1211 
1212 __pure2
1213 static inline vm_address_t
__kalloc_array_base(vm_address_t array)1214 __kalloc_array_base(vm_address_t array)
1215 {
1216 	return (vm_address_t)__kalloc_array_decode(array).addr;
1217 }
1218 
1219 __pure2
1220 static inline vm_address_t
__kalloc_array_begin(vm_address_t array,vm_size_t hdr_size)1221 __kalloc_array_begin(vm_address_t array, vm_size_t hdr_size)
1222 {
1223 	return (vm_address_t)__kalloc_array_decode(array).addr + hdr_size;
1224 }
1225 
1226 __pure2
1227 static inline vm_address_t
__kalloc_array_end(vm_address_t array)1228 __kalloc_array_end(vm_address_t array)
1229 {
1230 	struct kalloc_result kr = __kalloc_array_decode(array);
1231 
1232 	return (vm_address_t)kr.addr + kr.size;
1233 }
1234 
1235 #else /* !XNU_KERNEL_PRIVATE */
1236 
/* kexts only get the opaque pointer type; the helpers are kernel-internal */
#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
	typedef struct name * __unsafe_indexable name ## _t
1239 
1240 #endif /* !XNU_KERNEL_PRIVATE */
1241 #pragma mark implementation details
1242 
1243 
1244 static inline void *__unsafe_indexable
kt_mangle_var_view(kalloc_type_var_view_t kt_view)1245 kt_mangle_var_view(kalloc_type_var_view_t kt_view)
1246 {
1247 	return (void *__unsafe_indexable)((uintptr_t)kt_view | 1ul);
1248 }
1249 
1250 static inline kalloc_type_var_view_t __unsafe_indexable
kt_demangle_var_view(void * ptr)1251 kt_demangle_var_view(void *ptr)
1252 {
1253 	return (kalloc_type_var_view_t __unsafe_indexable)((uintptr_t)ptr & ~1ul);
1254 }
1255 
/* true if @c ptr carries the kt_mangle_var_view() tag bit */
#define kt_is_var_view(ptr)  ((uintptr_t)(ptr) & 1)
1257 
1258 static inline vm_size_t
kt_size(vm_size_t s1,vm_size_t s2,vm_size_t c2)1259 kt_size(vm_size_t s1, vm_size_t s2, vm_size_t c2)
1260 {
1261 	/* kalloc_large() will reject this size before even asking the VM  */
1262 	const vm_size_t limit = 1ull << (8 * sizeof(vm_size_t) - 1);
1263 
1264 	if (os_mul_and_add_overflow(s2, c2, s1, &s1) || (s1 & limit)) {
1265 		return limit;
1266 	}
1267 	return s1;
1268 }
1269 
#ifndef __ZONE_DECLARE_TYPE
/* no-op fallback when the zalloc type-declaration machinery is absent */
#define __ZONE_DECLARE_TYPE(var, type_t)  ((void)0)
#endif
1273 
/* single-element typed allocation through a per-callsite kalloc_type view */
#define kalloc_type_2(type, flags) ({                                          \
	__ZONE_DECLARE_TYPE(kt_view_var, type);                                \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	__unsafe_forge_single(type *, kalloc_type_impl(kt_view_var, flags));   \
})

/* frees a single typed element and nils out @c elem */
#define kfree_type_2(type, elem) ({                                            \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	kfree_type_impl(kt_view_var, os_ptr_load_and_erase(elem));             \
})

/* frees an array of @c count typed elements and nils out @c elem */
#define kfree_type_3(type, count, elem) ({                                     \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var, os_ptr_load_and_erase(elem),          \
	    kt_size(0, sizeof(type), __kfree_count));                          \
})

/* frees a header followed by @c count elements and nils out @c elem */
#define kfree_type_4(hdr_ty, e_ty, count, elem) ({                             \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                   \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var,                                       \
	    os_ptr_load_and_erase(elem),                                       \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), __kfree_count));             \
})
1303 
#ifdef XNU_KERNEL_PRIVATE
/* single-element allocation with an explicit VM tag */
#define kalloc_type_tag_3(type, flags, tag) ({                                 \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	__unsafe_forge_single(type *, zalloc_flags(kt_view_var,                \
	    Z_VM_TAG(flags, tag)));                                            \
})

/* array-of-type allocation with an explicit VM tag */
#define kalloc_type_tag_4(type, count, flags, tag) ({                          \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count),                                   \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define kalloc_type_3(type, count, flags)  \
	kalloc_type_tag_4(type, count, flags, VM_ALLOC_SITE_TAG())

/* header + trailing array allocation with an explicit VM tag */
#define kalloc_type_tag_5(hdr_ty, e_ty, count, flags, tag) ({                  \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var,                            \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), count),                      \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define kalloc_type_4(hdr_ty, e_ty, count, flags) \
	kalloc_type_tag_5(hdr_ty, e_ty, count, flags, VM_ALLOC_SITE_TAG())

/* reallocation between old_count and new_count elements */
#define krealloc_type_tag_6(type, old_count, new_count, elem, flags, tag) ({   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	(type *)__krealloc_type(kt_view_var, elem,                             \
	    kt_size(0, sizeof(type), old_count),                               \
	    kt_size(0, sizeof(type), new_count),                               \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define krealloc_type_5(type, old_count, new_count, elem, flags) \
	krealloc_type_tag_6(type, old_count, new_count, elem, flags, \
	    VM_ALLOC_SITE_TAG())

/* header + trailing array reallocation */
#define krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem,          \
	    flags, tag) ({                                                     \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                   \
	(hdr_ty *)__krealloc_type(kt_view_var, elem,                           \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), old_count),                  \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), new_count),                  \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
#define krealloc_type_6(hdr_ty, e_ty, old_count, new_count, elem, flags) \
	krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, flags,   \
	    VM_ALLOC_SITE_TAG())

#else /* XNU_KERNEL_PRIVATE */

/* kext-facing variants: only Z_WAITOK is supported outside the kernel */
#define kalloc_type_3(type, count, flags) ({                                   \
	_Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK");   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count), flags, NULL);                     \
})

#define kalloc_type_4(hdr_ty, e_ty, count, flags) ({                           \
	_Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK");   \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var, kt_size(sizeof(hdr_ty),    \
	    sizeof(e_ty), count), flags, NULL);                                \
})

#endif /* !XNU_KERNEL_PRIVATE */
1374 
1375 /*
1376  * All k*free macros set "elem" to NULL on free.
1377  *
1378  * Note: all values passed to k*free() might be in the element to be freed,
1379  *       temporaries must be taken, and the resetting to be done prior to free.
1380  */
1381 #ifdef XNU_KERNEL_PRIVATE
1382 
/* frees @c size bytes from @c heap; nils out @c elem before the free */
#define kheap_free(heap, elem, size) ({                                        \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	__auto_type __kfree_size = (size);                                     \
	__builtin_assume(!kt_is_var_view(__kfree_heap));                       \
	kfree_ext((void *)__kfree_heap,                                        \
	    (void *)os_ptr_load_and_erase(elem), __kfree_size);                \
})

/* free variant for callers that do not track the allocation size */
#define kheap_free_addr(heap, elem) ({                                         \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	kfree_addr_ext(__kfree_heap, (void *)os_ptr_load_and_erase(elem));     \
})

/* free with a statically bounded [min_sz, max_sz] size range */
#define kheap_free_bounded(heap, elem, min_sz, max_sz) ({                      \
	static_assert(max_sz <= KALLOC_SAFE_ALLOC_SIZE);                       \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	__auto_type __kfree_min_sz = (min_sz);                                 \
	__auto_type __kfree_max_sz = (max_sz);                                 \
	(kheap_free_bounded)(__kfree_heap,                                     \
	    (void *)os_ptr_load_and_erase(elem),                               \
	    __kfree_min_sz, __kfree_max_sz);                                   \
})
1405 
1406 #else /* XNU_KERNEL_PRIVATE */
1407 
/* nils out @c elem then calls the kfree_data() function with a temporary */
#define kfree_data(elem, size) ({                                              \
	__auto_type __kfree_size = (size);                                     \
	(kfree_data)((void *)os_ptr_load_and_erase(elem), __kfree_size);       \
})

/* sizeless variant: nils out @c elem then calls kfree_data_addr() */
#define kfree_data_addr(elem) \
	(kfree_data_addr)((void *)os_ptr_load_and_erase(elem))
1415 
1416 #endif /* !XNU_KERNEL_PRIVATE */
1417 
#if __has_feature(address_sanitizer)
/* opts allocator internals out of ASan instrumentation */
# define __kalloc_no_kasan __attribute__((no_sanitize("address")))
#else
# define __kalloc_no_kasan
#endif
1423 
#define KALLOC_CONCAT(x, y) __CONCAT(x,y)

/* classic argument-counting trick: expands to one of _0 .. _9 */
#define KALLOC_COUNT_ARGS1(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, N, ...) N
#define KALLOC_COUNT_ARGS(...) \
	KALLOC_COUNT_ARGS1(, ##__VA_ARGS__, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0)
/* expands to base##_N(...) where N is the number of arguments */
#define KALLOC_DISPATCH1(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH(base, ...) \
	KALLOC_DISPATCH1(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
#define KALLOC_DISPATCH1_R(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH_R(base, ...) \
	KALLOC_DISPATCH1_R(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)

/* per-callsite unique identifier for the static kalloc_type views above */
#define kt_view_var \
	KALLOC_CONCAT(kalloc_type_view_, __LINE__)

#define KALLOC_TYPE_SEGMENT "__DATA_CONST"
1440 
1441 /*
1442  * When kalloc_type_impl is called from xnu, it calls zalloc_flags
1443  * directly and doesn't redirect zone-less sites to kheap_alloc.
1444  * Passing a size larger than KHEAP_MAX_SIZE for these allocations will
1445  * lead to a panic as the zone is null. Therefore assert that size
1446  * is less than KALLOC_SAFE_ALLOC_SIZE.
1447  */
#ifdef XNU_KERNEL_PRIVATE
#define KALLOC_TYPE_SIZE_CHECK(size)                           \
	_Static_assert(size <= KALLOC_SAFE_ALLOC_SIZE,             \
	"type is too large");
#else
/* no static size check outside the kernel proper */
#define KALLOC_TYPE_SIZE_CHECK(size)
#endif
1455 
1456 #define KALLOC_TYPE_CHECK_2(check, type) \
1457 	(KALLOC_TYPE_SIG_CHECK(check, type))
1458 
1459 #define KALLOC_TYPE_CHECK_3(check, type1, type2) \
1460 	(KALLOC_TYPE_SIG_CHECK(check, type1) && \
1461 	    KALLOC_TYPE_SIG_CHECK(check, type2))
1462 
1463 #define KALLOC_TYPE_CHECK(...) \
1464 	KALLOC_DISPATCH_R(KALLOC_TYPE_CHECK, ##__VA_ARGS__)
1465 
/*
 * KALLOC_TYPE_VM_SIZE_CHECK(type...) is true when the combined size of the
 * supplied type(s) exceeds KHEAP_MAX_SIZE, i.e. when an allocation of
 * these types is too large for the kalloc zones (see KT_VM below).
 */
#define KALLOC_TYPE_VM_SIZE_CHECK_1(type) \
	(sizeof(type) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK_2(type1, type2) \
	(sizeof(type1) + sizeof(type2) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_VM_SIZE_CHECK, ##__VA_ARGS__)
1474 
/*
 * Compile-time check for the two-type (header + trailing array) variant:
 * a data-only element type may only trail a header that is itself
 * data-only; otherwise the combination is rejected.
 */
#define KALLOC_TYPE_TRAILING_DATA_CHECK(hdr_ty, elem_ty)     \
	_Static_assert((KALLOC_TYPE_IS_DATA_ONLY(hdr_ty) ||  \
	    !KALLOC_TYPE_IS_DATA_ONLY(elem_ty)),             \
	"cannot allocate data-only array of " #elem_ty       \
	" contiguously to " #hdr_ty)
1480 
/*
 * Cast an integer flag expression to kalloc_type_flags_t, using the cast
 * syntax appropriate for the compilation language (this header is shared
 * between C and C++ translation units).
 */
#ifdef __cplusplus
#define KALLOC_TYPE_CAST_FLAGS(flags) static_cast<kalloc_type_flags_t>(flags)
#else
#define KALLOC_TYPE_CAST_FLAGS(flags) (kalloc_type_flags_t)(flags)
#endif
1486 
/*
 * Don't emit signature if type is "data-only" or is large enough that it
 * uses the VM.
 *
 * Note: sig_type is the type you want to emit signature for. The variable
 * args can be used to provide other types in the allocation, to make the
 * decision of whether to emit the signature.
 *
 * The whole expansion is parenthesized: it is a conditional expression and
 * must bind as a single unit at its use sites.
 */
#define KALLOC_TYPE_EMIT_SIG(sig_type, ...)                               \
	((KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, sig_type, ##__VA_ARGS__) || \
	KALLOC_TYPE_VM_SIZE_CHECK(sig_type, ##__VA_ARGS__)) ?                 \
	"" : __builtin_xnu_type_signature(sig_type))
1499 
/*
 * Kalloc type flags are adjusted to indicate if the type is "data-only" or
 * will use the VM or is a pointer array.
 *
 * KT_CHANGED and KT_CHANGED2 are ORed in unconditionally (flag-layout
 * version markers, defined elsewhere — presumably distinguishing old
 * binaries; confirm against kalloc_type_flags_t).
 */
#define KALLOC_TYPE_ADJUST_FLAGS(flags, ...)                                 \
	KALLOC_TYPE_CAST_FLAGS((flags | KT_CHANGED | KT_CHANGED2 |               \
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, __VA_ARGS__)? KT_DATA_ONLY: 0) |\
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_PTR, __VA_ARGS__)? KT_PTR_ARRAY: 0) | \
	(KALLOC_TYPE_VM_SIZE_CHECK(__VA_ARGS__)? KT_VM : 0)))
1509 
/*
 * Defines <var> as a one-element array of struct kalloc_type_view placed
 * in the __DATA_CONST,__kalloc_type section (kept alive with its users via
 * live_support), recording the type's name, adjusted flags, size and type
 * signature. A _Static_assert rejects types too large for typed kalloc.
 */
#define _KALLOC_TYPE_DEFINE(var, type, flags)                       \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_type, "      \
	    "regular, live_support")                                    \
	struct kalloc_type_view var[1] = { {                            \
	    .kt_zv.zv_name = "site." #type,                             \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size = sizeof(type),                                    \
	    .kt_signature = KALLOC_TYPE_EMIT_SIG(type),                 \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1521 
/*
 * Variable-size variant (single element type): defines <var> as a
 * one-element array of struct kalloc_type_var_view in the
 * __DATA_CONST,__kalloc_var section, with the same name/flags/size/
 * signature bookkeeping and size check as _KALLOC_TYPE_DEFINE.
 */
#define KALLOC_TYPE_VAR_DEFINE_3(var, type, flags)                  \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, "       \
	    "regular, live_support")                                    \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #type,                                   \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type),                  \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1534 
/*
 * Variable-size variant with a fixed header followed by a trailing array
 * of <type>. Both sizes and both signatures are recorded (each signature
 * decision also considers the other type), both sizes are checked, and
 * the data-only trailing-array restriction is enforced.
 */
#define KALLOC_TYPE_VAR_DEFINE_4(var, hdr, type, flags)             \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, "       \
	    "regular, live_support")                                    \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #hdr "." #type,                          \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, hdr, type),     \
	    .kt_size_hdr = sizeof(hdr),                                 \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_hdr = KALLOC_TYPE_EMIT_SIG(hdr, type),              \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type, hdr),             \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(hdr));                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));                           \
	KALLOC_TYPE_TRAILING_DATA_CHECK(hdr, type);
1551 
#ifndef XNU_KERNEL_PRIVATE
/*
 * This macro is currently used by AppleImage4
 */
/* File-local (static) kalloc type view definition for external clients. */
#define KALLOC_TYPE_DEFINE_SITE(var, type, flags)       \
	static _KALLOC_TYPE_DEFINE(var, type, flags)

#endif /* !XNU_KERNEL_PRIVATE */
1560 
#ifdef XNU_KERNEL_PRIVATE

/*
 * In-kernel build: a kalloc type view is handed straight to zalloc_flags
 * (the view doubles as a zone view — see .kt_zv in _KALLOC_TYPE_DEFINE).
 */
#define kalloc_type_impl(kt_view, flags) \
	zalloc_flags(kt_view, flags)
1565 
1566 static inline void
kfree_type_impl(kalloc_type_view_t kt_view,void * __unsafe_indexable ptr)1567 kfree_type_impl(kalloc_type_view_t kt_view, void *__unsafe_indexable ptr)
1568 {
1569 	if (NULL == ptr) {
1570 		return;
1571 	}
1572 	zfree(kt_view, ptr);
1573 }
1574 
/*
 * Core allocation entry point (defined out of line). <kheap_or_kt_view>
 * is either a kalloc heap or a (possibly mangled) kalloc type view; the
 * returned struct kalloc_result carries the allocation in .addr.
 */
extern struct kalloc_result
kalloc_ext(
	void                   *kheap_or_kt_view __unsafe_indexable,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);
1581 
1582 static inline struct kalloc_result
__kalloc_ext(void * kheap_or_kt_view __unsafe_indexable,vm_size_t size,zalloc_flags_t flags,void * site)1583 __kalloc_ext(
1584 	void                   *kheap_or_kt_view __unsafe_indexable,
1585 	vm_size_t               size,
1586 	zalloc_flags_t          flags,
1587 	void                   *site)
1588 {
1589 	struct kalloc_result kr;
1590 
1591 	kr    = (kalloc_ext)(kheap_or_kt_view, size, flags, site);
1592 	if (flags & Z_NOFAIL) {
1593 		__builtin_assume(kr.addr != NULL);
1594 	}
1595 	return kr;
1596 }
1597 
1598 #define kalloc_ext(hov, size, fl, site) __kalloc_ext(hov, size, fl, site)
1599 
/*
 * Core free entry point (defined out of line); <kheap_or_kt_view> mirrors
 * the kalloc_ext() argument, <size> is the size the caller allocated.
 */
extern void
kfree_ext(
	void                   *kheap_or_kt_view __unsafe_indexable,
	void                   *addr __unsafe_indexable,
	vm_size_t               size);
1605 
1606 // rdar://87559422
1607 static inline void *__unsafe_indexable
kalloc_type_var_impl(kalloc_type_var_view_t kt_view,vm_size_t size,zalloc_flags_t flags,void * site)1608 kalloc_type_var_impl(
1609 	kalloc_type_var_view_t    kt_view,
1610 	vm_size_t                 size,
1611 	zalloc_flags_t            flags,
1612 	void                      *site)
1613 {
1614 	struct kalloc_result kr;
1615 
1616 	kr = kalloc_ext(kt_mangle_var_view(kt_view), size, flags, site);
1617 	return kr.addr;
1618 }
1619 
/*
 * Variable-size typed free: mangles the var view (matching
 * kalloc_type_var_impl) and forwards to kfree_ext().
 */
static inline void
kfree_type_var_impl(
	kalloc_type_var_view_t      kt_view,
	void                       *ptr __unsafe_indexable,
	vm_size_t                   size)
{
	kfree_ext(kt_mangle_var_view(kt_view), ptr, size);
}
1628 
#else /* XNU_KERNEL_PRIVATE */

/*
 * Kext-facing declaration of fixed-size typed allocation (defined in the
 * kernel; kexts only see this prototype).
 */
extern void *__unsafe_indexable
kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);
1635 
1636 static inline void *__unsafe_indexable
__kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)1637 __kalloc_type_impl(
1638 	kalloc_type_view_t  kt_view,
1639 	zalloc_flags_t      flags)
1640 {
1641 	void *addr = (kalloc_type_impl)(kt_view, flags);
1642 	if (flags & Z_NOFAIL) {
1643 		__builtin_assume(addr != NULL);
1644 	}
1645 	return addr;
1646 }
1647 
1648 #define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)
1649 
/* Kext-facing declaration of fixed-size typed free. */
extern void
kfree_type_impl(
	kalloc_type_view_t  kt_view,
	void                *ptr __unsafe_indexable);

/* Kext-facing declaration of variable-size typed allocation. */
extern void *__unsafe_indexable
kalloc_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);
1661 
1662 __attribute__((malloc, alloc_size(2)))
1663 static inline void *
__sized_by(size)1664 __sized_by(size)
1665 __kalloc_type_var_impl(
1666 	kalloc_type_var_view_t  kt_view,
1667 	vm_size_t               size,
1668 	zalloc_flags_t          flags,
1669 	void                   *site)
1670 {
1671 	void *addr = (kalloc_type_var_impl)(kt_view, size, flags, site);
1672 	if (flags & Z_NOFAIL) {
1673 		__builtin_assume(addr != NULL);
1674 	}
1675 	return addr;
1676 }
1677 
1678 #define kalloc_type_var_impl(ktv, size, fl, site) \
1679 	__kalloc_type_var_impl(ktv, size, fl, site)
1680 
/* Kext-facing declaration of variable-size typed free. */
extern void
kfree_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr __unsafe_indexable,
	vm_size_t               size);
1686 
1687 #endif /* !XNU_KERNEL_PRIVATE */
1688 
1689 void *
1690 kalloc_type_impl_external(
1691 	kalloc_type_view_t  kt_view,
1692 	zalloc_flags_t      flags);
1693 
1694 void
1695 kfree_type_impl_external(
1696 	kalloc_type_view_t  kt_view,
1697 	void               *ptr __unsafe_indexable);
1698 
/* Typed operator new backing store for OSObject (C++ runtime support). */
extern void *
OSObject_typed_operator_new(
	kalloc_type_view_t  ktv,
	vm_size_t           size);

/* Matching typed operator delete for OSObject. */
extern void
OSObject_typed_operator_delete(
	kalloc_type_view_t  ktv,
	void               *mem __unsafe_indexable,
	vm_size_t           size);
1709 
1710 #ifdef XNU_KERNEL_PRIVATE
1711 #pragma GCC visibility push(hidden)
1712 
/*
 * kt_size field packing: the low 24 bits hold the element size, the next
 * 8 bits hold an index (per the *_IDX_* shift/mask below).
 */
#define KALLOC_TYPE_SIZE_MASK  0xffffff
#define KALLOC_TYPE_IDX_SHIFT  24
#define KALLOC_TYPE_IDX_MASK   0xff

/* Extract the element size from a packed kt_size value. */
static inline uint32_t
kalloc_type_get_size(uint32_t kt_size)
{
	uint32_t size = kt_size & KALLOC_TYPE_SIZE_MASK;

	return size;
}
1722 
1723 bool
1724 IOMallocType_from_vm(
1725 	kalloc_type_view_t ktv);
1726 
/* Used by kern_os_* and operator new */
KALLOC_HEAP_DECLARE(KERN_OS_MALLOC);

/* Startup-time initialization of a kalloc heap (defined out of line). */
extern void
kheap_startup_init(
	kalloc_heap_t heap);
1733 
1734 __attribute__((malloc, alloc_size(2)))
1735 static inline void *
__sized_by(size)1736 __sized_by(size)
1737 __kheap_alloc(
1738 	kalloc_heap_t           kheap,
1739 	vm_size_t               size,
1740 	zalloc_flags_t          flags,
1741 	void                   *site)
1742 {
1743 	struct kalloc_result kr;
1744 	__builtin_assume(!kt_is_var_view(kheap));
1745 	kr = kalloc_ext(kheap, size, flags, site);
1746 	return __unsafe_forge_bidi_indexable(void *, kr.addr, size);
1747 }
1748 
/*
 * Core reallocation entry point (defined out of line). The caller must
 * pass the old allocation's size along with the requested new size.
 */
extern struct kalloc_result
krealloc_ext(
	void                   *kheap_or_kt_view __unsafe_indexable,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	void                   *site);
1757 
1758 static inline struct kalloc_result
__krealloc_ext(void * kheap_or_kt_view __unsafe_indexable,void * addr __sized_by (old_size),vm_size_t old_size,vm_size_t new_size,zalloc_flags_t flags,void * site)1759 __krealloc_ext(
1760 	void                   *kheap_or_kt_view __unsafe_indexable,
1761 	void                   *addr __sized_by(old_size),
1762 	vm_size_t               old_size,
1763 	vm_size_t               new_size,
1764 	zalloc_flags_t          flags,
1765 	void                   *site)
1766 {
1767 	struct kalloc_result kr = (krealloc_ext)(kheap_or_kt_view, addr, old_size,
1768 	    new_size, flags, site);
1769 	if (flags & Z_NOFAIL) {
1770 		__builtin_assume(kr.addr != NULL);
1771 	}
1772 	return kr;
1773 }
1774 
1775 #define krealloc_ext(hov, addr, old_size, new_size, fl, site) \
1776 	__krealloc_ext(hov, addr, old_size, new_size, fl, site)
1777 
1778 __attribute__((malloc, alloc_size(4)))
1779 static inline void *
__sized_by(new_size)1780 __sized_by(new_size)
1781 __kheap_realloc(
1782 	kalloc_heap_t           kheap,
1783 	void                   *addr __sized_by(old_size),
1784 	vm_size_t               old_size,
1785 	vm_size_t               new_size,
1786 	zalloc_flags_t          flags,
1787 	void                   *site)
1788 {
1789 	struct kalloc_result kr;
1790 	__builtin_assume(!kt_is_var_view(kheap));
1791 	kr = krealloc_ext(kheap, addr, old_size, new_size, flags, site);
1792 	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1793 }
1794 
1795 __attribute__((malloc, alloc_size(4)))
1796 static inline void *
__sized_by(new_size)1797 __sized_by(new_size)
1798 __krealloc_type(
1799 	kalloc_type_var_view_t  kt_view,
1800 	void                   *addr __sized_by(old_size),
1801 	vm_size_t               old_size,
1802 	vm_size_t               new_size,
1803 	zalloc_flags_t          flags,
1804 	void                   *site)
1805 {
1806 	struct kalloc_result kr;
1807 	kr = krealloc_ext(kt_mangle_var_view(kt_view), addr,
1808 	    old_size, new_size, flags, site);
1809 	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1810 }
1811 
/* Free by address only (size looked up by the implementation). */
extern void
kfree_addr_ext(
	kalloc_heap_t           kheap,
	void                   *addr __unsafe_indexable);

/* Pick the kalloc zone serving <size>, starting from zone id <zid>. */
extern zone_t
kalloc_zone_for_size(
	zone_id_t             zid,
	vm_size_t             size);

/* Threshold and statistics for allocations too large for zones. */
extern vm_size_t kalloc_large_max;
SCALABLE_COUNTER_DECLARE(kalloc_large_count);
SCALABLE_COUNTER_DECLARE(kalloc_large_total);

/* Typed free used by the kern_os_* (libkern C++) allocator layer. */
extern void
kern_os_typed_free(
	kalloc_type_view_t    ktv,
	void                 *addr __unsafe_indexable,
	vm_size_t             esize);
1831 
1832 #pragma GCC visibility pop
#endif  /* XNU_KERNEL_PRIVATE */
1834 
/* Zone free used by the kern_os_* (libkern C++) allocator layer. */
extern void
kern_os_zfree(
	zone_t        zone,
	void         *addr __unsafe_indexable,
	vm_size_t     size);
1840 
1841 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1842 
1843 #endif  /* _KERN_KALLOC_H_ */
1844 
1845 #endif  /* KERNEL_PRIVATE */
1846