xref: /xnu-12377.1.9/osfmk/kern/kalloc.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifdef  KERNEL_PRIVATE
58 
59 #ifndef _KERN_KALLOC_H_
60 #define _KERN_KALLOC_H_
61 
62 #include <mach/machine/vm_types.h>
63 #include <mach/boolean.h>
64 #include <mach/vm_types.h>
65 #include <kern/zalloc.h>
66 #include <libkern/section_keywords.h>
67 #include <os/alloc_util.h>
68 #if XNU_KERNEL_PRIVATE
69 #include <kern/counter.h>
70 #endif /* XNU_KERNEL_PRIVATE */
71 
72 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
73 
74 /*!
75  * @const KALLOC_SAFE_ALLOC_SIZE
76  *
77  * @brief
78  * The maximum allocation size that is safe to allocate with Z_NOFAIL in kalloc.
79  */
80 #define KALLOC_SAFE_ALLOC_SIZE  (16u * 1024u)
81 
82 #if XNU_KERNEL_PRIVATE
83 /*!
84  * @typedef kalloc_heap_t
85  *
86  * @abstract
87  * A kalloc heap view represents a sub-accounting context
88  * for a given kalloc heap.
89  */
90 typedef struct kalloc_heap {
91 	zone_stats_t        kh_stats;      /* accounting stats gathered for this view */
92 	const char         *__unsafe_indexable kh_name;    /* view name string (set by KALLOC_HEAP_DEFINE) */
93 	zone_kheap_id_t     kh_heap_id;    /* KHEAP_ID_* constant of the backing builtin heap */
94 	vm_tag_t            kh_tag;        /* NOTE(review): presumably default VM tag for this view -- confirm */
95 	uint16_t            kh_type_hash;  /* NOTE(review): presumably a type-signature hash for kmem guards -- confirm */
96 	zone_id_t           kh_zstart;     /* NOTE(review): presumably first zone id of the backing heap -- confirm */
97 	struct kalloc_heap *kh_views;      /* NOTE(review): looks like a linked chain of registered views -- confirm */
98 } *kalloc_heap_t;
99 
100 /*!
101  * @macro KALLOC_HEAP_DECLARE
102  *
103  * @abstract
104  * (optionally) declare a kalloc heap view in a header.
105  *
106  * @discussion
107  * Unlike kernel zones, new full blown heaps cannot be instantiated.
108  * However new accounting views of the base heaps can be made.
109  */
110 #define KALLOC_HEAP_DECLARE(var) \
111 	extern struct kalloc_heap var[1]
112 
113 /**
114  * @const KHEAP_DATA_BUFFERS
115  *
116  * @brief
117  * The builtin heap for bags of pure bytes.
118  *
119  * @discussion
120  * This set of kalloc zones should contain pure bags of bytes with no pointers
121  * or length/offset fields.
122  *
123  * The zones forming the heap aren't sequestered from each other, however the
124  * entire heap lives in a different submap from any other kernel allocation.
125  *
126  * The main motivation behind this separation is due to the fact that a lot of
127  * these objects have been used by attackers to spray the heap to make it more
128  * predictable while exploiting use-after-frees or overflows.
129  *
130  * Common attributes that make these objects useful for spraying includes
131  * control of:
132  * - Data in allocation
133  * - Time of alloc and free (lifetime)
134  * - Size of allocation
135  */
136 KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS);
137 
138 /**
139  * @const KHEAP_DATA_SHARED
140  *
141  * @brief
142  * The builtin heap for bags of pure bytes that get shared across components.
143  *
144  * @discussion
145  * There's a further distinction that we can make between kalloc zones that
146  * contain bag of bytes, which is based on their intended use. In particular
147  * a number of pure data allocations are intended to be shared between kernel
148  * and user or kernel and coprocessors (DMA). These allocations cannot
149  * sustain the security XNU_KERNEL_RESTRICTED checks, therefore we isolate
150  * them in a separated heap, to further increase the security guarantees
151  * around KHEAP_DATA_BUFFERS.
152  */
153 KALLOC_HEAP_DECLARE(KHEAP_DATA_SHARED);
154 
155 /**
156  * @const KHEAP_DEFAULT
157  *
158  * @brief
159  * The builtin default core kernel kalloc heap.
160  *
161  * @discussion
162  * This set of kalloc zones should contain other objects that don't have their
163  * own security mitigations. The individual zones are themselves sequestered.
164  */
165 KALLOC_HEAP_DECLARE(KHEAP_DEFAULT);
166 
167 /**
168  * @const KHEAP_KT_VAR
169  *
170  * @brief
171  * Temporary heap for variable sized kalloc type allocations
172  *
173  * @discussion
174  * This heap will be removed when logic for kalloc_type_var_views is added
175  *
176  */
177 KALLOC_HEAP_DECLARE(KHEAP_KT_VAR);
178 
179 /*!
180  * @macro KALLOC_HEAP_DEFINE
181  *
182  * @abstract
183  * Defines a given kalloc heap view and what it points to.
184  *
185  * @discussion
186  * Kalloc heaps are views over one of the pre-defined builtin heaps
187  * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating
188  * a new one allows for accounting of allocations through this view.
189  *
190  * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase,
191  * as the last rank. If views on zones are created, these must have been
192  * created before this stage.
193  *
194  * @param var           the name for the zone view.
195  * @param name          a string describing the zone view.
196  * @param heap_id       a @c KHEAP_ID_* constant.
197  */
198 #define KALLOC_HEAP_DEFINE(var, name, heap_id) \
199 	SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \
200 	    .kh_name = (name), \
201 	    .kh_heap_id = (heap_id), \
202 	} }; \
203 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, kheap_startup_init, var)
204 
205 
206 STATIC_IF_KEY_DECLARE_TRUE(kexts_enroll_data_shared);
207 
208 #define GET_KEXT_KHEAP_DATA() \
209 	static_if(kexts_enroll_data_shared) ? KHEAP_DATA_SHARED : KHEAP_DATA_BUFFERS
210 
211 /*
212  * Helper functions to query the status of security policies
213  * regarding data allocations. This has consequences for things
214  * like dedicated kmem range for shared data allocations
215  */
216 extern bool kalloc_is_restricted_data_mode_telemetry(void);
217 extern bool kalloc_is_restricted_data_mode_enforced(void);
218 extern bool kmem_needs_data_share_range(void);
219 
220 /*
221  * Allocations of type SO_NAME are known to not have pointers for
222  * most platforms -- for macOS this is not guaranteed
223  */
224 #if XNU_TARGET_OS_OSX
225 #define KHEAP_SONAME KHEAP_DEFAULT
226 #else /* XNU_TARGET_OS_OSX */
227 #define KHEAP_SONAME KHEAP_DATA_BUFFERS
228 #endif /* XNU_TARGET_OS_OSX */
229 
230 #endif /* XNU_KERNEL_PRIVATE */
231 
232 /*!
233  * @enum kalloc_type_flags_t
234  *
235  * @brief
236  * Flags that can be passed to @c KALLOC_TYPE_DEFINE
237  *
238  * @discussion
239  * These flags can be used to request for a specific accounting
240  * behavior.
241  *
242  * @const KT_DEFAULT
243  * Passing this flag will provide default accounting behavior
244  * i.e. shared accounting, unless toggled by setting KT_OPTIONS_ACCT
245  * in the kt boot-arg.
246  *
247  * @const KT_PRIV_ACCT
248  * Passing this flag will provide individual stats for your
249  * @c kalloc_type_view that is defined.
250  *
251  * @const KT_SHARED_ACCT
252  * Passing this flag will accumulate stats as a part of the
253  * zone that your @c kalloc_type_view points to.
254  *
255  * @const KT_DATA_ONLY
256  * Represents that the type is "data-only" and is never shared
257  * with another security domain. Adopters should not set
258  * this flag manually, it is meant for the compiler to set
259  * automatically when KALLOC_TYPE_CHECK(DATA) passes.
260  *
261  * @const KT_VM
262  * Represents that the type is large enough to use the VM. Adopters
263  * should not set this flag manually, it is meant for the compiler
264  * to set automatically when KALLOC_TYPE_VM_SIZE_CHECK passes.
265  *
266  * @const KT_PTR_ARRAY
267  * Represents that the type is an array of pointers. Adopters should not
268  * set this flag manually, it is meant for the compiler to set
269  * automatically when KALLOC_TYPE_CHECK(PTR) passes.
270  *
271  * @const KT_CHANGED*
272  * Represents a change in the version of the kalloc_type_view. This
273  * is required in order to avoid requiring kexts to be rebuilt to
274  * use the new definitions right away. This flag should not be used
275  * manually at a callsite, it is meant for internal use only. Future
276  * changes to the kalloc_type_view definition should toggle this flag.
277  *
278  #if XNU_KERNEL_PRIVATE
279  * @const KT_NOEARLY
280  * This flag will force the callsite to bypass the early (shared) zone and
281  * directly allocate from the assigned zone. This can only be used
282  * with KT_PRIV_ACCT right now. If you still require this behavior
283  * but don't want private stats use Z_SET_NOTEARLY at the allocation
284  * callsite instead.
285  *
286  * @const KT_SLID
287  * To indicate that strings in the view were slid during early boot.
288  *
289  * @const KT_PROCESSED
290  * This flag is set once the view is parsed during early boot. Views
291  * that are not in BootKC on macOS aren't parsed and therefore will
292  * not have this flag set. The runtime can use this as an indication
293  * to appropriately redirect the call.
294  *
295  * @const KT_HASH
296  * Hash of signature used by kmem_*_guard to determine range and
297  * direction for allocation
298  #endif
299  */
300 __options_decl(kalloc_type_flags_t, uint32_t, {
301 	KT_DEFAULT        = 0x0001,
302 	KT_PRIV_ACCT      = 0x0002,
303 	KT_SHARED_ACCT    = 0x0004,
304 	KT_DATA_ONLY      = 0x0008,
305 	KT_VM             = 0x0010,
306 	KT_CHANGED        = 0x0020,
307 	KT_CHANGED2       = 0x0040,
308 	KT_PTR_ARRAY      = 0x0080,
309 #if XNU_KERNEL_PRIVATE
310 	KT_NOEARLY       = 0x2000,
311 	KT_SLID           = 0x4000,
312 	KT_PROCESSED      = 0x8000,
313 	KT_HASH           = 0xffff0000,
314 #endif
315 });
316 
317 /*!
318  * @typedef kalloc_type_view_t
319  *
320  * @abstract
321  * A kalloc type view is a structure used to redirect callers
322  * of @c kalloc_type to a particular zone based on the signature of
323  * their type.
324  *
325  * @discussion
326  * These structures are automatically created under the hood for every
327  * @c kalloc_type and @c kfree_type callsite. They are ingested during startup
328  * and are assigned zones based on the security policy for their signature.
329  *
330  * These structs are protected by the kernel lockdown and can't be initialized
331  * dynamically. They must be created using @c KALLOC_TYPE_DEFINE() or
332  * @c kalloc_type or @c kfree_type.
333  *
334  */
335 #if XNU_KERNEL_PRIVATE
336 struct kalloc_type_view {
337 	struct zone_view        kt_zv;         /* zone view this type view redirects allocations to */
338 	const char             *kt_signature __unsafe_indexable; /* type signature (see kt_granule_t encodings) */
339 	kalloc_type_flags_t     kt_flags;      /* KT_* accounting/behavior flags */
340 	uint32_t                kt_size;       /* presumably sizeof the underlying type -- confirm */
341 	zone_t                  kt_zearly;     /* early (shared) zone; bypassed when KT_NOEARLY is set */
342 	zone_t                  kt_zsig;       /* NOTE(review): presumably the signature-selected zone -- confirm */
343 };
344 #else /* XNU_KERNEL_PRIVATE */
345 struct kalloc_type_view {
346 	struct zone_view        kt_zv;         /* zone view this type view redirects allocations to */
347 	const char             *kt_signature __unsafe_indexable; /* type signature (see kt_granule_t encodings) */
348 	kalloc_type_flags_t     kt_flags;      /* KT_* accounting/behavior flags */
349 	uint32_t                kt_size;       /* presumably sizeof the underlying type -- confirm */
350 	void                   *unused1;       /* presumably padding to match the XNU-private layout -- confirm */
351 	void                   *unused2;       /* presumably padding to match the XNU-private layout -- confirm */
352 };
353 #endif /* XNU_KERNEL_PRIVATE */
354 
355 /*
356  * The set of zones used by all kalloc heaps are defined by the constants
357  * below.
358  *
359  * KHEAP_START_SIZE: Size of the first sequential zone.
360  * KHEAP_MAX_SIZE  : Size of the last sequential zone.
361  * KHEAP_STEP_WIDTH: Number of zones created at every step (power of 2).
362  * KHEAP_STEP_START: Size of the first step.
363  * We also create some extra initial zones that don't follow the sequence
364  * for sizes 8 (on armv7 only), 16 and 32.
365  *
366  * idx step_increment   zone_elem_size
367  * 0       -                  16
368  * 1       -                  32
369  * 2       16                 48
370  * 3       16                 64
371  * 4       32                 96
372  * 5       32                 128
373  * 6       64                 192
374  * 7       64                 256
375  * 8       128                384
376  * 9       128                512
377  * 10      256                768
378  * 11      256                1024
379  * 12      512                1536
380  * 13      512                2048
381  * 14      1024               3072
382  * 15      1024               4096
383  * 16      2048               6144
384  * 17      2048               8192
385  * 18      4096               12288
386  * 19      4096               16384
387  * 20      8192               24576
388  * 21      8192               32768
389  */
390 #define kalloc_log2down(mask)   (31 - __builtin_clz(mask))  /* floor(log2(mask)); undefined for mask == 0 */
391 #define KHEAP_START_SIZE        32                          /* size of the first sequential zone */
392 #if  __x86_64__
393 #define KHEAP_MAX_SIZE          (16 * 1024)                 /* size of the last sequential zone */
394 #define KHEAP_EXTRA_ZONES       2                           /* extra initial zones outside the sequence */
395 #else
396 #define KHEAP_MAX_SIZE          (32 * 1024)                 /* size of the last sequential zone */
397 #define KHEAP_EXTRA_ZONES       2                           /* extra initial zones outside the sequence */
398 #endif
399 #define KHEAP_STEP_WIDTH        2                           /* zones created at every step (power of 2) */
400 #define KHEAP_STEP_START        16                          /* size of the first step */
401 #define KHEAP_START_IDX         kalloc_log2down(KHEAP_START_SIZE)
402 #define KHEAP_NUM_STEPS         (kalloc_log2down(KHEAP_MAX_SIZE) - \
403 	                                kalloc_log2down(KHEAP_START_SIZE))
404 #define KHEAP_NUM_ZONES         (KHEAP_NUM_STEPS * KHEAP_STEP_WIDTH + \
405 	                                KHEAP_EXTRA_ZONES)
406 
407 /*!
408  * @enum kalloc_type_version_t
409  *
410  * @brief
411  * Enum that holds versioning information for @c kalloc_type_var_view
412  *
413  * @const KT_V1
414  * Version 1
415  *
416  */
417 __options_decl(kalloc_type_version_t, uint16_t, {
418 	KT_V1             = 0x0001,
419 });
420 
421 /*!
422  * @typedef kalloc_type_var_view_t
423  *
424  * @abstract
425  * This structure is analogous to @c kalloc_type_view but handles
426  * @c kalloc_type callsites that are variable in size.
427  *
428  * @discussion
429  * These structures are automatically created under the hood for every
430  * variable sized @c kalloc_type and @c kfree_type callsite. They are ingested
431  * during startup and are assigned zones based on the security policy for
432  * their signature.
433  *
434  * These structs are protected by the kernel lockdown and can't be initialized
435  * dynamically. They must be created using @c KALLOC_TYPE_VAR_DEFINE() or
436  * @c kalloc_type or @c kfree_type.
437  *
438  */
439 struct kalloc_type_var_view {
440 	kalloc_type_version_t   kt_version;   /* view layout version (KT_V1) */
441 	uint16_t                kt_size_hdr;  /* presumably sizeof the header type (h_ty) -- confirm */
442 	/*
443 	 * Temporary: needs to be 32 bits because many structs used with
444 	 * IONew/Delete are larger than 32K.
445 	 */
446 	uint32_t                kt_size_type; /* presumably sizeof the repeating element type (e_ty) -- confirm */
447 	zone_stats_t            kt_stats;     /* per-view stats (individual stats unavailable until views are ingested) */
448 	const char             *__unsafe_indexable kt_name;  /* view name string */
449 	zone_view_t             kt_next;      /* NOTE(review): presumably chain of registered views -- confirm */
450 	zone_id_t               kt_heap_start; /* NOTE(review): presumably first zone id of the assigned heap -- confirm */
451 	uint8_t                 kt_zones[KHEAP_NUM_ZONES]; /* NOTE(review): per-size zone mapping? confirm */
452 	const char             * __unsafe_indexable kt_sig_hdr;  /* presumably signature of the header type -- confirm */
453 	const char             * __unsafe_indexable kt_sig_type; /* presumably signature of the element type -- confirm */
454 	kalloc_type_flags_t     kt_flags;     /* KT_* accounting/behavior flags */
455 };
456 
457 typedef struct kalloc_type_var_view *kalloc_type_var_view_t;
458 
459 /*!
460  * @macro KALLOC_TYPE_DECLARE
461  *
462  * @abstract
463  * (optionally) declares a kalloc type view (in a header).
464  *
465  * @param var           the name for the kalloc type view.
466  */
467 #define KALLOC_TYPE_DECLARE(var) \
468 	extern struct kalloc_type_view var[1]
469 
470 /*!
471  * @macro KALLOC_TYPE_DEFINE
472  *
473  * @abstract
474  * Defines a given kalloc type view with preferred accounting
475  *
476  * @discussion
477  * This macro allows you to define a kalloc type with private
478  * accounting. The defined kalloc_type_view can be used with
479  * kalloc_type_impl/kfree_type_impl to allocate/free memory.
480  * zalloc/zfree can also be used from inside xnu. However doing
481  * so doesn't handle freeing a NULL pointer or the use of tags.
482  *
483  * @param var           the name for the kalloc type view.
484  * @param type          the type of your allocation.
485  * @param flags         a @c KT_* flag.
486  */
487 #define KALLOC_TYPE_DEFINE(var, type, flags) \
488 	_KALLOC_TYPE_DEFINE(var, type, flags); \
489 	__ZONE_DECLARE_TYPE(var, type)
490 
491 /*!
492  * @macro KALLOC_TYPE_VAR_DECLARE
493  *
494  * @abstract
495  * (optionally) declares a kalloc type var view (in a header).
496  *
497  * @param var           the name for the kalloc type var view.
498  */
499 #define KALLOC_TYPE_VAR_DECLARE(var) \
500 	extern struct kalloc_type_var_view var[1]
501 
502 /*!
503  * @macro KALLOC_TYPE_VAR_DEFINE
504  *
505  * @abstract
506  * Defines a given kalloc type view with preferred accounting for
507  * variable sized typed allocations.
508  *
509  * @discussion
510  * As the views aren't yet being ingested, individual stats aren't
511  * available. The defined kalloc_type_var_view should be used with
512  * kalloc_type_var_impl/kfree_type_var_impl to allocate/free memory.
513  *
514  * This macro comes in 2 variants:
515  *
516  * 1. @c KALLOC_TYPE_VAR_DEFINE(var, e_ty, flags)
517  * 2. @c KALLOC_TYPE_VAR_DEFINE(var, h_ty, e_ty, flags)
518  *
519  * @param var           the name for the kalloc type var view.
520  * @param h_ty          the type of header in the allocation.
521  * @param e_ty          the type of repeating part in the allocation.
522  * @param flags         a @c KT_* flag.
523  */
524 #define KALLOC_TYPE_VAR_DEFINE(...) KALLOC_DISPATCH(KALLOC_TYPE_VAR_DEFINE, ##__VA_ARGS__)
525 
526 #ifdef XNU_KERNEL_PRIVATE
527 
528 /*
529  * These versions allow specifying the kalloc heap to allocate memory
530  * from
531  */
532 #define kheap_alloc_tag(kalloc_heap, size, flags, itag) \
533 	__kheap_alloc(kalloc_heap, size, __zone_flags_mix_tag(flags, itag), NULL)
534 #define kheap_alloc(kalloc_heap, size, flags) \
535 	kheap_alloc_tag(kalloc_heap, size, flags, VM_ALLOC_SITE_TAG())
536 
537 /*
538  * These versions should be used for allocating pure data bytes that
539  * do not contain any pointers
540  */
541 #define kalloc_data_tag(size, flags, itag) \
542 	kheap_alloc_tag(GET_KEXT_KHEAP_DATA(), size, flags, itag)
543 #define kalloc_data(size, flags) \
544 	kheap_alloc(GET_KEXT_KHEAP_DATA(), size, flags)
545 
546 #define krealloc_data_tag(elem, old_size, new_size, flags, itag) \
547 	__kheap_realloc(GET_KEXT_KHEAP_DATA(), elem, old_size, new_size, \
548 	    __zone_flags_mix_tag(flags, itag), NULL)
549 #define krealloc_data(elem, old_size, new_size, flags) \
550 	krealloc_data_tag(elem, old_size, new_size, flags, \
551 	    VM_ALLOC_SITE_TAG())
552 
553 #define kfree_data(elem, size) \
554 	kheap_free(GET_KEXT_KHEAP_DATA(), elem, size);
555 
556 #define kfree_data_addr(elem) \
557 	kheap_free_addr(GET_KEXT_KHEAP_DATA(), elem);
558 
559 extern void kheap_free_bounded(
560 	kalloc_heap_t heap,
561 	void         *addr __unsafe_indexable,
562 	vm_size_t     min_sz,
563 	vm_size_t     max_sz);
564 
565 extern void kalloc_data_require(
566 	void         *data __unsafe_indexable,
567 	vm_size_t     size);
568 
569 extern void kalloc_non_data_require(
570 	void         *data __unsafe_indexable,
571 	vm_size_t     size);
572 
573 extern bool kalloc_is_data_buffers(
574 	void         *addr,
575 	vm_size_t    size);
576 
577 #else /* XNU_KERNEL_PRIVATE */
578 
579 extern void *__sized_by(size) kalloc(
580 	vm_size_t           size) __attribute__((malloc, alloc_size(1)));
581 
582 extern void *__unsafe_indexable kalloc_data(
583 	vm_size_t           size,
584 	zalloc_flags_t      flags);
585 
586 __attribute__((malloc, alloc_size(1)))
587 static inline void *
588 __sized_by(size)
589 __kalloc_data(vm_size_t size, zalloc_flags_t flags)
590 {
591 	void *__unsafe_indexable addr = (kalloc_data)(size, flags); /* parens defeat the kalloc_data() macro below */
592 	if (flags & Z_NOFAIL) {
593 		__builtin_assume(addr != NULL); /* Z_NOFAIL allocations cannot return NULL */
594 	}
595 	return addr ? __unsafe_forge_bidi_indexable(uint8_t *, addr, size) : NULL;
596 }
597 
598 #define kalloc_data(size, fl) __kalloc_data(size, fl)
599 
600 extern void *__unsafe_indexable krealloc_data(
601 	void               *ptr __unsafe_indexable,
602 	vm_size_t           old_size,
603 	vm_size_t           new_size,
604 	zalloc_flags_t      flags);
605 
606 __attribute__((malloc, alloc_size(3)))
607 static inline void *
608 __sized_by(new_size)
609 __krealloc_data(
610 	void               *ptr __sized_by(old_size),
611 	vm_size_t           old_size,
612 	vm_size_t           new_size,
613 	zalloc_flags_t      flags)
614 {
615 	void *__unsafe_indexable addr = (krealloc_data)(ptr, old_size, new_size, flags); /* parens defeat the krealloc_data() macro below */
616 	if (flags & Z_NOFAIL) {
617 		__builtin_assume(addr != NULL); /* Z_NOFAIL allocations cannot return NULL */
618 	}
619 	return addr ? __unsafe_forge_bidi_indexable(uint8_t *, addr, new_size) : NULL;
620 }
621 
622 #define krealloc_data(ptr, old_size, new_size, fl) \
623 	__krealloc_data(ptr, old_size, new_size, fl)
624 
625 extern void kfree(
626 	void               *data __unsafe_indexable,
627 	vm_size_t           size);
628 
629 extern void kfree_data(
630 	void               *ptr __unsafe_indexable,
631 	vm_size_t           size);
632 
633 extern void kfree_data_addr(
634 	void               *ptr __unsafe_indexable);
635 
636 #endif /* !XNU_KERNEL_PRIVATE */
637 
638 /*!
639  * @macro kalloc_type
640  *
641  * @abstract
642  * Allocates element of a particular type
643  *
644  * @discussion
645  * This family of allocators segregate kalloc allocations based on their type.
646  *
647  * This macro comes in 3 variants:
648  *
649  * 1. @c kalloc_type(type, flags)
650  *    Use this macro for fixed sized allocation of a particular type.
651  *
652  * 2. @c kalloc_type(e_type, count, flags)
653  *    Use this macro for variable sized allocations that form an array,
654  *    do note that @c kalloc_type(e_type, 1, flags) is not equivalent to
655  *    @c kalloc_type(e_type, flags).
656  *
657  * 3. @c kalloc_type(hdr_type, e_type, count, flags)
658  *    Use this macro for variable sized allocations formed with
659  *    a header of type @c hdr_type followed by a variable sized array
660  *    with elements of type @c e_type, equivalent to this:
661  *
662  *    <code>
663  *    struct {
664  *        hdr_type hdr;
665  *        e_type   arr[];
666  *    }
667  *    </code>
668  *
669  * @param flags         @c zalloc_flags_t that get passed to zalloc_internal
670  */
671 #define kalloc_type(...)  KALLOC_DISPATCH(kalloc_type, ##__VA_ARGS__)
672 
673 /*!
674  * @macro kfree_type
675  *
676  * @abstract
677  * Frees element of a particular type
678  *
679  * @discussion
680  * This pairs with the @c kalloc_type() that was made to allocate this element.
681  * Arguments passed to @c kfree_type() must match the one passed at allocation
682  * time precisely.
683  *
684  * This macro comes in the same 3 variants kalloc_type() does:
685  *
686  * 1. @c kfree_type(type, elem)
687  * 2. @c kfree_type(e_type, count, elem)
688  * 3. @c kfree_type(hdr_type, e_type, count, elem)
689  *
690  * @param elem          The address of the element to free
691  */
692 #define kfree_type(...)  KALLOC_DISPATCH(kfree_type, ##__VA_ARGS__)
693 #define kfree_type_counted_by(type, count, elem) \
694 	kfree_type_counted_by_3(type, count, elem)
695 
696 #ifdef XNU_KERNEL_PRIVATE
697 #define kalloc_type_tag(...)     KALLOC_DISPATCH(kalloc_type_tag, ##__VA_ARGS__)
698 #define krealloc_type_tag(...)   KALLOC_DISPATCH(krealloc_type_tag, ##__VA_ARGS__)
699 #define krealloc_type(...)       KALLOC_DISPATCH(krealloc_type, ##__VA_ARGS__)
700 
701 /*
702  * kalloc_type_require can't be made available to kexts as the
703  * kalloc_type_view's zone could be NULL in the following cases:
704  * - Size greater than KALLOC_SAFE_ALLOC_SIZE
705  * - On macOS, if call is not in BootKC
706  * - All allocations in kext for armv7
707  */
708 #define kalloc_type_require(type, value) ({                                    \
709 	static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);         \
710 	zone_require(kt_view_var->kt_zv.zv_zone, value);                       \
711 })
712 
713 #endif
714 
715 /*!
716  * @enum kt_granule_t
717  *
718  * @brief
719  * Granule encodings used by the compiler for the type signature.
720  *
721  * @discussion
722  * Given a type, the XNU signature type system (__builtin_xnu_type_signature)
723  * produces a signature by analyzing its memory layout, in chunks of 8 bytes,
724  * which we call granules. The encoding produced for each granule is the
725  * bitwise or of the encodings of all the types of the members included
726  * in that granule.
727  *
728  * @const KT_GRANULE_PADDING
729  * Represents padding inside a record type.
730  *
731  * @const KT_GRANULE_POINTER
732  * Represents a pointer type.
733  *
734  * @const KT_GRANULE_DATA
735  * Represents a scalar type that is not a pointer.
736  *
737  * @const KT_GRANULE_DUAL
738  * Currently unused.
739  *
740  * @const KT_GRANULE_PAC
741  * Represents a pointer which is subject to PAC.
742  */
743 __options_decl(kt_granule_t, uint32_t, {
744 	KT_GRANULE_PADDING = 0,
745 	KT_GRANULE_POINTER = 1,
746 	KT_GRANULE_DATA    = 2,
747 	KT_GRANULE_DUAL    = 4,
748 	KT_GRANULE_PAC     = 8
749 });
750 
751 #define KT_GRANULE_MAX                                                \
752 	(KT_GRANULE_PADDING | KT_GRANULE_POINTER | KT_GRANULE_DATA |  \
753 	    KT_GRANULE_DUAL | KT_GRANULE_PAC)
754 
755 /*
756  * Convert a granule encoding to the index of the bit that
757  * represents such granule in the type summary.
758  *
759  * The XNU type summary (__builtin_xnu_type_summary) produces a 32-bit
760  * summary of the type signature of a given type. If the bit at index
761  * (1 << G) is set in the summary, that means that the type contains
762  * one or more granules with encoding G.
763  */
764 #define KT_SUMMARY_GRANULE_TO_IDX(g)  (1UL << (g))
765 
766 #define KT_SUMMARY_MASK_TYPE_BITS  (0xffff)
767 
768 #define KT_SUMMARY_MASK_DATA                             \
769 	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |  \
770 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA))
771 
772 #define KT_SUMMARY_MASK_PTR                              \
773 	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
774 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
775 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
776 
777 #define KT_SUMMARY_MASK_ALL_GRANULES                        \
778 	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
779 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
780 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA) |     \
781 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DUAL) |     \
782 	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
783 
784 /*!
785  * @macro KT_SUMMARY_GRANULES
786  *
787  * @abstract
788  * Return the granule type summary for a given type
789  *
790  * @discussion
791  * This macro computes the type summary of a type, and it then extracts the
792  * bits which carry information about the granules in the memory layout.
793  *
794  * Note: you should never have to use __builtin_xnu_type_summary
795  * directly, as we reserve the right to use the remaining bits with
796  * different semantics.
797  *
798  * @param type          The type to analyze
799  */
800 #define KT_SUMMARY_GRANULES(type) \
801 	(__builtin_xnu_type_summary(type) & KT_SUMMARY_MASK_TYPE_BITS)
802 
803 /*!
804  * @macro KALLOC_TYPE_SIG_CHECK
805  *
806  * @abstract
807  * Return whether a given type is only made up of granules specified in mask
808  *
809  * @param mask          Granules to check for
810  * @param type          The type to analyze
811  */
812 #define KALLOC_TYPE_SIG_CHECK(mask, type) \
813 	((KT_SUMMARY_GRANULES(type) & ~(mask)) == 0)
814 
815 /*!
816  * @macro KALLOC_TYPE_IS_DATA_ONLY
817  *
818  * @abstract
819  * Return whether a given type is considered a data-only type.
820  *
821  * @param type          The type to analyze
822  */
823 #define KALLOC_TYPE_IS_DATA_ONLY(type) \
824 	KALLOC_TYPE_SIG_CHECK(KT_SUMMARY_MASK_DATA, type)
825 
826 /*!
827  * @macro KALLOC_TYPE_HAS_OVERLAPS
828  *
829  * @abstract
830  * Return whether a given type has overlapping granules.
831  *
832  * @discussion
833  * This macro returns whether the memory layout for a given type contains
834  * overlapping granules. An overlapping granule is a granule which includes
835  * members with types that have different encodings under the XNU signature
836  * type system.
837  *
838  * @param type          The type to analyze
839  */
840 #define KALLOC_TYPE_HAS_OVERLAPS(type) \
841 	((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_ALL_GRANULES) != 0)
842 
843 /*!
844  * @macro KALLOC_TYPE_IS_COMPATIBLE_PTR
845  *
846  * @abstract
847  * Return whether pointer is compatible with a given type, in the XNU
848  * signature type system.
849  *
850  * @discussion
851  * This macro returns whether type pointed to by @c ptr is either the same
852  * type as @c type, or it has the same signature. The implementation relies
853  * on the @c __builtin_xnu_types_compatible builtin, and the value returned
854  * can be evaluated at compile time in both C and C++.
855  *
856  * Note: void pointers are treated as wildcards, and are thus compatible
857  * with any given type.
858  *
859  * @param ptr           the pointer whose type needs to be checked.
860  * @param type          the type which the pointer will be checked against.
861  */
862 #define KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type)                         \
863 	(__builtin_xnu_types_compatible(os_get_pointee_type(ptr), type) ||   \
864 	    __builtin_xnu_types_compatible(os_get_pointee_type(ptr), void))  \
865 
866 #define KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type) \
867 	_Static_assert(KALLOC_TYPE_IS_COMPATIBLE_PTR(ptr, type), \
868 	    "Pointer type is not compatible with specified type")
869 
870 
871 /*!
872  * @const KALLOC_ARRAY_SIZE_MAX
873  *
874  * @brief
875  * The maximum size that can be allocated with the @c KALLOC_ARRAY interface.
876  *
877  * @discussion
878  * This size is:
879  * - ~256M on 4k or PAC systems with 16k pages
880  * - ~1G on other 16k systems.
881  */
#if __arm64e__ || KASAN_TBI
/* arm64e (data PAC) or KASAN_TBI configurations */
#define KALLOC_ARRAY_SIZE_MAX   ((uint32_t)PAGE_MASK << PAGE_SHIFT)
#define KALLOC_ARRAY_GRANULE    32ul
#else
/* all other configurations */
#define KALLOC_ARRAY_SIZE_MAX   ((uint32_t)UINT16_MAX << PAGE_SHIFT)
#define KALLOC_ARRAY_GRANULE    16ul
#endif
889 
890 /*!
891  * @macro KALLOC_ARRAY_TYPE_DECL
892  *
893  * @brief
894  * Declares a type used as a packed kalloc array type.
895  *
896  * @discussion
897  * This macro comes in two variants
898  *
899  * - KALLOC_ARRAY_TYPE_DECL(name, e_ty)
900  * - KALLOC_ARRAY_TYPE_DECL(name, h_ty, e_ty)
901  *
902  * The first one defines an array of elements of type @c e_ty,
903  * and the second a header of type @c h_ty followed by
904  * an array of elements of type @c e_ty.
905  *
906  * Those macros will then define the type @c ${name}_t as a typedef
907  * to a non existent structure type, in order to avoid accidental
908  * dereference of those pointers.
909  *
910  * kalloc array pointers are actually pointers that in addition to encoding
911  * the array base pointer, also encode the allocation size (only sizes
912  * up to @c KALLOC_ARRAY_SIZE_MAX bytes).
913  *
914  * Such pointers can be signed with data PAC properly, which will provide
915  * integrity of both the base pointer, and its size.
916  *
917  * kalloc arrays are useful to use instead of embedding the length
918  * of the allocation inside of itself, which tends to be driven by:
919  *
920  * - a desire to not grow the outer structure holding the pointer
921  *   to this array with an extra "length" field for optional arrays,
922  *   in order to save memory (see the @c ip_requests field in ports),
923  *
924  * - a need to be able to atomically consult the size of an allocation
925  *   with respect to loading its pointer (where address dependencies
926  *   traditionally gives this property) for lockless algorithms
927  *   (see the IPC space table).
928  *
929  * Using a kalloc array is preferable for two reasons:
930  *
931  * - embedding lengths inside the allocation is self-referential
932  *   and an appetizing target for post-exploitation strategies,
933  *
934  * - having a dependent load to get to the length loses out-of-order
 *   opportunities for the CPU and is prone to back-to-back cache misses.
936  *
937  * Holding information such as a level of usage of this array
938  * within itself is fine provided those quantities are validated
939  * against the "count" (number of elements) or "size" (allocation
940  * size in bytes) of the array before use.
941  *
942  *
943  * This macro will define a series of functions:
944  *
945  * - ${name}_count_to_size() and ${name}_size_to_count()
946  *   to convert between memory sizes and array element counts
947  *   (taking the header size into account when it exists);
948  *
949  *   Note that those functions assume the count/size are corresponding
950  *   to a valid allocation size within [0, KALLOC_ARRAY_SIZE_MAX].
951  *
952  * - ${name}_next_size() to build good allocation growth policies;
953  *
954  * - ${name}_base() returning a (bound-checked indexable) pointer
955  *   to the header of the array (or its first element when there is
956  *   no header);
957  *
958  * - ${name}_begin() returning a (bound-checked indexable)
 *   pointer to the first element of the array;
960  *
961  * - ${name}_contains() to check if an element index is within
962  *   the valid range of this allocation;
963  *
964  * - ${name}_next_elem() to get the next element of an array.
965  *
966  * - ${name}_get() and ${name}_get_nocheck() to return a pointer
967  *   to a given cell of the array with (resp. without) a bound
968  *   check against the array size. The bound-checked variant
969  *   returns NULL for invalid indexes.
970  *
971  * - ${name}_alloc_by_count() and ${name}_alloc_by_size()
972  *   to allocate a new array able to hold at least that many elements
973  *   (resp. bytes).
974  *
975  * - ${name}_realloc_by_count() and ${name}_realloc_by_size()
976  *   to re-allocate a new array able to hold at least that many elements
977  *   (resp. bytes).
978  *
979  * - ${name}_free() and ${name}_free_noclear() to free such an array
980  *   (resp. without nil-ing the pointer). The non-clearing variant
981  *   is to be used only when nil-ing out the pointer is otherwise
982  *   not allowed by C (const value, unable to take address of, ...),
983  *   otherwise the normal ${name}_free() must be used.
984  */
/* Arity-based dispatch to KALLOC_ARRAY_TYPE_DECL_2/_3 (defined below). */
#define KALLOC_ARRAY_TYPE_DECL(...) \
	KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DECL, ##__VA_ARGS__)
987 
988 #if XNU_KERNEL_PRIVATE
989 
#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
	KALLOC_TYPE_VAR_DECLARE(name ## _kt_view);                              \
	typedef struct name * __unsafe_indexable name ## _t;                    \
                                                                                \
	/* bytes needed for `count` elements (header size included) */         \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _count_to_size(uint32_t count)                                  \
	{                                                                       \
	        return (uint32_t)((h_sz) + (e_sz) * count);                     \
	}                                                                       \
                                                                                \
	/* element capacity for an allocation of `size` bytes */                \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _size_to_count(vm_size_t size)                                  \
	{                                                                       \
	        return (uint32_t)((size - (h_sz)) / (e_sz));                    \
	}                                                                       \
                                                                                \
	/* allocation size (bytes) encoded in the packed array pointer */       \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _size(name ## _t array)                                         \
	{                                                                       \
	        return __kalloc_array_size((vm_address_t)array);                \
	}                                                                       \
                                                                                \
	/* growth policy: next good size holding at least min_count elems;      \
	 * returns 2 * KALLOC_ARRAY_SIZE_MAX when the limit is exceeded */      \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _next_size(                                                     \
	        uint32_t                min_count,                              \
	        vm_size_t               cur_size,                               \
	        uint32_t                vm_period)                              \
	{                                                                       \
	        vm_size_t size;                                                 \
                                                                                \
	        if (cur_size) {                                                 \
	                size = cur_size + (e_sz) - 1;                           \
	        } else {                                                        \
	                size = kt_size(h_sz, e_sz, min_count) - 1;              \
	        }                                                               \
	        size  = kalloc_next_good_size(size, vm_period);                 \
	        if (size <= KALLOC_ARRAY_SIZE_MAX) {                            \
	               return (uint32_t)size;                                   \
	        }                                                               \
	        return 2 * KALLOC_ARRAY_SIZE_MAX; /* will fail */               \
	}                                                                       \
                                                                                \
	/* current element capacity of the array */                             \
	__pure2                                                                 \
	static inline uint32_t                                                  \
	name ## _count(name ## _t array)                                        \
	{                                                                       \
	        return name ## _size_to_count(name ## _size(array));            \
	}                                                                       \
                                                                                \
	/* bound-checked pointer to the header (or first element) */            \
	__pure2                                                                 \
	static inline h_type_t *__header_bidi_indexable                         \
	name ## _base(name ## _t array)                                         \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        uint32_t     size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        (void)size;                                                     \
	        return __unsafe_forge_bidi_indexable(h_type_t *, base, size);   \
	}                                                                       \
                                                                                \
	/* bound-checked pointer to the first element */                        \
	__pure2                                                                 \
	static inline e_type_t *__header_bidi_indexable                         \
	name ## _begin(name ## _t array)                                        \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        uint32_t     size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        (void)size;                                                     \
	        return __unsafe_forge_bidi_indexable(e_type_t *, base, size);   \
	}                                                                       \
                                                                                \
	/* next element after `e`, or NULL past the last whole element */       \
	__pure2                                                                 \
	static inline e_type_t *                                                \
	name ## _next_elem(name ## _t array, e_type_t *e)                       \
	{                                                                       \
	        vm_address_t end = __kalloc_array_end((vm_address_t)array);     \
	        vm_address_t ptr = (vm_address_t)e + sizeof(e_type_t);          \
                                                                                \
	        if (ptr + sizeof(e_type_t) <= end) {                            \
	                return __unsafe_forge_single(e_type_t *, ptr);          \
	        }                                                               \
	        return NULL;                                                    \
	}                                                                       \
                                                                                \
	/* overflow-safe check that index `i` is within the allocation */       \
	__pure2                                                                 \
	static inline bool                                                      \
	name ## _contains(name ## _t array, vm_size_t i)                        \
	{                                                                       \
	        vm_size_t offs = (e_sz) + (h_sz);                               \
	        vm_size_t s;                                                    \
                                                                                \
	        if (__improbable(os_mul_and_add_overflow(i, e_sz, offs, &s))) { \
	                return false;                                           \
	        }                                                               \
	        if (__improbable(s > name ## _size(array))) {                   \
	                return false;                                           \
	        }                                                               \
	        return true;                                                    \
	}                                                                       \
                                                                                \
	__pure2                                                                 \
	static inline e_type_t * __single                                       \
	name ## _get_nocheck(name ## _t array, vm_size_t i)                     \
	{                                                                       \
	        return name ## _begin(array) + i;                               \
	}                                                                       \
                                                                                \
	/* bound-checked element access; NULL for invalid indexes */            \
	__pure2                                                                 \
	static inline e_type_t * __single                                       \
	name ## _get(name ## _t array, vm_size_t i)                             \
	{                                                                       \
	        if (__probable(name ## _contains(array, i))) {                  \
	            return name ## _get_nocheck(array, i);                      \
	        }                                                               \
	        return NULL;                                                    \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _alloc_by_size(vm_size_t size, zalloc_flags_t fl)               \
	{                                                                       \
	        fl |= Z_KALLOC_ARRAY;                                           \
	        fl = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG());             \
	        return (name ## _t)kalloc_type_var_impl(name ## _kt_view,       \
	                        size, fl, NULL);                                \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _alloc_by_count(uint32_t count, zalloc_flags_t fl)              \
	{                                                                       \
	        return name ## _alloc_by_size(kt_size(h_sz, e_sz, count), fl);  \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _realloc_by_size(                                               \
	        name ## _t              array,                                  \
	        vm_size_t               new_size,                               \
	        zalloc_flags_t          fl)                                     \
	{                                                                       \
	        vm_address_t base = __kalloc_array_base((vm_address_t)array);   \
	        vm_size_t    size = __kalloc_array_size((vm_address_t)array);   \
                                                                                \
	        fl |= Z_KALLOC_ARRAY;                                           \
	        fl = __zone_flags_mix_tag(fl, VM_ALLOC_SITE_TAG());             \
	        return (name ## _t)(krealloc_ext)(                              \
	                        kt_mangle_var_view(name ## _kt_view),           \
	                        (void *)base, size, new_size, fl, NULL).addr;   \
	}                                                                       \
                                                                                \
	static inline name ## _t                                                \
	name ## _realloc_by_count(                                              \
	        name ## _t              array,                                  \
	        uint32_t                new_count,                              \
	        zalloc_flags_t          fl)                                     \
	{                                                                       \
	        vm_size_t new_size = kt_size(h_sz, e_sz, new_count);            \
                                                                                \
	        return name ## _realloc_by_size(array, new_size, fl);           \
	}                                                                       \
                                                                                \
	/* free without nil-ing the caller's pointer (see doc above) */         \
	static inline void                                                      \
	name ## _free_noclear(name ## _t array)                                 \
	{                                                                       \
	        kfree_type_var_impl(name ## _kt_view,                           \
	            name ## _base(array), name ## _size(array));                \
	}                                                                       \
                                                                                \
	/* free and nil the caller's pointer */                                 \
	static inline void                                                      \
	name ## _free(name ## _t *arrayp)                                       \
	{                                                                       \
	        name ## _t array = *arrayp;                                     \
                                                                                \
	        *arrayp = NULL;                                                 \
	        kfree_type_var_impl(name ## _kt_view,                           \
	            name ## _base(array), name ## _size(array));                \
	}
1169 
1170 
1171 /*!
1172  * @macro KALLOC_ARRAY_TYPE_DEFINE()
1173  *
1174  * @description
1175  * Defines the data structures required to pair with a KALLOC_ARRAY_TYPE_DECL()
1176  * kalloc array declaration.
1177  *
1178  * @discussion
1179  * This macro comes in two variants
1180  *
1181  * - KALLOC_ARRAY_TYPE_DEFINE(name, e_ty, flags)
1182  * - KALLOC_ARRAY_TYPE_DEFINE(name, h_ty, e_ty, flags)
1183  *
1184  * Those must pair with the KALLOC_ARRAY_TYPE_DECL() form being used.
1185  * The flags must be valid @c kalloc_type_flags_t flags.
1186  */
/* Arity-based dispatch to KALLOC_ARRAY_TYPE_DEFINE_3/_4 (defined below). */
#define KALLOC_ARRAY_TYPE_DEFINE(...) \
	KALLOC_DISPATCH(KALLOC_ARRAY_TYPE_DEFINE, ##__VA_ARGS__)
1189 
1190 /*!
1191  * @function kalloc_next_good_size()
1192  *
1193  * @brief
1194  * Allows to implement "allocation growth policies" that work well
1195  * with the allocator.
1196  *
1197  * @discussion
1198  * Note that if the caller tracks a number of elements for an array,
1199  * where the elements are of size S, and the current count is C,
1200  * then it is possible for kalloc_next_good_size(C * S, ..) to hit
1201  * a fixed point, clients must call with a size at least of ((C + 1) * S).
1202  *
1203  * @param size         the current "size" of the allocation (in bytes).
1204  * @param period       the "period" (power of 2) for the allocation growth
1205  *                     policy once hitting the VM sized allocations.
1206  */
/* Returns a good next allocation size for growth policies (see above). */
extern vm_size_t kalloc_next_good_size(
	vm_size_t               size,
	uint32_t                period);
1210 
1211 #pragma mark kalloc_array implementation details
1212 
/* 2-argument form: array of e_type_t elements, no header. */
#define KALLOC_ARRAY_TYPE_DECL_2(name, e_type_t) \
	KALLOC_ARRAY_TYPE_DECL_(name, e_type_t, 0, e_type_t, sizeof(e_type_t))

/* 3-argument form: h_type_t header followed by e_type_t elements,
 * with the header size rounded up to the element alignment. */
#define KALLOC_ARRAY_TYPE_DECL_3(name, h_type_t, e_type_t) \
	KALLOC_ARRAY_TYPE_DECL_(name,                                           \
	    h_type_t, kt_realign_sizeof(h_type_t, e_type_t),                    \
	    e_type_t, sizeof(e_type_t))                                         \

/* kalloc_type view definitions pairing with the DECL_ forms above. */
#define KALLOC_ARRAY_TYPE_DEFINE_3(name, e_type_t, flags) \
	KALLOC_TYPE_VAR_DEFINE_3(name ## _kt_view, e_type_t, flags)

#define KALLOC_ARRAY_TYPE_DEFINE_4(name, h_type_t, e_type_t, flags) \
	KALLOC_TYPE_VAR_DEFINE_4(name ## _kt_view, h_type_t, e_type_t, flags)
1226 
/* Decodes a packed kalloc array pointer into { addr, size }. */
extern struct kalloc_result __kalloc_array_decode(
	vm_address_t            array) __pure2;
1229 
1230 __pure2
1231 static inline uint32_t
__kalloc_array_size(vm_address_t array)1232 __kalloc_array_size(vm_address_t array)
1233 {
1234 	vm_address_t size = __kalloc_array_decode(array).size;
1235 
1236 	__builtin_assume(size <= KALLOC_ARRAY_SIZE_MAX);
1237 	return (uint32_t)size;
1238 }
1239 
1240 __pure2
1241 static inline vm_address_t
__kalloc_array_base(vm_address_t array)1242 __kalloc_array_base(vm_address_t array)
1243 {
1244 	return (vm_address_t)__kalloc_array_decode(array).addr;
1245 }
1246 
1247 __pure2
1248 static inline vm_address_t
__kalloc_array_begin(vm_address_t array,vm_size_t hdr_size)1249 __kalloc_array_begin(vm_address_t array, vm_size_t hdr_size)
1250 {
1251 	return (vm_address_t)__kalloc_array_decode(array).addr + hdr_size;
1252 }
1253 
1254 __pure2
1255 static inline vm_address_t
__kalloc_array_end(vm_address_t array)1256 __kalloc_array_end(vm_address_t array)
1257 {
1258 	struct kalloc_result kr = __kalloc_array_decode(array);
1259 
1260 	return (vm_address_t)kr.addr + kr.size;
1261 }
1262 
1263 #else /* !XNU_KERNEL_PRIVATE */
1264 
/* !XNU_KERNEL_PRIVATE: only the opaque pointer typedef is provided. */
#define KALLOC_ARRAY_TYPE_DECL_(name, h_type_t, h_sz, e_type_t, e_sz) \
	typedef struct name * __unsafe_indexable name ## _t
1267 
1268 #endif /* !XNU_KERNEL_PRIVATE */
1269 #pragma mark implementation details
1270 
1271 
1272 static inline void *__unsafe_indexable
kt_mangle_var_view(kalloc_type_var_view_t kt_view)1273 kt_mangle_var_view(kalloc_type_var_view_t kt_view)
1274 {
1275 	return (void *__unsafe_indexable)((uintptr_t)kt_view | 1ul);
1276 }
1277 
1278 static inline kalloc_type_var_view_t __unsafe_indexable
kt_demangle_var_view(void * ptr)1279 kt_demangle_var_view(void *ptr)
1280 {
1281 	return (kalloc_type_var_view_t __unsafe_indexable)((uintptr_t)ptr & ~1ul);
1282 }
1283 
1284 #define kt_is_var_view(ptr)  ((uintptr_t)(ptr) & 1)
1285 
/* sizeof(h_ty) rounded up to the alignment of e_ty. */
#define kt_realign_sizeof(h_ty, e_ty) \
	((sizeof(h_ty) + _Alignof(e_ty) - 1) & -_Alignof(e_ty))
1288 
1289 static inline vm_size_t
kt_size(vm_size_t s1,vm_size_t s2,vm_size_t c2)1290 kt_size(vm_size_t s1, vm_size_t s2, vm_size_t c2)
1291 {
1292 	/* kalloc_large() will reject this size before even asking the VM  */
1293 	const vm_size_t limit = 1ull << (8 * sizeof(vm_size_t) - 1);
1294 
1295 	if (os_mul_and_add_overflow(s2, c2, s1, &s1) || (s1 & limit)) {
1296 		return limit;
1297 	}
1298 	return s1;
1299 }
1300 
/* kalloc_type(type, flags): fixed-size typed allocation through a
 * static per-expansion-site kalloc_type view. */
#define kalloc_type_2(type, flags) ({                                          \
	static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);         \
	__unsafe_forge_single(type *, kalloc_type_impl(kt_view_var, flags));   \
})
1305 
/* kfree_type(type, elem): free a single element; nils elem first. */
#define kfree_type_2(type, elem) ({                                            \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);         \
	kfree_type_impl(kt_view_var, os_ptr_load_and_erase(elem));             \
})

/* kfree_type(type, count, elem): free an array of `count` elements. */
#define kfree_type_3(type, count, elem) ({                                     \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var, os_ptr_load_and_erase(elem),          \
	    kt_size(0, sizeof(type), __kfree_count));                          \
})
1319 
// rdar://123257599
/* -fbounds-safety __counted_by variant: elem/count are copied and
 * cleared before the free, since their storage may live inside the
 * element being freed (see note below about k*free macros). */
#define kfree_type_counted_by_3(type, count_var, elem_var) ({                  \
	void *__header_bidi_indexable __elem_copy = (elem_var);                \
	__auto_type __kfree_count = (count_var);                               \
	(elem_var) = 0;                                                        \
	(count_var) = 0;                                                       \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(                                 \
	    (os_get_pointee_type(elem_var) *)NULL, type);                      \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	kfree_type_var_impl(kt_view_var, __elem_copy,                          \
	    kt_size(0, sizeof(type), __kfree_count));                          \
})
1332 
/* kfree_type(hdr_ty, e_ty, count, elem): header + array layout. */
#define kfree_type_4(hdr_ty, e_ty, count, elem) ({                             \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                   \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var,                                       \
	    os_ptr_load_and_erase(elem),                                       \
	    kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty),             \
	    __kfree_count));                                                   \
})
1343 
1344 #ifdef XNU_KERNEL_PRIVATE
/* VM-tagged single-element typed allocation (XNU-private). */
#define kalloc_type_tag_3(type, flags, tag) ({                                 \
	static _KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);         \
	__unsafe_forge_single(type *, kalloc_type_impl(kt_view_var,            \
	    Z_VM_TAG(flags, tag)));                                            \
})

/* VM-tagged array allocation of `count` elements. */
#define kalloc_type_tag_4(type, count, flags, tag) ({                          \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count),                                   \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
/* Untagged form: defaults to the callsite's VM tag. */
#define kalloc_type_3(type, count, flags)  \
	kalloc_type_tag_4(type, count, flags, VM_ALLOC_SITE_TAG())
1359 
/* VM-tagged header + array allocation. */
#define kalloc_type_tag_5(hdr_ty, e_ty, count, flags, tag) ({                  \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var,                            \
	    kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), count),     \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
/* Untagged form: defaults to the callsite's VM tag. */
#define kalloc_type_4(hdr_ty, e_ty, count, flags) \
	kalloc_type_tag_5(hdr_ty, e_ty, count, flags, VM_ALLOC_SITE_TAG())
1369 
/* VM-tagged typed reallocation from old_count to new_count elements. */
#define krealloc_type_tag_6(type, old_count, new_count, elem, flags, tag) ({   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                     \
	(type *)__krealloc_type(kt_view_var, elem,                             \
	    kt_size(0, sizeof(type), old_count),                               \
	    kt_size(0, sizeof(type), new_count),                               \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
/* Untagged form: defaults to the callsite's VM tag. */
#define krealloc_type_5(type, old_count, new_count, elem, flags) \
	krealloc_type_tag_6(type, old_count, new_count, elem, flags, \
	    VM_ALLOC_SITE_TAG())
1381 
/* VM-tagged header + array reallocation. */
#define krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem,          \
	    flags, tag) ({                                                     \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	KALLOC_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                   \
	(hdr_ty *)__krealloc_type(kt_view_var, elem,                           \
	    kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), old_count), \
	    kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), new_count), \
	    __zone_flags_mix_tag(flags, tag), NULL);                           \
})
/* Untagged form: defaults to the callsite's VM tag. */
#define krealloc_type_6(hdr_ty, e_ty, old_count, new_count, elem, flags) \
	krealloc_type_tag_7(hdr_ty, e_ty, old_count, new_count, elem, flags,   \
	    VM_ALLOC_SITE_TAG())
1395 
1396 #else /* XNU_KERNEL_PRIVATE */
1397 
/* Non-XNU-private variants: no VM tag parameter in this branch. */
#define kalloc_type_3(type, count, flags) ({                                   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count), flags, NULL);                     \
})

#define kalloc_type_4(hdr_ty, e_ty, count, flags) ({                           \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var,                            \
	    kt_size(kt_realign_sizeof(hdr_ty, e_ty), sizeof(e_ty), count),     \
	    flags, NULL);                                                      \
})
1411 
1412 #endif /* !XNU_KERNEL_PRIVATE */
1413 
1414 /*
1415  * All k*free macros set "elem" to NULL on free.
1416  *
1417  * Note: all values passed to k*free() might be in the element to be freed,
1418  *       temporaries must be taken, and the resetting to be done prior to free.
1419  */
1420 #ifdef XNU_KERNEL_PRIVATE
1421 
/* kheap_free(heap, elem, size): sized heap free; nils elem first. */
#define kheap_free(heap, elem, size) ({                                        \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	__auto_type __kfree_size = (size);                                     \
	__builtin_assume(!kt_is_var_view(__kfree_heap));                       \
	kfree_ext((void *)__kfree_heap,                                        \
	    (void *)os_ptr_load_and_erase(elem), __kfree_size);                \
})
1429 
/* kheap_free_addr(heap, elem): free without a caller-supplied size. */
#define kheap_free_addr(heap, elem) ({                                         \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	kfree_addr_ext(__kfree_heap, (void *)os_ptr_load_and_erase(elem));     \
})
1434 
/* kheap_free_bounded: free with static [min_sz, max_sz] size bounds. */
#define kheap_free_bounded(heap, elem, min_sz, max_sz) ({                      \
	static_assert(max_sz <= KALLOC_SAFE_ALLOC_SIZE);                       \
	kalloc_heap_t __kfree_heap = (heap);                                   \
	__auto_type __kfree_min_sz = (min_sz);                                 \
	__auto_type __kfree_max_sz = (max_sz);                                 \
	(kheap_free_bounded)(__kfree_heap,                                     \
	    (void *)os_ptr_load_and_erase(elem),                               \
	    __kfree_min_sz, __kfree_max_sz);                                   \
})
1444 
1445 #else /* XNU_KERNEL_PRIVATE */
1446 
/*
 * Free a data buffer `elem` of byte size `size`, and set `elem` to NULL.
 * The size is captured before `elem` is erased (see note above); the
 * parenthesized call defeats this macro and invokes the real function.
 */
#define kfree_data(elem, size) ({                                              \
	__auto_type __kfree_size = (size);                                     \
	(kfree_data)((void *)os_ptr_load_and_erase(elem), __kfree_size);       \
})
1451 
/* Size-less variant of kfree_data(): free `elem` by address, NULL it out. */
#define kfree_data_addr(elem) \
	(kfree_data_addr)((void *)os_ptr_load_and_erase(elem))
1454 
1455 #endif /* !XNU_KERNEL_PRIVATE */
1456 
/*
 * Internal helper for the -fbounds-safety kfree_data wrappers: take a wide
 * copy of the pointer, zero both the pointer variable and its paired
 * count/size variable (keeping the __sized_by/__counted_by relation
 * consistent), then free the copy with the captured byte size.
 */
#define __kfree_data_elem_count_size(elem_var, count_var, size) ({              \
	void *__header_bidi_indexable __elem_copy = (elem_var);                 \
	(elem_var) = 0;                                                         \
	(count_var) = 0;                                                        \
	kfree_data(__elem_copy, size);                                          \
})
1463 
/*
 * Address-based counterpart of __kfree_data_elem_count_size(): zero the
 * pointer and its paired count variable, then free the copied address.
 */
#define __kfree_data_addr_count_size(addr_var, count_var) ({                    \
	void *__header_bidi_indexable __addr_copy = (addr_var);                 \
	(addr_var) = 0;                                                         \
	(count_var) = 0;                                                        \
	kfree_data_addr(__addr_copy);                                           \
})
1470 
1471 /*
1472  * kfree_data_sized_by is the kfree_data equivalent that is compatible with
1473  * -fbounds-safety's __sized_by pointers. Consistently with the -fbounds-safety
1474  * semantics, `size` must be the byte size of the allocation that is freed (for
1475  * instance, 20 for an array of 5 uint32_t).
1476  */
/* `size` is zeroed along with `elem` by the helper, so capture it first. */
#define kfree_data_sized_by(elem, size) ({                                      \
	__auto_type __size = (size);                                            \
	__kfree_data_elem_count_size(elem, size, __size);                       \
})
1481 
/* __sized_by-compatible address-based free: zeroes both `addr` and `size`. */
#define kfree_data_addr_sized_by(addr, size) ({                                 \
	__kfree_data_addr_count_size(addr, size);                               \
})
1485 
1486 /*
1487  * kfree_data_counted_by is the kfree_data equivalent that is compatible with
1488  * -fbounds-safety's __counted_by pointers. Consistently with the
1489  * -fbounds-safety semantics, `count` must be the object count of the allocation
1490  * that is freed (for instance, 5 for an array of 5 uint32_t).
1491  */
/* Byte size is derived from `count` before the helper zeroes it. */
#define kfree_data_counted_by(elem, count) ({                                  \
	__auto_type __size = (count) * sizeof(*(elem));                        \
	__kfree_data_elem_count_size(elem, count, __size);                     \
})
1496 
/*
 * __kalloc_no_kasan: disables address-sanitizer instrumentation on the
 * annotated definition when building with ASan; expands to nothing
 * otherwise.  Applied to the kalloc_type view definitions below.
 */
#if __has_feature(address_sanitizer)
# define __kalloc_no_kasan __attribute__((no_sanitize("address")))
#else
# define __kalloc_no_kasan
#endif
1502 
/*
 * Token-pasting and arity-dispatch helpers.
 *
 * KALLOC_COUNT_ARGS(...) expands to _0 .. _9 depending on how many
 * variadic arguments were passed; KALLOC_DISPATCH(base, ...) then
 * invokes base_N(...).  The _R copies exist so a dispatch can appear
 * inside the expansion of another dispatch (the preprocessor will not
 * re-expand a macro that is already being expanded).
 */
#define KALLOC_CONCAT(x, y) __CONCAT(x,y)

#define KALLOC_COUNT_ARGS1(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, N, ...) N
#define KALLOC_COUNT_ARGS(...) \
	KALLOC_COUNT_ARGS1(, ##__VA_ARGS__, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0)
#define KALLOC_DISPATCH1(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH(base, ...) \
	KALLOC_DISPATCH1(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
#define KALLOC_DISPATCH1_R(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH_R(base, ...) \
	KALLOC_DISPATCH1_R(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
1514 
/*
 * Identifier for the static kalloc_type view emitted by the kalloc_type*()
 * macros; made unique per call site by appending __LINE__.
 */
#define kt_view_var \
	KALLOC_CONCAT(kalloc_type_view_, __LINE__)
1517 
#ifndef __BUILDING_XNU_LIBRARY__
/* Kernel builds place kalloc_type views in the read-only __DATA_CONST. */
#define KALLOC_TYPE_SEGMENT "__DATA_CONST"
#else /* __BUILDING_XNU_LIBRARY__ */
/* Special segments are not used when building for user-mode */
#define KALLOC_TYPE_SEGMENT "__DATA"
#endif /* __BUILDING_XNU_LIBRARY__ */
1524 
1525 /*
1526  * When kalloc_type_impl is called from xnu, it calls zalloc_flags
1527  * directly and doesn't redirect zone-less sites to kheap_alloc.
1528  * Passing a size larger than KHEAP_MAX_SIZE for these allocations will
1529  * lead to a panic as the zone is null. Therefore assert that size
1530  * is less than KALLOC_SAFE_ALLOC_SIZE.
1531  */
#if XNU_KERNEL_PRIVATE || defined(KALLOC_TYPE_STRICT_SIZE_CHECK)
/* See block comment above: oversized types would panic at allocation time. */
#define KALLOC_TYPE_SIZE_CHECK(size)                           \
	_Static_assert(size <= KALLOC_SAFE_ALLOC_SIZE,             \
	"type is too large");
#else
/* No compile-time size check outside xnu unless explicitly requested. */
#define KALLOC_TYPE_SIZE_CHECK(size)
#endif
1539 
/*
 * KALLOC_TYPE_CHECK(check, type...): true when every listed type matches
 * the signature summary mask `check` (e.g. KT_SUMMARY_MASK_DATA or
 * KT_SUMMARY_MASK_PTR).  Dispatches on total argument count, so _2 takes
 * one type and _3 takes two.
 */
#define KALLOC_TYPE_CHECK_2(check, type) \
	(KALLOC_TYPE_SIG_CHECK(check, type))

#define KALLOC_TYPE_CHECK_3(check, type1, type2) \
	(KALLOC_TYPE_SIG_CHECK(check, type1) && \
	    KALLOC_TYPE_SIG_CHECK(check, type2))

#define KALLOC_TYPE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_CHECK, ##__VA_ARGS__)
1549 
/*
 * KALLOC_TYPE_VM_SIZE_CHECK(type...): true when the combined sizeof of the
 * listed types exceeds KHEAP_MAX_SIZE (such allocations get the KT_VM flag
 * below).  Dispatches on the number of types (1 or 2).
 */
#define KALLOC_TYPE_VM_SIZE_CHECK_1(type) \
	(sizeof(type) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK_2(type1, type2) \
	(sizeof(type1) + sizeof(type2) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_VM_SIZE_CHECK, ##__VA_ARGS__)
1558 
/*
 * Compile-time check that a data-only element array is never co-allocated
 * contiguously after a header type that is not itself data-only.
 */
#define KALLOC_TYPE_TRAILING_DATA_CHECK(hdr_ty, elem_ty)     \
	_Static_assert((KALLOC_TYPE_IS_DATA_ONLY(hdr_ty) ||  \
	    !KALLOC_TYPE_IS_DATA_ONLY(elem_ty)),             \
	"cannot allocate data-only array of " #elem_ty       \
	" contiguously to " #hdr_ty)
1564 
/*
 * Cast the result of OR-ing flag bits back to kalloc_type_flags_t
 * (C++ will not implicitly convert the resulting int back to the enum).
 */
#ifdef __cplusplus
#define KALLOC_TYPE_CAST_FLAGS(flags) static_cast<kalloc_type_flags_t>(flags)
#else
#define KALLOC_TYPE_CAST_FLAGS(flags) (kalloc_type_flags_t)(flags)
#endif
1570 
1571 /*
1572  * Don't emit signature if type is "data-only" or is large enough that it
1573  * uses the VM.
1574  *
1575  * Note: sig_type is the type you want to emit signature for. The variable
1576  * args can be used to provide other types in the allocation, to make the
1577  * decision of whether to emit the signature.
1578  */
/* Expands to "" (no signature) for data-only or VM-sized allocations. */
#define KALLOC_TYPE_EMIT_SIG(sig_type, ...)                              \
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, sig_type, ##__VA_ARGS__) || \
	KALLOC_TYPE_VM_SIZE_CHECK(sig_type, ##__VA_ARGS__))?                 \
	"" : __builtin_xnu_type_signature(sig_type)
1583 
1584 /*
1585  * Kalloc type flags are adjusted to indicate if the type is "data-only" or
1586  * will use the VM or is a pointer array.
1587  *
1588  * There is no need to create another sig for data shared. We expect shareable
1589  * allocations to be marked explicitly by the callers, by using the dedicated
1590  * allocation API for shared data.
1591  */
/* OR in KT_DATA_ONLY / KT_PTR_ARRAY / KT_VM as derived from the type list,
 * plus the KT_CHANGED markers, and cast back to kalloc_type_flags_t. */
#define KALLOC_TYPE_ADJUST_FLAGS(flags, ...)                                 \
	KALLOC_TYPE_CAST_FLAGS((flags | KT_CHANGED | KT_CHANGED2 |               \
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, __VA_ARGS__)? KT_DATA_ONLY: 0) |\
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_PTR, __VA_ARGS__)? KT_PTR_ARRAY: 0) | \
	(KALLOC_TYPE_VM_SIZE_CHECK(__VA_ARGS__)? KT_VM : 0)))
1597 
/*
 * Emit a fixed-size kalloc_type view for `type`: an initialized
 * struct kalloc_type_view placed in the __kalloc_type section of
 * KALLOC_TYPE_SEGMENT, recording the site name, the flags adjusted for
 * the type (data-only / pointer-array / VM), the element size, and the
 * compiler-generated type signature.  KASan instrumentation is disabled
 * on the definition, and the size is checked at compile time.
 */
#define _KALLOC_TYPE_DEFINE(var, type, flags)                       \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_type, "      \
	    "regular, live_support")                                    \
	struct kalloc_type_view var[1] = { {                            \
	    .kt_zv.zv_name = "site." #type,                             \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size = sizeof(type),                                    \
	    .kt_signature = KALLOC_TYPE_EMIT_SIG(type),                 \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1609 
/*
 * Variable-size counterpart of _KALLOC_TYPE_DEFINE for a single element
 * type, emitted into the __kalloc_var section (kalloc_type_var_view).
 */
#define KALLOC_TYPE_VAR_DEFINE_3(var, type, flags)                  \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, "       \
	    "regular, live_support")                                    \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #type,                                   \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type),                  \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1622 
/*
 * Variable-size view for a fixed header `hdr` followed by an array of
 * `type`.  Records sizes and signatures for both parts (each signature
 * decision considers the other type too), checks both sizes, and rejects
 * a data-only array trailing a non-data header.
 */
#define KALLOC_TYPE_VAR_DEFINE_4(var, hdr, type, flags)             \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var, "       \
	    "regular, live_support")                                    \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #hdr "." #type,                          \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, hdr, type),     \
	    .kt_size_hdr = sizeof(hdr),                                 \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_hdr = KALLOC_TYPE_EMIT_SIG(hdr, type),              \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type, hdr),             \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(hdr));                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));                           \
	KALLOC_TYPE_TRAILING_DATA_CHECK(hdr, type);
1639 
1640 #ifndef XNU_KERNEL_PRIVATE
1641 /*
1642  * This macro is currently used by AppleImage4
1643  */
/* Define a static per-site fixed-size view (see _KALLOC_TYPE_DEFINE). */
#define KALLOC_TYPE_DEFINE_SITE(var, type, flags)       \
	static _KALLOC_TYPE_DEFINE(var, type, flags)
1646 
1647 #endif /* !XNU_KERNEL_PRIVATE */
1648 
1649 #ifdef XNU_KERNEL_PRIVATE
1650 
1651 extern struct kalloc_result kalloc_ext(
1652 	void                   *kheap_or_kt_view __unsafe_indexable,
1653 	vm_size_t               size,
1654 	zalloc_flags_t          flags,
1655 	void                   *site);
1656 
1657 static inline struct kalloc_result
__kalloc_ext(void * kheap_or_kt_view __unsafe_indexable,vm_size_t size,zalloc_flags_t flags,void * site)1658 __kalloc_ext(
1659 	void                   *kheap_or_kt_view __unsafe_indexable,
1660 	vm_size_t               size,
1661 	zalloc_flags_t          flags,
1662 	void                   *site)
1663 {
1664 	struct kalloc_result kr;
1665 
1666 	kr    = (kalloc_ext)(kheap_or_kt_view, size, flags, site);
1667 	if (flags & Z_NOFAIL) {
1668 		__builtin_assume(kr.addr != NULL);
1669 	}
1670 	return kr;
1671 }
1672 
1673 #define kalloc_ext(hov, size, fl, site) __kalloc_ext(hov, size, fl, site)
1674 
1675 extern void kfree_ext(
1676 	void                   *kheap_or_kt_view __unsafe_indexable,
1677 	void                   *addr __unsafe_indexable,
1678 	vm_size_t               size);
1679 
1680 // rdar://87559422
1681 static inline void *__unsafe_indexable
kalloc_type_var_impl(kalloc_type_var_view_t kt_view,vm_size_t size,zalloc_flags_t flags,void * site)1682 kalloc_type_var_impl(
1683 	kalloc_type_var_view_t    kt_view,
1684 	vm_size_t                 size,
1685 	zalloc_flags_t            flags,
1686 	void                      *site)
1687 {
1688 	struct kalloc_result kr;
1689 
1690 	kr = kalloc_ext(kt_mangle_var_view(kt_view), size, flags, site);
1691 	return kr.addr;
1692 }
1693 
1694 static inline void
kfree_type_var_impl(kalloc_type_var_view_t kt_view,void * ptr __unsafe_indexable,vm_size_t size)1695 kfree_type_var_impl(
1696 	kalloc_type_var_view_t      kt_view,
1697 	void                       *ptr __unsafe_indexable,
1698 	vm_size_t                   size)
1699 {
1700 	kfree_ext(kt_mangle_var_view(kt_view), ptr, size);
1701 }
1702 
1703 #else /* XNU_KERNEL_PRIVATE */
1704 
1705 extern void *__unsafe_indexable kalloc_type_var_impl(
1706 	kalloc_type_var_view_t  kt_view,
1707 	vm_size_t               size,
1708 	zalloc_flags_t          flags,
1709 	void                   *site);
1710 
1711 extern void kfree_type_var_impl(
1712 	kalloc_type_var_view_t  kt_view,
1713 	void                   *ptr __unsafe_indexable,
1714 	vm_size_t               size);
1715 
1716 #endif /* !XNU_KERNEL_PRIVATE */
1717 
1718 __attribute__((malloc, alloc_size(2)))
1719 static inline void *
__sized_by(size)1720 __sized_by(size)
1721 __kalloc_type_var_impl(
1722 	kalloc_type_var_view_t  kt_view,
1723 	vm_size_t               size,
1724 	zalloc_flags_t          flags,
1725 	void                   *site)
1726 {
1727 	void *__unsafe_indexable addr;
1728 
1729 	addr = (kalloc_type_var_impl)(kt_view, size, flags, site);
1730 	if (flags & Z_NOFAIL) {
1731 		__builtin_assume(addr != NULL);
1732 	}
1733 	return __unsafe_forge_bidi_indexable(void *, addr, size);
1734 }
1735 
1736 #define kalloc_type_var_impl(ktv, size, fl, site) \
1737 	__kalloc_type_var_impl(ktv, size, fl, site)
1738 
1739 extern void *kalloc_type_impl_external(
1740 	kalloc_type_view_t  kt_view,
1741 	zalloc_flags_t      flags);
1742 
1743 extern void kfree_type_impl_external(
1744 	kalloc_type_view_t  kt_view,
1745 	void               *ptr __unsafe_indexable);
1746 
1747 extern void *OSObject_typed_operator_new(
1748 	kalloc_type_view_t  ktv,
1749 	vm_size_t           size);
1750 
1751 extern void OSObject_typed_operator_delete(
1752 	kalloc_type_view_t  ktv,
1753 	void               *mem __unsafe_indexable,
1754 	vm_size_t           size);
1755 
1756 #ifdef XNU_KERNEL_PRIVATE
1757 #pragma GCC visibility push(hidden)
1758 
/*
 * A kalloc_type view's kt_size field packs the element size into the low
 * 24 bits; the high byte holds an index (per KALLOC_TYPE_IDX_SHIFT/MASK).
 */
#define KALLOC_TYPE_SIZE_MASK  0xffffff
#define KALLOC_TYPE_IDX_SHIFT  24
#define KALLOC_TYPE_IDX_MASK   0xff

/* Extract the element size from a packed kt_size value. */
static inline uint32_t
kalloc_type_get_size(uint32_t kt_size)
{
	return kt_size & KALLOC_TYPE_SIZE_MASK;
}
1768 
1769 static inline uint32_t
kalloc_type_size(kalloc_type_view_t ktv)1770 kalloc_type_size(kalloc_type_view_t ktv)
1771 {
1772 	return kalloc_type_get_size(ktv->kt_size);
1773 }
1774 
1775 extern bool IOMallocType_from_vm(
1776 	kalloc_type_view_t ktv);
1777 
1778 /* Used by kern_os_* and operator new */
1779 KALLOC_HEAP_DECLARE(KERN_OS_MALLOC);
1780 
1781 extern void kheap_startup_init(kalloc_heap_t heap);
1782 extern void kheap_var_startup_init(kalloc_heap_t heap);
1783 
1784 __attribute__((malloc, alloc_size(2)))
1785 static inline void *
__sized_by(size)1786 __sized_by(size)
1787 __kheap_alloc(
1788 	kalloc_heap_t           kheap,
1789 	vm_size_t               size,
1790 	zalloc_flags_t          flags,
1791 	void                   *site)
1792 {
1793 	struct kalloc_result kr;
1794 	__builtin_assume(!kt_is_var_view(kheap));
1795 	kr = kalloc_ext(kheap, size, flags, site);
1796 	return __unsafe_forge_bidi_indexable(void *, kr.addr, size);
1797 }
1798 
1799 extern struct kalloc_result krealloc_ext(
1800 	void                   *kheap_or_kt_view __unsafe_indexable,
1801 	void                   *addr __unsafe_indexable,
1802 	vm_size_t               old_size,
1803 	vm_size_t               new_size,
1804 	zalloc_flags_t          flags,
1805 	void                   *site);
1806 
1807 static inline struct kalloc_result
__krealloc_ext(void * kheap_or_kt_view __unsafe_indexable,void * addr __sized_by (old_size),vm_size_t old_size,vm_size_t new_size,zalloc_flags_t flags,void * site)1808 __krealloc_ext(
1809 	void                   *kheap_or_kt_view __unsafe_indexable,
1810 	void                   *addr __sized_by(old_size),
1811 	vm_size_t               old_size,
1812 	vm_size_t               new_size,
1813 	zalloc_flags_t          flags,
1814 	void                   *site)
1815 {
1816 	struct kalloc_result kr = (krealloc_ext)(kheap_or_kt_view, addr, old_size,
1817 	    new_size, flags, site);
1818 	if (flags & Z_NOFAIL) {
1819 		__builtin_assume(kr.addr != NULL);
1820 	}
1821 	return kr;
1822 }
1823 
1824 #define krealloc_ext(hov, addr, old_size, new_size, fl, site) \
1825 	__krealloc_ext(hov, addr, old_size, new_size, fl, site)
1826 
1827 __attribute__((malloc, alloc_size(4)))
1828 static inline void *
__sized_by(new_size)1829 __sized_by(new_size)
1830 __kheap_realloc(
1831 	kalloc_heap_t           kheap,
1832 	void                   *addr __sized_by(old_size),
1833 	vm_size_t               old_size,
1834 	vm_size_t               new_size,
1835 	zalloc_flags_t          flags,
1836 	void                   *site)
1837 {
1838 	struct kalloc_result kr;
1839 	__builtin_assume(!kt_is_var_view(kheap));
1840 	kr = krealloc_ext(kheap, addr, old_size, new_size, flags, site);
1841 	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1842 }
1843 
1844 __attribute__((malloc, alloc_size(4)))
1845 static inline void *
__sized_by(new_size)1846 __sized_by(new_size)
1847 __krealloc_type(
1848 	kalloc_type_var_view_t  kt_view,
1849 	void                   *addr __sized_by(old_size),
1850 	vm_size_t               old_size,
1851 	vm_size_t               new_size,
1852 	zalloc_flags_t          flags,
1853 	void                   *site)
1854 {
1855 	struct kalloc_result kr;
1856 	kr = krealloc_ext(kt_mangle_var_view(kt_view), addr,
1857 	    old_size, new_size, flags, site);
1858 	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
1859 }
1860 
1861 extern void kfree_addr_ext(
1862 	kalloc_heap_t           kheap,
1863 	void                   *addr __unsafe_indexable);
1864 
1865 extern zone_t kalloc_zone_for_size(
1866 	zone_id_t             zid,
1867 	vm_size_t             size);
1868 
1869 extern vm_size_t kalloc_large_max;
1870 SCALABLE_COUNTER_DECLARE(kalloc_large_count);
1871 SCALABLE_COUNTER_DECLARE(kalloc_large_total);
1872 
1873 extern void kern_os_typed_free(
1874 	kalloc_type_view_t    ktv,
1875 	void                 *addr __unsafe_indexable,
1876 	vm_size_t             esize);
1877 
1878 #pragma GCC visibility pop
#endif  /* XNU_KERNEL_PRIVATE */
1880 
1881 extern void kern_os_zfree(
1882 	zone_t        zone,
1883 	void         *addr __unsafe_indexable,
1884 	vm_size_t     size);
1885 
1886 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1887 
1888 #endif  /* _KERN_KALLOC_H_ */
1889 
1890 #endif  /* KERNEL_PRIVATE */
1891