xref: /xnu-8020.101.4/osfmk/kern/kalloc.h (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #ifdef  KERNEL_PRIVATE
58 
59 #ifndef _KERN_KALLOC_H_
60 #define _KERN_KALLOC_H_
61 
62 #include <mach/machine/vm_types.h>
63 #include <mach/boolean.h>
64 #include <mach/vm_types.h>
65 #include <kern/zalloc.h>
66 #include <libkern/section_keywords.h>
67 #include <os/alloc_util.h>
68 
69 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
70 
71 /*!
72  * @const KALLOC_SAFE_ALLOC_SIZE
73  *
74  * @brief
75  * The maximum allocation size that is safe to allocate with Z_NOFAIL in kalloc.
76  */
77 #if __LP64__
78 #define KALLOC_SAFE_ALLOC_SIZE  (16u * 1024u)
79 #else
80 #define KALLOC_SAFE_ALLOC_SIZE  (8u * 1024u)
81 #endif
82 
83 #if XNU_KERNEL_PRIVATE
84 /*!
85  * @typedef kalloc_heap_t
86  *
87  * @abstract
88  * A kalloc heap view represents a sub-accounting context
89  * for a given kalloc heap.
90  */
typedef struct kalloc_heap {
	struct kheap_zones *kh_zones;        /* size-class zones backing this heap */
	zone_stats_t        kh_stats;        /* per-view accounting (see KALLOC_HEAP_DEFINE) */
	const char         *kh_name __unsafe_indexable; /* view name, for diagnostics */
	struct kalloc_heap *kh_next;         /* next view; presumably chains views on the same base heap — confirm */
	zone_kheap_id_t     kh_heap_id;      /* KHEAP_ID_* constant of the backing heap */
	vm_map_t            kh_large_map;    /* map used for allocations too big for the zones */
	vm_map_t            kh_fallback_map; /* fallback map; NOTE(review): exact fallback policy lives in kalloc.c */
	vm_tag_t            kh_tag;          /* VM tag applied to this view's allocations */
} *kalloc_heap_t;
101 
102 /*!
103  * @macro KALLOC_HEAP_DECLARE
104  *
105  * @abstract
106  * (optionally) declare a kalloc heap view in a header.
107  *
108  * @discussion
109  * Unlike kernel zones, new full blown heaps cannot be instantiated.
110  * However new accounting views of the base heaps can be made.
111  */
112 #define KALLOC_HEAP_DECLARE(var) \
113 	extern struct kalloc_heap var[1]
114 
115 /**
116  * @const KHEAP_ANY
117  *
118  * @brief
119  * A value that represents either the default or kext heap for codepaths that
120  * need to allow @c kheap_free() to either one.
121  *
122  * @discussion
123  * When the memory provenance is not known, this value can be used to free
124  * memory indiscriminately.
125  *
126  * Note: code using this constant can likely be used as a gadget to free
127  * arbitrary memory and its use is strongly discouraged.
128  */
129 #define KHEAP_ANY  ((struct kalloc_heap *)NULL)
130 
131 /**
132  * @const KHEAP_DATA_BUFFERS
133  *
134  * @brief
135  * The builtin heap for bags of pure bytes.
136  *
137  * @discussion
138  * This set of kalloc zones should contain pure bags of bytes with no pointers
139  * or length/offset fields.
140  *
141  * The zones forming the heap aren't sequestered from each other, however the
142  * entire heap lives in a different submap from any other kernel allocation.
143  *
144  * The main motivation behind this separation is due to the fact that a lot of
145  * these objects have been used by attackers to spray the heap to make it more
146  * predictable while exploiting use-after-frees or overflows.
147  *
148  * Common attributes that make these objects useful for spraying includes
149  * control of:
150  * - Data in allocation
151  * - Time of alloc and free (lifetime)
152  * - Size of allocation
153  */
154 KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS);
155 
156 /**
157  * @const KHEAP_KEXT
158  *
159  * @brief
160  * The builtin heap for allocations made by kexts.
161  *
162  * @discussion
163  * This set of kalloc zones should contain allocations from kexts and the
164  * individual zones in this heap are sequestered.
165  */
166 KALLOC_HEAP_DECLARE(KHEAP_KEXT);
167 
168 /**
169  * @const KHEAP_DEFAULT
170  *
171  * @brief
172  * The builtin default core kernel kalloc heap.
173  *
174  * @discussion
175  * This set of kalloc zones should contain other objects that don't have their
176  * own security mitigations. The individual zones are themselves sequestered.
177  */
178 KALLOC_HEAP_DECLARE(KHEAP_DEFAULT);
179 
180 /**
181  * @const KHEAP_KT_VAR
182  *
183  * @brief
184  * Temporary heap for variable sized kalloc type allocations
185  *
186  * @discussion
187  * This heap will be removed when logic for kalloc_type_var_views is added
188  *
189  */
190 KALLOC_HEAP_DECLARE(KHEAP_KT_VAR);
191 
192 /*!
193  * @macro KALLOC_HEAP_DEFINE
194  *
195  * @abstract
196  * Defines a given kalloc heap view and what it points to.
197  *
198  * @discussion
199  * Kalloc heaps are views over one of the pre-defined builtin heaps
200  * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating
201  * a new one allows for accounting of allocations through this view.
202  *
203  * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase,
204  * as the last rank. If views on zones are created, these must have been
205  * created before this stage.
206  *
207  * @param var           the name for the zone view.
208  * @param name          a string describing the zone view.
209  * @param heap_id       a @c KHEAP_ID_* constant.
210  */
/*
 * Note: @c var is a 1-element array so that its name decays to a
 * @c kalloc_heap_t at use sites; the view is registered through
 * kheap_startup_init() at the last rank of the ZALLOC startup phase.
 */
#define KALLOC_HEAP_DEFINE(var, name, heap_id) \
	SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \
	    .kh_name = name, \
	    .kh_heap_id = heap_id, \
	} }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, kheap_startup_init, var)
217 
218 
219 /*
220  * Allocations of type SO_NAME are known to not have pointers for
221  * most platforms -- for macOS this is not guaranteed
222  */
223 #if XNU_TARGET_OS_OSX
224 #define KHEAP_SONAME KHEAP_DEFAULT
225 #else /* XNU_TARGET_OS_OSX */
226 #define KHEAP_SONAME KHEAP_DATA_BUFFERS
227 #endif /* XNU_TARGET_OS_OSX */
228 
229 #endif/* XNU_KERNEL_PRIVATE */
230 
231 /*!
232  * @enum kalloc_type_flags_t
233  *
234  * @brief
235  * Flags that can be passed to @c KALLOC_TYPE_DEFINE
236  *
237  * @discussion
238  * These flags can be used to request for a specific accounting
239  * behavior.
240  *
241  * @const KT_DEFAULT
242  * Passing this flag will provide default accounting behavior
243  * i.e shared accounting unless toggled with KT_OPTIONS_ACCT is
244  * set in kt boot-arg.
245  *
246  * @const KT_PRIV_ACCT
247  * Passing this flag will provide individual stats for your
248  * @c kalloc_type_view that is defined.
249  *
250  * @const KT_SHARED_ACCT
251  * Passing this flag will accumulate stats as a part of the
252  * zone that your @c kalloc_type_view points to.
253  *
254  * @const KT_DATA_ONLY
255  * Represents that the type is "data-only". Adopters should not
256  * set this flag manually, it is meant for the compiler to set
257  * automatically when KALLOC_TYPE_CHECK(DATA) passes.
258  *
259  * @const KT_VM
260  * Represents that the type is large enough to use the VM. Adopters
261  * should not set this flag manually, it is meant for the compiler
262  * to set automatically when KALLOC_TYPE_VM_SIZE_CHECK passes.
263  *
264  * @const KT_PTR_ARRAY
265  * Represents that the type is an array of pointers. Adopters should not
266  * set this flag manually, it is meant for the compiler to set
267  * automatically when KALLOC_TYPE_CHECK(PTR) passes.
268  *
269  * @const KT_CHANGED*
270  * Represents a change in the version of the kalloc_type_view. This
271  * is required in order to decouple requiring kexts to be rebuilt to
272  * use the new definitions right away. This flag should not be used
273  * manually at a callsite, it is meant for internal use only. Future
274  * changes to the kalloc_type_view definition should toggle this flag.
275  *
276  #if XNU_KERNEL_PRIVATE
277  *
278  * @const KT_SLID
279  * To indicate that strings in the view were slid during early boot.
280  *
281  * @const KT_PROCESSED
282  * This flag is set once the view is parsed during early boot. Views
283  * that are not in BootKC on macOS aren't parsed and therefore will
284  * not have this flag set. The runtime can use this as an indication
285  * to appropriately redirect the call.
286  *
287  * @const KT_VM_TAG_MASK
288  * Represents bits in which a vm_tag_t for the allocation can be passed.
289  * (used for the zone tagging debugging feature).
290  #endif
291  */
__options_decl(kalloc_type_flags_t, uint32_t, {
	KT_DEFAULT        = 0x0001, /* default accounting (shared unless kt boot-arg says otherwise) */
	KT_PRIV_ACCT      = 0x0002, /* individual stats for this view */
	KT_SHARED_ACCT    = 0x0004, /* stats accumulated on the backing zone */
	KT_DATA_ONLY      = 0x0008, /* compiler-set: type is data-only */
	KT_VM             = 0x0010, /* compiler-set: large enough to use the VM */
	KT_CHANGED        = 0x0020, /* view-layout version bump (internal use only) */
	KT_CHANGED2       = 0x0040, /* view-layout version bump (internal use only) */
	KT_PTR_ARRAY      = 0x0080, /* compiler-set: type is an array of pointers */
#if XNU_KERNEL_PRIVATE
	KT_SLID           = 0x4000, /* strings in the view were slid at early boot */
	KT_PROCESSED      = 0x8000, /* view was parsed at early boot */
	/** used to propagate vm tags for -zt */
	KT_VM_TAG_MASK    = 0xffff0000,
#endif
});
308 
309 /*!
310  * @typedef kalloc_type_view_t
311  *
312  * @abstract
313  * A kalloc type view is a structure used to redirect callers
314  * of @c kalloc_type to a particular zone based on the signature of
315  * their type.
316  *
317  * @discussion
318  * These structures are automatically created under the hood for every
319  * @c kalloc_type and @c kfree_type callsite. They are ingested during startup
320  * and are assigned zones based on the security policy for their signature.
321  *
322  * These structs are protected by the kernel lockdown and can't be initialized
323  * dynamically. They must be created using @c KALLOC_TYPE_DEFINE() or
324  * @c kalloc_type or @c kfree_type.
325  *
326  */
struct kalloc_type_view {
	struct zone_view        kt_zv;       /* backing zone view (zone assigned at startup) */
	const char             *kt_signature __unsafe_indexable; /* compiler-produced type signature */
	kalloc_type_flags_t     kt_flags;    /* KT_* flags (and vm tag bits, see KT_VM_TAG_MASK) */
	uint32_t                kt_size;     /* size of the allocated type — presumably sizeof(type); confirm in _KALLOC_TYPE_DEFINE */
	void                   *unused1;     /* reserved */
	void                   *unused2;     /* reserved */
};

typedef struct kalloc_type_view *kalloc_type_view_t;
337 
338 /*
339  * "Heaps" or sets of zones, used for variable size kalloc_type allocations
340  * are defined by the constants below.
341  *
342  * KHEAP_START_SIZE: Size of the first sequential zone.
343  * KHEAP_MAX_SIZE  : Size of the last sequential zone.
344  * KHEAP_STEP_WIDTH: Number of zones created at every step (power of 2).
345  * KHEAP_STEP_START: Size of the first step.
346  * We also create some extra initial zones that don't follow the sequence
347  * for sizes 8 (on armv7 only), 16 and 32.
348  *
349  * idx step_increment   zone_elem_size
350  * 0       -                  16
351  * 1       -                  32
352  * 2       16                 48
353  * 3       16                 64
354  * 4       32                 96
355  * 5       32                 128
356  * 6       64                 192
357  * 7       64                 256
358  * 8       128                384
359  * 9       128                512
360  * 10      256                768
361  * 11      256                1024
362  * 12      512                1536
363  * 13      512                2048
364  * 14      1024               3072
365  * 15      1024               4096
366  * 16      2048               6144
367  * 17      2048               8192
368  * 18      4096               12288
369  * 19      4096               16384
370  * 20      8192               24576
371  * 21      8192               32768
372  */
/* Index of the most significant set bit (floor(log2)); mask must be non-zero. */
#define kalloc_log2down(mask)   (31 - __builtin_clz(mask))
#define KHEAP_START_SIZE        32
/*
 * Fix: parenthesize the size/zone-count expansions so they survive being
 * embedded in larger expressions (e.g. `x / KHEAP_MAX_SIZE` or
 * `2 * KHEAP_NUM_ZONES` previously expanded with the wrong precedence).
 */
#if !defined(__LP64__)
#define KHEAP_MAX_SIZE          (8 * 1024)
#define KHEAP_EXTRA_ZONES       3
#elif  __x86_64__
#define KHEAP_MAX_SIZE          (16 * 1024)
#define KHEAP_EXTRA_ZONES       2
#else
#define KHEAP_MAX_SIZE          (32 * 1024)
#define KHEAP_EXTRA_ZONES       2
#endif
#define KHEAP_STEP_WIDTH        2
#define KHEAP_STEP_START        16
#define KHEAP_START_IDX         kalloc_log2down(KHEAP_START_SIZE)
#define KHEAP_NUM_STEPS         (kalloc_log2down(KHEAP_MAX_SIZE) - \
	                                kalloc_log2down(KHEAP_START_SIZE))
#define KHEAP_NUM_ZONES         (KHEAP_NUM_STEPS * KHEAP_STEP_WIDTH \
	                                + KHEAP_EXTRA_ZONES)
392 
393 /*!
394  * @enum kalloc_type_version_t
395  *
396  * @brief
397  * Enum that holds versioning information for @c kalloc_type_var_view
398  *
399  * @const KT_V1
400  * Version 1
401  *
402  */
__options_decl(kalloc_type_version_t, uint16_t, {
	KT_V1             = 0x0001, /* initial kalloc_type_var_view layout */
});
406 
407 /*!
408  * @typedef kalloc_type_var_view_t
409  *
410  * @abstract
411  * This structure is analogous to @c kalloc_type_view but handles
412  * @c kalloc_type callsites that are variable in size.
413  *
414  * @discussion
415  * These structures are automatically created under the hood for every
416  * variable sized @c kalloc_type and @c kfree_type callsite. They are ingested
417  * during startup and are assigned zones based on the security policy for
418  * their signature.
419  *
420  * These structs are protected by the kernel lockdown and can't be initialized
421  * dynamically. They must be created using @c KALLOC_TYPE_VAR_DEFINE() or
422  * @c kalloc_type or @c kfree_type.
423  *
424  */
struct kalloc_type_var_view {
	kalloc_type_version_t   kt_version;   /* layout version (KT_V1) */
	uint16_t                kt_size_hdr;  /* size of the fixed header type, if any */
	/*
	 * Temporary: Needs to be 32bits cause we have many structs that use
	 * IONew/Delete that are larger than 32K.
	 */
	uint32_t                kt_size_type; /* size of the repeating element type */
	zone_stats_t            kt_stats;     /* per-view stats (views not yet ingested, see KALLOC_TYPE_VAR_DEFINE) */
	const char             *kt_name __unsafe_indexable; /* view name, for diagnostics */
	zone_view_t             kt_next;      /* next view; presumably chains registered views — confirm */
	zone_id_t               kt_heap_start; /* first zone id of the backing sizeclass heap — confirm against kalloc.c */
	uint8_t                 kt_zones[KHEAP_NUM_ZONES]; /* per-sizeclass zone mapping for this view */
	const char             *kt_sig_hdr __unsafe_indexable;  /* signature of the header type */
	const char             *kt_sig_type __unsafe_indexable; /* signature of the element type */
	kalloc_type_flags_t     kt_flags;     /* KT_* flags */
};

typedef struct kalloc_type_var_view *kalloc_type_var_view_t;
444 
445 /*!
446  * @macro KALLOC_TYPE_DECLARE
447  *
448  * @abstract
449  * (optionally) declares a kalloc type view (in a header).
450  *
451  * @param var           the name for the kalloc type view.
452  */
453 #define KALLOC_TYPE_DECLARE(var) \
454 	extern struct kalloc_type_view var[1]
455 
456 /*!
457  * @macro KALLOC_TYPE_DEFINE
458  *
459  * @abstract
460  * Defines a given kalloc type view with preferred accounting
461  *
462  * @discussion
463  * This macro allows you to define a kalloc type with private
464  * accounting. The defined kalloc_type_view can be used with
465  * kalloc_type_impl/kfree_type_impl to allocate/free memory.
466  * zalloc/zfree can also be used from inside xnu. However doing
467  * so doesn't handle freeing a NULL pointer or the use of tags.
468  *
469  * @param var           the name for the kalloc type view.
470  * @param type          the type of your allocation.
471  * @param flags         a @c KT_* flag.
472  */
473 #define KALLOC_TYPE_DEFINE(var, type, flags) \
474 	_KALLOC_TYPE_DEFINE(var, type, flags)
475 
476 /*!
477  * @macro KALLOC_TYPE_VAR_DEFINE
478  *
479  * @abstract
480  * Defines a given kalloc type view with preferred accounting for
481  * variable sized typed allocations.
482  *
483  * @discussion
484  * As the views aren't yet being ingested, individual stats aren't
485  * available. The defined kalloc_type_var_view should be used with
486  * kalloc_type_var_impl/kfree_type_var_impl to allocate/free memory.
487  *
488  * This macro comes in 2 variants:
489  *
490  * 1. @c KALLOC_TYPE_VAR_DEFINE(var, e_ty, flags)
491  * 2. @c KALLOC_TYPE_VAR_DEFINE(var, h_ty, e_ty, flags)
492  *
493  * @param var           the name for the kalloc type var view.
494  * @param h_ty          the type of header in the allocation.
495  * @param e_ty          the type of repeating part in the allocation.
496  * @param flags         a @c KT_* flag.
497  */
498 #define KALLOC_TYPE_VAR_DEFINE(...) KALLOC_DISPATCH(KALLOC_TYPE_VAR_DEFINE, ##__VA_ARGS__)
499 
500 #ifdef XNU_KERNEL_PRIVATE
501 
502 /*
503  * These versions allow specifying the kalloc heap to allocate memory
504  * from
505  */
#define kheap_alloc_site(kalloc_heap, size, flags, site) \
	__kheap_alloc_site(kalloc_heap, size, flags, site)

#define kheap_alloc(kalloc_heap, size, flags) \
	({ VM_ALLOC_SITE_STATIC(0, 0); \
	kheap_alloc_site(kalloc_heap, size, flags, &site); })

#define kheap_alloc_tag(kalloc_heap, size, flags, itag) \
	kheap_alloc_site(kalloc_heap, size, Z_VM_TAG(flags, itag), NULL)

/*
 * These versions should be used for allocating pure data bytes that
 * do not contain any pointers
 */
#define kalloc_data_site(size, flags, site) \
	kheap_alloc_site(KHEAP_DATA_BUFFERS, size, flags, site)

#define kalloc_data(size, flags) \
	kheap_alloc(KHEAP_DATA_BUFFERS, size, flags)

#define kalloc_data_tag(size, flags, itag) \
	kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, flags, itag)

#define krealloc_data_site(elem, old_size, new_size, flags, site) \
	__krealloc_site(KHEAP_DATA_BUFFERS, elem, old_size, new_size, flags, site)

#define krealloc_data(elem, old_size, new_size, flags) \
	({ VM_ALLOC_SITE_STATIC(0, 0); \
	krealloc_data_site(elem, old_size, new_size, flags, &site); })

/*
 * Fix: krealloc_data_site() already targets KHEAP_DATA_BUFFERS; passing
 * the heap again handed 6 arguments to a 5-parameter macro and could
 * not expand.
 */
#define krealloc_data_tag(elem, old_size, new_size, flags, itag) \
	krealloc_data_site(elem, old_size, new_size, \
	    Z_VM_TAG(flags, itag), NULL)

/*
 * Fix: no trailing semicolon in these wrappers — callers supply their
 * own, and an embedded `;` breaks use in unbraced if/else statements.
 */
#define kfree_data(elem, size) \
	kheap_free(KHEAP_DATA_BUFFERS, elem, size)

#define kfree_data_addr(elem) \
	kheap_free_addr(KHEAP_DATA_BUFFERS, elem)
545 
/* Free @c size bytes at @c data back to @c heap. */
extern void
kheap_free(
	kalloc_heap_t heap,
	void         *data  __unsafe_indexable,
	vm_size_t     size);

/* Free an allocation from @c heap when the caller does not track its size. */
extern void
kheap_free_addr(
	kalloc_heap_t heap,
	void         *addr __unsafe_indexable);

/* Free an allocation whose size is only known to lie within [min_sz, max_sz]. */
extern void
kheap_free_bounded(
	kalloc_heap_t heap,
	void         *addr __unsafe_indexable,
	vm_size_t     min_sz,
	vm_size_t     max_sz);

/*
 * NOTE(review): by analogy with zone_require(), these appear to assert
 * that [data, data + size) does (respectively does not) come from a
 * data-only kalloc heap — confirm against the definitions in kalloc.c.
 */
extern void
kalloc_data_require(
	void         *data __unsafe_indexable,
	vm_size_t     size);

extern void
kalloc_non_data_require(
	void         *data __unsafe_indexable,
	vm_size_t     size);
573 
574 #else /* XNU_KERNEL_PRIVATE */
575 
/* Allocate @c size bytes from the default heap. */
extern void *__sized_by(size)
kalloc(
	vm_size_t           size) __attribute__((malloc, alloc_size(1)));

/* Allocate @c size bytes of pointer-free data (KHEAP_DATA_BUFFERS). */
extern void *__sized_by(size)
kalloc_data(
	vm_size_t           size,
	zalloc_flags_t      flags) __attribute__((malloc, alloc_size(1)));

/* Resize a data allocation from @c old_size to @c new_size bytes. */
extern void *__sized_by(new_size)
krealloc_data(
	void               *ptr __unsafe_indexable,
	vm_size_t           old_size,
	vm_size_t           new_size,
	zalloc_flags_t      flags) __attribute__((malloc, alloc_size(3)));

/* Free a @c size byte allocation made with kalloc(). */
extern void
kfree(
	void               *data __unsafe_indexable,
	vm_size_t           size);

/* Free a @c size byte allocation made with kalloc_data(). */
extern void
kfree_data(
	void               *ptr __unsafe_indexable,
	vm_size_t           size);

/* Free a kalloc_data() allocation whose size the caller does not track. */
extern void
kfree_data_addr(
	void               *ptr __unsafe_indexable);
605 
606 #endif /* !XNU_KERNEL_PRIVATE */
607 
608 /*!
609  * @macro kalloc_type
610  *
611  * @abstract
612  * Allocates element of a particular type
613  *
614  * @discussion
615  * This family of allocators segregate kalloc allocations based on their type.
616  *
617  * This macro comes in 3 variants:
618  *
619  * 1. @c kalloc_type(type, flags)
620  *    Use this macro for fixed sized allocation of a particular type.
621  *
622  * 2. @c kalloc_type(e_type, count, flags)
623  *    Use this macro for variable sized allocations that form an array,
624  *    do note that @c kalloc_type(e_type, 1, flags) is not equivalent to
625  *    @c kalloc_type(e_type, flags).
626  *
627  * 3. @c kalloc_type(hdr_type, e_type, count, flags)
628  *    Use this macro for variable sized allocations formed with
629  *    a header of type @c hdr_type followed by a variable sized array
630  *    with elements of type @c e_type, equivalent to this:
631  *
632  *    <code>
633  *    struct {
634  *        hdr_type hdr;
635  *        e_type   arr[];
636  *    }
637  *    </code>
638  *
639  * @param flags         @c zalloc_flags_t that get passed to zalloc_internal
640  */
641 #define kalloc_type(...)  KALLOC_DISPATCH(kalloc_type, ##__VA_ARGS__)
642 
643 /*!
644  * @macro kfree_type
645  *
646  * @abstract
647  * Frees an element of a particular type
648  *
649  * @discussion
650  * This pairs with the @c kalloc_type() that was made to allocate this element.
651  * Arguments passed to @c kfree_type() must match the one passed at allocation
652  * time precisely.
653  *
654  * This macro comes in the same 3 variants kalloc_type() does:
655  *
656  * 1. @c kfree_type(type, elem)
657  * 2. @c kfree_type(e_type, count, elem)
658  * 3. @c kfree_type(hdr_type, e_type, count, elem)
659  *
660  * @param elem          The address of the element to free
661  */
662 #define kfree_type(...)  KALLOC_DISPATCH(kfree_type, ##__VA_ARGS__)
663 
664 #ifdef XNU_KERNEL_PRIVATE
665 #define kalloc_type_site(...)    KALLOC_DISPATCH(kalloc_type_site, ##__VA_ARGS__)
666 #define kalloc_type_tag(...)     KALLOC_DISPATCH(kalloc_type_tag, ##__VA_ARGS__)
667 #define krealloc_type_site(...)  KALLOC_DISPATCH(krealloc_type_site, ##__VA_ARGS__)
668 #define krealloc_type(...)       KALLOC_DISPATCH(krealloc_type, ##__VA_ARGS__)
669 
670 /*
671  * kalloc_type_require can't be made available to kexts as the
672  * kalloc_type_view's zone could be NULL in the following cases:
673  * - Size greater than KALLOC_SAFE_ALLOC_SIZE
674  * - On macOS, if call is not in BootKC
675  * - All allocations in kext for armv7
676  */
/*
 * Fix: pass the backing zone — zone_require() takes a zone, not the
 * whole zone_view struct (matches kt_zv.zv_zone usage elsewhere in
 * this header, e.g. kalloc_type_site_3).
 */
#define kalloc_type_require(type, value) ({                                    \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	zone_require(kt_view_var->kt_zv.zv_zone, value);                       \
})
681 
682 #endif
683 
684 /*!
685  * @enum kt_granule_t
686  *
687  * @brief
688  * Granule encodings used by the compiler for the type signature.
689  *
690  * @discussion
691  * Given a type, the XNU signature type system (__builtin_xnu_type_signature)
692  * produces a signature by analyzing its memory layout, in chunks of 8 bytes,
693  * which we call granules. The encoding produced for each granule is the
694  * bitwise or of the encodings of all the types of the members included
695  * in that granule.
696  *
697  * @const KT_GRANULE_PADDING
698  * Represents padding inside a record type.
699  *
700  * @const KT_GRANULE_POINTER
701  * Represents a pointer type.
702  *
703  * @const KT_GRANULE_DATA
704  * Represents a scalar type that is not a pointer.
705  *
706  * @const KT_GRANULE_DUAL
707  * Currently unused.
708  *
709  * @const KT_GRANULE_PAC
710  * Represents a pointer which is subject to PAC.
711  */
__options_decl(kt_granule_t, uint32_t, {
	KT_GRANULE_PADDING = 0, /* padding-only granule */
	KT_GRANULE_POINTER = 1, /* granule contains a pointer */
	KT_GRANULE_DATA    = 2, /* granule contains a non-pointer scalar */
	KT_GRANULE_DUAL    = 4, /* currently unused */
	KT_GRANULE_PAC     = 8  /* granule contains a PAC-signed pointer */
});

/* All granule encoding bits or-ed together. */
#define KT_GRANULE_MAX                                                \
	(KT_GRANULE_PADDING | KT_GRANULE_POINTER | KT_GRANULE_DATA |  \
	    KT_GRANULE_DUAL | KT_GRANULE_PAC)
723 
724 /*
725  * Convert a granule encoding to the index of the bit that
726  * represents such granule in the type summary.
727  *
728  * The XNU type summary (__builtin_xnu_type_summary) produces a 32-bit
729  * summary of the type signature of a given type. If the bit at index
730  * (1 << G) is set in the summary, that means that the type contains
731  * one or more granules with encoding G.
732  */
/*
 * Fix: parenthesize the macro parameter — without it, a compound
 * encoding such as (KT_GRANULE_POINTER | KT_GRANULE_DATA) expanded as
 * `1UL << g1 | g2` due to operator precedence.
 */
#define KT_SUMMARY_GRANULE_TO_IDX(g)  (1UL << (g))

/* Bits of the type summary that carry granule information. */
#define KT_SUMMARY_MASK_TYPE_BITS  (0xffff)

/* Summary bits allowed in a data-only type. */
#define KT_SUMMARY_MASK_DATA                             \
	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |  \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA))

/* Summary bits allowed in a pointers-only type. */
#define KT_SUMMARY_MASK_PTR                              \
	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))

/* Summary bits for every known granule encoding. */
#define KT_SUMMARY_MASK_ALL_GRANULES                        \
	(KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PADDING) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_POINTER) |  \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DATA) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_DUAL) |     \
	    KT_SUMMARY_GRANULE_TO_IDX(KT_GRANULE_PAC))
752 
753 /*!
754  * @macro KT_SUMMARY_GRANULES
755  *
756  * @abstract
757  * Return the granule type summary for a given type
758  *
759  * @discussion
760  * This macro computes the type summary of a type, and it then extracts the
761  * bits which carry information about the granules in the memory layout.
762  *
763  * Note: you should never have to use __builtin_xnu_type_summary
764  * directly, as we reserve the right to use the remaining bits with
765  * different semantics.
766  *
767  * @param type          The type to analyze
768  */
769 #define KT_SUMMARY_GRANULES(type) \
770     (__builtin_xnu_type_summary(type) & KT_SUMMARY_MASK_TYPE_BITS)
771 
772 /*!
773  * @macro KALLOC_TYPE_IS_DATA_ONLY
774  *
775  * @abstract
776  * Return whether a given type is considered a data-only type.
777  *
778  * @param type          The type to analyze
779  */
780 #define KALLOC_TYPE_IS_DATA_ONLY(type) \
781     ((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_DATA) == 0)
782 
783 /*!
784  * @macro KALLOC_TYPE_SIG_CHECK
785  *
786  * @abstract
787  * Return whether a given type is only made up of granules specified in mask
788  *
789  * @param mask          Granules to check for
790  * @param type          The type to analyze
791  */
792 #define KALLOC_TYPE_SIG_CHECK(mask, type) \
793     ((KT_SUMMARY_GRANULES(type) & ~(mask)) == 0)
794 
795 /*!
796  * @macro KALLOC_TYPE_HAS_OVERLAPS
797  *
798  * @abstract
799  * Return whether a given type has overlapping granules.
800  *
801  * @discussion
802  * This macro returns whether the memory layout for a given type contains
803  * overlapping granules. An overlapping granule is a granule which includes
804  * members with types that have different encodings under the XNU signature
805  * type system.
806  *
807  * @param type          The type to analyze
808  */
809 #define KALLOC_TYPE_HAS_OVERLAPS(type) \
810 	((KT_SUMMARY_GRANULES(type) & ~KT_SUMMARY_MASK_ALL_GRANULES) != 0)
811 
812 
813 #pragma mark implementation details
814 
815 #ifdef XNU_KERNEL_PRIVATE
816 
/* Compile-time check that @c ptr is a pointer compatible with @c type. */
#define KFREE_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type)          \
	_Static_assert(os_is_compatible_ptr(ptr, type),           \
	    "Pointer type is not compatible with specified type")

#else  /* XNU_KERNEL_PRIVATE */

/* No-op for kexts; the check is only enforced inside XNU proper. */
#define KFREE_TYPE_ASSERT_COMPATIBLE_POINTER(ptr, type) do { } while (0)
824 
825 #endif /* XNU_KERNEL_PRIVATE */
826 
827 static inline vm_size_t
kt_size(vm_size_t s1,vm_size_t s2,vm_size_t c2)828 kt_size(vm_size_t s1, vm_size_t s2, vm_size_t c2)
829 {
830 	/* kalloc_large() will reject this size before even asking the VM  */
831 	const vm_size_t limit = 1ull << (8 * sizeof(vm_size_t) - 1);
832 
833 	if (os_mul_and_add_overflow(s2, c2, s1, &s1) || (s1 & limit)) {
834 		return limit;
835 	}
836 	return s1;
837 }
838 
/* kalloc_type(type, flags): fixed-size typed allocation via a static per-callsite view. */
#define kalloc_type_2(type, flags) ({                                          \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	__unsafe_forge_single(type *, kalloc_type_impl(kt_view_var, flags));   \
})

/* kfree_type(type, elem): free a fixed-size allocation; loads and erases the caller's pointer. */
#define kfree_type_2(type, elem) ({                                            \
	KFREE_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                      \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	kfree_type_impl(kt_view_var, os_ptr_load_and_erase(elem));             \
})

/* kfree_type(e_ty, count, elem): free an array of @c count elements. */
#define kfree_type_3(type, count, elem) ({                                 \
	KFREE_TYPE_ASSERT_COMPATIBLE_POINTER(elem, type);                      \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var, os_ptr_load_and_erase(elem),          \
	    kt_size(0, sizeof(type), __kfree_count));                          \
})

/* kfree_type(hdr_ty, e_ty, count, elem): free a header + @c count element allocation. */
#define kfree_type_4(hdr_ty, e_ty, count, elem) ({                         \
	KFREE_TYPE_ASSERT_COMPATIBLE_POINTER(elem, hdr_ty);                    \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	__auto_type __kfree_count = (count);                                   \
	kfree_type_var_impl(kt_view_var,                                       \
	    os_ptr_load_and_erase(elem),                                       \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), __kfree_count));             \
})
867 
868 #ifdef XNU_KERNEL_PRIVATE
/* kalloc_type(e_ty, count, flags): allocate an array of @c count elements. */
#define kalloc_type_3(type, count, flags) ({                               \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	VM_ALLOC_SITE_STATIC(0, 0);                                            \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count), flags, &site);                    \
})

/* kalloc_type(hdr_ty, e_ty, count, flags): allocate a header followed by @c count elements. */
#define kalloc_type_4(hdr_ty, e_ty, count, flags) ({                       \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	VM_ALLOC_SITE_STATIC(0, 0);                                            \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var, kt_size(sizeof(hdr_ty),    \
	    sizeof(e_ty), count), flags, &site);                               \
})

/* Fixed-size typed allocation with an explicit VM tag (baked into the view and the call). */
#define kalloc_type_tag_3(type, flags, tag) ({                             \
	static KALLOC_TYPE_DEFINE(kt_view_var, type,                           \
	    (kalloc_type_flags_t)Z_VM_TAG(KT_SHARED_ACCT, tag));               \
	__unsafe_forge_single(type *, zalloc_flags(kt_view_var,                \
	    Z_VM_TAG(flags, tag)));                                            \
})

/* Fixed-size typed allocation with an explicit allocation site. */
#define kalloc_type_site_3(type, flags, site) ({                           \
	static KALLOC_TYPE_DEFINE(kt_view_var, type, KT_SHARED_ACCT);          \
	__unsafe_forge_single(type *, zalloc_flags(kt_view_var,                \
	    __zone_flags_mix_tag(kt_view_var->kt_zv.zv_zone, flags, site)));   \
})

/* Array allocation with an explicit VM tag. */
#define kalloc_type_tag_4(type, count, flags, tag) ({                      \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var, kt_size(0, sizeof(type),     \
	    count), Z_VM_TAG(flags, tag), NULL);                               \
})

/* Array allocation with an explicit allocation site. */
#define kalloc_type_site_4(type, count, flags, site) ({                    \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count), flags, site);                     \
})
908 
909 #define kalloc_type_tag_5(hdr_ty, e_ty, count, flags, tag) ({              \
910 	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
911 	    KT_SHARED_ACCT);                                                   \
912 	(hdr_ty *)kalloc_type_var_impl(kt_view_var,                            \
913 	    kt_size(sizeof(hdr_ty), sizeof(e_ty), count),                      \
914 	    Z_VM_TAG(flags, tag), NULL);                                       \
915 })
916 
917 #define kalloc_type_site_5(hdr_ty, e_ty, count, flags, site) ({            \
918 	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
919 	    KT_SHARED_ACCT);                                                   \
920 	(hdr_ty *)kalloc_type_var_impl(kt_view_var, kt_size(sizeof(hdr_ty),    \
921 	    sizeof(e_ty), count), flags, site);                                \
922 })
923 
924 #define krealloc_type_site_6(type, old_count, new_count, elem, flags,      \
925 	    site) ({                                                           \
926 	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
927 	((type *)__krealloc_type_site(kt_view_var, elem,                       \
928 	    kt_size(0, sizeof(type), old_count),                               \
929 	    kt_size(0, sizeof(type), new_count), flags, site));                \
930 })
931 
932 #define krealloc_type_5(type, old_count, new_count, elem, flags) \
933 	({ VM_ALLOC_SITE_STATIC(0, 0); \
934 	krealloc_type_site_6(type, old_count, new_count, elem, flags, &site); })
935 
/*
 * krealloc_type_site_7(hdr_ty, e_ty, old_count, new_count, elem, flags,
 * site): resize a header + variable array allocation (one `hdr_ty`
 * followed by `e_ty` elements) from `old_count` to `new_count` elements.
 *
 * Fix: the result was cast to `(type *)`, but `type` is not a parameter of
 * this macro (copy/paste residue from krealloc_type_site_6), so any
 * expansion would reference an undeclared identifier or silently pick up
 * an unrelated `type` in scope. The correct pointee is the header type
 * `hdr_ty`, consistent with kalloc_type_4()/kalloc_type_site_5().
 */
#define krealloc_type_site_7(hdr_ty, e_ty, old_count, new_count, elem,     \
	    flags, site) ({                                                    \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	((hdr_ty *)__krealloc_type_site(kt_view_var, elem,                     \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), old_count),                  \
	    kt_size(sizeof(hdr_ty), sizeof(e_ty), new_count), flags, site));   \
})
944 
/* Convenience wrapper: declares a static allocation site, then reallocs. */
#define krealloc_type_6(hdr_ty, e_ty, old_count, new_count, elem, flags) \
	({ VM_ALLOC_SITE_STATIC(0, 0); \
	krealloc_type_site_7(hdr_ty, e_ty, old_count, new_count, elem, flags, &site); })
948 
949 #else /* XNU_KERNEL_PRIVATE */
950 /* for now kexts do not have access to flags */
/*
 * Kext variant of kalloc_type_3: flags are restricted to Z_WAITOK at
 * compile time (see the "kexts do not have access to flags" note above).
 */
#define kalloc_type_3(type, count, flags) ({                               \
	_Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK");   \
	static KALLOC_TYPE_VAR_DEFINE_3(kt_view_var, type, KT_SHARED_ACCT);    \
	(type *)kalloc_type_var_impl(kt_view_var,                              \
	    kt_size(0, sizeof(type), count), flags, NULL);                     \
})

/* Kext variant of kalloc_type_4: header + array, Z_WAITOK only. */
#define kalloc_type_4(hdr_ty, e_ty, count, flags) ({                       \
	_Static_assert((flags) == Z_WAITOK, "kexts can only pass Z_WAITOK");   \
	static KALLOC_TYPE_VAR_DEFINE_4(kt_view_var, hdr_ty, e_ty,             \
	    KT_SHARED_ACCT);                                                   \
	(hdr_ty *)kalloc_type_var_impl(kt_view_var, kt_size(sizeof(hdr_ty),    \
	    sizeof(e_ty), count), flags, NULL);                                \
})
965 
966 #endif /* !XNU_KERNEL_PRIVATE */
967 
968 /*
969  * All k*free macros set "elem" to NULL on free.
970  *
971  * Note: all values passed to k*free() might be in the element to be freed,
972  *       temporaries must be taken, and the resetting to be done prior to free.
973  */
974 #ifdef XNU_KERNEL_PRIVATE
975 
/*
 * kheap_free(heap, elem, size): free a `size`-byte allocation from `heap`.
 * `heap` and `size` are captured into temporaries first, then `elem` is
 * erased (set to NULL), per the note above: arguments may live inside the
 * element being freed.
 */
#define kheap_free(heap, elem, size) ({                        \
	kalloc_heap_t __kfree_heap = (heap);                       \
	__auto_type __kfree_size = (size);                         \
	(kheap_free)(__kfree_heap,                                 \
	(void *)os_ptr_load_and_erase(elem),                       \
	__kfree_size);                                             \
})

/* kheap_free_addr(heap, elem): free by address only (no size); erases elem. */
#define kheap_free_addr(heap, elem) ({                         \
	kalloc_heap_t __kfree_heap = (heap);                       \
	(kheap_free_addr)(__kfree_heap,                            \
	(void *)os_ptr_load_and_erase(elem));                      \
})

/*
 * kheap_free_bounded(heap, elem, min_sz, max_sz): free when only bounds on
 * the size are known; max_sz is checked at compile time against
 * KALLOC_SAFE_ALLOC_SIZE. Erases elem.
 */
#define kheap_free_bounded(heap, elem, min_sz, max_sz) ({      \
	static_assert(max_sz <= KALLOC_SAFE_ALLOC_SIZE);           \
	kalloc_heap_t __kfree_heap = (heap);                       \
	__auto_type __kfree_min_sz = (min_sz);                     \
	__auto_type __kfree_max_sz = (max_sz);                     \
	(kheap_free_bounded)(__kfree_heap,                         \
	(void *)os_ptr_load_and_erase(elem),                       \
	__kfree_min_sz, __kfree_max_sz);                           \
})
999 
1000 #else /* XNU_KERNEL_PRIVATE */
1001 
/*
 * kfree_data(elem, size): kext-facing free of a `size`-byte data
 * allocation; `size` is captured before `elem` is erased (set to NULL).
 */
#define kfree_data(elem, size) ({                              \
	__auto_type __kfree_size = (size);                         \
	(kfree_data)((void *)os_ptr_load_and_erase(elem),          \
	__kfree_size);                                             \
})

/* kfree_data_addr(elem): free a data allocation by address; erases elem. */
#define kfree_data_addr(elem) \
	(kfree_data_addr)((void *)os_ptr_load_and_erase(elem))
1010 
1011 #endif /* !XNU_KERNEL_PRIVATE */
1012 
/* Suppress ASan instrumentation on kalloc_type view definitions. */
#if __has_feature(address_sanitizer)
# define __kalloc_no_kasan __attribute__((no_sanitize("address")))
#else
# define __kalloc_no_kasan
#endif

#define KALLOC_CONCAT(x, y) __CONCAT(x,y)

/*
 * Arity-dispatch machinery: KALLOC_COUNT_ARGS expands to _0.._9 depending
 * on the number of arguments, and KALLOC_DISPATCH{,_R} pastes that suffix
 * onto `base` to select the right macro overload (e.g. kalloc_type_3 vs
 * kalloc_type_4). The _R variant exists so dispatch can be nested without
 * blocking recursive expansion.
 */
#define KALLOC_COUNT_ARGS1(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, N, ...) N
#define KALLOC_COUNT_ARGS(...) \
	KALLOC_COUNT_ARGS1(, ##__VA_ARGS__, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0)
#define KALLOC_DISPATCH1(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH(base, ...) \
	KALLOC_DISPATCH1(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)
#define KALLOC_DISPATCH1_R(base, N, ...) __CONCAT(base, N)(__VA_ARGS__)
#define KALLOC_DISPATCH_R(base, ...) \
	KALLOC_DISPATCH1_R(base, KALLOC_COUNT_ARGS(__VA_ARGS__), ##__VA_ARGS__)

/* Per-use-site unique name for the static kalloc_type view variable. */
#define kt_view_var \
	KALLOC_CONCAT(kalloc_type_view_, __LINE__)

/* Views are immutable after boot on LP64, so they live in __DATA_CONST. */
#if __LP64__
#define KALLOC_TYPE_SEGMENT "__DATA_CONST"
#else
#define KALLOC_TYPE_SEGMENT "__DATA"
#endif
1039 
1040 /*
1041  * When kalloc_type_impl is called from xnu, it calls zalloc_flags
1042  * directly and doesn't redirect zone-less sites to kheap_alloc.
1043  * Passing a size larger than kalloc_max for these allocations will
1044  * lead to a panic as the zone is null. Therefore assert that size
1045  * is less than KALLOC_SAFE_ALLOC_SIZE.
1046  */
#ifdef XNU_KERNEL_PRIVATE
/* Compile-time guard: see the comment above about KALLOC_SAFE_ALLOC_SIZE. */
#define KALLOC_TYPE_SIZE_CHECK(size)                           \
	_Static_assert(size <= KALLOC_SAFE_ALLOC_SIZE,             \
	"type is too large");
#else
#define KALLOC_TYPE_SIZE_CHECK(size)
#endif

/* Signature-summary check for one type (dispatched by arity below). */
#define KALLOC_TYPE_CHECK_2(check, type) \
	(KALLOC_TYPE_SIG_CHECK(check, type))

/* Signature-summary check: both types must satisfy `check`. */
#define KALLOC_TYPE_CHECK_3(check, type1, type2) \
	(KALLOC_TYPE_SIG_CHECK(check, type1) && \
	    KALLOC_TYPE_SIG_CHECK(check, type2))

#define KALLOC_TYPE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_CHECK, ##__VA_ARGS__)

/* True when the type(s) are too large for kheap and must use the VM. */
#define KALLOC_TYPE_VM_SIZE_CHECK_1(type) \
	(sizeof(type) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK_2(type1, type2) \
	(sizeof(type1) + sizeof(type2) > KHEAP_MAX_SIZE)

#define KALLOC_TYPE_VM_SIZE_CHECK(...) \
	KALLOC_DISPATCH_R(KALLOC_TYPE_VM_SIZE_CHECK, ##__VA_ARGS__)

/* C vs C++ spelling of the cast to kalloc_type_flags_t. */
#ifdef __cplusplus
#define KALLOC_TYPE_CAST_FLAGS(flags) static_cast<kalloc_type_flags_t>(flags)
#else
#define KALLOC_TYPE_CAST_FLAGS(flags) (kalloc_type_flags_t)(flags)
#endif
1079 
1080 /*
1081  * Don't emit signature if type is "data-only" or is large enough that it
1082  * uses the VM.
1083  *
1084  * Note: sig_type is the type you want to emit signature for. The variable
1085  * args can be used to provide other types in the allocation, to make the
1086  * decision of whether to emit the signature.
1087  */
/* Emits "" (no signature) for data-only or VM-sized types; see note above. */
#define KALLOC_TYPE_EMIT_SIG(sig_type, ...)                              \
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, sig_type, ##__VA_ARGS__) || \
	KALLOC_TYPE_VM_SIZE_CHECK(sig_type, ##__VA_ARGS__))?                 \
	"" : __builtin_xnu_type_signature(sig_type)

/*
 * Kalloc type flags are adjusted to indicate if the type is "data-only" or
 * will use the VM or is a pointer array.
 */
#define KALLOC_TYPE_ADJUST_FLAGS(flags, ...)                                 \
	KALLOC_TYPE_CAST_FLAGS((flags | KT_CHANGED | KT_CHANGED2 |               \
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_DATA, __VA_ARGS__)? KT_DATA_ONLY: 0) |\
	(KALLOC_TYPE_CHECK(KT_SUMMARY_MASK_PTR, __VA_ARGS__)? KT_PTR_ARRAY: 0) | \
	(KALLOC_TYPE_VM_SIZE_CHECK(__VA_ARGS__)? KT_VM : 0)))
1102 
/*
 * Emit a fixed-size kalloc_type_view for `type` into the __kalloc_type
 * section, carrying the type's size, adjusted flags and signature so the
 * boot-time zone logic can place it.
 */
#define _KALLOC_TYPE_DEFINE(var, type, flags)                       \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_type")       \
	struct kalloc_type_view var[1] = { {                            \
	    .kt_zv.zv_name = "site." #type,                             \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size = sizeof(type),                                    \
	    .kt_signature = KALLOC_TYPE_EMIT_SIG(type),                 \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1113 
/*
 * Emit a variable-size kalloc_type_var_view (no header type) into the
 * __kalloc_var section.
 */
#define KALLOC_TYPE_VAR_DEFINE_3(var, type, flags)                  \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var")        \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #type,                                   \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, type),          \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type),                  \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1125 
/*
 * Emit a variable-size kalloc_type_var_view for a header + element-array
 * layout; each signature is computed considering the other type too, so
 * the data-only/VM decision is made on the whole allocation.
 */
#define KALLOC_TYPE_VAR_DEFINE_4(var, hdr, type, flags)             \
	__kalloc_no_kasan                                               \
	__PLACE_IN_SECTION(KALLOC_TYPE_SEGMENT ", __kalloc_var")        \
	struct kalloc_type_var_view var[1] = { {                        \
	    .kt_version = KT_V1,                                        \
	    .kt_name = "site." #hdr "." #type,                          \
	    .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(flags, hdr, type),     \
	    .kt_size_hdr = sizeof(hdr),                                 \
	    .kt_size_type = sizeof(type),                               \
	    .kt_sig_hdr = KALLOC_TYPE_EMIT_SIG(hdr, type),              \
	    .kt_sig_type = KALLOC_TYPE_EMIT_SIG(type, hdr),             \
	} };                                                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(hdr));                            \
	KALLOC_TYPE_SIZE_CHECK(sizeof(type));
1140 
1141 #ifndef XNU_KERNEL_PRIVATE
1142 /*
1143  * This macro is currently used by AppleImage4
1144  */
/* Static, file-local fixed-size view definition (kext-facing). */
#define KALLOC_TYPE_DEFINE_SITE(var, type, flags)       \
	static _KALLOC_TYPE_DEFINE(var, type, flags)
1147 
1148 #endif /* !XNU_KERNEL_PRIVATE */
1149 
1150 #ifdef XNU_KERNEL_PRIVATE
1151 
/* Extract the VM tag bits stored in a view's kt_flags. */
#define KT_VM_TAG(var) \
	((var)->kt_flags & KT_VM_TAG_MASK)

/* Allocate from the view's zone, mixing the view's stored tag into flags. */
#define kalloc_type_impl(kt_view, flags) \
	zalloc_flags(kt_view, (zalloc_flags_t)(KT_VM_TAG(kt_view) | (flags)))
1157 
1158 static inline void
kfree_type_impl(kalloc_type_view_t kt_view,void * __unsafe_indexable ptr)1159 kfree_type_impl(kalloc_type_view_t kt_view, void *__unsafe_indexable ptr)
1160 {
1161 	if (NULL == ptr) {
1162 		return;
1163 	}
1164 	zfree(kt_view, ptr);
1165 }
1166 
1167 /*
1168  * This type is used so that kalloc_internal has good calling conventions
1169  * for callers who want to cheaply both know the allocated address
1170  * and the actual size of the allocation.
1171  */
1172 struct kalloc_result {
1173 	void         *addr __sized_by(size);
1174 	vm_size_t     size;
1175 };
1176 
/* Variable-size typed allocation backend; returns address + actual size. */
extern struct kalloc_result
kalloc_type_var_impl_internal(
	kalloc_type_var_view_t  kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);

/* In-kernel callers only need the address, not the rounded size. */
#define kalloc_type_var_impl(kt_view, size, flags, site) \
	kalloc_type_var_impl_internal(kt_view, size, flags, site).addr

/* Variable-size typed free backend. */
extern void
kfree_type_var_impl_internal(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr __unsafe_indexable,
	vm_size_t               size);

#define kfree_type_var_impl(kt_view, ptr, size) \
	kfree_type_var_impl_internal(kt_view, ptr, size)
1195 
1196 #else /* XNU_KERNEL_PRIVATE */
1197 
/* Kext-facing fixed-size typed allocation. */
extern void *__unsafe_indexable
kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

/* Kext-facing fixed-size typed free. */
extern void
kfree_type_impl(
	kalloc_type_view_t  kt_view,
	void                *ptr __unsafe_indexable);

/* Kext-facing variable-size typed allocation (returns `size` bytes). */
__attribute__((malloc, alloc_size(2)))
extern void *__sized_by(size)
kalloc_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site);

/* Kext-facing variable-size typed free. */
extern void
kfree_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr __unsafe_indexable,
	vm_size_t               size);
1221 
1222 #endif /* !XNU_KERNEL_PRIVATE */
1223 
/* Exported entry points used by code outside this component. */
void *
kalloc_type_impl_external(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

void
kfree_type_impl_external(
	kalloc_type_view_t  kt_view,
	void               *ptr __unsafe_indexable);

/* Typed operator new/delete backends for OSObject (libkern C++). */
extern void *
OSObject_typed_operator_new(
	kalloc_type_view_t  ktv,
	vm_size_t           size);

extern void
OSObject_typed_operator_delete(
	kalloc_type_view_t  ktv,
	void               *mem __unsafe_indexable,
	vm_size_t           size);
1244 
1245 #ifdef XNU_KERNEL_PRIVATE
1246 #pragma GCC visibility push(hidden)
1247 
#define KALLOC_TYPE_SIZE_MASK  0xffffff
#define KALLOC_TYPE_IDX_SHIFT  24
#define KALLOC_TYPE_IDX_MASK   0xff

/*
 * A packed kt_size word carries a zone index in the bits above
 * KALLOC_TYPE_IDX_SHIFT and the size in the low 24 bits.
 */

/* Extract the zone index stored in the high bits of kt_size. */
static inline uint16_t
kalloc_type_get_idx(uint32_t kt_size)
{
	uint32_t idx = kt_size >> KALLOC_TYPE_IDX_SHIFT;
	return (uint16_t)idx;
}

/* Pack idx into the high bits of kt_size. */
static inline uint32_t
kalloc_type_set_idx(uint32_t kt_size, uint16_t idx)
{
	uint32_t shifted = (uint32_t)idx << KALLOC_TYPE_IDX_SHIFT;
	return kt_size | shifted;
}

/* Strip the index bits, leaving only the size portion of kt_size. */
static inline uint32_t
kalloc_type_get_size(uint32_t kt_size)
{
	uint32_t size = kt_size & KALLOC_TYPE_SIZE_MASK;
	return size;
}
1269 
/* True when the given typed view is backed by the VM rather than a zone. */
bool
IOMallocType_from_vm(
	kalloc_type_view_t ktv);

/* Used by kern_os_* and operator new */
KALLOC_HEAP_DECLARE(KERN_OS_MALLOC);

/* Boot-time initialization of a kalloc heap. */
extern void
kheap_startup_init(
	kalloc_heap_t heap);

/* Core heap allocation entry point; returns address + actual size. */
extern struct kalloc_result
kalloc_ext(
	kalloc_heap_t           kheap,
	vm_size_t               size,
	zalloc_flags_t          flags,
	vm_allocation_site_t   *site);
1287 
/*
 * Allocate `size` bytes from `kheap`, forwarding the allocation site for
 * VM tag accounting. Discards the actual-size half of kalloc_ext()'s
 * result and forges the pointer to be `size` bytes indexable for
 * -fbounds-safety callers.
 */
__attribute__((malloc, alloc_size(2)))
static inline void *
__sized_by(size)
__kheap_alloc_site(
	kalloc_heap_t           kheap,
	vm_size_t               size,
	zalloc_flags_t          flags,
	vm_allocation_site_t   *site)
{
	struct kalloc_result kr;
	kr = kalloc_ext(kheap, size, flags, site);
	return __unsafe_forge_bidi_indexable(void *, kr.addr, size);
}
1301 
/*
 * Resize `addr` (previously allocated from `kheap` with `old_size`) to
 * `new_size`; returns the new address + actual size.
 */
extern struct kalloc_result
krealloc_ext(
	kalloc_heap_t           kheap,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	vm_allocation_site_t   *site);
1310 
/*
 * Inline wrapper over krealloc_ext() that drops the actual-size result and
 * forges the returned pointer to `new_size` bytes for -fbounds-safety.
 */
__attribute__((malloc, alloc_size(4)))
static inline void *
__sized_by(new_size)
__krealloc_site(
	kalloc_heap_t           kheap,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	vm_allocation_site_t   *site)
{
	struct kalloc_result kr;
	kr = krealloc_ext(kheap, addr, old_size, new_size, flags, site);
	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
}
1326 
/* Typed (variable-size view) counterpart of krealloc_ext(). */
struct kalloc_result
krealloc_type_var_impl(
	kalloc_type_var_view_t  kt_view,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	vm_allocation_site_t   *site);
1335 
/*
 * Inline wrapper over krealloc_type_var_impl() used by the krealloc_type
 * macros above; drops the actual-size result and forges the returned
 * pointer to `new_size` bytes for -fbounds-safety.
 */
__attribute__((malloc, alloc_size(4)))
static inline void *
__sized_by(new_size)
__krealloc_type_site(
	kalloc_type_var_view_t  kt_view,
	void                   *addr __unsafe_indexable,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	vm_allocation_site_t   *site)
{
	struct kalloc_result kr;
	kr = krealloc_type_var_impl(kt_view, addr, old_size, new_size, flags, site);
	return __unsafe_forge_bidi_indexable(void *, kr.addr, new_size);
}
1351 
/* True when `map` is one of the maps owned by the kalloc subsystem. */
extern bool
kalloc_owned_map(
	vm_map_t      map);

/* Accessors for the submaps backing large kalloc allocations. */
extern vm_map_t
kalloc_large_map_get(void);

extern vm_map_t
kalloc_large_data_map_get(void);

extern vm_map_t
kernel_data_map_get(void);

/* Zone that would service an allocation of `size` from `heap`. */
extern zone_t
kalloc_heap_zone_for_size(
	kalloc_heap_t         heap,
	vm_size_t             size);

extern vm_size_t kalloc_max_prerounded;
extern vm_size_t kalloc_large_total;

/* Free path for kern_os_* (KERN_OS_MALLOC heap) allocations. */
extern void
kern_os_kfree(
	void         *addr __unsafe_indexable,
	vm_size_t     size);

/* Typed free path used by operator delete. */
extern void
kern_os_typed_free(
	kalloc_type_view_t    ktv,
	void                 *addr __unsafe_indexable,
	vm_size_t             esize);
1383 
1384 #pragma GCC visibility pop
#endif  /* XNU_KERNEL_PRIVATE */
1386 
/* Free `addr` of `size` bytes back to `zone` (kern_os compatibility path). */
extern void
kern_os_zfree(
	zone_t        zone,
	void         *addr __unsafe_indexable,
	vm_size_t     size);
1392 
1393 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1394 
1395 #endif  /* _KERN_KALLOC_H_ */
1396 
1397 #endif  /* KERNEL_PRIVATE */
1398