xref: /xnu-8020.140.41/osfmk/kern/zalloc.h (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	zalloc.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	 1985
62  *
63  */
64 
65 #ifdef  KERNEL_PRIVATE
66 
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69 
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76 
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80 
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg)       __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86 
87 /*
88  * Enable this macro to force type safe zalloc/zalloc_ro/...
89  */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97 
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99 
100 /*!
101  * @macro __zpercpu
102  *
103  * @abstract
104  * Annotation that helps denoting a per-cpu pointer that requires usage of
105  * @c zpercpu_*() for access.
106  */
107 #define __zpercpu __unsafe_indexable
108 
109 /*!
110  * @typedef zone_id_t
111  *
112  * @abstract
113  * The type for a zone ID.
114  */
115 typedef uint16_t zone_id_t;
116 
117 /**
118  * @enum zone_create_flags_t
119  *
120  * @abstract
121  * Set of flags to pass to zone_create().
122  *
123  * @discussion
124  * Some kernel-wide policies affect all possible created zones.
125  * Explicit @c ZC_* win over such policies.
126  */
127 __options_decl(zone_create_flags_t, uint64_t, {
128 	/** The default value to pass to zone_create() */
129 	ZC_NONE                 = 0x00000000,
130 
131 	/** Force the created zone to use VA sequestering */
132 	ZC_SEQUESTER            = 0x00000001,
133 	/** Force the created zone @b NOT to use VA sequestering */
134 	ZC_NOSEQUESTER          = 0x00000002,
135 
136 	/** Enable per-CPU zone caching for this zone */
137 	ZC_CACHING              = 0x00000010,
138 	/** Disable per-CPU zone caching for this zone */
139 	ZC_NOCACHING            = 0x00000020,
140 
141 	/** Allocate zone pages as Read-only **/
142 	ZC_READONLY             = 0x00800000,
143 
144 	/** Mark zone as a per-cpu zone */
145 	ZC_PERCPU               = 0x01000000,
146 
147 	/** Force the created zone to clear every allocation on free */
148 	ZC_ZFREE_CLEARMEM       = 0x02000000,
149 
150 	/** Mark zone as non collectable by zone_gc() */
151 	ZC_NOGC                 = 0x04000000,
152 
153 	/** Do not encrypt this zone during hibernation */
154 	ZC_NOENCRYPT            = 0x08000000,
155 
156 	/** Type requires alignment to be preserved */
157 	ZC_ALIGNMENT_REQUIRED   = 0x10000000,
158 
159 	/** Do not track this zone when gzalloc is engaged */
160 	ZC_NOGZALLOC            = 0x20000000,
161 
162 	/** Don't asynchronously replenish the zone via callouts */
163 	ZC_NOCALLOUT            = 0x40000000,
164 
165 	/** Can be zdestroy()ed, not default unlike zinit() */
166 	ZC_DESTRUCTIBLE         = 0x80000000,
167 
168 #ifdef XNU_KERNEL_PRIVATE
169 	/** Use guard pages in PGZ mode */
170 	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,
171 
172 	/** Zone doesn't support TBI tagging */
173 	ZC_NOTBITAG             = 0x0200000000000000,
174 
175 	/** This zone will back a kalloc type */
176 	ZC_KALLOC_TYPE          = 0x0400000000000000,
177 
178 	/** This zone will back a kalloc heap */
179 	ZC_KALLOC_HEAP          = 0x0800000000000000,
180 
181 	/* unused                 0x1000000000000000, */
182 
183 	/** This zone belongs to the VM submap */
184 	ZC_VM                   = 0x2000000000000000,
	/* Convenience alias: expands to ZC_VM on LP64 builds, ZC_NONE otherwise */
185 #if __LP64__
186 #define ZC_VM_LP64 ZC_VM
187 #else
188 #define ZC_VM_LP64 ZC_NONE
189 #endif
190 
191 	/** Disable kasan quarantine for this zone */
192 	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,
193 
194 	/** Disable kasan redzones for this zone */
195 	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
196 #endif /* XNU_KERNEL_PRIVATE */
197 });
198 
199 /*!
200  * @union zone_or_view
201  *
202  * @abstract
203  * A type used for calls that admit both a zone or a zone view.
204  *
205  * @discussion
206  * @c zalloc() and @c zfree() and their variants can act on both
207  * zones and zone views.
208  */
union zone_or_view {
	struct zone_view           *zov_view;    /* accounting view on a zone */
	struct zone                *zov_zone;    /* plain zone */
	struct kalloc_type_view    *zov_kt_heap; /* kalloc type view */
#ifdef __cplusplus
	/*
	 * Converting constructors so C++ callers can pass any of the three
	 * pointer types directly (C callers rely on transparent_union instead).
	 */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
222 #ifdef __cplusplus
223 typedef union zone_or_view zone_or_view_t;
224 #else
	/* transparent_union: C callers may pass any member pointer type directly */
225 typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
226 #endif
227 
228 /*!
229  * @enum zone_create_ro_id_t
230  *
231  * @abstract
232  * Zone creation IDs for external read only zones
233  *
234  * @discussion
235  * Kexts that desire to use the RO allocator should:
236  * 1. Add a zone creation id below
237  * 2. Add a corresponding ID to @c zone_reserved_id_t
238  * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
239  * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
240  * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
241  */
242 __enum_decl(zone_create_ro_id_t, zone_id_t, {
243 	ZC_RO_ID_SANDBOX,
244 	ZC_RO_ID_PROFILE,
245 	ZC_RO_ID_PROTOBOX,
246 	ZC_RO_ID_SB_FILTER,
	/* keep __LAST equal to the last real ID above when adding new ones */
247 	ZC_RO_ID__LAST = ZC_RO_ID_SB_FILTER,
248 });
249 
250 /*!
251  * @function zone_create
252  *
253  * @abstract
254  * Creates a zone with the specified parameters.
255  *
256  * @discussion
257  * A Zone is a slab allocator that returns objects of a given size very quickly.
258  *
259  * @param name          the name for the new zone.
260  * @param size          the size of the elements returned by this zone.
261  * @param flags         a set of @c zone_create_flags_t flags.
262  *
263  * @returns             the created zone, this call never fails.
264  */
265 extern zone_t   zone_create(
266 	const char             *name __unsafe_indexable,
267 	vm_size_t               size,
268 	zone_create_flags_t     flags);
269 
270 /*!
271  * @function zone_create_ro
272  *
273  * @abstract
274  * Creates a read only zone with the specified parameters from kexts
275  *
276  * @discussion
277  * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
278  * kexts. Do not use this API to create read only zones in xnu.
279  *
280  * @param name          the name for the new zone.
281  * @param size          the size of the elements returned by this zone.
282  * @param flags         a set of @c zone_create_flags_t flags.
283  * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
284  *
285  * @returns             the zone ID of the created zone, this call never fails.
286  */
287 extern zone_id_t   zone_create_ro(
288 	const char             *name __unsafe_indexable,
289 	vm_size_t               size,
290 	zone_create_flags_t     flags,
291 	zone_create_ro_id_t     zc_ro_id);
292 
293 /*!
294  * @function zdestroy
295  *
296  * @abstract
297  * Destroys a zone previously made with zone_create.
298  *
299  * @discussion
300  * Zones must have been made destructible for @c zdestroy() to be allowed,
301  * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
302  *
303  * @param zone          the zone to destroy.
304  */
305 extern void     zdestroy(
306 	zone_t          zone);
307 
308 /*!
309  * @function zone_require
310  *
311  * @abstract
312  * Requires that a given pointer belong to the specified zone.
313  *
314  * @discussion
315  * The function panics if the check fails as it indicates that the kernel
316  * internals have been compromised.
317  *
318  * @param zone          the zone the address needs to belong to.
319  * @param addr          the element address to check.
320  */
321 extern void     zone_require(
322 	zone_t          zone,
323 	void           *addr __unsafe_indexable);
324 
325 /*!
326  * @function zone_require_ro
327  *
328  * @abstract
329  * Version of zone require intended for zones created with ZC_READONLY
330  *
331  * @discussion
332  * This check is not sufficient to fully trust the element.
333  *
334  * Another check of its content must be performed to prove
335  * that the element is "the right one", a typical technique
336  * for when the RO data structure is 1:1 with a mutable one,
337  * is a simple circularity check with a very strict lifetime
338  * (both the mutable and read-only data structures are made
339  * and destroyed as close as possible).
340  *
341  * @param zone_id       the zone id the address needs to belong to.
342  * @param elem_size     the element size for this zone.
343  * @param addr          the element address to check.
344  */
345 extern void     zone_require_ro(
346 	zone_id_t       zone_id,
347 	vm_size_t       elem_size,
348 	void           *addr __unsafe_indexable);
349 
350 /*!
351  * @function zone_require_ro_range_contains
352  *
353  * @abstract
354  * Version of zone require intended for zones created with ZC_READONLY
355  * that only checks that the zone is RO and that the address is in
356  * the zone's submap
357  *
358  * @param zone_id       the zone id the address needs to belong to.
359  * @param addr          the element address to check.
360  */
361 extern void     zone_require_ro_range_contains(
362 	zone_id_t       zone_id,
363 	void           *addr __unsafe_indexable);
364 
365 /*!
366  * @enum zalloc_flags_t
367  *
368  * @brief
369  * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
370  *
371  * @discussion
372  * It is encouraged that any callsite passing flags uses exactly one of:
373  * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
374  * if nothing else was specified.
375  *
376  * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
377  * then @c Z_WAITOK is ignored.
378  *
379  * @const Z_WAITOK
380  * Means that it's OK for zalloc() to block to wait for memory,
381  * when Z_WAITOK is passed, zalloc will never return NULL.
382  *
383  * @const Z_NOWAIT
384  * Passing this flag means that zalloc is not allowed to ever block.
385  *
386  * @const Z_NOPAGEWAIT
387  * Passing this flag means that zalloc is allowed to wait due to lock
388  * contention, but will not wait for the VM to wait for pages when
389  * under memory pressure.
390  *
391  * @const Z_ZERO
392  * Passing this flag means that the returned memory has been zeroed out.
393  *
394  * @const Z_NOFAIL
395  * Passing this flag means that the caller expects the allocation to always
396  * succeed. This will result in a panic if this assumption isn't correct.
397  *
398  * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
399  * be used on exhaustible zones.
400  *
401  * @const Z_REALLOCF
402  * For the realloc family of functions,
403  * free the incoming memory on failure cases.
404  *
405  #if XNU_KERNEL_PRIVATE
406  * @const Z_MAY_COPYINMAP
407  * This data allocation might be used with vm_map_copyin().
408  * This allows for those allocations to be associated with a proper VM object.
409  *
410  * @const Z_FULLSIZE
411  * Used to indicate that the caller will use all available space in excess
412  * from the requested allocation size.
413  *
414  * @const Z_SKIP_KASAN
415  * Tell zalloc() not to do any kasan adjustments.
416  *
417  * @const Z_PGZ
418  * Used by zalloc internally to denote an allocation that we will try
419  * to guard with PGZ.
420  *
421  * @const Z_VM_TAG_BT_BIT
422  * Used to blame allocation accounting on the first kext
423  * found in the backtrace of the allocation.
424  *
425  * @const Z_NOZZC
426  * Used internally to mark allocations that will skip zero validation.
427  *
428  * @const Z_PCPU
429  * Used internally for the percpu paths.
430  *
431  * @const Z_VM_TAG_MASK
432  * Represents bits in which a vm_tag_t for the allocation can be passed.
433  * (used by kalloc for the zone tagging debugging feature).
434  #endif
435  */
436 __options_decl(zalloc_flags_t, uint32_t, {
437 	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
438 	Z_WAITOK        = 0x0000,
439 	Z_NOWAIT        = 0x0001,
440 	Z_NOPAGEWAIT    = 0x0002,
441 	Z_ZERO          = 0x0004,
442 	Z_REALLOCF      = 0x0008,
443 
444 #if XNU_KERNEL_PRIVATE
445 	Z_MAY_COPYINMAP = 0x0100,
446 	Z_FULLSIZE      = 0x0200,
447 #if KASAN
448 	Z_SKIP_KASAN    = 0x0400,
449 #else
	/* no-op value so callers can pass Z_SKIP_KASAN unconditionally */
450 	Z_SKIP_KASAN    = 0x0000,
451 #endif
452 	Z_PGZ           = 0x0800,
453 	Z_VM_TAG_BT_BIT = 0x1000,
454 	Z_PCPU          = 0x2000,
455 	Z_NOZZC         = 0x4000,
456 #endif /* XNU_KERNEL_PRIVATE */
457 	Z_NOFAIL        = 0x8000,
458 
459 	/* convenient c++ spellings */
460 	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
461 	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
462 	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL, /* convenient spelling for c++ */
463 
464 	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
465 #if XNU_KERNEL_PRIVATE
466 	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
467 	/** used by kalloc to propagate vm tags for -zt */
468 	Z_VM_TAG_MASK   = 0xffff0000, /* top 16 bits carry a vm_tag_t */
469 
470 #define Z_VM_TAG_SHIFT        16
471 #define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
472 #define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
473 #endif
474 });
475 
476 /*
477  * This type is used so that kalloc_internal has good calling conventions
478  * for callers who want to cheaply both know the allocated address
479  * and the actual size of the allocation.
480  */
481 struct kalloc_result {
482 	void         *addr __sized_by(size); /* allocated address (NULL on failure) */
483 	vm_size_t     size; /* actual size of the allocation backing addr */
484 };
485 
486 /*!
487  * @function zalloc
488  *
489  * @abstract
490  * Allocates an element from a specified zone.
491  *
492  * @discussion
493  * If the zone isn't exhaustible and is expandable, this call never fails.
494  *
495  * @param zone_or_view  the zone or zone view to allocate from
496  *
497  * @returns             NULL or the allocated element
498  */
499 __attribute__((malloc))
500 extern void *__unsafe_indexable zalloc(
501 	zone_or_view_t  zone_or_view);
502 
503 /*!
504  * @function zalloc_noblock
505  *
506  * @abstract
507  * Allocates an element from a specified zone, but never blocks.
508  *
509  * @discussion
510  * This call is suitable for preemptible code, however allocation
511  * isn't allowed from interrupt context.
512  *
513  * @param zone_or_view  the zone or zone view to allocate from
514  *
515  * @returns             NULL or the allocated element
516  */
517 __attribute__((malloc))
518 extern void *__unsafe_indexable zalloc_noblock(
519 	zone_or_view_t  zone_or_view);
520 
521 /*!
522  * @function zalloc_flags()
523  *
524  * @abstract
525  * Allocates an element from a specified zone, with flags.
526  *
527  * @param zone_or_view  the zone or zone view to allocate from
528  * @param flags         a collection of @c zalloc_flags_t.
529  *
530  * @returns             NULL or the allocated element
531  */
532 __attribute__((malloc))
533 extern void *__unsafe_indexable zalloc_flags(
534 	zone_or_view_t  zone_or_view,
535 	zalloc_flags_t  flags);
536 
537 /*!
538  * @macro zalloc_id
539  *
540  * @abstract
541  * Allocates an element from a specified zone ID, with flags.
542  *
543  * @param zid           The proper @c ZONE_ID_* constant.
544  * @param flags         a collection of @c zalloc_flags_t.
545  *
546  * @returns             NULL or the allocated element
547  */
548 __attribute__((malloc))
549 extern void *__unsafe_indexable zalloc_id(
550 	zone_id_t       zid,
551 	zalloc_flags_t  flags);
552 
553 /*!
554  * @function zalloc_ro
555  *
556  * @abstract
557  * Allocates an element from a specified read-only zone.
558  *
559  * @param zone_id       the zone id to allocate from
560  * @param flags         a collection of @c zalloc_flags_t.
561  *
562  * @returns             NULL or the allocated element
563  */
564 __attribute__((malloc))
565 extern void *__unsafe_indexable zalloc_ro(
566 	zone_id_t       zone_id,
567 	zalloc_flags_t  flags);
568 
569 /*!
570  * @function zalloc_ro_mut
571  *
572  * @abstract
573  * Modifies an element from a specified read-only zone.
574  *
575  * @discussion
576  * Modifying compiler-assisted authenticated pointers using this function will
577  * not result in a signed pointer being written.  The caller is expected to
578  * sign the value appropriately beforehand if they wish to do this.
579  *
580  * @param zone_id       the zone id to allocate from
581  * @param elem          element to be modified
582  * @param offset        offset from element
583  * @param new_data      pointer to new data
584  * @param new_data_size size of modification
585  *
586  */
587 extern void zalloc_ro_mut(
588 	zone_id_t       zone_id,
589 	void           *elem __unsafe_indexable,
590 	vm_offset_t     offset,
591 	const void     *new_data __sized_by(new_data_size),
592 	vm_size_t       new_data_size);
593 
594 /*!
595  * @function zalloc_ro_update_elem
596  *
597  * @abstract
598  * Update the value of an entire element allocated in the read only allocator.
599  *
600  * @param zone_id       the zone id to allocate from
601  * @param elem          element to be modified
602  * @param new_data      pointer to new data
603  *
604  */
/*
 * The typed temporary forces @c new_data to be pointer-compatible with
 * @c elem, so the sizeof() below covers exactly one whole element.
 */
605 #define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
606 	const typeof(*(elem)) *__new_data = (new_data);                        \
607 	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
608 })
609 
610 /*!
611  * @function zalloc_ro_update_field
612  *
613  * @abstract
614  * Update a single field of an element allocated in the read only allocator.
615  *
616  * @param zone_id       the zone id to allocate from
617  * @param elem          element to be modified
618  * @param field         the element field to be modified
619  * @param value         pointer to new data
620  *
621  */
/*
 * The typed temporary forces @c value to point at the field's type, so the
 * offset/size passed to zalloc_ro_mut cover exactly that field.
 */
622 #define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
623 	const typeof((elem)->field) *__value = (value);                        \
624 	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
625 	    __value, sizeof((elem)->field));                                   \
626 })
627 
628 #if __LP64__
629 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
630 #else
631 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_32
632 #endif
633 
634 /*!
635  * @enum zro_atomic_op_t
636  *
637  * @brief
638  * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
639  * atomic operations.
640  *
641  * @discussion
642  * This enum provides all flavors of atomic operations supported in sizes 8,
643  * 16, 32, 64 bits.
644  *
645  * @const ZRO_ATOMIC_OR_*
646  * To perform an @c os_atomic_or
647  *
648  * @const ZRO_ATOMIC_XOR_*
649  * To perform an @c os_atomic_xor
650  *
651  * @const ZRO_ATOMIC_AND_*
652  * To perform an @c os_atomic_and
653  *
654  * @const ZRO_ATOMIC_ADD_*
655  * To perform an @c os_atomic_add
656  *
657  * @const ZRO_ATOMIC_XCHG_*
658  * To perform an @c os_atomic_xchg
659  *
660  */
661 __enum_decl(zro_atomic_op_t, uint32_t, {
	/*
	 * Encoding: the low nibble is the operand size in bytes (1, 2, 4 or 8);
	 * the upper bits select the operation.  zalloc_ro_update_field_atomic
	 * relies on this via its (op & 0xf) size check.
	 */
662 	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
663 	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
664 	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
665 	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,
666 
667 	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
668 	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
669 	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
670 	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,
671 
672 	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
673 	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
674 	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
675 	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,
676 
677 	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
678 	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
679 	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
680 	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,
681 
682 	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
683 	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
684 	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
685 	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,
686 
687 	/* convenient spellings */
688 	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
689 	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
690 	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
691 	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
692 	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
693 });
694 
695 /*!
696  * @function zalloc_ro_mut_atomic
697  *
698  * @abstract
699  * Atomically update an offset in an element allocated in the read only
700  * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
701  *
702  * @param zone_id       the zone id to allocate from
703  * @param elem          element to be modified
704  * @param offset        offset in the element to be modified
705  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
706  * @param value         value for the atomic operation
707  *
708  */
709 extern uint64_t zalloc_ro_mut_atomic(
710 	zone_id_t       zone_id,
711 	void           *elem __unsafe_indexable,
712 	vm_offset_t     offset,
713 	zro_atomic_op_t op,
714 	uint64_t        value);
715 
716 /*!
717  * @macro zalloc_ro_update_field_atomic
718  *
719  * @abstract
720  * Atomically update a single field of an element allocated in the read only
721  * allocator.
722  *
723  * @param zone_id       the zone id to allocate from
724  * @param elem          element to be modified
725  * @param field         the element field to be modified
726  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
727  * @param value         value for the atomic operation
728  *
729  */
/*
 * The static_assert relies on zro_atomic_op_t encoding the operand size in
 * the low nibble of @c op; it rejects ops whose size doesn't match the field.
 */
730 #define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
731 	const typeof((elem)->field) __value = (value);                         \
732 	static_assert(sizeof(__value) == (op & 0xf));                          \
733 	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
734 	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
735 })
736 
737 /*!
738  * @function zalloc_ro_clear
739  *
740  * @abstract
741  * Zeroes an element from a specified read-only zone.
742  *
743  * @param zone_id       the zone id to allocate from
744  * @param elem          element to be modified
745  * @param offset        offset from element
746  * @param size          size of modification
747  */
748 extern void    zalloc_ro_clear(
749 	zone_id_t       zone_id,
750 	void           *elem __unsafe_indexable,
751 	vm_offset_t     offset,
752 	vm_size_t       size);
753 
754 /*!
755  * @function zalloc_ro_clear_field
756  *
757  * @abstract
758  * Zeroes the specified field of an element from a specified read-only zone.
759  *
760  * @param zone_id       the zone id to allocate from
761  * @param elem          element to be modified
762  * @param field         offset from element
763  */
/* Zeroes exactly the bytes of @c field within the element @c elem. */
764 #define zalloc_ro_clear_field(zone_id, elem, field) \
765 	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
766 	    sizeof((elem)->field))
767 
768 /*!
769  * @function zfree_id()
770  *
771  * @abstract
772  * Frees an element previously allocated with @c zalloc_id().
773  *
774  * @param zone_id       the zone id to free the element to.
775  * @param addr          the address to free
776  */
777 extern void     zfree_id(
778 	zone_id_t       zone_id,
779 	void           *addr __unsafe_indexable);
780 
781 /*!
782  * @function zfree_ro()
783  *
784  * @abstract
785  * Frees an element previously allocated with @c zalloc_ro().
786  *
787  * @param zone_id       the zone id to free the element to.
788  * @param addr          the address to free
789  */
790 extern void     zfree_ro(
791 	zone_id_t       zone_id,
792 	void           *addr __unsafe_indexable);
793 
794 /*!
795  * @function zfree
796  *
797  * @abstract
798  * Frees an element allocated with @c zalloc*.
799  *
800  * @discussion
801  * If the element being freed doesn't belong to the specified zone,
802  * then this call will panic.
803  *
804  * @param zone_or_view  the zone or zone view to free the element to.
805  * @param elem          the element to free
806  */
807 extern void     zfree(
808 	zone_or_view_t  zone_or_view,
809 	void            *elem __unsafe_indexable);
810 
811 /*
812  * This macro sets "elem" to NULL on free.
813  *
814  * Note: all values passed to zfree*() might be in the element to be freed,
815  *       temporaries must be taken, and the resetting to be done prior to free.
816  */
/* The parenthesized (zfree) calls the function itself, bypassing this macro. */
817 #define zfree(zone, elem) ({ \
818 	__auto_type __zfree_zone = (zone); \
819 	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
820 })
821 
/* The parenthesized (zfree_id) calls the function itself, bypassing this macro. */
822 #define zfree_id(zid, elem) ({ \
823 	zone_id_t __zfree_zid = (zid); \
824 	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
825 })
826 
/* The parenthesized (zfree_ro) calls the function itself, bypassing this macro. */
827 #define zfree_ro(zid, elem) ({ \
828 	zone_id_t __zfree_zid = (zid); \
829 	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
830 })
831 
832 /* deprecated KPIS */
833 
834 __zalloc_deprecated("use zone_create()")
835 extern zone_t   zinit(
836 	vm_size_t       size,           /* the size of an element */
837 	vm_size_t       maxmem,         /* maximum memory to use */
838 	vm_size_t       alloc,          /* allocation size */
839 	const char      *name __unsafe_indexable);
840 
841 
842 #pragma mark: zone views
843 /*!
844  * @typedef zone_stats_t
845  *
846  * @abstract
847  * The opaque type for per-cpu zone stats that are accumulated per zone
848  * or per zone-view.
849  */
850 typedef struct zone_stats *__zpercpu zone_stats_t;
851 
852 /*!
853  * @typedef zone_view_t
854  *
855  * @abstract
856  * A view on a zone for accounting purposes.
857  *
858  * @discussion
859  * A zone view uses the zone it references for the allocations backing store,
860  * but does the allocation accounting at the view level.
861  *
862  * These accounting are surfaced by @b zprint(1) and similar tools,
863  * which allow for cheap but finer grained understanding of allocations
864  * without any fragmentation cost.
865  *
866  * Zone views are protected by the kernel lockdown and can't be initialized
867  * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
868  */
869 typedef struct zone_view *zone_view_t;
870 struct zone_view {
871 	zone_t          zv_zone; /* zone providing the backing store */
872 	zone_stats_t    zv_stats; /* per-cpu accounting for this view */
873 	const char     *zv_name __unsafe_indexable; /* name surfaced by zprint(1) */
874 	zone_view_t     zv_next; /* next view; presumably a global list -- verify */
875 };
876 
877 #ifdef XNU_KERNEL_PRIVATE
878 /*!
879  * @enum zone_kheap_id_t
880  *
881  * @brief
882  * Enumerate a particular kalloc heap.
883  *
884  * @discussion
885  * More documentation about heaps is available in @c <kern/kalloc.h>.
886  *
887  * @const KHEAP_ID_NONE
888  * This value denotes regular zones, not used by kalloc.
889  *
890  * @const KHEAP_ID_DEFAULT
891  * Indicates zones part of the KHEAP_DEFAULT heap.
892  *
893  * @const KHEAP_ID_DATA_BUFFERS
894  * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
895  *
896  * @const KHEAP_ID_KT_VAR
897  * Indicates zones part of the KHEAP_KT_VAR heap.
898  */
899 __enum_decl(zone_kheap_id_t, uint32_t, {
900 	KHEAP_ID_NONE,
901 	KHEAP_ID_DEFAULT,
902 	KHEAP_ID_DATA_BUFFERS,
903 	KHEAP_ID_KT_VAR,
904 
	/* total number of heap IDs; keep in sync with the last enumerator above */
905 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
906 });
907 
908 /*!
909  * @macro ZONE_VIEW_DECLARE
910  *
911  * @abstract
912  * (optionally) declares a zone view (in a header).
913  *
914  * @param var           the name for the zone view.
915  */
/* Declared as a 1-element array so @c var decays to a zone_view_t pointer. */
916 #define ZONE_VIEW_DECLARE(var) \
917 	extern struct zone_view var[1]
918 
919 /*!
920  * @macro ZONE_VIEW_DEFINE
921  *
922  * @abstract
923  * Defines a given zone view and what it points to.
924  *
925  * @discussion
926  * Zone views can either share a pre-existing zone,
927  * or perform a lookup into a kalloc heap for the zone
928  * backing the bucket of the proper size.
929  *
930  * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
931  * as the last rank. If views on zones are created, these must have been
932  * created before this stage.
933  *
934  * This macro should not be used to create zone views from default
935  * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
936  *
937  * @param var           the name for the zone view.
938  * @param name          a string describing the zone view.
939  * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
940  * @param size          the element size to be allocated from this view.
941  */
/*
 * Only .zv_name is set statically; the remaining fields are filled in by
 * zone_view_startup_init during the STARTUP(ZALLOC, ...) phase registered below.
 */
942 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
943 	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
944 	    .zv_name = name, \
945 	} }; \
946 	static __startup_data struct zone_view_startup_spec \
947 	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
948 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
949 	    &__startup_zone_view_spec_ ## var)
950 
951 #endif /* XNU_KERNEL_PRIVATE */
952 
953 
954 #ifdef XNU_KERNEL_PRIVATE
955 #pragma mark - XNU only interfaces
956 
957 #include <kern/cpu_number.h>
958 
959 #pragma GCC visibility push(hidden)
960 
961 #pragma mark XNU only: zalloc (extended)
962 
/*
 * Alignment masks (alignment - 1) suitable as the @c align_mask argument
 * of the zalloc_permanent*() interfaces below.
 */
#define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
#define ZALIGN_16               (sizeof(uint16_t) - 1)
#define ZALIGN_32               (sizeof(uint32_t) - 1)
#define ZALIGN_PTR              (sizeof(void *)   - 1)
#define ZALIGN_64               (sizeof(uint64_t) - 1)
#define ZALIGN(t)               (_Alignof(t)      - 1)
969 
970 
/*!
 * @function zalloc_permanent_tag()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 * @param tag           the tag to use for allocations larger than a page.
 *
 * @returns             the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_permanent_tag(
	vm_size_t       size,
	vm_offset_t     align_mask,
	vm_tag_t        tag);

/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * Equivalent to @c zalloc_permanent_tag() with @c VM_KERN_MEMORY_KALLOC
 * as the tag.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
#define zalloc_permanent(size, align) \
	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	__unsafe_forge_single(type_t *, \
	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))

/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void
zalloc_first_proc_made(void);
1038 
1039 #pragma mark XNU only: per-cpu allocations
1040 
/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @discussion
 * Per-CPU slots are page-strided: the slot for CPU @c cpu lives
 * @c ptoa(cpu) bytes past slot 0 (see also @c __zpcpu_next()).
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1092 
/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces
 * (see also @c zpercpu_foreach()).
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void *__zpercpu zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_percpu(
	zone_or_view_t  zone_or_view,
	void *__zpercpu addr);

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void *__zpercpu zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a given
 * type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1159 
1160 
1161 #pragma mark XNU only: zone creation (extended)
1162 
/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require()
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creations IDs
 * listed in @c zone_create_ro_id_t.
 *
 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
 * @c zone_create().
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	/* read-only zones: [ZONE_ID__FIRST_RO, ZONE_ID__LAST_RO] below */
	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	/* external read-only zones, 1:1 with zone_create_ro_id_t */
	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_SB_FILTER,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	/* other well known pre-registered zones */
	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_TASK,
	ZONE_ID_PROC,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

	ZONE_ID__FIRST_DYNAMIC,
});
1225 
/*!
 * @const ZONE_ID_ANY
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * Zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)

/*!
 * @const ZONE_ID_INVALID
 * An invalid zone_id_t that corresponds to nothing.
 * (Distinct from @c ZONE_ID_ANY, which is a valid request token.)
 */
#define ZONE_ID_INVALID ((zone_id_t)-2)
1238 
1239 /**!
1240  * @function zone_name
1241  *
1242  * @param zone          the specified zone
1243  * @returns             the name of the specified zone.
1244  */
1245 const char *__unsafe_indexable zone_name(
1246 	zone_t                  zone);
1247 
1248 /**!
1249  * @function zone_heap_name
1250  *
1251  * @param zone          the specified zone
1252  * @returns             the name of the heap this zone is part of, or "".
1253  */
1254 const char *__unsafe_indexable zone_heap_name(
1255 	zone_t                  zone);
1256 
/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup   a block that can perform non trivial initialization
 *                      on the zone before it is marked valid.
 *                      This block can call advanced setups like:
 *                      - zone_set_exhaustible()
 *                      - zone_set_noexpand()
 *                      (NULL appears to be accepted when no extra setup is
 *                      needed — see ZONE_DEFINE; verify against callers.)
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t   zone_create_ext(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));
1285 
/*!
 * @macro ZONE_DECLARE
 *
 * @abstract
 * Declares a zone variable and its associated type.
 *
 * @param var           the name of the variable to declare.
 * @param type_t        the type of elements in the zone.
 */
#define ZONE_DECLARE(var, type_t) \
	extern zone_t var; \
	__ZONE_DECLARE_TYPE(var, type_t)

/*!
 * @macro ZONE_DECLARE_ID
 *
 * @abstract
 * Declares the type associated with a zone ID.
 *
 * @param id            the name of zone ID to associate a type with.
 * @param type_t        the type of elements in the zone.
 */
#define ZONE_DECLARE_ID(id, type_t) \
	__ZONE_DECLARE_TYPE(id, type_t)

/*!
 * @macro ZONE_DEFINE
 *
 * @abstract
 * Declares a zone variable to automatically initialize with the specified
 * parameters.
 *
 * @discussion
 * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
 *
 * Zones defined this way must not be @c ZC_DESTRUCTIBLE (enforced by the
 * static_assert below).
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## var)
1334 
/*!
 * @macro ZONE_DEFINE_TYPE
 *
 * @abstract
 * Defines a zone variable to automatically initialize with the specified
 * parameters, associated with a particular type.
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param type_t        the type of elements in the zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
	__ZONE_DECLARE_TYPE(var, type_t)

/*!
 * @macro ZONE_DEFINE_ID
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @param zid           a @c zone_reserved_id_t value.
 * @param name          the name for the zone
 * @param type_t        the type of elements in the zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE_ID(zid, name, type_t, flags) \
	ZONE_DECLARE_ID(zid, type_t); \
	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)

/*!
 * @macro ZONE_INIT
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @discussion
 * Uses @c __LINE__ to generate a unique identifier for the underlying
 * startup spec, so multiple ZONE_INIT uses can share a translation unit.
 *
 * @param var           the name of the variable to initialize.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 * @param extra_setup   a block that can perform non trivial initialization
 *                      (@see @c zone_create_ext()).
 */
#define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1384 
/*!
 * @function zone_id_require
 *
 * @abstract
 * Requires for a given pointer to belong to the specified zone, by ID and size.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * This is a variant of @c zone_require() which:
 * - isn't sensitive to @c zone_t::elem_size being compromised,
 * - is slightly faster as it saves one load and a multiplication.
 *
 * Prefer this over @c zone_require() when the zone ID is statically known.
 *
 * @param zone_id       the zone ID the address needs to belong to.
 * @param elem_size     the size of elements for this zone.
 * @param addr          the element address to check.
 */
extern void     zone_id_require(
	zone_id_t               zone_id,
	vm_size_t               elem_size,
	void                   *addr __unsafe_indexable);
1407 
/* Mark the zone as non-expandable; to be called from the zone_create_ext() setup hook */
extern void     zone_set_noexpand(
	zone_t          zone,
	vm_size_t       max_elements);

/* Mark the zone as exhaustible; to be called from the zone_create_ext() setup hook */
extern void     zone_set_exhaustible(
	zone_t          zone,
	vm_size_t       max_elements);

/*!
 * @function zone_fill_initially
 *
 * @brief
 * Initially fill a non collectable zone to have the specified amount of
 * elements.
 *
 * @discussion
 * This function must be called on a non collectable permanent zone before it
 * has been used yet.
 *
 * @param zone          The zone to fill.
 * @param nelems        The number of elements to be able to hold.
 */
extern void     zone_fill_initially(
	zone_t          zone,
	vm_size_t       nelems);
1435 
1436 #pragma mark XNU only: PGZ support
1437 
/*!
 * @function pgz_owned()
 *
 * @brief
 * Returns whether an address is PGZ owned.
 *
 * @param addr          The address to translate.
 * @returns             Whether it is PGZ owned
 */
#if CONFIG_PROB_GZALLOC
extern bool pgz_owned(mach_vm_address_t addr) __pure2;
#else
#define pgz_owned(addr) false
#endif

/*!
 * @function pgz_decode()
 *
 * @brief
 * Translates a PGZ protected virtual address to its unprotected
 * backing store.
 *
 * @discussion
 * This is exposed so that the VM can lookup the vm_page_t for PGZ protected
 * elements since the PGZ protected virtual addresses are maintained by PGZ
 * at the pmap level without the VM's involvement.
 *
 * "allow_invalid" schemes relying on sequestering also need this
 * to perform the locking attempts on the unprotected address.
 *
 * @param addr          The address to translate.
 * @param size          The object size.
 * @returns             The unprotected address or @c addr.
 */
#if CONFIG_PROB_GZALLOC
#define pgz_decode(addr, size) \
	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
#else
#define pgz_decode(addr, size)  (addr)
#endif
1478 
/*!
 * @function pgz_decode_allow_invalid()
 *
 * @brief
 * Translates a PGZ protected virtual address to its unprotected
 * backing store, but doesn't assert it is still allocated/valid.
 *
 * @discussion
 * "allow_invalid" schemes relying on sequestering also need this
 * to perform the locking attempts on the unprotected address.
 *
 * @param addr          The address to translate.
 * @param want_zid      The expected zone ID for the element.
 * @returns             The unprotected address or @c addr.
 */
#if CONFIG_PROB_GZALLOC
#define pgz_decode_allow_invalid(addr, want_zid) \
	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
#else
/* parameter named consistently with the PGZ variant and the doc above */
#define pgz_decode_allow_invalid(addr, want_zid)  (addr)
#endif
1500 
1501 #pragma mark XNU only: misc & implementation details
1502 
1503 struct zone_create_startup_spec {
1504 	zone_t                 *z_var;
1505 	const char             *z_name __unsafe_indexable;
1506 	vm_size_t               z_size;
1507 	zone_create_flags_t     z_flags;
1508 	zone_id_t               z_zid;
1509 	void                  (^z_setup)(zone_t);
1510 };
1511 
1512 extern void     zone_create_startup(
1513 	struct zone_create_startup_spec *spec);
1514 
1515 #define __ZONE_DECLARE_TYPE(var, type_t) \
1516 	__attribute__((visibility("hidden"))) \
1517 	extern type_t *__zalloc__##var##__type_name
1518 
1519 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
1520 	static __startup_data struct zone_create_startup_spec \
1521 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
1522 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
1523 	    &__startup_zone_spec_ ## ns)
1524 
/*
 * Forwards to __ZONE_INIT1() so that @c ns (__LINE__ from ZONE_INIT())
 * is macro-expanded before being token-pasted into the generated
 * identifiers. The original definition ended with a stray trailing
 * line-continuation backslash that silently pulled the following blank
 * line into the macro replacement list; it is dropped here.
 */
#define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
	__ZONE_INIT1(ns, var, name, size, flags, zid, setup)
1527 
/*
 * Casts @c expr to the element type that was associated with @c namespace
 * via __ZONE_DECLARE_TYPE().
 */
#define __zalloc_cast(namespace, expr) \
	__unsafe_forge_single(typeof(__zalloc__##namespace##__type_name), expr)

/*
 * Typed wrappers: the parenthesized callee, e.g. @c (zalloc_id)(...),
 * calls the real function instead of re-expanding the macro.
 */
#define zalloc_id(zid, flags)   __zalloc_cast(zid, (zalloc_id)(zid, flags))
#define zalloc_ro(zid, flags)   __zalloc_cast(zid, (zalloc_ro)(zid, flags))
#if ZALLOC_TYPE_SAFE
#define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
#define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
#define zalloc_flags(zov, fl)   __zalloc_cast(zov, (zalloc_flags)(zov, fl))
#endif
1538 
/*
 * Startup descriptor consumed by zone_view_startup_init() at the last rank
 * of the STARTUP_SUB_ZALLOC phase (see ZONE_VIEW_DEFINE).
 */
struct zone_view_startup_spec {
	zone_view_t         zv_view;        /* the view to initialize */
	union {
		zone_kheap_id_t zv_heapid;      /* ...when backed by a kalloc heap */
		zone_t         *zv_zone;        /* ...when backed by a specific zone */
	};
	vm_size_t           zv_size;        /* element size allocated through the view */
};

extern void zone_view_startup_init(
	struct zone_view_startup_spec *spec);

extern void zone_userspace_reboot_checks(void);
1552 
#if VM_TAG_SIZECLASSES
extern void __zone_site_register(
	vm_allocation_site_t   *site);

/*
 * Evaluates to the VM tag of a static, per-callsite allocation site that is
 * registered with zalloc at the end of the STARTUP_SUB_ZALLOC phase.
 */
#define VM_ALLOC_SITE_TAG() ({ \
	__PLACE_IN_SECTION("__DATA, __data")                                   \
	static vm_allocation_site_t site = { .refcount = 2, };                 \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __zone_site_register, &site);   \
	site.tag;                                                              \
})
#else /* VM_TAG_SIZECLASSES */
#define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
#endif /* !VM_TAG_SIZECLASSES */
1566 
1567 static inline zalloc_flags_t
__zone_flags_mix_tag(zalloc_flags_t flags,vm_tag_t tag)1568 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
1569 {
1570 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
1571 }
1572 
/*
 * On DEBUG/DEVELOPMENT kernels, ZPCPU_MANGLE_BIT is the top address bit:
 * __zpcpu_mangle() clears it and __zpcpu_demangle() restores it, so per-cpu
 * handles must go through the zpercpu_get*() accessors (presumably so that
 * stray direct dereferences fault — TODO confirm). On release kernels the
 * bit is 0 and both operations leave the address unchanged.
 */
#if DEBUG || DEVELOPMENT
#  if __LP64__
#    define ZPCPU_MANGLE_BIT    (1ul << 63)
#  else /* !__LP64__ */
#    define ZPCPU_MANGLE_BIT    (1ul << 31)
#  endif /* !__LP64__ */
#else /* !(DEBUG || DEVELOPMENT) */
#  define ZPCPU_MANGLE_BIT      0ul
#endif /* !(DEBUG || DEVELOPMENT) */

#define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
#define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
#define __zpcpu_addr(e)         ((vm_address_t)(e))
#define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(ptr), e)
#define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
1588 
/**
 * @macro __zpcpu_mangle_for_boot()
 *
 * @discussion
 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
 * storage marked @c __startup_data and replace it with the proper allocation
 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
 *
 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
 * provides the proper mangling of the storage into a "fake" percpu pointer so
 * that accesses through @c zpercpu_get() functions properly.
 *
 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
 */
#define __zpcpu_mangle_for_boot(ptr)  ({ \
	assert(startup_phase < STARTUP_SUB_ZALLOC); \
	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
})

/* number of per-CPU slots; used as the zpercpu_foreach() end bound */
extern unsigned zpercpu_count(void) __pure2;
1610 
#if CONFIG_PROB_GZALLOC

/* implementation details of pgz_decode() / pgz_decode_allow_invalid() above */
extern vm_offset_t __pgz_decode(
	mach_vm_address_t       addr,
	mach_vm_size_t          size);

extern vm_offset_t __pgz_decode_allow_invalid(
	vm_offset_t             offs,
	zone_id_t               zid);

#endif
#if DEBUG || DEVELOPMENT
/* zone page accounting, exposed on DEBUG/DEVELOPMENT kernels only */
extern size_t zone_pages_wired;
extern size_t zone_guard_pages;
#endif /* DEBUG || DEVELOPMENT */
#if CONFIG_ZLEAKS
/* zone-leak detection (zleak) state and tunables */
extern uint32_t                 zleak_active;
extern vm_size_t                zleak_max_zonemap_size;
extern vm_size_t                zleak_global_tracking_threshold;
extern vm_size_t                zleak_per_zone_tracking_threshold;

extern kern_return_t zleak_update_threshold(
	vm_size_t              *arg,
	uint64_t                value);
#endif /* CONFIG_ZLEAKS */

/* zone-map jetsam limit tunable (see zone_map_jetsam_set_limit()) */
extern uint32_t                 zone_map_jetsam_limit;

extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);

/* presumably a per-cpu zone of uint64_t counters (see zalloc_percpu()) — TODO confirm */
extern zone_t percpu_u64_zone;
1642 
1643 #pragma GCC visibility pop
1644 #endif /* XNU_KERNEL_PRIVATE */
1645 
/*
 * Thin wrapper around os_ptr_load_and_erase().
 * This macro is currently used by AppleImage4 (rdar://83924635)
 */
#define __zalloc_ptr_load_and_erase(elem) \
	os_ptr_load_and_erase(elem)
1651 
1652 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1653 
1654 #endif  /* _KERN_ZALLOC_H_ */
1655 
1656 #endif  /* KERNEL_PRIVATE */
1657