xref: /xnu-12377.61.12/osfmk/kern/zalloc.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	zalloc.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	 1985
62  *
63  */
64 
65 #ifdef  KERNEL_PRIVATE
66 
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69 
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76 
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80 
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg)       __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86 
87 /*
88  * Enable this macro to force type safe zalloc/zalloc_ro/...
89  */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97 
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99 
100 /*!
101  * @macro __zpercpu
102  *
103  * @abstract
104  * Annotation that helps denoting a per-cpu pointer that requires usage of
105  * @c zpercpu_*() for access.
106  */
107 #define __zpercpu __unsafe_indexable
108 
109 /*!
110  * @typedef zone_id_t
111  *
112  * @abstract
113  * The type for a zone ID.
114  */
115 typedef uint16_t zone_id_t;
116 
117 /**
118  * @enum zone_create_flags_t
119  *
120  * @abstract
121  * Set of flags to pass to zone_create().
122  *
123  * @discussion
124  * Some kernel-wide policies affect all possible created zones.
125  * Explicit @c ZC_* win over such policies.
126  */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** (obsolete) */
	ZC_SEQUESTER            = 0x00000001,
	/** (obsolete) */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as read-only */
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Obsolete */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/* xnu-internal flags live in the upper 32 bits of the 64-bit mask */

	/** This zone contains pure data meant to be shared */
	ZC_SHARED_DATA          = 0x0040000000000000,

	/** This zone is a built object cache */
	ZC_OBJ_CACHE            = 0x0080000000000000,

	// was ZC_PGZ_USE_GUARDS  0x0100000000000000,

	/** Zone doesn't support TBI tagging */
	ZC_NO_TBI_TAG           = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	// was ZC_NOPGZ         = 0x0800000000000000,

	/** This zone contains pure data */
	ZC_DATA                 = 0x1000000000000000,

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});
198 
199 /*!
200  * @union zone_or_view
201  *
202  * @abstract
203  * A type used for calls that admit both a zone or a zone view.
204  *
205  * @discussion
206  * @c zalloc() and @c zfree() and their variants can act on both
207  * zones and zone views.
208  */
union zone_or_view {
	struct kalloc_type_view    *zov_kt_heap;
	struct zone_view           *zov_view;
	struct zone                *zov_zone;
#ifdef __cplusplus
	/*
	 * C++ has no transparent_union; implicit converting constructors
	 * provide the same "pass a zone or a view" calling convention.
	 */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
/* In C, transparent_union lets callers pass any member type directly. */
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
227 
228 /*!
229  * @enum zone_create_ro_id_t
230  *
231  * @abstract
232  * Zone creation IDs for external read only zones
233  *
234  * @discussion
235  * Kexts that desire to use the RO allocator should:
236  * 1. Add a zone creation id below
237  * 2. Add a corresponding ID to @c zone_reserved_id_t
238  * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
239  * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
240  * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
241  */
__enum_decl(zone_create_ro_id_t, zone_id_t, {
	ZC_RO_ID_SANDBOX,
	ZC_RO_ID_PROFILE,
	ZC_RO_ID_PROTOBOX,
	ZC_RO_ID_SB_FILTER,
	ZC_RO_ID_AMFI_OSENTITLEMENTS,
	/* keep __LAST aliased to the final entry when adding new IDs above */
	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
});
250 
251 /*!
252  * @function zone_create
253  *
254  * @abstract
255  * Creates a zone with the specified parameters.
256  *
257  * @discussion
258  * A Zone is a slab allocator that returns objects of a given size very quickly.
259  *
260  * @param name          the name for the new zone.
261  * @param size          the size of the elements returned by this zone.
262  * @param flags         a set of @c zone_create_flags_t flags.
263  *
264  * @returns             the created zone, this call never fails.
265  */
266 extern zone_t   zone_create(
267 	const char             *name __unsafe_indexable,
268 	vm_size_t               size,
269 	zone_create_flags_t     flags);
270 
271 /*!
272  *
273  * @function zone_get_elem_size
274  *
275  * @abstract
276  * Get the intrinsic size of one element allocated by the given zone.
277  *
278  * @discussion
279  * All zones are created to allocate elements of a fixed size, but the size is
280  * not always a compile-time constant. @c zone_get_elem_size can be used to
281  * retrieve the size of elements allocated by this zone at runtime.
282  *
283  * @param zone			the zone to inspect
284  *
285  * @returns			the size of elements allocated by this zone
286  */
287 extern vm_size_t    zone_get_elem_size(zone_t zone);
288 
289 /*!
290  * @function zone_create_ro
291  *
292  * @abstract
293  * Creates a read only zone with the specified parameters from kexts
294  *
295  * @discussion
296  * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
297  * kexts. Do not use this API to create read only zones in xnu.
298  *
299  * @param name          the name for the new zone.
300  * @param size          the size of the elements returned by this zone.
301  * @param flags         a set of @c zone_create_flags_t flags.
302  * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
303  *
304  * @returns             the zone ID of the created zone, this call never fails.
305  */
306 extern zone_id_t   zone_create_ro(
307 	const char             *name __unsafe_indexable,
308 	vm_size_t               size,
309 	zone_create_flags_t     flags,
310 	zone_create_ro_id_t     zc_ro_id);
311 
312 /*!
313  * @function zdestroy
314  *
315  * @abstract
316  * Destroys a zone previously made with zone_create.
317  *
318  * @discussion
319  * Zones must have been made destructible for @c zdestroy() to be allowed,
320  * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
321  *
322  * @param zone          the zone to destroy.
323  */
324 extern void     zdestroy(
325 	zone_t          zone);
326 
327 /*!
328  * @function zone_require
329  *
330  * @abstract
331  * Requires for a given pointer to belong to the specified zone.
332  *
333  * @discussion
334  * The function panics if the check fails as it indicates that the kernel
335  * internals have been compromised.
336  *
337  * @param zone          the zone the address needs to belong to.
338  * @param addr          the element address to check.
339  */
340 extern void     zone_require(
341 	zone_t          zone,
342 	void           *addr __unsafe_indexable);
343 
344 /*!
345  * @function zone_require_ro
346  *
347  * @abstract
348  * Version of zone require intended for zones created with ZC_READONLY
349  *
350  * @discussion
351  * This check is not sufficient to fully trust the element.
352  *
353  * Another check of its content must be performed to prove
354  * that the element is "the right one", a typical technique
355  * for when the RO data structure is 1:1 with a mutable one,
356  * is a simple circularity check with a very strict lifetime
357  * (both the mutable and read-only data structures are made
358  * and destroyed as close as possible).
359  *
360  * @param zone_id       the zone id the address needs to belong to.
361  * @param elem_size     the element size for this zone.
362  * @param addr          the element address to check.
363  */
364 extern void     zone_require_ro(
365 	zone_id_t       zone_id,
366 	vm_size_t       elem_size,
367 	void           *addr __unsafe_indexable);
368 
369 /*!
370  * @enum zalloc_flags_t
371  *
372  * @brief
373  * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
374  *
375  * @discussion
376  * It is encouraged that any callsite passing flags uses exactly one of:
377  * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
378  * if nothing else was specified.
379  *
380  * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
381  * then @c Z_WAITOK is ignored.
382  *
383  * @const Z_WAITOK
384  * Passing this flag means that zalloc() will be allowed to sleep
385  * for memory to become available for this allocation. If the zone
386  * isn't exhaustible, zalloc(Z_WAITOK) never fails.
387  *
388  * If the zone is exhaustible, zalloc() might still fail if the zone
389  * is at its maximum allowed memory usage, unless Z_NOFAIL is passed,
390  * in which case zalloc() will block until an element is freed.
391  *
392  * @const Z_NOWAIT
393  * Passing this flag means that zalloc is not allowed to ever block.
394  *
395  * @const Z_NOPAGEWAIT
396  * Passing this flag means that zalloc is allowed to wait due to lock
397  * contention, but will not wait for the VM to wait for pages when
398  * under memory pressure.
399  *
400  * @const Z_ZERO
401  * Passing this flags means that the returned memory has been zeroed out.
402  *
403  * @const Z_NOFAIL
404  * Passing this flag means that the caller expects the allocation to always
405  * succeed. This will result in a panic if this assumption isn't correct.
406  *
407  * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT.
 408  * For exhaustible zones, it forces the caller to wait until a zfree() happened
409  * if the zone has reached its maximum of allowed elements.
410  *
411  * @const Z_REALLOCF
412  * For the realloc family of functions,
413  * free the incoming memory on failure cases.
414  *
415  #if XNU_KERNEL_PRIVATE
416  * @const Z_SET_NOTEARLY
417  * Using this flag from external allocations APIs (kalloc_type/zalloc)
418  * allows the callsite to skip the early (shared) zone for that sizeclass and
 419  * directly allocate from the requested zone.
420  * Using this flag from internal APIs (zalloc_ext) will skip the early
421  * zone only when a given threshold is exceeded. It will also set a flag
422  * to indicate that future allocations to the zone should directly go to
423  * the zone instead of the shared zone.
424  *
425  * @const Z_SPRAYQTN
426  * This flag tells the VM to allocate from the "spray quarantine" range when
427  * it services the allocation. For more details on what allocations qualify
428  * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
429  *
430  * @const Z_KALLOC_ARRAY
431  * Instead of returning a standard "pointer" return a pointer that encodes
432  * its size-class into the pointer itself (Only for kalloc, might limit
433  * the range of allocations that can be done).
434  *
435  * @const Z_FULLSIZE
436  * Used to indicate that the caller will use all available space in excess
437  * from the requested allocation size.
438  *
439  * @const Z_SKIP_KASAN
440  * Tell zalloc() not to do any kasan adjustments.
441  *
442  * @const Z_MAY_COPYINMAP
443  * This data allocation might be used with vm_map_copyin().
444  * This allows for those allocations to be associated with a proper VM object.
445  *
446  * @const Z_VM_TAG_BT_BIT
447  * Used to blame allocation accounting on the first kext
448  * found in the backtrace of the allocation.
449  *
450  * @const Z_NOZZC
451  * Used internally to mark allocations that will skip zero validation.
452  *
453  * @const Z_PCPU
454  * Used internally for the percpu paths.
455  *
456  * @const Z_VM_TAG_MASK
457  * Represents bits in which a vm_tag_t for the allocation can be passed.
458  * (used by kalloc for the zone tagging debugging feature).
459  #endif
460  */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,
	Z_REALLOCF      = 0x0008,

#if XNU_KERNEL_PRIVATE
	Z_NOSOFTLIMIT   = 0x0020,
	Z_SET_NOTEARLY = 0x0040,
	Z_SPRAYQTN      = 0x0080,
	Z_KALLOC_ARRAY  = 0x0100,
#if KASAN_CLASSIC
	/*
	 * NOTE(review): Z_FULLSIZE is a no-op under KASAN_CLASSIC —
	 * presumably the slack space is reserved for redzones; confirm.
	 */
	Z_FULLSIZE      = 0x0000,
#else
	Z_FULLSIZE      = 0x0200,
#endif
#if KASAN_CLASSIC
	Z_SKIP_KASAN    = 0x0400,
#else
	/* no-op when KASAN_CLASSIC is disabled: there is nothing to skip */
	Z_SKIP_KASAN    = 0x0000,
#endif
	Z_MAY_COPYINMAP = 0x0800,
	Z_VM_TAG_BT_BIT = 0x1000,
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL,
#if XNU_KERNEL_PRIVATE
	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
#endif

	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
#if XNU_KERNEL_PRIVATE
	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT        16
#define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
#define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
#endif
});
510 
511 /*
512  * This type is used so that kalloc_internal has good calling conventions
513  * for callers who want to cheaply both know the allocated address
514  * and the actual size of the allocation.
515  */
struct kalloc_result {
	void         *addr __sized_by(size);    /* allocated memory, valid for `size` bytes */
	vm_size_t     size;                     /* actual size of the allocation */
};
520 
521 /*!
522  * @typedef zone_stats_t
523  *
524  * @abstract
525  * The opaque type for per-cpu zone stats that are accumulated per zone
526  * or per zone-view.
527  */
528 typedef struct zone_stats *__zpercpu zone_stats_t;
529 
530 /*!
531  * @typedef zone_view_t
532  *
533  * @abstract
534  * A view on a zone for accounting purposes.
535  *
536  * @discussion
537  * A zone view uses the zone it references for the allocations backing store,
538  * but does the allocation accounting at the view level.
539  *
540  * These accounting are surfaced by @b zprint(1) and similar tools,
541  * which allow for cheap but finer grained understanding of allocations
542  * without any fragmentation cost.
543  *
544  * Zone views are protected by the kernel lockdown and can't be initialized
545  * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
546  */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;    /* zone providing the allocation backing store */
	zone_stats_t    zv_stats;   /* per-cpu accounting kept at the view level */
	const char     *zv_name __unsafe_indexable; /* view name (reported by tools like zprint(1)) */
	zone_view_t     zv_next;    /* next view; list head not visible in this header */
};
554 
555 /*!
556  * @typedef kalloc_type_view_t
557  *
558  * @abstract
559  * The opaque type created at kalloc_type callsites to redirect calls to
560  * the right zone.
561  */
562 typedef struct kalloc_type_view *kalloc_type_view_t;
563 
564 #if XNU_KERNEL_PRIVATE
565 /*
566  * kalloc_type/kfree_type implementation functions
567  */
568 extern void *__unsafe_indexable kalloc_type_impl_internal(
569 	kalloc_type_view_t  kt_view,
570 	zalloc_flags_t      flags);
571 
572 extern void kfree_type_impl_internal(
573 	kalloc_type_view_t kt_view,
574 	void               *ptr __unsafe_indexable);
575 
576 static inline void *__unsafe_indexable
kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)577 kalloc_type_impl(
578 	kalloc_type_view_t      kt_view,
579 	zalloc_flags_t          flags)
580 {
581 	void *__unsafe_indexable addr = kalloc_type_impl_internal(kt_view, flags);
582 	if (flags & Z_NOFAIL) {
583 		__builtin_assume(addr != NULL);
584 	}
585 	return addr;
586 }
587 
/* In-kernel builds forward directly to the internal implementation. */
#define kfree_type_impl(kt_view, ptr) \
	kfree_type_impl_internal(kt_view, (ptr))
590 
591 #else /* XNU_KERNEL_PRIVATE */
592 
593 extern void *__unsafe_indexable kalloc_type_impl(
594 	kalloc_type_view_t  kt_view,
595 	zalloc_flags_t      flags);
596 
597 static inline void *__unsafe_indexable
__kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)598 __kalloc_type_impl(
599 	kalloc_type_view_t  kt_view,
600 	zalloc_flags_t      flags)
601 {
602 	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
603 	if (flags & Z_NOFAIL) {
604 		__builtin_assume(addr != NULL);
605 	}
606 	return addr;
607 }
608 
609 #define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)
610 
611 extern void kfree_type_impl(
612 	kalloc_type_view_t  kt_view,
613 	void                *ptr __unsafe_indexable);
614 
615 #endif /* XNU_KERNEL_PRIVATE */
616 
617 /*!
618  * @function zalloc
619  *
620  * @abstract
621  * Allocates an element from a specified zone.
622  *
623  * @discussion
624  * If the zone isn't exhaustible and is expandable, this call never fails.
625  *
626  * @param zone          the zone or zone view to allocate from
627  *
628  * @returns             NULL or the allocated element
629  */
630 __attribute__((malloc))
631 extern void *__unsafe_indexable zalloc(
632 	zone_t          zone);
633 
634 __attribute__((malloc))
635 __attribute__((overloadable))
636 static inline void *__unsafe_indexable
zalloc(zone_view_t view)637 zalloc(zone_view_t view)
638 {
639 	return zalloc((zone_t)view);
640 }
641 
642 __attribute__((malloc))
643 __attribute__((overloadable))
644 static inline void *__unsafe_indexable
zalloc(kalloc_type_view_t kt_view)645 zalloc(kalloc_type_view_t kt_view)
646 {
647 	return (kalloc_type_impl)(kt_view, Z_WAITOK);
648 }
649 
650 /*!
651  * @function zalloc_noblock
652  *
653  * @abstract
654  * Allocates an element from a specified zone, but never blocks.
655  *
656  * @discussion
657  * This call is suitable for preemptible code, however allocation
658  * isn't allowed from interrupt context.
659  *
660  * @param zone          the zone or zone view to allocate from
661  *
662  * @returns             NULL or the allocated element
663  */
664 __attribute__((malloc))
665 extern void *__unsafe_indexable zalloc_noblock(
666 	zone_t          zone);
667 
668 __attribute__((malloc))
669 __attribute__((overloadable))
670 static inline void *__unsafe_indexable
zalloc_noblock(zone_view_t view)671 zalloc_noblock(zone_view_t view)
672 {
673 	return zalloc_noblock((zone_t)view);
674 }
675 
676 __attribute__((malloc))
677 __attribute__((overloadable))
678 static inline void *__unsafe_indexable
zalloc_noblock(kalloc_type_view_t kt_view)679 zalloc_noblock(kalloc_type_view_t kt_view)
680 {
681 	return (kalloc_type_impl)(kt_view, Z_NOWAIT);
682 }
683 
684 /*!
685  * @function zalloc_flags()
686  *
687  * @abstract
688  * Allocates an element from a specified zone, with flags.
689  *
690  * @param zone          the zone or zone view to allocate from
691  * @param flags         a collection of @c zalloc_flags_t.
692  *
693  * @returns             NULL or the allocated element
694  */
695 __attribute__((malloc))
696 extern void *__unsafe_indexable zalloc_flags(
697 	zone_t          zone,
698 	zalloc_flags_t  flags);
699 
700 __attribute__((malloc))
701 __attribute__((overloadable))
702 static inline void *__unsafe_indexable
__zalloc_flags(zone_t zone,zalloc_flags_t flags)703 __zalloc_flags(
704 	zone_t          zone,
705 	zalloc_flags_t  flags)
706 {
707 	void *__unsafe_indexable addr = (zalloc_flags)(zone, flags);
708 	if (flags & Z_NOFAIL) {
709 		__builtin_assume(addr != NULL);
710 	}
711 	return addr;
712 }
713 
714 __attribute__((malloc))
715 __attribute__((overloadable))
716 static inline void *__unsafe_indexable
__zalloc_flags(zone_view_t view,zalloc_flags_t flags)717 __zalloc_flags(
718 	zone_view_t     view,
719 	zalloc_flags_t  flags)
720 {
721 	return __zalloc_flags((zone_t)view, flags);
722 }
723 
724 __attribute__((malloc))
725 __attribute__((overloadable))
726 static inline void *__unsafe_indexable
__zalloc_flags(kalloc_type_view_t kt_view,zalloc_flags_t flags)727 __zalloc_flags(
728 	kalloc_type_view_t  kt_view,
729 	zalloc_flags_t      flags)
730 {
731 	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
732 	if (flags & Z_NOFAIL) {
733 		__builtin_assume(addr != NULL);
734 	}
735 	return addr;
736 }
737 
/*
 * Variant of zalloc_flags() whose return value carries pointer bounds
 * (the zone's element size) — see __unsafe_forge_bidi_indexable.
 */
__attribute__((malloc))
static inline void *__header_indexable
zalloc_flags_buf(
	zone_t          zone,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = __zalloc_flags(zone, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	/* re-attach bounds: the allocation is zone_get_elem_size() bytes wide */
	return __unsafe_forge_bidi_indexable(void *, addr, zone_get_elem_size(zone));
}
750 
#if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
/* type-safe flavor: __zalloc_cast converts the result (see ZALLOC_TYPE_SAFE) */
#define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
#else
#define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
#endif
756 
757 /*!
758  * @macro zalloc_id
759  *
760  * @abstract
761  * Allocates an element from a specified zone ID, with flags.
762  *
763  * @param zid           The proper @c ZONE_ID_* constant.
764  * @param flags         a collection of @c zalloc_flags_t.
765  *
766  * @returns             NULL or the allocated element
767  */
768 __attribute__((malloc))
769 extern void *__unsafe_indexable zalloc_id(
770 	zone_id_t       zid,
771 	zalloc_flags_t  flags);
772 
773 __attribute__((malloc))
774 static inline void *__unsafe_indexable
__zalloc_id(zone_id_t zid,zalloc_flags_t flags)775 __zalloc_id(
776 	zone_id_t       zid,
777 	zalloc_flags_t  flags)
778 {
779 	void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
780 	if (flags & Z_NOFAIL) {
781 		__builtin_assume(addr != NULL);
782 	}
783 	return addr;
784 }
785 
#if XNU_KERNEL_PRIVATE
/* type-safe flavor: __zalloc_cast converts the result (see ZALLOC_TYPE_SAFE) */
#define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
#else
#define zalloc_id(zid, fl) __zalloc_id(zid, fl)
#endif
791 
792 /*!
793  * @function zalloc_ro
794  *
795  * @abstract
796  * Allocates an element from a specified read-only zone.
797  *
798  * @param zone_id       the zone id to allocate from
799  * @param flags         a collection of @c zalloc_flags_t.
800  *
801  * @returns             NULL or the allocated element
802  */
803 __attribute__((malloc))
804 extern void *__unsafe_indexable zalloc_ro(
805 	zone_id_t       zone_id,
806 	zalloc_flags_t  flags);
807 
808 __attribute__((malloc))
809 static inline void *__unsafe_indexable
__zalloc_ro(zone_id_t zone_id,zalloc_flags_t flags)810 __zalloc_ro(
811 	zone_id_t       zone_id,
812 	zalloc_flags_t  flags)
813 {
814 	void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
815 	if (flags & Z_NOFAIL) {
816 		__builtin_assume(addr != NULL);
817 	}
818 	return addr;
819 }
820 
#if XNU_KERNEL_PRIVATE
/* type-safe flavor: __zalloc_cast converts the result (see ZALLOC_TYPE_SAFE) */
#define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
#else
#define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
#endif
826 
827 /*!
828  * @function zalloc_ro_mut
829  *
830  * @abstract
831  * Modifies an element from a specified read-only zone.
832  *
833  * @discussion
834  * Modifying compiler-assisted authenticated pointers using this function will
835  * not result in a signed pointer being written.  The caller is expected to
836  * sign the value appropriately beforehand if they wish to do this.
837  *
 838  * @param zone_id       the zone id the element belongs to
839  * @param elem          element to be modified
840  * @param offset        offset from element
841  * @param new_data      pointer to new data
842  * @param new_data_size size of modification
843  *
844  */
845 extern void zalloc_ro_mut(
846 	zone_id_t       zone_id,
847 	void           *elem __unsafe_indexable,
848 	vm_offset_t     offset,
849 	const void     *new_data __sized_by(new_data_size),
850 	vm_size_t       new_data_size);
851 
852 /*!
853  * @function zalloc_ro_update_elem
854  *
855  * @abstract
856  * Update the value of an entire element allocated in the read only allocator.
857  *
 858  * @param zone_id       the zone id the element belongs to
859  * @param elem          element to be modified
860  * @param new_data      pointer to new data
861  *
862  */
/* Overwrites the whole element; new_data must point to a value of *elem's type. */
#define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
	const typeof(*(elem)) *__new_data = (new_data);                        \
	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
})
867 
868 /*!
869  * @function zalloc_ro_update_field
870  *
871  * @abstract
872  * Update a single field of an element allocated in the read only allocator.
873  *
 874  * @param zone_id       the zone id the element belongs to
875  * @param elem          element to be modified
876  * @param field         the element field to be modified
 877  * @param value         pointer to the new value for the field
878  *
879  */
/* Overwrites a single field; `value` must point to the field's exact type. */
#define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
	const typeof((elem)->field) *__value = (value);                        \
	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
	    __value, sizeof((elem)->field));                                   \
})
885 
/* maps the ZRO_ATOMIC_*_LONG spellings to their 64-bit variants */
#define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
887 
888 /*!
889  * @enum zro_atomic_op_t
890  *
891  * @brief
892  * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
893  * atomic operations.
894  *
895  * @discussion
896  * This enum provides all flavors of atomic operations supported in sizes 8,
897  * 16, 32, 64 bits.
898  *
 899  * @const ZRO_ATOMIC_OR_*
 900  * To perform an @c os_atomic_or
 901  *
 902  * @const ZRO_ATOMIC_XOR_*
 903  * To perform an @c os_atomic_xor
 904  *
 905  * @const ZRO_ATOMIC_AND_*
 906  * To perform an @c os_atomic_and
 907  *
 908  * @const ZRO_ATOMIC_ADD_*
 909  * To perform an @c os_atomic_add
 910  *
 911  * @const ZRO_ATOMIC_XCHG_*
 912  * To perform an @c os_atomic_xchg
913  *
914  */
__enum_decl(zro_atomic_op_t, uint32_t, {
	/* the low 4 bits of each op encode the operand size in bytes */
	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,

	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,

	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,

	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,

	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,

	/* convenient spellings */
	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
});
948 
949 /*!
950  * @function zalloc_ro_mut_atomic
951  *
952  * @abstract
953  * Atomically update an offset in an element allocated in the read only
954  * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
955  *
 956  * @param zone_id       the zone id the element belongs to
957  * @param elem          element to be modified
958  * @param offset        offset in the element to be modified
959  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
960  * @param value         value for the atomic operation
961  *
962  */
963 extern uint64_t zalloc_ro_mut_atomic(
964 	zone_id_t       zone_id,
965 	void           *elem __unsafe_indexable,
966 	vm_offset_t     offset,
967 	zro_atomic_op_t op,
968 	uint64_t        value);
969 
970 /*!
971  * @macro zalloc_ro_update_field_atomic
972  *
973  * @abstract
974  * Atomically update a single field of an element allocated in the read only
975  * allocator.
976  *
977  * @param zone_id       the zone id to allocate from
978  * @param elem          element to be modified
979  * @param field         the element field to be modified
980  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
981  * @param value         value for the atomic operation
982  *
983  */
/*
 * Implementation note: @c op encodes the operand width in its low nibble
 * (see zro_atomic_op_t), which lets the static_assert below check at
 * compile time that the field being updated has the exact size of the
 * requested atomic operation.  The uint64_t result of
 * zalloc_ro_mut_atomic() is cast back to the field's base type.
 */
#define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
	const typeof((elem)->field) __value = (value);                         \
	static_assert(sizeof(__value) == (op & 0xf));                          \
	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
})
990 
991 /*!
992  * @function zalloc_ro_clear
993  *
994  * @abstract
995  * Zeroes an element from a specified read-only zone.
996  *
997  * @param zone_id       the zone id to allocate from
998  * @param elem          element to be modified
999  * @param offset        offset from element
1000  * @param size          size of modification
1001  */
1002 extern void    zalloc_ro_clear(
1003 	zone_id_t       zone_id,
1004 	void           *elem __unsafe_indexable,
1005 	vm_offset_t     offset,
1006 	vm_size_t       size);
1007 
1008 /*!
1009  * @function zalloc_ro_clear_field
1010  *
1011  * @abstract
1012  * Zeroes the specified field of an element from a specified read-only zone.
1013  *
1014  * @param zone_id       the zone id to allocate from
1015  * @param elem          element to be modified
1016  * @param field         offset from element
1017  */
/*
 * Implementation note: the field's offset and size are both computed at
 * compile time from the element's type, so no runtime bounds are taken
 * from the caller.
 */
#define zalloc_ro_clear_field(zone_id, elem, field) \
	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    sizeof((elem)->field))
1021 
1022 /*!
1023  * @function zfree_id()
1024  *
1025  * @abstract
1026  * Frees an element previously allocated with @c zalloc_id().
1027  *
1028  * @param zone_id       the zone id to free the element to.
1029  * @param addr          the address to free
1030  */
1031 extern void     zfree_id(
1032 	zone_id_t       zone_id,
1033 	void           *addr __unsafe_indexable);
/*
 * The macro shadows the function so that the caller's pointer variable is
 * loaded and cleared (via os_ptr_load_and_erase()) before the element is
 * freed, leaving no stale pointer behind in the caller.
 */
#define zfree_id(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); /* evaluate zid exactly once */ \
	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1038 
1039 
1040 /*!
1041  * @function zfree_ro()
1042  *
1043  * @abstract
1044  * Frees an element previously allocated with @c zalloc_ro().
1045  *
1046  * @param zone_id       the zone id to free the element to.
1047  * @param addr          the address to free
1048  */
1049 extern void     zfree_ro(
1050 	zone_id_t       zone_id,
1051 	void           *addr __unsafe_indexable);
/*
 * Like zfree_id(): the macro clears the caller's pointer variable
 * (os_ptr_load_and_erase()) before handing the element to the function.
 */
#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); /* evaluate zid exactly once */ \
	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1056 
1057 
1058 /*!
1059  * @function zfree
1060  *
1061  * @abstract
1062  * Frees an element allocated with @c zalloc*.
1063  *
1064  * @discussion
1065  * If the element being freed doesn't belong to the specified zone,
1066  * then this call will panic.
1067  *
1068  * @param zone          the zone or zone view to free the element to.
1069  * @param elem          the element to free
1070  */
1071 extern void     zfree(
1072 	zone_t          zone,
1073 	void           *elem __unsafe_indexable);
1074 
/*
 * zfree() overload for zone views: a view is freed through its backing
 * zone, so this simply forwards with the view cast to zone_t.
 */
__attribute__((overloadable))
static inline void
zfree(
	zone_view_t     view,
	void           *elem __unsafe_indexable)
{
	zfree((zone_t)view, elem);
}
1083 
/*
 * zfree() overload for kalloc type views: forwards to the typed kalloc
 * free implementation.
 */
__attribute__((overloadable))
static inline void
zfree(
	kalloc_type_view_t   kt_view,
	void                *elem __unsafe_indexable)
{
	return kfree_type_impl(kt_view, elem);
}
1092 
/*
 * The zfree() macro wraps the (overloaded) function so that the caller's
 * pointer variable is loaded and cleared before the free; the
 * parenthesized (zfree) call bypasses this macro.
 */
#define zfree(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); /* evaluate zone exactly once */ \
	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})
1097 
1098 
1099 /* deprecated KPIS */
1100 
1101 __zalloc_deprecated("use zone_create()")
1102 extern zone_t   zinit(
1103 	vm_size_t       size,           /* the size of an element */
1104 	vm_size_t       maxmem,         /* maximum memory to use */
1105 	vm_size_t       alloc,          /* allocation size */
1106 	const char      *name __unsafe_indexable);
1107 
1108 #pragma mark: implementation details
1109 
/*
 * Associates an element type with a zone variable (or zone ID) by
 * declaring a hidden, never-defined marker variable whose type records
 * type_t.  The two-level expansion lets @c var itself be a macro.
 * zone_id_enable_smr() uses the marker's type to type-check callbacks.
 */
#define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
#define __ZONE_DECLARE_TYPE2(var, type_t) \
	__attribute__((visibility("hidden"))) \
	extern type_t *__single __zalloc__##var##__type_name
1114 
1115 #ifdef XNU_KERNEL_PRIVATE
1116 #pragma mark - XNU only interfaces
1117 
1118 #include <kern/cpu_number.h>
1119 
1120 __exported_push_hidden
1121 
1122 #pragma mark XNU only: zalloc (extended)
1123 
/*
 * Alignment masks (alignment - 1) for use with the zalloc_permanent*()
 * interfaces, which take an @c align_mask rather than an alignment.
 */
#define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
#define ZALIGN_16               (sizeof(uint16_t) - 1)
#define ZALIGN_32               (sizeof(uint32_t) - 1)
#define ZALIGN_PTR              (sizeof(void *)   - 1)
#define ZALIGN_64               (sizeof(uint64_t) - 1)
#define ZALIGN(t)               (_Alignof(t)      - 1)
1130 
1131 
1132 /*!
1133  * @function zalloc_permanent_tag()
1134  *
1135  * @abstract
1136  * Allocates a permanent element from the permanent zone
1137  *
1138  * @discussion
1139  * Memory returned by this function is always 0-initialized.
1140  * Note that the size of this allocation can not be determined
1141  * by zone_element_size so it should not be used for copyio.
1142  *
1143  * @param size          the element size (must be smaller than PAGE_SIZE)
1144  * @param align_mask    the required alignment for this allocation
1145  * @param tag           the tag to use for allocations larger than a page.
1146  *
1147  * @returns             the allocated element
1148  */
1149 __attribute__((malloc))
1150 extern void *__sized_by(size) zalloc_permanent_tag(
1151 	vm_size_t       size,
1152 	vm_offset_t     align_mask,
1153 	vm_tag_t        tag)
1154 __attribute__((__diagnose_if__((align_mask & (align_mask + 1)),
1155     "align mask looks invalid", "error")));
1156 
1157 /*!
1158  * @function zalloc_permanent()
1159  *
1160  * @abstract
1161  * Allocates a permanent element from the permanent zone
1162  *
1163  * @discussion
1164  * Memory returned by this function is always 0-initialized.
1165  * Note that the size of this allocation can not be determined
1166  * by zone_element_size so it should not be used for copyio.
1167  *
1168  * @param size          the element size (must be smaller than PAGE_SIZE)
1169  * @param align_mask    the required alignment for this allocation
1170  *
1171  * @returns             the allocated element
1172  */
1173 #define zalloc_permanent(size, align) \
1174 	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)
1175 
1176 /*!
1177  * @function zalloc_permanent_type()
1178  *
1179  * @abstract
1180  * Allocates a permanent element of a given type with its natural alignment.
1181  *
1182  * @discussion
1183  * Memory returned by this function is always 0-initialized.
1184  *
1185  * @param type_t        the element type
1186  *
1187  * @returns             the allocated element
1188  */
1189 #define zalloc_permanent_type(type_t) \
1190 	__unsafe_forge_single(type_t *, \
1191 	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
1192 
1193 /*!
1194  * @function zalloc_first_proc_made()
1195  *
1196  * @abstract
1197  * Declare that the "early" allocation phase is done.
1198  */
1199 extern void zalloc_first_proc_made(void);
1200 /*!
1201  * @function zalloc_iokit_lockdown()
1202  *
1203  * @abstract
1204  * Declare that iokit matching has started.
1205  */
1206 extern void zalloc_iokit_lockdown(void);
1207 
1208 #pragma mark XNU only: per-cpu allocations
1209 
1210 /*!
1211  * @macro zpercpu_get_cpu()
1212  *
1213  * @abstract
1214  * Get a pointer to a specific CPU slot of a given per-cpu variable.
1215  *
1216  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1217  * @param cpu           the specified CPU number as returned by @c cpu_number()
1218  *
1219  * @returns             the per-CPU slot for @c ptr for the specified CPU.
1220  */
/*
 * Implementation note: per-CPU slots are laid out one page apart,
 * hence the ptoa() scaling of the CPU number.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_addr(ptr) + ptoa((unsigned)(cpu)))
1223 
1224 /*!
1225  * @macro zpercpu_get()
1226  *
1227  * @abstract
1228  * Get a pointer to the current CPU slot of a given per-cpu variable.
1229  *
1230  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1231  *
1232  * @returns             the per-CPU slot for @c ptr for the current CPU.
1233  */
1234 #define zpercpu_get(ptr) \
1235 	zpercpu_get_cpu(ptr, cpu_number())
1236 
1237 /*!
1238  * @macro zpercpu_foreach()
1239  *
1240  * @abstract
1241  * Enumerate all per-CPU slots by address.
1242  *
1243  * @param it            the name for the iterator
1244  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1245  */
/*
 * Implementation note: iterates slot addresses from CPU 0 up to
 * zpercpu_count(), advancing one slot at a time with __zpcpu_next().
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))
1250 
1251 /*!
1252  * @macro zpercpu_foreach_cpu()
1253  *
1254  * @abstract
1255  * Enumerate all per-CPU slots by CPU slot number.
1256  *
1257  * @param cpu           the name for cpu number iterator.
1258  */
1259 #define zpercpu_foreach_cpu(cpu) \
1260 	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1261 
1262 /*!
1263  * @function zalloc_percpu()
1264  *
1265  * @abstract
1266  * Allocates an element from a per-cpu zone.
1267  *
1268  * @discussion
1269  * The returned pointer cannot be used directly and must be manipulated
1270  * through the @c zpercpu_get*() interfaces.
1271  *
1272  * @param zone_or_view  the zone or zone view to allocate from
1273  * @param flags         a collection of @c zalloc_flags_t.
1274  *
1275  * @returns             NULL or the allocated element
1276  */
1277 extern void *__zpercpu zalloc_percpu(
1278 	zone_or_view_t  zone_or_view,
1279 	zalloc_flags_t  flags);
1280 
/*
 * Inline wrapper around zalloc_percpu() (installed via the macro below)
 * that teaches the compiler the result cannot be NULL when Z_NOFAIL is
 * passed, so callers' NULL checks can be elided.
 */
static inline void *__zpercpu
__zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags)
{
	/* parenthesized call reaches the real function, not the macro */
	void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}
1292 
1293 #define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1294 
1295 /*!
1296  * @function zfree_percpu()
1297  *
1298  * @abstract
1299  * Frees an element previously allocated with @c zalloc_percpu().
1300  *
1301  * @param zone_or_view  the zone or zone view to free the element to.
1302  * @param addr          the address to free
1303  */
1304 extern void     zfree_percpu(
1305 	zone_or_view_t  zone_or_view,
1306 	void *__zpercpu addr);
1307 
1308 /*!
1309  * @function zalloc_percpu_permanent()
1310  *
1311  * @abstract
1312  * Allocates a permanent percpu-element from the permanent percpu zone.
1313  *
1314  * @discussion
1315  * Memory returned by this function is always 0-initialized.
1316  *
1317  * @param size          the element size (must be smaller than PAGE_SIZE)
1318  * @param align_mask    the required alignment for this allocation
1319  *
1320  * @returns             the allocated element
1321  */
1322 extern void *__zpercpu zalloc_percpu_permanent(
1323 	vm_size_t       size,
1324 	vm_offset_t     align_mask);
1325 
1326 /*!
1327  * @function zalloc_percpu_permanent_type()
1328  *
1329  * @abstract
1330  * Allocates a permanent percpu-element from the permanent percpu zone of a given
1331  * type with its natural alignment.
1332  *
1333  * @discussion
1334  * Memory returned by this function is always 0-initialized.
1335  *
1336  * @param type_t        the element type
1337  *
1338  * @returns             the allocated element
1339  */
1340 #define zalloc_percpu_permanent_type(type_t) \
1341 	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1342 
1343 
1344 #pragma mark XNU only: SMR support for zones
1345 
1346 struct smr;
1347 
1348 /*!
1349  * @typedef zone_smr_free_cb_t
1350  *
1351  * @brief
1352  * Type for the delayed free callback for SMR zones.
1353  *
 * @discussion
1355  * This function is called before an element is reused,
1356  * or when memory is returned to the system.
1357  *
1358  * This function MUST zero the element, and if no special
1359  * action is to be taken on free, then @c bzero() is a fine
1360  * callback to use.
1361  *
1362  * This function also must be preemption-disabled safe,
1363  * as it runs with preemption disabled.
1364  *
1365  *
1366  * Note that this function should only clean the fields
1367  * that must be preserved for stale SMR readers to see.
1368  * Any field that is accessed after element validation
1369  * such as a try-retain or acquiring a lock on it must
1370  * be cleaned up much earlier as they might hold onto
1371  * expensive resources.
1372  *
1373  * The suggested pattern for an SMR type using this facility,
1374  * is to have 2 functions:
1375  *
1376  * - one "retire" stage that tries to clean up as much from
1377  *   the element as possible, with great care to leave no dangling
1378  *   pointers around, as elements in this stage might linger
1379  *   in the allocator for a long time, and this could possibly
1380  *   be abused during UaF exploitation.
1381  *
1382  * - one "smr_free" function which cleans up whatever was left,
1383  *   and zeroes the rest of the element.
1384  *
1385  * <code>
1386  *     void
1387  *     type_retire(type_t elem)
1388  *     {
1389  *         // invalidating the element makes most fields
1390  *         // inaccessible to readers.
1391  *         type_mark_invalid(elem);
1392  *
1393  *         // do cleanups for things requiring a validity check
1394  *         kfree_type(some_type_t, elem->expensive_thing);
1395  *         type_remove_from_global_list(&elem->linkage);
1396  *
1397  *         zfree_smr(type_zone, elem);
1398  *     }
1399  *
1400  *     void
1401  *     type_smr_free(void *_elem)
1402  *     {
 *         type_t elem = _elem;
1404  *
1405  *         // cleanup fields that are used to "find" this element
1406  *         // and that SMR readers may access hazardously.
1407  *         lck_ticket_destroy(&elem->lock);
1408  *         kfree_data(elem->key, elem->keylen);
1409  *
1410  *         // compulsory: element must be zeroed fully
1411  *         bzero(elem, sizeof(*elem));
1412  *     }
1413  * </code>
1414  */
1415 typedef void (*zone_smr_free_cb_t)(void *, size_t);
1416 
1417 /*!
1418  * @function zone_enable_smr()
1419  *
1420  * @abstract
1421  * Enable SMR for a zone.
1422  *
1423  * @discussion
1424  * This can only be done once, and must be done before
1425  * the first allocation is made with this zone.
1426  *
1427  * @param zone          the zone to enable SMR for
1428  * @param smr           the smr domain to use
1429  * @param free_cb       the free callback to use
1430  */
1431 extern void     zone_enable_smr(
1432 	zone_t                  zone,
1433 	struct smr             *smr,
1434 	zone_smr_free_cb_t      free_cb);
1435 
1436 /*!
1437  * @function zone_id_enable_smr()
1438  *
1439  * @abstract
1440  * Enable SMR for a zone ID.
1441  *
1442  * @discussion
1443  * This can only be done once, and must be done before
1444  * the first allocation is made with this zone.
1445  *
1446  * @param zone_id       the zone to enable SMR for
1447  * @param smr           the smr domain to use
1448  * @param free_cb       the free callback to use
1449  */
/*
 * Implementation note: the temporary @c __cb exists purely to type-check
 * that @c free_cb accepts a pointer to this zone ID's declared element
 * type (see ZONE_DECLARE_ID / __ZONE_DECLARE_TYPE) before it is cast to
 * the generic zone_smr_free_cb_t.
 */
#define zone_id_enable_smr(zone_id, smr, free_cb)  ({ \
	void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t);      \
                                                                                \
	__cb = (free_cb);                                                       \
	zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb);    \
})
1456 
1457 /*!
1458  * @macro zalloc_smr()
1459  *
1460  * @abstract
1461  * Allocates an element from an SMR enabled zone
1462  *
1463  * @discussion
1464  * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
1465  *
1466  * @param zone          the zone to allocate from
1467  * @param flags         a collection of @c zalloc_flags_t.
1468  *
1469  * @returns             NULL or the allocated element
1470  */
1471 #define zalloc_smr(zone, flags) \
1472 	zalloc_flags(zone, flags)
1473 
1474 /*!
1475  * @macro zalloc_id_smr()
1476  *
1477  * @abstract
1478  * Allocates an element from a specified zone ID with SMR enabled.
1479  *
1480  * @param zid           The proper @c ZONE_ID_* constant.
1481  * @param flags         a collection of @c zalloc_flags_t.
1482  *
1483  * @returns             NULL or the allocated element
1484  */
1485 #define zalloc_id_smr(zid, flags) \
1486 	zalloc_id(zid, flags)
1487 
1488 /*!
1489  * @macro zfree_smr()
1490  *
1491  * @abstract
1492  * Frees an element previously allocated with @c zalloc_smr().
1493  *
1494  * @discussion
1495  * When zfree_smr() is called, then the element is not immediately zeroed,
1496  * and the "free" callback that has been registered with the zone will
1497  * run later (@see zone_smr_free_cb_t).
1498  *
1499  * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
1500  *
1501  *
1502  * It is guaranteed that the SMR timestamp associated with an element
1503  * will always be equal or greater than the stamp associated with
1504  * elements freed before it on the same thread.
1505  *
1506  * It means that when freeing multiple elements in a sequence, these
1507  * must be freed in topological order (parents before children).
1508  *
1509  * It is worth noting that calling zfree_smr() on several elements
1510  * in a given order doesn't necessarily mean they will be effectively
1511  * reused or cleaned up in that same order, only that their SMR clocks
1512  * will expire in that order.
1513  *
1514  *
1515  * @param zone          the zone to free the element to.
1516  * @param elem          the address to free
1517  */
1518 extern void     zfree_smr(
1519 	zone_t          zone,
1520 	void           *elem __unsafe_indexable);
/*
 * The macro clears the caller's pointer variable before the deferred
 * free; use zfree_smr_noclear() when the SMR algorithm requires the
 * pointer to remain readable.
 */
#define zfree_smr(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); /* evaluate zone exactly once */ \
	(zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})
1525 
1526 
1527 /*!
1528  * @function zfree_id_smr()
1529  *
1530  * @abstract
1531  * Frees an element previously allocated with @c zalloc_id_smr().
1532  *
1533  * @param zone_id       the zone id to free the element to.
1534  * @param addr          the address to free
1535  */
1536 extern void     zfree_id_smr(
1537 	zone_id_t       zone_id,
1538 	void           *addr __unsafe_indexable);
/*
 * The macro clears the caller's pointer variable before the deferred
 * free; see zfree_id_smr_noclear() for the variant that leaves it set.
 */
#define zfree_id_smr(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); /* evaluate zid exactly once */ \
	(zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1543 
1544 /*!
1545  * @macro zfree_smr_noclear()
1546  *
1547  * @abstract
1548  * Frees an element previously allocated with @c zalloc_smr().
1549  *
1550  * @discussion
1551  * This variant doesn't clear the pointer passed as an argument,
1552  * as it is often required for SMR algorithms to function correctly
1553  * to leave pointers "dangling" to an extent.
1554  *
1555  * However it expects the field in question to be an SMR_POINTER()
1556  * struct.
1557  *
1558  * @param zone          the zone to free the element to.
1559  * @param elem          the address to free
1560  */
1561 #define zfree_smr_noclear(zone, elem) \
1562 	(zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1563 
1564 /*!
1565  * @macro zfree_id_smr_noclear()
1566  *
1567  * @abstract
1568  * Frees an element previously allocated with @c zalloc_id_smr().
1569  *
1570  * @discussion
1571  * This variant doesn't clear the pointer passed as an argument,
1572  * as it is often required for SMR algorithms to function correctly
1573  * to leave pointers "dangling" to an extent.
1574  *
1575  * However it expects the field in question to be an SMR_POINTER()
1576  * struct.
1577  *
1578  * @param zone          the zone to free the element to.
1579  * @param elem          the address to free
1580  */
1581 #define zfree_id_smr_noclear(zone, elem) \
1582 	(zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1583 
1584 
1585 #pragma mark XNU only: zone creation (extended)
1586 
1587 /*!
1588  * @enum zone_reserved_id_t
1589  *
1590  * @abstract
1591  * Well known pre-registered zones, allowing use of zone_id_require()
1592  *
1593  * @discussion
1594  * @c ZONE_ID__* aren't real zone IDs.
1595  *
1596  * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1597  * easy a value to produce (by malice or accident).
1598  *
1599  * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1600  * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creation IDs
1602  * listed in @c zone_create_ro_id_t.
1603  *
1604  * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1605  * @c zone_create().
1606  */
/*
 * Note: the ZONE_ID__FIRST_RO .. ZONE_ID__LAST_RO aliases below bracket
 * the read-only zone IDs, so those entries must remain contiguous.
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	/* read-only zones (bracketed by ZONE_ID__FIRST_RO/__LAST_RO) */
	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	/* external read-only zones, 1:1 with zone_create_ro_id_t */
	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,
	ZONE_ID_AMFI_OSENTITLEMENTS,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_KMSG,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_PROC_TASK,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

#if !CONFIG_MBUF_MCACHE
	/* mbuf zones, present only when mcache is not used for mbufs */
	ZONE_ID_MBUF_REF,
	ZONE_ID_MBUF,
	ZONE_ID_CLUSTER_2K,
	ZONE_ID_CLUSTER_4K,
	ZONE_ID_CLUSTER_16K,
	ZONE_ID_MBUF_CLUSTER_2K,
	ZONE_ID_MBUF_CLUSTER_4K,
	ZONE_ID_MBUF_CLUSTER_16K,
#endif /* !CONFIG_MBUF_MCACHE */

	ZONE_ID__FIRST_DYNAMIC,
});
1661 
1662 /*!
1663  * @const ZONE_ID_ANY
1664  * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1665  * Zone ID.
1666  */
1667 #define ZONE_ID_ANY ((zone_id_t)-1)
1668 
1669 /*!
1670  * @const ZONE_ID_INVALID
1671  * An invalid zone_id_t that corresponds to nothing.
1672  */
1673 #define ZONE_ID_INVALID ((zone_id_t)-2)
1674 
1675 /**!
1676  * @function zone_by_id
1677  *
1678  * @param zid           the specified zone ID.
1679  * @returns             the zone with that ID.
1680  */
1681 zone_t zone_by_id(
1682 	size_t                  zid) __pure2;
1683 
1684 /**!
1685  * @function zone_name
1686  *
1687  * @param zone          the specified zone
1688  * @returns             the name of the specified zone.
1689  */
1690 const char *__unsafe_indexable zone_name(
1691 	zone_t                  zone);
1692 
1693 /**!
1694  * @function zone_heap_name
1695  *
1696  * @param zone          the specified zone
1697  * @returns             the name of the heap this zone is part of, or "".
1698  */
1699 const char *__unsafe_indexable zone_heap_name(
1700 	zone_t                  zone);
1701 
1702 /*!
1703  * @function zone_create_ext
1704  *
1705  * @abstract
1706  * Creates a zone with the specified parameters.
1707  *
1708  * @discussion
1709  * This is an extended version of @c zone_create().
1710  *
1711  * @param name          the name for the new zone.
1712  * @param size          the size of the elements returned by this zone.
1713  * @param flags         a set of @c zone_create_flags_t flags.
1714  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1715  *
1716  * @param extra_setup   a block that can perform non trivial initialization
1717  *                      on the zone before it is marked valid.
1718  *                      This block can call advanced setups like:
1719  *                      - zone_set_exhaustible()
1720  *
1721  * @returns             the created zone, this call never fails.
1722  */
1723 extern zone_t   zone_create_ext(
1724 	const char             *name __unsafe_indexable,
1725 	vm_size_t               size,
1726 	zone_create_flags_t     flags,
1727 	zone_id_t               desired_zid,
1728 	void                  (^extra_setup)(zone_t));
1729 
1730 /*!
1731  * @macro ZONE_DECLARE
1732  *
1733  * @abstract
1734  * Declares a zone variable and its associated type.
1735  *
1736  * @param var           the name of the variable to declare.
1737  * @param type_t        the type of elements in the zone.
1738  */
1739 #define ZONE_DECLARE(var, type_t) \
1740 	extern zone_t var; \
1741 	__ZONE_DECLARE_TYPE(var, type_t)
1742 
1743 /*!
1744  * @macro ZONE_DECLARE_ID
1745  *
1746  * @abstract
1747  * Declares the type associated with a zone ID.
1748  *
1749  * @param id            the name of zone ID to associate a type with.
1750  * @param type_t        the type of elements in the zone.
1751  */
1752 #define ZONE_DECLARE_ID(id, type_t) \
1753 	__ZONE_DECLARE_TYPE(id, type_t)
1754 
1755 /*!
1756  * @macro ZONE_DEFINE
1757  *
1758  * @abstract
1759  * Declares a zone variable to automatically initialize with the specified
1760  * parameters.
1761  *
1762  * @discussion
1763  * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1764  *
1765  * @param var           the name of the variable to declare.
1766  * @param name          the name for the zone
1767  * @param size          the size of the elements returned by this zone.
1768  * @param flags         a set of @c zone_create_flags_t flags.
1769  */
/*
 * Implementation note: registers a startup spec consumed by
 * zone_create_startup() during the ZALLOC startup phase
 * (STARTUP_RANK_FOURTH).  ZC_DESTRUCTIBLE is rejected at compile time,
 * presumably because the zone variable is SECURITY_READ_ONLY_LATE and
 * locked down after startup.
 */
#define ZONE_DEFINE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
	    &__startup_zone_spec_ ## var)
1778 
1779 /*!
1780  * @macro ZONE_DEFINE_TYPE
1781  *
1782  * @abstract
1783  * Defines a zone variable to automatically initialize with the specified
1784  * parameters, associated with a particular type.
1785  *
1786  * @param var           the name of the variable to declare.
1787  * @param name          the name for the zone
1788  * @param type_t        the type of elements in the zone.
1789  * @param flags         a set of @c zone_create_flags_t flags.
1790  */
1791 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1792 	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1793 	__ZONE_DECLARE_TYPE(var, type_t)
1794 
1795 /*!
1796  * @macro ZONE_DEFINE_ID
1797  *
1798  * @abstract
1799  * Initializes a given zone automatically during startup with the specified
1800  * parameters.
1801  *
1802  * @param zid           a @c zone_reserved_id_t value.
1803  * @param name          the name for the zone
1804  * @param type_t        the type of elements in the zone.
1805  * @param flags         a set of @c zone_create_flags_t flags.
1806  */
1807 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1808 	ZONE_DECLARE_ID(zid, type_t); \
1809 	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1810 
1811 /*!
1812  * @macro ZONE_INIT
1813  *
1814  * @abstract
1815  * Initializes a given zone automatically during startup with the specified
1816  * parameters.
1817  *
1818  * @param var           the name of the variable to initialize.
1819  * @param name          the name for the zone
1820  * @param size          the size of the elements returned by this zone.
1821  * @param flags         a set of @c zone_create_flags_t flags.
1822  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1823  * @param extra_setup   a block that can perform non trivial initialization
1824  *                      (@see @c zone_create_ext()).
1825  */
1826 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1827 	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1828 
1829 /*!
1830  * @function zone_id_require
1831  *
1832  * @abstract
1833  * Requires for a given pointer to belong to the specified zone, by ID and size.
1834  *
1835  * @discussion
1836  * The function panics if the check fails as it indicates that the kernel
1837  * internals have been compromised.
1838  *
1839  * This is a variant of @c zone_require() which:
1840  * - isn't sensitive to @c zone_t::elem_size being compromised,
1841  * - is slightly faster as it saves one load and a multiplication.
1842  *
1843  * @param zone_id       the zone ID the address needs to belong to.
1844  * @param elem_size     the size of elements for this zone.
1845  * @param addr          the element address to check.
1846  */
1847 extern void     zone_id_require(
1848 	zone_id_t               zone_id,
1849 	vm_size_t               elem_size,
1850 	void                   *addr __unsafe_indexable);
1851 
1852 /*!
1853  * @function zone_id_require_aligned
1854  *
1855  * @abstract
1856  * Requires for a given pointer to belong to the specified zone, by ID and size.
1857  *
1858  * @discussion
1859  * Similar to @c zone_id_require() but does more checks such as whether the
1860  * element is properly aligned.
1861  *
1862  * @param zone_id       the zone ID the address needs to belong to.
1863  * @param addr          the element address to check.
1864  */
1865 extern void     zone_id_require_aligned(
1866 	zone_id_t               zone_id,
1867 	void                   *addr __unsafe_indexable);
1868 
1869 /* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
1870 extern void     zone_set_exhaustible(
1871 	zone_t                  zone,
1872 	vm_size_t               max_elements,
1873 	bool                    exhausts_by_design);
1874 
1875 /*!
1876  * @function zone_raise_reserve()
1877  *
1878  * @brief
1879  * Used to raise the reserve on a zone.
1880  *
1881  * @discussion
1882  * Can be called from any context (zone_create_ext() setup hook or after).
1883  */
1884 extern void     zone_raise_reserve(
1885 	zone_or_view_t          zone_or_view,
1886 	uint16_t                min_elements);
1887 
1888 /*!
1889  * @function zone_fill_initially
1890  *
1891  * @brief
1892  * Initially fill a non collectable zone to have the specified amount of
1893  * elements.
1894  *
1895  * @discussion
1896  * This function must be called on a non collectable permanent zone before it
1897  * has been used yet.
1898  *
1899  * @param zone          The zone to fill.
1900  * @param nelems        The number of elements to be able to hold.
1901  */
1902 extern void     zone_fill_initially(
1903 	zone_t                  zone,
1904 	vm_size_t               nelems);
1905 
1906 /*!
1907  * @function zone_drain()
1908  *
1909  * @abstract
1910  * Forces a zone to be drained (have all its data structures freed
1911  * back to its data store, and empty pages returned to the system).
1912  *
1913  * @param zone          the zone id to free the objects to.
1914  */
1915 extern void zone_drain(
1916 	zone_t                  zone);
1917 
1918 /*!
1919  * @struct zone_basic_stats
1920  *
1921  * @abstract
1922  * Used to report basic statistics about a zone.
1923  *
1924  * @field zbs_avail     the number of elements in a zone.
1925  * @field zbs_alloc     the number of allocated elements in a zone.
1926  * @field zbs_free      the number of free elements in a zone.
1927  * @field zbs_cached    the number of free elements in the per-CPU caches.
1928  *                      (included in zbs_free).
1929  * @field zbs_alloc_fail
1930  *                      the number of allocation failures.
1931  */
struct zone_basic_stats {
	uint64_t        zbs_avail;      /* number of elements in the zone */
	uint64_t        zbs_alloc;      /* number of allocated elements */
	uint64_t        zbs_free;       /* number of free elements (incl. zbs_cached) */
	uint64_t        zbs_cached;     /* free elements held in per-CPU caches */
	uint64_t        zbs_alloc_fail; /* number of allocation failures */
};
1939 
1940 /*!
1941  * @function zone_get_stats
1942  *
1943  * @abstract
 * Retrieves statistics about a zone, including its per-CPU caches.
1945  *
1946  * @param zone          the zone to collect stats from.
1947  * @param stats         the statistics to fill.
1948  */
1949 extern void zone_get_stats(
1950 	zone_t                  zone,
1951 	struct zone_basic_stats *stats);
1952 
1953 
1954 /*!
1955  * @typedef zone_exhausted_cb_t
1956  *
1957  * @brief
1958  * The callback type for the ZONE_EXHAUSTED event.
1959  */
1960 typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone, bool exhausted);
1961 
1962 /*!
1963  * @brief
 * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits its
1965  * wiring limit.
1966  *
1967  * @discussion
1968  * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
1969  * performing zone expansion and no significant amount of work can be performed
1970  * from this context.
1971  *
1972  * In particular, those callbacks cannot allocate any memory, it is expected
1973  * that they will filter if the zone is of interest, and wake up another thread
1974  * to perform the actual work (for example via thread call).
1975  */
1976 EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
1977 
1978 
1979 #pragma mark XNU only: zone views
1980 
1981 /*!
1982  * @enum zone_kheap_id_t
1983  *
1984  * @brief
1985  * Enumerate a particular kalloc heap.
1986  *
1987  * @discussion
1988  * More documentation about heaps is available in @c <kern/kalloc.h>.
1989  *
1990  * @const KHEAP_ID_NONE
1991  * This value denotes regular zones, not used by kalloc.
1992  *
1993  * @const KHEAP_ID_EARLY
1994  * Indicates zones part of the KHEAP_EARLY heap.
1995  *
1996  * @const KHEAP_ID_DATA_BUFFERS
1997  * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
1998  *
1999  * @const KHEAP_ID_DATA_SHARED
2000  * Indicates zones part of the KHEAP_DATA_SHARED heap.
2001  *
2002  * @const KHEAP_ID_KT_VAR
2003  * Indicates zones part of the KHEAP_KT_VAR heap.
2004  */
2005 __enum_decl(zone_kheap_id_t, uint8_t, {
2006 	KHEAP_ID_NONE,
2007 	KHEAP_ID_EARLY,
2008 	KHEAP_ID_DATA_BUFFERS,
2009 	KHEAP_ID_DATA_SHARED,
2010 	KHEAP_ID_KT_VAR,
2011 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
2012 });
2013 
2014 static inline bool
zone_is_data_kheap(zone_kheap_id_t kheap_id)2015 zone_is_data_kheap(zone_kheap_id_t kheap_id)
2016 {
2017 	return kheap_id == KHEAP_ID_DATA_BUFFERS ||
2018 	       kheap_id == KHEAP_ID_DATA_SHARED;
2019 }
2020 
2021 static inline bool
zone_is_data_buffers_kheap(zone_kheap_id_t kheap_id)2022 zone_is_data_buffers_kheap(zone_kheap_id_t kheap_id)
2023 {
2024 	return kheap_id == KHEAP_ID_DATA_BUFFERS;
2025 }
2026 
2027 static inline bool
zone_is_data_shared_kheap(zone_kheap_id_t kheap_id)2028 zone_is_data_shared_kheap(zone_kheap_id_t kheap_id)
2029 {
2030 	return kheap_id == KHEAP_ID_DATA_SHARED;
2031 }
2032 
2033 /*!
2034  * @macro ZONE_VIEW_DECLARE
2035  *
2036  * @abstract
2037  * (optionally) declares a zone view (in a header).
2038  *
2039  * @param var           the name for the zone view.
2040  */
2041 #define ZONE_VIEW_DECLARE(var) \
2042 	extern struct zone_view var[1]
2043 
2044 /*!
2045  * @macro ZONE_VIEW_DEFINE
2046  *
2047  * @abstract
2048  * Defines a given zone view and what it points to.
2049  *
2050  * @discussion
2051  * Zone views can either share a pre-existing zone,
2052  * or perform a lookup into a kalloc heap for the zone
2053  * backing the bucket of the proper size.
2054  *
2055  * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
2056  * as the last rank. If views on zones are created, these must have been
2057  * created before this stage.
2058  *
2059  * This macro should not be used to create zone views from default
2060  * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
2061  *
2062  * @param var           the name for the zone view.
2063  * @param name          a string describing the zone view.
2064  * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
2065  * @param size          the element size to be allocated from this view.
2066  */
2067 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
2068 	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
2069 	    .zv_name = (name), \
2070 	} }; \
2071 	static __startup_data struct zone_view_startup_spec \
2072 	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
2073 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_view_startup_init, \
2074 	    &__startup_zone_view_spec_ ## var)
2075 
2076 
2077 #pragma mark XNU only: batched allocations
2078 
2079 /*!
2080  * @typedef zstack_t
2081  *
2082  * @brief
2083  * A stack of allocated elements chained with delta encoding.
2084  *
2085  * @discussion
2086  * Some batch allocation interfaces interact with the data heap
2087  * where leaking kernel pointers is not acceptable. This is why
2088  * element offsets are used instead.
2089  */
2090 typedef struct zstack {
2091 	vm_offset_t     z_head;
2092 	uint32_t        z_count;
2093 } zstack_t;
2094 
2095 /*!
2096  * @function zstack_push
2097  *
2098  * @brief
2099  * Push a given element onto a zstack.
2100  */
2101 extern void zstack_push(
2102 	zstack_t               *stack,
2103 	void                   *elem);
2104 
2105 /*!
2106  * @function zstack_pop
2107  *
2108  * @brief
2109  * Pops an element from a zstack, the caller must check it's not empty.
2110  */
2111 void *zstack_pop(
2112 	zstack_t               *stack);
2113 
2114 /*!
2115  * @function zstack_empty
2116  *
2117  * @brief
2118  * Returns whether a stack is empty.
2119  */
2120 static inline uint32_t
zstack_count(zstack_t stack)2121 zstack_count(zstack_t stack)
2122 {
2123 	return stack.z_count;
2124 }
2125 
2126 /*!
2127  * @function zstack_empty
2128  *
2129  * @brief
2130  * Returns whether a stack is empty.
2131  */
2132 static inline bool
zstack_empty(zstack_t stack)2133 zstack_empty(zstack_t stack)
2134 {
2135 	return zstack_count(stack) == 0;
2136 }
2137 
2138 static inline zstack_t
zstack_load_and_erase(zstack_t * stackp)2139 zstack_load_and_erase(zstack_t *stackp)
2140 {
2141 	zstack_t stack = *stackp;
2142 
2143 	*stackp = (zstack_t){ };
2144 	return stack;
2145 }
2146 
2147 /*!
2148  * @function zfree_nozero
2149  *
2150  * @abstract
2151  * Frees an element allocated with @c zalloc*, without zeroing it.
2152  *
2153  * @discussion
2154  * This is for the sake of networking only, no one else should use this.
2155  *
2156  * @param zone_id       the zone id to free the element to.
2157  * @param elem          the element to free
2158  */
2159 extern void zfree_nozero(
2160 	zone_id_t               zone_id,
2161 	void                   *elem __unsafe_indexable);
2162 #define zfree_nozero(zone_id, elem) ({ \
2163 	zone_id_t __zfree_zid = (zone_id); \
2164 	(zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
2165 })
2166 
2167 /*!
2168  * @function zalloc_n
2169  *
2170  * @abstract
2171  * Allocates a batch of elements from the specified zone.
2172  *
2173  * @discussion
2174  * This is for the sake of networking only, no one else should use this.
2175  *
2176  * @param zone_id       the zone id to allocate the element from.
 * @param count         how many elements to allocate (fewer might be returned)
 * @param flags         a set of @c zalloc_flags_t flags.
2179  */
2180 extern zstack_t zalloc_n(
2181 	zone_id_t               zone_id,
2182 	uint32_t                count,
2183 	zalloc_flags_t          flags);
2184 
2185 /*!
2186  * @function zfree_n
2187  *
2188  * @abstract
2189  * Batched variant of zfree(): frees a stack of elements.
2190  *
2191  * @param zone_id       the zone id to free the element to.
2192  * @param stack         a stack of elements to free.
2193  */
2194 extern void zfree_n(
2195 	zone_id_t               zone_id,
2196 	zstack_t                stack);
2197 #define zfree_n(zone_id, stack) ({ \
2198 	zone_id_t __zfree_zid = (zone_id); \
2199 	(zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2200 })
2201 
2202 /*!
2203  * @function zfree_nozero_n
2204  *
2205  * @abstract
2206  * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
2207  * them.
2208  *
2209  * @discussion
2210  * This is for the sake of networking only, no one else should use this.
2211  *
2212  * @param zone_id       the zone id to free the element to.
2213  * @param stack         a stack of elements to free.
2214  */
2215 extern void zfree_nozero_n(
2216 	zone_id_t               zone_id,
2217 	zstack_t                stack);
2218 #define zfree_nozero_n(zone_id, stack) ({ \
2219 	zone_id_t __zfree_zid = (zone_id); \
2220 	(zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2221 })
2222 
2223 #pragma mark XNU only: cached objects
2224 
2225 /*!
2226  * @typedef zone_cache_ops_t
2227  *
2228  * @brief
2229  * A set of callbacks used for a zcache (cache of composite objects).
2230  *
2231  * @field zc_op_alloc
2232  * The callback to "allocate" a cached object from scratch.
2233  *
2234  * @field zc_op_mark_valid
2235  * The callback that is called when a cached object is being reused,
2236  * will typically call @c zcache_mark_valid() on the various
2237  * sub-pieces of the composite cached object.
2238  *
2239  * @field zc_op_mark_invalid
2240  * The callback that is called when a composite object is being freed
2241  * to the cache. This will typically call @c zcache_mark_invalid()
2242  * on the various sub-pieces of the composite object.
2243  *
2244  * @field zc_op_free
2245  * The callback to "free" a composite object completely.
2246  */
2247 typedef const struct zone_cache_ops {
2248 	void         *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);
2249 	void         *(*zc_op_mark_valid)(zone_id_t, void *);
2250 	void         *(*zc_op_mark_invalid)(zone_id_t, void *);
2251 	void          (*zc_op_free)(zone_id_t, void *);
2252 } *zone_cache_ops_t;
2253 
#if __has_ptrcheck
/*
 * zcache_transpose_bounds: carries the pointer bounds of
 * @c pointer_with_bounds over to @c unsafe_pointer, preserving the pointer's
 * offset within those bounds.  Used by the zcache_mark_{valid,invalid}
 * wrappers below when the element address returned by zcache_mark_valid() /
 * zcache_mark_invalid() may differ from the input address.
 */
static inline char *__bidi_indexable
zcache_transpose_bounds(
	char *__bidi_indexable pointer_with_bounds,
	char *__unsafe_indexable unsafe_pointer)
{
	/* distances from the pointer to its lower and upper bounds */
	vm_offset_t offset_from_start = pointer_with_bounds - __ptr_lower_bound(pointer_with_bounds);
	vm_offset_t offset_to_end = __ptr_upper_bound(pointer_with_bounds) - pointer_with_bounds;
	vm_offset_t size = offset_from_start + offset_to_end;
	/* forge equally-sized bounds around the new address, at the same offset */
	return __unsafe_forge_bidi_indexable(char *, unsafe_pointer - offset_from_start, size)
	       + offset_from_start;
}
#else
/*
 * Without pointer bounds checking there are no bounds to transpose:
 * the unsafe pointer is returned unchanged.
 */
static inline char *__header_indexable
zcache_transpose_bounds(
	char *__header_indexable pointer_with_bounds __unused,
	char *__unsafe_indexable unsafe_pointer)
{
	return unsafe_pointer;
}
#endif // __has_ptrcheck
2275 
2276 /*!
2277  * @function zcache_mark_valid()
2278  *
2279  * @brief
2280  * Mark an element as "valid".
2281  *
2282  * @description
2283  * This function is used to be able to integrate with KASAN or PGZ
2284  * for a cache of composite objects. It typically is a function
2285  * called in their @c zc_op_mark_valid() callback.
2286  *
2287  * If PGZ or KASAN isn't in use, then this callback is a no-op.
2288  * Otherwise the @c elem address might be updated.
2289  *
2290  * @param zone          the zone the element belongs to.
2291  * @param elem          the address of the element
2292  * @returns             the new address to correctly access @c elem.
2293  */
2294 extern void *__unsafe_indexable zcache_mark_valid(
2295 	zone_t                  zone,
2296 	void                    *elem __unsafe_indexable);
2297 
2298 static inline void *
zcache_mark_valid_single(zone_t zone,void * elem)2299 zcache_mark_valid_single(
2300 	zone_t                  zone,
2301 	void                    *elem)
2302 {
2303 	return __unsafe_forge_single(void *, zcache_mark_valid(zone, elem));
2304 }
2305 
2306 static inline void *__header_bidi_indexable
zcache_mark_valid_indexable(zone_t zone,void * elem __header_bidi_indexable)2307 zcache_mark_valid_indexable(
2308 	zone_t                  zone,
2309 	void                    *elem __header_bidi_indexable)
2310 {
2311 	return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_valid(zone, elem));
2312 }
2313 
2314 /*!
2315  * @function zcache_mark_invalid()
2316  *
2317  * @brief
2318  * Mark an element as "invalid".
2319  *
2320  * @description
2321  * This function is used to be able to integrate with KASAN or PGZ
2322  * for a cache of composite objects. It typically is a function
2323  * called in their @c zc_op_mark_invalid() callback.
2324  *
2325  * This function performs validation that @c elem belongs
2326  * to the right zone and is properly "aligned", and should
2327  * never be elided under any configuration.
2328  *
2329  * @param zone          the zone the element belongs to.
2330  * @param elem          the address of the element
2331  * @returns             the new address to correctly access @c elem.
2332  */
2333 extern void *__unsafe_indexable zcache_mark_invalid(
2334 	zone_t                  zone,
2335 	void                    *elem __unsafe_indexable);
2336 
2337 static inline void *
zcache_mark_invalid_single(zone_t zone,void * elem)2338 zcache_mark_invalid_single(
2339 	zone_t                  zone,
2340 	void                    *elem)
2341 {
2342 	return __unsafe_forge_single(void *, zcache_mark_invalid(zone, elem));
2343 }
2344 
2345 static inline void *__header_bidi_indexable
zcache_mark_invalid_indexable(zone_t zone,void * elem __header_bidi_indexable)2346 zcache_mark_invalid_indexable(
2347 	zone_t                  zone,
2348 	void                    *elem __header_bidi_indexable)
2349 {
2350 	return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_invalid(zone, elem));
2351 }
2352 
2353 /*!
2354  * @macro zcache_alloc()
2355  *
2356  * @abstract
2357  * Allocates a composite object from a cache.
2358  *
2359  * @param zone_id       The proper @c ZONE_ID_* constant.
2360  * @param flags         a collection of @c zalloc_flags_t.
2361  *
2362  * @returns             NULL or the allocated element
2363  */
2364 #define zcache_alloc(zone_id, fl) \
2365 	__zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2366 
2367 /*!
2368  * @function zcache_alloc_n()
2369  *
2370  * @abstract
2371  * Allocates a stack of composite objects from a cache.
2372  *
2373  * @param zone_id       The proper @c ZONE_ID_* constant.
 * @param count         how many elements to allocate (fewer might be returned)
 * @param flags         a set of @c zalloc_flags_t flags.
2376  *
2377  * @returns             NULL or the allocated composite object
2378  */
2379 extern zstack_t zcache_alloc_n(
2380 	zone_id_t               zone_id,
2381 	uint32_t                count,
2382 	zalloc_flags_t          flags,
2383 	zone_cache_ops_t        ops);
2384 #define zcache_alloc_n(zone_id, count, flags) \
2385 	(zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2386 
2387 
2388 
2389 /*!
2390  * @function zcache_free()
2391  *
2392  * @abstract
2393  * Frees a composite object previously allocated
2394  * with @c zcache_alloc() or @c zcache_alloc_n().
2395  *
2396  * @param zone_id       the zcache id to free the object to.
2397  * @param addr          the address to free
2398  * @param ops           the pointer to the zcache ops for this zcache.
2399  */
2400 extern void zcache_free(
2401 	zone_id_t               zone_id,
2402 	void                   *addr __unsafe_indexable,
2403 	zone_cache_ops_t        ops);
2404 #define zcache_free(zone_id, elem) \
2405 	(zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
2406 	    __zcache_##zone_id##_ops)
2407 
2408 /*!
2409  * @function zcache_free_n()
2410  *
2411  * @abstract
2412  * Frees a stack of composite objects previously allocated
2413  * with @c zcache_alloc() or @c zcache_alloc_n().
2414  *
2415  * @param zone_id       the zcache id to free the objects to.
2416  * @param stack         a stack of composite objects
2417  * @param ops           the pointer to the zcache ops for this zcache.
2418  */
2419 extern void zcache_free_n(
2420 	zone_id_t               zone_id,
2421 	zstack_t                stack,
2422 	zone_cache_ops_t        ops);
2423 #define zcache_free_n(zone_id, stack) \
2424 	(zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
2425 	    __zcache_##zone_id##_ops)
2426 
2427 
2428 /*!
2429  * @function zcache_drain()
2430  *
2431  * @abstract
2432  * Forces a zcache to be drained (have all its data structures freed
2433  * back to the original zones).
2434  *
2435  * @param zone_id       the zcache id to free the objects to.
2436  */
2437 extern void zcache_drain(
2438 	zone_id_t               zone_id);
2439 
2440 
2441 /*!
2442  * @macro ZCACHE_DECLARE
2443  *
2444  * @abstract
2445  * Declares the type associated with a zone cache ID.
2446  *
2447  * @param id            the name of zone ID to associate a type with.
2448  * @param type_t        the type of elements in the zone.
2449  */
2450 #define ZCACHE_DECLARE(id, type_t) \
2451 	__ZONE_DECLARE_TYPE(id, type_t); \
2452 	__attribute__((visibility("hidden"))) \
2453 	extern const zone_cache_ops_t __zcache_##id##_ops
2454 
2455 
2456 /*!
2457  * @macro ZCACHE_DEFINE
2458  *
2459  * @abstract
2460  * Defines a zone cache for a given ID and type.
2461  *
2462  * @param zone_id       the name of zone ID to associate a type with.
2463  * @param name          the name for the zone
2464  * @param type_t        the type of elements in the zone.
2465  * @param size          the size of elements in the cache
2466  * @param ops           the ops for this zcache.
2467  */
2468 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2469 	ZCACHE_DECLARE(zid, type_t);                                            \
2470 	ZONE_DECLARE_ID(zid, type_t);                                           \
2471 	const zone_cache_ops_t __zcache_##zid##_ops = (ops);                    \
2472 	ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) {   \
2473 	        zcache_ops[zid] = (ops);                                        \
2474 	})
2475 
2476 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2477 
2478 #pragma mark XNU only: misc & implementation details
2479 
2480 struct zone_create_startup_spec {
2481 	zone_t                 *z_var;
2482 	const char             *z_name __unsafe_indexable;
2483 	vm_size_t               z_size;
2484 	zone_create_flags_t     z_flags;
2485 	zone_id_t               z_zid;
2486 	void                  (^z_setup)(zone_t);
2487 };
2488 
2489 extern void     zone_create_startup(
2490 	struct zone_create_startup_spec *spec);
2491 
2492 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2493 	static __startup_data struct zone_create_startup_spec \
2494 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2495 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2496 	    &__startup_zone_spec_ ## ns)
2497 
2498 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2499 	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2500 
2501 #define __zalloc_cast(namespace, expr) \
2502 	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2503 
2504 #if ZALLOC_TYPE_SAFE
2505 #define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
2506 #define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
2507 #endif /* !ZALLOC_TYPE_SAFE */
2508 
2509 struct zone_view_startup_spec {
2510 	zone_view_t         zv_view;
2511 	union {
2512 		zone_kheap_id_t zv_heapid;
2513 		zone_t         *zv_zone;
2514 	};
2515 	vm_size_t           zv_size;
2516 };
2517 
2518 extern void zone_view_startup_init(
2519 	struct zone_view_startup_spec *spec);
2520 
2521 extern void zone_userspace_reboot_checks(void);
2522 
2523 #if VM_TAG_SIZECLASSES
2524 extern void __zone_site_register(
2525 	vm_allocation_site_t   *site);
2526 
2527 #define VM_ALLOC_SITE_TAG() ({ \
2528 	__PLACE_IN_SECTION("__DATA, __data")                                   \
2529 	static vm_allocation_site_t site = { .refcount = 2, };                 \
2530 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, __zone_site_register, &site);   \
2531 	site.tag;                                                              \
2532 })
2533 #else /* VM_TAG_SIZECLASSES */
2534 #define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
2535 #endif /* !VM_TAG_SIZECLASSES */
2536 
2537 static inline zalloc_flags_t
__zone_flags_mix_tag(zalloc_flags_t flags,vm_tag_t tag)2538 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2539 {
2540 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2541 }
2542 
2543 #define __zpcpu_addr(e)         ((vm_address_t)__unsafe_forge_single(void *, e))
2544 #define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(*(ptr)) *, e)
2545 #define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2546 
2547 /**
2548  * @macro __zpcpu_mangle_for_boot()
2549  *
2550  * @discussion
2551  * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2552  * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2553  * storage marked @c __startup_data and replace it with the proper allocation
2554  * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2555  *
2556  * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2557  * provides the proper mangling of the storage into a "fake" percpu pointer so
2558  * that accesses through @c zpercpu_get() functions properly.
2559  *
2560  * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2561  */
2562 #define __zpcpu_mangle_for_boot(ptr)  ({ \
2563 	assert(startup_phase < STARTUP_SUB_ZALLOC); \
2564 	__zpcpu_cast(ptr, __zpcpu_addr(ptr) - ptoa(cpu_number())); \
2565 })
2566 
2567 extern unsigned zpercpu_count(void) __pure2;
2568 
2569 #if DEBUG || DEVELOPMENT
/* zone_max_zones is here (but not zalloc_internal.h) for the BSD kernel */
2571 extern unsigned int zone_max_zones(void);
2572 
2573 extern size_t zone_pages_wired;
2574 extern size_t zone_guard_pages;
2575 #endif /* DEBUG || DEVELOPMENT */
2576 #if CONFIG_ZLEAKS
2577 extern uint32_t                 zleak_active;
2578 extern vm_size_t                zleak_max_zonemap_size;
2579 extern vm_size_t                zleak_per_zone_tracking_threshold;
2580 
2581 extern kern_return_t zleak_update_threshold(
2582 	vm_size_t              *arg,
2583 	uint64_t                value);
2584 #endif /* CONFIG_ZLEAKS */
2585 
2586 extern uint32_t                 zone_map_jetsam_limit;
2587 
2588 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2589 
2590 /* max length of a zone name we can take from boot-args/sysctl */
2591 #define MAX_ZONE_NAME   32
2592 
2593 #if DEVELOPMENT || DEBUG
2594 
2595 extern kern_return_t zone_reset_peak(const char *zonename);
2596 extern kern_return_t zone_reset_all_peaks(void);
2597 
2598 #endif /* DEVELOPMENT || DEBUG */
2599 
2600 extern zone_t percpu_u64_zone;
2601 
2602 /*!
2603  * @function mach_memory_info_sample
2604  *
2605  * @abstract
2606  * Helper function for mach_memory_info() (MACH) and memorystatus_collect_jetsam_snapshot_zprint() (BSD)
2607  * to collect wired memory information.
2608  *
2609  * @param names array with `*zonesCnt` elements.
2610  * @param info array with `*zonesCnt` elements.
2611  * @param coalesce array with `*zonesCnt` elements, must be set if `redact_info` is true.
2612  * @param zonesCnt set to the allocated count of the above, and on return will be the actual count.
2613  * @param memoryInfo optional, if set must have at least `vm_page_diagnose_estimate()` elements.
2614  * @param memoryInfoCnt optional, if set must be the count of memoryInfo, otherwise if set to 0 then on return will be `vm_page_diagnose_estimate()`.
2615  * @param redact_info if true sensitive information about zone allocations will be removed.
2616  */
2617 extern kern_return_t
2618 mach_memory_info_sample(
2619 	mach_zone_name_t *names,
2620 	mach_zone_info_t *info,
2621 	int              *coalesce,
2622 	unsigned int     *zonesCnt,
2623 	mach_memory_info_t *memoryInfo,
2624 	unsigned int       memoryInfoCnt,
2625 	bool               redact_info);
2626 
2627 extern void     zone_gc_trim(void);
2628 extern void     zone_gc_drain(void);
2629 
2630 __exported_pop
2631 #endif /* XNU_KERNEL_PRIVATE */
2632 
2633 /*
2634  * This macro is currently used by AppleImage4 (rdar://83924635)
2635  */
2636 #define __zalloc_ptr_load_and_erase(elem) \
2637 	os_ptr_load_and_erase(elem)
2638 
2639 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2640 
2641 #endif  /* _KERN_ZALLOC_H_ */
2642 
2643 #endif  /* KERNEL_PRIVATE */
2644