/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	zalloc.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	 1985
 *
 */

#ifdef  KERNEL_PRIVATE

#ifndef _KERN_ZALLOC_H_
#define _KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>
#include <os/alloc_util.h>
#include <os/atomic.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif /* XNU_KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
#define __zalloc_deprecated(msg)       __deprecated_msg(msg)
#else
#define __zalloc_deprecated(msg)
#endif

/*
 * Enable this macro to force type safe zalloc/zalloc_ro/...
 */
#ifndef ZALLOC_TYPE_SAFE
#if __has_ptrcheck
#define ZALLOC_TYPE_SAFE 1
#else
#define ZALLOC_TYPE_SAFE 0
#endif
#endif /* !ZALLOC_TYPE_SAFE */

__BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN

/*!
 * @macro __zpercpu
 *
 * @abstract
 * Annotation denoting a per-cpu pointer that requires use of
 * @c zpercpu_*() for access.
 */
#define __zpercpu __unsafe_indexable

/*!
 * @typedef zone_id_t
 *
 * @abstract
 * The type for a zone ID.
 */
typedef uint16_t zone_id_t;

/**
 * @enum zone_create_flags_t
 *
 * @abstract
 * Set of flags to pass to zone_create().
 *
 * @discussion
 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* flags win over such policies.
 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** (obsolete) */
	ZC_SEQUESTER            = 0x00000001,
	/** (obsolete) */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as read-only */
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Obsolete */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed; unlike with zinit(), this is not the default */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/** This zone is a built object cache */
	ZC_OBJ_CACHE            = 0x0080000000000000,

	/** Use guard pages in PGZ mode */
	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,

	/** Zone doesn't support TBI tagging */
	ZC_NOTBITAG             = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	/** Disable PGZ for this zone */
	ZC_NOPGZ                = 0x0800000000000000,

	/** This zone contains pure data */
	ZC_DATA                 = 0x1000000000000000,

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});

/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit either a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct kalloc_type_view    *zov_kt_heap;
	struct zone_view           *zov_view;
	struct zone                *zov_zone;
#ifdef __cplusplus
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif

/*!
 * @enum zone_create_ro_id_t
 *
 * @abstract
 * Zone creation IDs for external read only zones
 *
 * @discussion
 * Kexts that desire to use the RO allocator should:
 * 1. Add a zone creation id below
 * 2. Add a corresponding ID to @c zone_reserved_id_t
 * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
 * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
 * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
 */
__enum_decl(zone_create_ro_id_t, zone_id_t, {
	ZC_RO_ID_SANDBOX,
	ZC_RO_ID_PROFILE,
	ZC_RO_ID_PROTOBOX,
	ZC_RO_ID_SB_FILTER,
	ZC_RO_ID_AMFI_OSENTITLEMENTS,
	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
});

/*!
 * @function zone_create
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * A Zone is a slab allocator that returns objects of a given size very quickly.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t   zone_create(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags);
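
/*
 * A minimal usage sketch for zone_create(); illustrative only,
 * @c struct widget and @c widget_zone are hypothetical names and
 * not part of this interface.
 *
 * <code>
 *     struct widget {
 *         uint64_t       w_id;
 *         struct widget *w_next;
 *     };
 *
 *     static SECURITY_READ_ONLY_LATE(zone_t) widget_zone;
 *
 *     static void
 *     widget_zone_init(void)
 *     {
 *         // ZC_ZFREE_CLEARMEM: elements are scrubbed on free.
 *         widget_zone = zone_create("widget", sizeof(struct widget),
 *             ZC_ZFREE_CLEARMEM);
 *     }
 * </code>
 */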

/*!
 * @function zone_create_ro
 *
 * @abstract
 * Creates a read only zone with the specified parameters from kexts
 *
 * @discussion
 * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
 * kexts. Do not use this API to create read only zones in xnu.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
 *
 * @returns             the zone ID of the created zone, this call never fails.
 */
extern zone_id_t   zone_create_ro(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_create_ro_id_t     zc_ro_id);
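
/*
 * A sketch of the kext flow described at @c zone_create_ro_id_t;
 * illustrative only, @c struct sb_profile and the variable names
 * are hypothetical.
 *
 * <code>
 *     // steps 3/4: create the zone once, keep the ID in RO-late memory
 *     static SECURITY_READ_ONLY_LATE(zone_id_t) sb_profile_zid;
 *
 *     static void
 *     sb_profile_zone_init(void)
 *     {
 *         sb_profile_zid = zone_create_ro("sb.profile",
 *             sizeof(struct sb_profile), ZC_NONE, ZC_RO_ID_SANDBOX);
 *     }
 *
 *     // step 5: allocate/free through the saved ID
 *     struct sb_profile *
 *     sb_profile_alloc(void)
 *     {
 *         return zalloc_ro(sb_profile_zid, Z_WAITOK | Z_ZERO);
 *     }
 * </code>
 */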

/*!
 * @function zdestroy
 *
 * @abstract
 * Destroys a zone previously made with zone_create.
 *
 * @discussion
 * Zones must have been made destructible for @c zdestroy() to be allowed,
 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
 *
 * @param zone          the zone to destroy.
 */
extern void     zdestroy(
	zone_t          zone);

/*!
 * @function zone_require
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void     zone_require(
	zone_t          zone,
	void           *addr __unsafe_indexable);

/*!
 * @function zone_require_ro
 *
 * @abstract
 * Version of zone_require() intended for zones created with ZC_READONLY.
 *
 * @discussion
 * This check is not sufficient to fully trust the element.
 *
 * Another check of its content must be performed to prove
 * that the element is "the right one". A typical technique,
 * for when the RO data structure is 1:1 with a mutable one,
 * is a simple circularity check with a very strict lifetime
 * (both the mutable and read-only data structures are made
 * and destroyed as close together as possible).
 *
 * @param zone_id       the zone id the address needs to belong to.
 * @param elem_size     the element size for this zone.
 * @param addr          the element address to check.
 */
extern void     zone_require_ro(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr __unsafe_indexable);

/*!
 * @enum zalloc_flags_t
 *
 * @brief
 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
 *
 * @discussion
 * It is encouraged that any callsite passing flags uses exactly one of:
 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
 * if nothing else was specified.
 *
 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
 * then @c Z_WAITOK is ignored.
 *
 * @const Z_WAITOK
 * Passing this flag means that zalloc() will be allowed to sleep
 * for memory to become available for this allocation. If the zone
 * isn't exhaustible, zalloc(Z_WAITOK) never fails.
 *
 * If the zone is exhaustible, zalloc() might still fail if the zone
 * is at its maximum allowed memory usage, unless Z_NOFAIL is passed,
 * in which case zalloc() will block until an element is freed.
 *
 * @const Z_NOWAIT
 * Passing this flag means that zalloc is not allowed to ever block.
 *
 * @const Z_NOPAGEWAIT
 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait on the VM for pages when under
 * memory pressure.
 *
 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
 *
 * @const Z_NOFAIL
 * Passing this flag means that the caller expects the allocation to always
 * succeed. This will result in a panic if this assumption isn't correct.
 *
 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT.
 * For exhaustible zones, it forces the caller to wait until a zfree() happens
 * if the zone has reached its maximum number of allowed elements.
 *
 * @const Z_REALLOCF
 * For the realloc family of functions,
 * free the incoming memory on failure cases.
 *
 #if XNU_KERNEL_PRIVATE
 * @const Z_SET_NOTSHARED
 * Using this flag from external allocation APIs (kalloc_type/zalloc)
 * allows the callsite to skip the shared zone for that sizeclass and
 * directly allocate from the requested zone.
 * Using this flag from internal APIs (zalloc_ext) will skip the shared
 * zone only when a given threshold is exceeded. It will also set a flag
 * to indicate that future allocations to the zone should directly go to
 * the zone instead of the shared zone.
 *
 * @const Z_SPRAYQTN
 * This flag tells the VM to allocate from the "spray quarantine" range when
 * it services the allocation. For more details on what allocations qualify
 * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
 *
 * @const Z_KALLOC_ARRAY
 * Instead of returning a standard "pointer" return a pointer that encodes
 * its size-class into the pointer itself (Only for kalloc, might limit
 * the range of allocations that can be done).
 *
 * @const Z_FULLSIZE
 * Used to indicate that the caller will use all available space in excess
 * from the requested allocation size.
 *
 * @const Z_SKIP_KASAN
 * Tell zalloc() not to do any kasan adjustments.
 *
 * @const Z_MAY_COPYINMAP
 * This data allocation might be used with vm_map_copyin().
 * This allows for those allocations to be associated with a proper VM object.
 *
 * @const Z_VM_TAG_BT_BIT
 * Used to blame allocation accounting on the first kext
 * found in the backtrace of the allocation.
 *
 * @const Z_NOZZC
 * Used internally to mark allocations that will skip zero validation.
 *
 * @const Z_PCPU
 * Used internally for the percpu paths.
 *
 * @const Z_VM_TAG_MASK
 * Represents bits in which a vm_tag_t for the allocation can be passed.
 * (used by kalloc for the zone tagging debugging feature).
 #endif
 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,
	Z_REALLOCF      = 0x0008,

#if XNU_KERNEL_PRIVATE
	Z_SET_NOTSHARED = 0x0040,
	Z_SPRAYQTN      = 0x0080,
	Z_KALLOC_ARRAY  = 0x0100,
#if KASAN_CLASSIC
	Z_FULLSIZE      = 0x0000,
#else
	Z_FULLSIZE      = 0x0200,
#endif
#if KASAN_CLASSIC
	Z_SKIP_KASAN    = 0x0400,
#else
	Z_SKIP_KASAN    = 0x0000,
#endif
	Z_MAY_COPYINMAP = 0x0800,
	Z_VM_TAG_BT_BIT = 0x1000,
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL,
#if XNU_KERNEL_PRIVATE
	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
#endif

	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
#if XNU_KERNEL_PRIVATE
	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT        16
#define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
#define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
#endif
});

/*
 * This type is used so that kalloc_internal has good calling conventions
 * for callers who want to cheaply both know the allocated address
 * and the actual size of the allocation.
 */
struct kalloc_result {
	void         *addr __sized_by(size);
	vm_size_t     size;
};

/*!
 * @typedef zone_stats_t
 *
 * @abstract
 * The opaque type for per-cpu zone stats that are accumulated per zone
 * or per zone-view.
 */
typedef struct zone_stats *__zpercpu zone_stats_t;

/*!
 * @typedef zone_view_t
 *
 * @abstract
 * A view on a zone for accounting purposes.
 *
 * @discussion
 * A zone view uses the zone it references for the allocations backing store,
 * but does the allocation accounting at the view level.
 *
 * This accounting is surfaced by @b zprint(1) and similar tools,
 * which allow for a cheap but finer-grained understanding of allocations
 * without any fragmentation cost.
 *
 * Zone views are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;
	zone_stats_t    zv_stats;
	const char     *zv_name __unsafe_indexable;
	zone_view_t     zv_next;
};

/*!
 * @typedef kalloc_type_view_t
 *
 * @abstract
 * The opaque type created at kalloc_type callsites to redirect calls to
 * the right zone.
 */
typedef struct kalloc_type_view *kalloc_type_view_t;

#if XNU_KERNEL_PRIVATE
/*
 * kalloc_type/kfree_type implementation functions
 */
extern void *__unsafe_indexable kalloc_type_impl_internal(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

extern void kfree_type_impl_internal(
	kalloc_type_view_t kt_view,
	void               *ptr __unsafe_indexable);

static inline void *__unsafe_indexable
kalloc_type_impl(
	kalloc_type_view_t      kt_view,
	zalloc_flags_t          flags)
{
	void *__unsafe_indexable addr = kalloc_type_impl_internal(kt_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define kfree_type_impl(kt_view, ptr) \
	kfree_type_impl_internal(kt_view, (ptr))

#else /* XNU_KERNEL_PRIVATE */

extern void *__unsafe_indexable kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

static inline void *__unsafe_indexable
__kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags)
{
	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)

extern void kfree_type_impl(
	kalloc_type_view_t  kt_view,
	void                *ptr __unsafe_indexable);

#endif /* XNU_KERNEL_PRIVATE */

/*!
 * @function zalloc
 *
 * @abstract
 * Allocates an element from a specified zone.
 *
 * @discussion
 * If the zone isn't exhaustible and is expandable, this call never fails.
 *
 * @param zone          the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc(
	zone_t          zone);

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc(zone_view_t view)
{
	return zalloc((zone_t)view);
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc(kalloc_type_view_t kt_view)
{
	return (kalloc_type_impl)(kt_view, Z_WAITOK);
}

/*!
 * @function zalloc_noblock
 *
 * @abstract
 * Allocates an element from a specified zone, but never blocks.
 *
 * @discussion
 * This call is suitable for preemptible code, however allocation
 * isn't allowed from interrupt context.
 *
 * @param zone          the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_noblock(
	zone_t          zone);

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc_noblock(zone_view_t view)
{
	return zalloc_noblock((zone_t)view);
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc_noblock(kalloc_type_view_t kt_view)
{
	return (kalloc_type_impl)(kt_view, Z_NOWAIT);
}

/*!
 * @function zalloc_flags()
 *
 * @abstract
 * Allocates an element from a specified zone, with flags.
 *
 * @param zone          the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_flags(
	zone_t          zone,
	zalloc_flags_t  flags);

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
__zalloc_flags(
	zone_t          zone,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_flags)(zone, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
__zalloc_flags(
	zone_view_t     view,
	zalloc_flags_t  flags)
{
	return __zalloc_flags((zone_t)view, flags);
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
__zalloc_flags(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags)
{
	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
#define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
#else
#define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
#endif
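
/*
 * Usage sketch for zalloc_flags(), with the hypothetical
 * @c widget_zone from the zone_create() example above.
 *
 * <code>
 *     // Z_NOWAIT can fail: the caller must handle NULL.
 *     struct widget *w = zalloc_flags(widget_zone, Z_NOWAIT | Z_ZERO);
 *     if (w == NULL) {
 *         return ENOMEM;
 *     }
 *
 *     // Z_NOFAIL never returns NULL, and lets the compiler assume so
 *     // (see the __builtin_assume() in __zalloc_flags() above).
 *     struct widget *w2 = zalloc_flags(widget_zone, Z_WAITOK_ZERO_NOFAIL);
 * </code>
 */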

/*!
 * @macro zalloc_id
 *
 * @abstract
 * Allocates an element from a specified zone ID, with flags.
 *
 * @param zid           The proper @c ZONE_ID_* constant.
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_id(
	zone_id_t       zid,
	zalloc_flags_t  flags);

__attribute__((malloc))
static inline void *__unsafe_indexable
__zalloc_id(
	zone_id_t       zid,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#if XNU_KERNEL_PRIVATE
#define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
#else
#define zalloc_id(zid, fl) __zalloc_id(zid, fl)
#endif
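
/*
 * Usage sketch for zalloc_id()/zfree_id(), using one of the
 * pre-registered IDs from @c zone_reserved_id_t below (XNU only);
 * the surrounding code is illustrative.
 *
 * <code>
 *     struct fileproc *fp;
 *
 *     fp = zalloc_id(ZONE_ID_FILEPROC, Z_WAITOK | Z_ZERO);
 *     ...
 *     zfree_id(ZONE_ID_FILEPROC, fp);  // also scrubs the fp variable
 * </code>
 */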

/*!
 * @function zalloc_ro
 *
 * @abstract
 * Allocates an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_ro(
	zone_id_t       zone_id,
	zalloc_flags_t  flags);

__attribute__((malloc))
static inline void *__unsafe_indexable
__zalloc_ro(
	zone_id_t       zone_id,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#if XNU_KERNEL_PRIVATE
#define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
#else
#define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
#endif

/*!
 * @function zalloc_ro_mut
 *
 * @abstract
 * Modifies an element from a specified read-only zone.
 *
 * @discussion
 * Modifying compiler-assisted authenticated pointers using this function will
 * not result in a signed pointer being written.  The caller is expected to
 * sign the value appropriately beforehand if they wish to do this.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param offset        offset from element
 * @param new_data      pointer to new data
 * @param new_data_size size of modification
 *
 */
extern void zalloc_ro_mut(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable,
	vm_offset_t     offset,
	const void     *new_data __sized_by(new_data_size),
	vm_size_t       new_data_size);

/*!
 * @function zalloc_ro_update_elem
 *
 * @abstract
 * Update the value of an entire element allocated in the read only allocator.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param new_data      pointer to new data
 *
 */
#define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
	const typeof(*(elem)) *__new_data = (new_data);                        \
	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
})

/*!
 * @function zalloc_ro_update_field
 *
 * @abstract
 * Update a single field of an element allocated in the read only allocator.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param field         the element field to be modified
 * @param value         pointer to new data
 *
 */
#define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
	const typeof((elem)->field) *__value = (value);                        \
	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
	    __value, sizeof((elem)->field));                                   \
})
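
/*
 * Sketch of updating read-only elements; illustrative only,
 * @c struct cred_ro, @c cred_zid and the fields are hypothetical.
 * Writes must go through zalloc_ro_mut() or its wrappers, since the
 * element's page cannot be stored to directly.
 *
 * <code>
 *     struct cred_ro {
 *         uint32_t  cr_flags;
 *         void     *cr_label;
 *     };
 *
 *     struct cred_ro *cred = zalloc_ro(cred_zid, Z_WAITOK | Z_ZERO);
 *
 *     // replace the whole element from a mutable template...
 *     struct cred_ro tmpl = { .cr_flags = 1, .cr_label = NULL };
 *     zalloc_ro_update_elem(cred_zid, cred, &tmpl);
 *
 *     // ...or update a single field
 *     uint32_t nflags = 2;
 *     zalloc_ro_update_field(cred_zid, cred, cr_flags, &nflags);
 * </code>
 */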

#define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64

/*!
 * @enum zro_atomic_op_t
 *
 * @brief
 * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
 * atomic operations.
 *
 * @discussion
 * This enum provides all flavors of atomic operations supported in sizes 8,
 * 16, 32, 64 bits.
 *
 * @const ZRO_ATOMIC_OR_*
 * To perform an @c os_atomic_or
 *
 * @const ZRO_ATOMIC_XOR_*
 * To perform an @c os_atomic_xor
 *
 * @const ZRO_ATOMIC_AND_*
 * To perform an @c os_atomic_and
 *
 * @const ZRO_ATOMIC_ADD_*
 * To perform an @c os_atomic_add
 *
 * @const ZRO_ATOMIC_XCHG_*
 * To perform an @c os_atomic_xchg
 *
 */
__enum_decl(zro_atomic_op_t, uint32_t, {
	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,

	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,

	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,

	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,

	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,

	/* convenient spellings */
	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
});

/*!
 * @function zalloc_ro_mut_atomic
 *
 * @abstract
 * Atomically update an offset in an element allocated in the read only
 * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param offset        offset in the element to be modified
 * @param op            atomic operation to perform (see @c zro_atomic_op_t)
 * @param value         value for the atomic operation
 *
 */
extern uint64_t zalloc_ro_mut_atomic(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable,
	vm_offset_t     offset,
	zro_atomic_op_t op,
	uint64_t        value);

/*!
 * @macro zalloc_ro_update_field_atomic
 *
 * @abstract
 * Atomically update a single field of an element allocated in the read only
 * allocator.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param field         the element field to be modified
 * @param op            atomic operation to perform (see @c zro_atomic_op_t)
 * @param value         value for the atomic operation
 *
 */
#define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
	const typeof((elem)->field) __value = (value);                         \
	static_assert(sizeof(__value) == (op & 0xf));                          \
	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
})
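
/*
 * Sketch for zalloc_ro_update_field_atomic(), continuing the
 * hypothetical @c struct cred_ro example above. The op width must
 * match the field width (checked by the static_assert in the macro).
 *
 * <code>
 *     // atomically OR a bit into the 32-bit cr_flags field
 *     zalloc_ro_update_field_atomic(cred_zid, cred, cr_flags,
 *         ZRO_ATOMIC_OR_32, 0x4);
 * </code>
 */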

/*!
 * @function zalloc_ro_clear
 *
 * @abstract
 * Zeroes an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param offset        offset from element
 * @param size          size of modification
 */
extern void    zalloc_ro_clear(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable,
	vm_offset_t     offset,
	vm_size_t       size);

/*!
 * @function zalloc_ro_clear_field
 *
 * @abstract
 * Zeroes the specified field of an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param field         the element field to be cleared
 */
#define zalloc_ro_clear_field(zone_id, elem, field) \
	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    sizeof((elem)->field))

/*!
 * @function zfree_id()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_id(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_id(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree_ro()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_ro().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_ro(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree
 *
 * @abstract
 * Frees an element allocated with @c zalloc*.
 *
 * @discussion
 * If the element being freed doesn't belong to the specified zone,
 * then this call will panic.
 *
 * @param zone          the zone or zone view to free the element to.
 * @param elem          the element to free
 */
extern void     zfree(
	zone_t          zone,
	void           *elem __unsafe_indexable);

__attribute__((overloadable))
static inline void
zfree(
	zone_view_t     view,
	void           *elem __unsafe_indexable)
{
	zfree((zone_t)view, elem);
}

__attribute__((overloadable))
static inline void
zfree(
	kalloc_type_view_t   kt_view,
	void                *elem __unsafe_indexable)
{
	return kfree_type_impl(kt_view, elem);
}

#define zfree(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})
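
/*
 * Note that the zfree() wrapper macro loads and erases the caller's
 * pointer variable, so stale pointers don't linger. A sketch, with
 * the hypothetical @c widget_zone from above:
 *
 * <code>
 *     struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *     ...
 *     zfree(widget_zone, w);   // w is scrubbed by the macro
 * </code>
 */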


/* deprecated KPIs */

__zalloc_deprecated("use zone_create()")
extern zone_t   zinit(
	vm_size_t       size,           /* the size of an element */
	vm_size_t       maxmem,         /* maximum memory to use */
	vm_size_t       alloc,          /* allocation size */
	const char      *name __unsafe_indexable);

#pragma mark: implementation details

#define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
#define __ZONE_DECLARE_TYPE2(var, type_t) \
	__attribute__((visibility("hidden"))) \
	extern type_t *__single __zalloc__##var##__type_name

#ifdef XNU_KERNEL_PRIVATE
#pragma mark - XNU only interfaces

#include <kern/cpu_number.h>

#pragma GCC visibility push(hidden)

#pragma mark XNU only: zalloc (extended)

#define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
#define ZALIGN_16               (sizeof(uint16_t) - 1)
#define ZALIGN_32               (sizeof(uint32_t) - 1)
#define ZALIGN_PTR              (sizeof(void *)   - 1)
#define ZALIGN_64               (sizeof(uint64_t) - 1)
#define ZALIGN(t)               (_Alignof(t)      - 1)


/*!
 * @function zalloc_permanent_tag()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation cannot be determined
 * by zone_element_size, so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 * @param tag           the tag to use for allocations larger than a page.
 *
 * @returns             the allocated element
 */
__attribute__((malloc))
extern void *__sized_by(size) zalloc_permanent_tag(
	vm_size_t       size,
	vm_offset_t     align_mask,
	vm_tag_t        tag)
__attribute__((__diagnose_if__((align_mask & (align_mask + 1)),
    "align mask looks invalid", "error")));

/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation cannot be determined
 * by zone_element_size, so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
#define zalloc_permanent(size, align) \
	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	__unsafe_forge_single(type_t *, \
	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
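
/*
 * Sketch: permanent allocations are never freed, which suits
 * boot-time singletons; illustrative only, @c struct boot_args_info
 * is hypothetical.
 *
 * <code>
 *     struct boot_args_info {
 *         uint64_t    bai_flags;
 *     };
 *
 *     // zero-filled, naturally aligned, lives for the kernel lifetime
 *     struct boot_args_info *info;
 *     info = zalloc_permanent_type(struct boot_args_info);
 * </code>
 */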

/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void zalloc_first_proc_made(void);
/*!
 * @function zalloc_iokit_lockdown()
 *
 * @abstract
 * Declare that iokit matching has started.
 */
extern void zalloc_iokit_lockdown(void);

#pragma mark XNU only: per-cpu allocations

/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)(cpu)))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)

/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void *__zpercpu zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

static inline void *__zpercpu
__zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
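
/*
 * Per-CPU usage sketch; illustrative only, @c counter_t and
 * @c counter_zone (a zone created with ZC_PERCPU) are hypothetical.
 *
 * <code>
 *     typedef struct { uint64_t c_cnt; } counter_t;
 *
 *     counter_t *__zpercpu counters;
 *
 *     counters = zalloc_percpu(counter_zone, Z_WAITOK | Z_ZERO);
 *
 *     // touch this CPU's slot (caller handles preemption)
 *     zpercpu_get(counters)->c_cnt++;
 *
 *     // aggregate across all CPUs
 *     uint64_t total = 0;
 *     zpercpu_foreach(it, counters) {
 *         total += it->c_cnt;
 *     }
 * </code>
 */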

/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_percpu(
	zone_or_view_t  zone_or_view,
	void *__zpercpu addr);

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void *__zpercpu zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a given
 * type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))


#pragma mark XNU only: SMR support for zones

struct smr;

/*!
 * @typedef zone_smr_free_cb_t
 *
 * @brief
 * Type for the delayed free callback for SMR zones.
 *
 * @description
 * This function is called before an element is reused,
 * or when memory is returned to the system.
 *
 * This function MUST zero the element, and if no special
 * action is to be taken on free, then @c bzero() is a fine
 * callback to use.
 *
 * This function also must be preemption-disabled safe,
 * as it runs with preemption disabled.
 *
 *
 * Note that this function should only clean the fields
 * that must be preserved for stale SMR readers to see.
 * Any field that is accessed after element validation
 * such as a try-retain or acquiring a lock on it must
 * be cleaned up much earlier as they might hold onto
 * expensive resources.
 *
 * The suggested pattern for an SMR type using this facility,
 * is to have 2 functions:
 *
 * - one "retire" stage that tries to clean up as much from
 *   the element as possible, with great care to leave no dangling
 *   pointers around, as elements in this stage might linger
 *   in the allocator for a long time, and this could possibly
 *   be abused during UaF exploitation.
 *
 * - one "smr_free" function which cleans up whatever was left,
 *   and zeroes the rest of the element.
 *
 * <code>
 *     void
 *     type_retire(type_t elem)
 *     {
 *         // invalidating the element makes most fields
 *         // inaccessible to readers.
 *         type_mark_invalid(elem);
 *
 *         // do cleanups for things requiring a validity check
 *         kfree_type(some_type_t, elem->expensive_thing);
 *         type_remove_from_global_list(&elem->linkage);
 *
 *         zfree_smr(type_zone, elem);
 *     }
 *
 *     void
 *     type_smr_free(void *_elem)
 *     {
 *         type_t elem = _elem;
 *
 *         // cleanup fields that are used to "find" this element
 *         // and that SMR readers may access hazardously.
 *         lck_ticket_destroy(&elem->lock);
 *         kfree_data(elem->key, elem->keylen);
 *
 *         // compulsory: element must be zeroed fully
 *         bzero(elem, sizeof(*elem));
 *     }
 * </code>
 */
typedef void (*zone_smr_free_cb_t)(void *, size_t);

/*!
 * @function zone_enable_smr()
 *
 * @abstract
 * Enable SMR for a zone.
 *
 * @discussion
 * This can only be done once, and must be done before
 * the first allocation is made with this zone.
 *
 * @param zone          the zone to enable SMR for
 * @param smr           the smr domain to use
 * @param free_cb       the free callback to use
 */
extern void     zone_enable_smr(
	zone_t                  zone,
	struct smr             *smr,
	zone_smr_free_cb_t      free_cb);

/*!
 * @function zone_id_enable_smr()
 *
 * @abstract
 * Enable SMR for a zone ID.
 *
 * @discussion
 * This can only be done once, and must be done before
 * the first allocation is made with this zone.
 *
 * @param zone_id       the zone to enable SMR for
 * @param smr           the smr domain to use
 * @param free_cb       the free callback to use
 */
#define zone_id_enable_smr(zone_id, smr, free_cb)  ({ \
	void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t);      \
	                                                                         \
	__cb = (free_cb);                                                        \
	zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb);     \
})
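
/*
 * Sketch of enabling SMR at zone setup time; illustrative only,
 * @c widget_zone and @c widget_smr are hypothetical. Since the
 * callback must zero the element, bzero() is the natural choice
 * when no other cleanup is needed (see zone_smr_free_cb_t above).
 *
 * <code>
 *     zone_enable_smr(widget_zone, &widget_smr, bzero);
 * </code>
 */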

/*!
 * @macro zalloc_smr()
 *
 * @abstract
 * Allocates an element from an SMR enabled zone
 *
 * @discussion
 * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
 *
 * @param zone          the zone to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
#define zalloc_smr(zone, flags) \
	zalloc_flags(zone, flags)

/*!
 * @macro zalloc_id_smr()
 *
 * @abstract
 * Allocates an element from a specified zone ID with SMR enabled.
 *
 * @param zid           The proper @c ZONE_ID_* constant.
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
#define zalloc_id_smr(zid, flags) \
	zalloc_id(zid, flags)

/*!
 * @macro zfree_smr()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_smr().
 *
 * @discussion
 * When zfree_smr() is called, the element is not immediately zeroed,
 * and the "free" callback that has been registered with the zone will
 * run later (@see zone_smr_free_cb_t).
 *
 * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
 *
 *
 * It is guaranteed that the SMR timestamp associated with an element
 * will always be equal to or greater than the stamp associated with
 * elements freed before it on the same thread.
 *
 * This means that when freeing multiple elements in a sequence, these
 * must be freed in topological order (parents before children).
 *
 * It is worth noting that calling zfree_smr() on several elements
 * in a given order doesn't necessarily mean they will be effectively
 * reused or cleaned up in that same order, only that their SMR clocks
 * will expire in that order.
 *
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
extern void     zfree_smr(
	zone_t          zone,
	void           *elem __unsafe_indexable);
#define zfree_smr(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree_id_smr()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id_smr().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_id_smr(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_id_smr(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})

/*!
 * @macro zfree_smr_noclear()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_smr().
 *
 * @discussion
 * This variant doesn't clear the pointer passed as an argument,
 * as it is often required for SMR algorithms to function correctly
 * to leave pointers "dangling" to an extent.
 *
 * However it expects the field in question to be an SMR_POINTER()
 * struct.
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
#define zfree_smr_noclear(zone, elem) \
	(zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))

/*!
 * @macro zfree_id_smr_noclear()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id_smr().
 *
 * @discussion
 * This variant doesn't clear the pointer passed as an argument,
 * as it is often required for SMR algorithms to function correctly
 * to leave pointers "dangling" to an extent.
 *
 * However it expects the field in question to be an SMR_POINTER()
 * struct.
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
#define zfree_id_smr_noclear(zone, elem) \
	(zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))


#pragma mark XNU only: zone creation (extended)

/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require()
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creation IDs
1569  * listed in @c zone_create_ro_id_t.
1570  *
1571  * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1572  * @c zone_create().
1573  */
1574 __enum_decl(zone_reserved_id_t, zone_id_t, {
1575 	ZONE_ID__ZERO,
1576 
1577 	ZONE_ID_PERMANENT,
1578 	ZONE_ID_PERCPU_PERMANENT,
1579 
1580 	ZONE_ID_THREAD_RO,
1581 	ZONE_ID_MAC_LABEL,
1582 	ZONE_ID_PROC_RO,
1583 	ZONE_ID_PROC_SIGACTS_RO,
1584 	ZONE_ID_KAUTH_CRED,
1585 	ZONE_ID_CS_BLOB,
1586 
1587 	ZONE_ID_SANDBOX_RO,
1588 	ZONE_ID_PROFILE_RO,
1589 	ZONE_ID_PROTOBOX,
1590 	ZONE_ID_SB_FILTER,
1591 	ZONE_ID_AMFI_OSENTITLEMENTS,
1592 
1593 	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
1594 	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
1595 	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
1596 	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,
1597 
1598 	ZONE_ID_PMAP,
1599 	ZONE_ID_VM_MAP,
1600 	ZONE_ID_VM_MAP_ENTRY,
1601 	ZONE_ID_VM_MAP_HOLES,
1602 	ZONE_ID_VM_MAP_COPY,
1603 	ZONE_ID_VM_PAGES,
1604 	ZONE_ID_IPC_PORT,
1605 	ZONE_ID_IPC_PORT_SET,
1606 	ZONE_ID_IPC_KMSG,
1607 	ZONE_ID_IPC_VOUCHERS,
1608 	ZONE_ID_PROC_TASK,
1609 	ZONE_ID_THREAD,
1610 	ZONE_ID_TURNSTILE,
1611 	ZONE_ID_SEMAPHORE,
1612 	ZONE_ID_SELECT_SET,
1613 	ZONE_ID_FILEPROC,
1614 
1615 #if !CONFIG_MBUF_MCACHE
1616 	ZONE_ID_MBUF_REF,
1617 	ZONE_ID_MBUF,
1618 	ZONE_ID_CLUSTER_2K,
1619 	ZONE_ID_CLUSTER_4K,
1620 	ZONE_ID_CLUSTER_16K,
1621 	ZONE_ID_MBUF_CLUSTER_2K,
1622 	ZONE_ID_MBUF_CLUSTER_4K,
1623 	ZONE_ID_MBUF_CLUSTER_16K,
1624 #endif /* !CONFIG_MBUF_MCACHE */
1625 
1626 	ZONE_ID__FIRST_DYNAMIC,
1627 });
1628 
1629 /*!
1630  * @const ZONE_ID_ANY
1631  * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1632  * Zone ID.
1633  */
1634 #define ZONE_ID_ANY ((zone_id_t)-1)
1635 
1636 /*!
1637  * @const ZONE_ID_INVALID
1638  * An invalid zone_id_t that corresponds to nothing.
1639  */
1640 #define ZONE_ID_INVALID ((zone_id_t)-2)
1641 
1642 /**!
1643  * @function zone_by_id
1644  *
1645  * @param zid           the specified zone ID.
1646  * @returns             the zone with that ID.
1647  */
1648 zone_t zone_by_id(
1649 	size_t                  zid) __pure2;
1650 
1651 /**!
1652  * @function zone_name
1653  *
1654  * @param zone          the specified zone
1655  * @returns             the name of the specified zone.
1656  */
1657 const char *__unsafe_indexable zone_name(
1658 	zone_t                  zone);
1659 
1660 /**!
1661  * @function zone_heap_name
1662  *
1663  * @param zone          the specified zone
1664  * @returns             the name of the heap this zone is part of, or "".
1665  */
1666 const char *__unsafe_indexable zone_heap_name(
1667 	zone_t                  zone);
1668 
1669 /*!
1670  * @function zone_create_ext
1671  *
1672  * @abstract
1673  * Creates a zone with the specified parameters.
1674  *
1675  * @discussion
1676  * This is an extended version of @c zone_create().
1677  *
1678  * @param name          the name for the new zone.
1679  * @param size          the size of the elements returned by this zone.
1680  * @param flags         a set of @c zone_create_flags_t flags.
1681  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1682  *
1683  * @param extra_setup   a block that can perform non trivial initialization
1684  *                      on the zone before it is marked valid.
1685  *                      This block can call advanced setups like:
1686  *                      - zone_set_exhaustible()
1687  *
1688  * @returns             the created zone, this call never fails.
1689  */
1690 extern zone_t   zone_create_ext(
1691 	const char             *name __unsafe_indexable,
1692 	vm_size_t               size,
1693 	zone_create_flags_t     flags,
1694 	zone_id_t               desired_zid,
1695 	void                  (^extra_setup)(zone_t));
1696 
1697 /*!
1698  * @macro ZONE_DECLARE
1699  *
1700  * @abstract
1701  * Declares a zone variable and its associated type.
1702  *
1703  * @param var           the name of the variable to declare.
1704  * @param type_t        the type of elements in the zone.
1705  */
1706 #define ZONE_DECLARE(var, type_t) \
1707 	extern zone_t var; \
1708 	__ZONE_DECLARE_TYPE(var, type_t)
1709 
1710 /*!
1711  * @macro ZONE_DECLARE_ID
1712  *
1713  * @abstract
1714  * Declares the type associated with a zone ID.
1715  *
1716  * @param id            the name of the zone ID to associate a type with.
1717  * @param type_t        the type of elements in the zone.
1718  */
1719 #define ZONE_DECLARE_ID(id, type_t) \
1720 	__ZONE_DECLARE_TYPE(id, type_t)
1721 
1722 /*!
1723  * @macro ZONE_DEFINE
1724  *
1725  * @abstract
1726  * Declares a zone variable to automatically initialize with the specified
1727  * parameters.
1728  *
1729  * @discussion
1730  * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1731  *
1732  * @param var           the name of the variable to declare.
1733  * @param name          the name for the zone
1734  * @param size          the size of the elements returned by this zone.
1735  * @param flags         a set of @c zone_create_flags_t flags.
1736  */
1737 #define ZONE_DEFINE(var, name, size, flags) \
1738 	SECURITY_READ_ONLY_LATE(zone_t) var; \
1739 	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
1740 	static __startup_data struct zone_create_startup_spec \
1741 	__startup_zone_spec_ ## var = { &var, name, size, flags, \
1742 	    ZONE_ID_ANY, NULL }; \
1743 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1744 	    &__startup_zone_spec_ ## var)
1745 
1746 /*!
1747  * @macro ZONE_DEFINE_TYPE
1748  *
1749  * @abstract
1750  * Defines a zone variable to automatically initialize with the specified
1751  * parameters, associated with a particular type.
1752  *
1753  * @param var           the name of the variable to declare.
1754  * @param name          the name for the zone
1755  * @param type_t        the type of elements in the zone.
1756  * @param flags         a set of @c zone_create_flags_t flags.
1757  */
1758 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1759 	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1760 	__ZONE_DECLARE_TYPE(var, type_t)
1761 
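/*
 * A sketch of the usual pairing (names hypothetical): the header declares the
 * zone and its element type, and exactly one translation unit defines it.
 *
 *	// foo.h
 *	ZONE_DECLARE(foo_zone, struct foo);
 *
 *	// foo.c
 *	ZONE_DEFINE_TYPE(foo_zone, "example.foo", struct foo, ZC_NONE);
 *
 * With ZALLOC_TYPE_SAFE in effect, zalloc(foo_zone) is then typed as
 * (struct foo *) rather than (void *).
 */
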
1762 /*!
1763  * @macro ZONE_DEFINE_ID
1764  *
1765  * @abstract
1766  * Initializes a given zone automatically during startup with the specified
1767  * parameters.
1768  *
1769  * @param zid           a @c zone_reserved_id_t value.
1770  * @param name          the name for the zone
1771  * @param type_t        the type of elements in the zone.
1772  * @param flags         a set of @c zone_create_flags_t flags.
1773  */
1774 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1775 	ZONE_DECLARE_ID(zid, type_t); \
1776 	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1777 
1778 /*!
1779  * @macro ZONE_INIT
1780  *
1781  * @abstract
1782  * Initializes a given zone automatically during startup with the specified
1783  * parameters.
1784  *
1785  * @param var           the name of the variable to initialize.
1786  * @param name          the name for the zone
1787  * @param size          the size of the elements returned by this zone.
1788  * @param flags         a set of @c zone_create_flags_t flags.
1789  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1790  * @param extra_setup   a block that can perform non-trivial initialization
1791  *                      (@see @c zone_create_ext()).
1792  */
1793 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1794 	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1795 
1796 /*!
1797  * @function zone_id_require
1798  *
1799  * @abstract
1800  * Requires that a given pointer belong to the specified zone, by ID and size.
1801  *
1802  * @discussion
1803  * The function panics if the check fails as it indicates that the kernel
1804  * internals have been compromised.
1805  *
1806  * This is a variant of @c zone_require() which:
1807  * - isn't sensitive to @c zone_t::elem_size being compromised,
1808  * - is slightly faster as it saves one load and a multiplication.
1809  *
1810  * @param zone_id       the zone ID the address needs to belong to.
1811  * @param elem_size     the size of elements for this zone.
1812  * @param addr          the element address to check.
1813  */
1814 extern void     zone_id_require(
1815 	zone_id_t               zone_id,
1816 	vm_size_t               elem_size,
1817 	void                   *addr __unsafe_indexable);
1818 
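/*
 * Sketch: before trusting a pointer that is supposed to be an IPC port, panic
 * unless it really is an element of the ipc_port zone (this assumes
 * struct ipc_port is visible to the caller):
 *
 *	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
 */
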
1819 /*!
1820  * @function zone_id_require_aligned
1821  *
1822  * @abstract
1823  * Requires that a given pointer belong to the specified zone, by ID.
1824  *
1825  * @discussion
1826  * Similar to @c zone_id_require(), but performs additional checks, such as
1827  * whether the element is properly aligned.
1828  *
1829  * @param zone_id       the zone ID the address needs to belong to.
1830  * @param addr          the element address to check.
1831  */
1832 extern void     zone_id_require_aligned(
1833 	zone_id_t               zone_id,
1834 	void                   *addr __unsafe_indexable);
1835 
1836 /* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
1837 extern void     zone_set_exhaustible(
1838 	zone_t                  zone,
1839 	vm_size_t               max_elements,
1840 	bool                    exhausts_by_design);
1841 
1842 /*!
1843  * @function zone_raise_reserve()
1844  *
1845  * @brief
1846  * Used to raise the reserve on a zone.
1847  *
1848  * @discussion
1849  * Can be called from any context (zone_create_ext() setup hook or after).
1850  */
1851 extern void     zone_raise_reserve(
1852 	zone_or_view_t          zone_or_view,
1853 	uint16_t                min_elements);
1854 
1855 /*!
1856  * @function zone_fill_initially
1857  *
1858  * @brief
1859  * Initially fills a non-collectable zone so that it can hold the specified
1860  * number of elements.
1861  *
1862  * @discussion
1863  * This function must be called on a non-collectable, permanent zone before
1864  * it is first used.
1865  *
1866  * @param zone          The zone to fill.
1867  * @param nelems        the number of elements the zone must be able to hold.
1868  */
1869 extern void     zone_fill_initially(
1870 	zone_t                  zone,
1871 	vm_size_t               nelems);
1872 
1873 /*!
1874  * @function zone_drain()
1875  *
1876  * @abstract
1877  * Forces a zone to be drained (have all its data structures freed
1878  * back to its data store, and empty pages returned to the system).
1879  *
1880  * @param zone          the zone to drain.
1881  */
1882 extern void zone_drain(
1883 	zone_t                  zone);
1884 
1885 /*!
1886  * @struct zone_basic_stats
1887  *
1888  * @abstract
1889  * Used to report basic statistics about a zone.
1890  *
1891  * @field zbs_avail     the number of elements in a zone.
1892  * @field zbs_alloc     the number of allocated elements in a zone.
1893  * @field zbs_free      the number of free elements in a zone.
1894  * @field zbs_cached    the number of free elements in the per-CPU caches
1895  *                      (included in zbs_free).
1896  * @field zbs_alloc_fail
1897  *                      the number of allocation failures.
1898  */
1899 struct zone_basic_stats {
1900 	uint64_t        zbs_avail;
1901 	uint64_t        zbs_alloc;
1902 	uint64_t        zbs_free;
1903 	uint64_t        zbs_cached;
1904 	uint64_t        zbs_alloc_fail;
1905 };
1906 
1907 /*!
1908  * @function zone_get_stats
1909  *
1910  * @abstract
1911  * Retrieves statistics about a zone, including its per-CPU caches.
1912  *
1913  * @param zone          the zone to collect stats from.
1914  * @param stats         the statistics to fill.
1915  */
1916 extern void zone_get_stats(
1917 	zone_t                  zone,
1918 	struct zone_basic_stats *stats);
1919 
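/*
 * Sketch: deriving a zone's utilization from its basic stats;
 * @c zbs_avail counts both allocated and free elements.
 *
 *	struct zone_basic_stats stats;
 *
 *	zone_get_stats(zone, &stats);
 *	printf("%s: %llu/%llu allocated, %llu cached\n", zone_name(zone),
 *	    stats.zbs_alloc, stats.zbs_avail, stats.zbs_cached);
 */
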
1920 
1921 /*!
1922  * @typedef zone_exhausted_cb_t
1923  *
1924  * @brief
1925  * The callback type for the ZONE_EXHAUSTED event.
1926  */
1927 typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone, bool exhausted);
1928 
1929 /*!
1930  * @brief
1931  * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits
1932  * its wiring limit.
1933  *
1934  * @discussion
1935  * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
1936  * performing zone expansion and no significant amount of work can be performed
1937  * from this context.
1938  *
1939  * In particular, these callbacks cannot allocate any memory; they are
1940  * expected to check whether the zone is of interest, and to wake up another
1941  * thread to perform the actual work (for example, via a thread call).
1942  */
1943 EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
1944 
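/*
 * Sketch of a conforming handler (the thread call itself is hypothetical):
 * it only filters and defers, since no memory may be allocated here.
 *
 *	static void
 *	example_zone_exhausted(zone_id_t zid, zone_t z __unused, bool exhausted)
 *	{
 *		if (zid == ZONE_ID_IPC_PORT && exhausted) {
 *			thread_call_enter(example_reclaim_tcall);
 *		}
 *	}
 */
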
1945 
1946 #pragma mark XNU only: zone views
1947 
1948 /*!
1949  * @enum zone_kheap_id_t
1950  *
1951  * @brief
1952  * Enumerate a particular kalloc heap.
1953  *
1954  * @discussion
1955  * More documentation about heaps is available in @c <kern/kalloc.h>.
1956  *
1957  * @const KHEAP_ID_NONE
1958  * This value denotes regular zones, not used by kalloc.
1959  *
1960  * @const KHEAP_ID_SHARED
1961  * Indicates zones part of the KHEAP_SHARED heap.
1962  *
1963  * @const KHEAP_ID_DATA_BUFFERS
1964  * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
1965  *
1966  * @const KHEAP_ID_KT_VAR
1967  * Indicates zones part of the KHEAP_KT_VAR heap.
1968  */
1969 __enum_decl(zone_kheap_id_t, uint8_t, {
1970 	KHEAP_ID_NONE,
1971 	KHEAP_ID_SHARED,
1972 	KHEAP_ID_DATA_BUFFERS,
1973 	KHEAP_ID_KT_VAR,
1974 
1975 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
1976 });
1977 
1978 /*!
1979  * @macro ZONE_VIEW_DECLARE
1980  *
1981  * @abstract
1982  * Optionally declares a zone view (typically in a header).
1983  *
1984  * @param var           the name for the zone view.
1985  */
1986 #define ZONE_VIEW_DECLARE(var) \
1987 	extern struct zone_view var[1]
1988 
1989 /*!
1990  * @macro ZONE_VIEW_DEFINE
1991  *
1992  * @abstract
1993  * Defines a given zone view and what it points to.
1994  *
1995  * @discussion
1996  * Zone views can either share a pre-existing zone,
1997  * or perform a lookup into a kalloc heap for the zone
1998  * backing the bucket of the proper size.
1999  *
2000  * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
2001  * at the last rank. If a view is created on a zone, that zone must have
2002  * been created before this stage.
2003  *
2004  * This macro should not be used to create zone views into the default
2005  * kalloc heap; use KALLOC_TYPE_DEFINE instead.
2006  *
2007  * @param var           the name for the zone view.
2008  * @param name          a string describing the zone view.
2009  * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
2010  * @param size          the element size to be allocated from this view.
2011  */
2012 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
2013 	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
2014 	    .zv_name = (name), \
2015 	} }; \
2016 	static __startup_data struct zone_view_startup_spec \
2017 	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
2018 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_view_startup_init, \
2019 	    &__startup_zone_view_spec_ ## var)
2020 
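/*
 * Hypothetical example: a view that accounts 64-byte buffers taken from the
 * data-buffers heap under their own name.
 *
 *	ZONE_VIEW_DEFINE(example_view, "example.small.buf",
 *	    KHEAP_ID_DATA_BUFFERS, 64);
 */
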
2021 
2022 #pragma mark XNU only: batched allocations
2023 
2024 /*!
2025  * @typedef zstack_t
2026  *
2027  * @brief
2028  * A stack of allocated elements chained with delta encoding.
2029  *
2030  * @discussion
2031  * Some batch allocation interfaces interact with the data heap
2032  * where leaking kernel pointers is not acceptable. This is why
2033  * element offsets are used instead.
2034  */
2035 typedef struct zstack {
2036 	vm_offset_t     z_head;
2037 	uint32_t        z_count;
2038 } zstack_t;
2039 
2040 /*!
2041  * @function zstack_push
2042  *
2043  * @brief
2044  * Push a given element onto a zstack.
2045  */
2046 extern void zstack_push(
2047 	zstack_t               *stack,
2048 	void                   *elem);
2049 
2050 /*!
2051  * @function zstack_pop
2052  *
2053  * @brief
2054  * Pops an element from a zstack; the caller must first check that it is not empty.
2055  */
2056 void *zstack_pop(
2057 	zstack_t               *stack);
2058 
2059 /*!
2060  * @function zstack_count
2061  *
2062  * @brief
2063  * Returns the number of elements in a stack.
2064  */
2065 static inline uint32_t
2066 zstack_count(zstack_t stack)
2067 {
2068 	return stack.z_count;
2069 }
2070 
2071 /*!
2072  * @function zstack_empty
2073  *
2074  * @brief
2075  * Returns whether a stack is empty.
2076  */
2077 static inline bool
2078 zstack_empty(zstack_t stack)
2079 {
2080 	return zstack_count(stack) == 0;
2081 }
2082 
2083 static inline zstack_t
2084 zstack_load_and_erase(zstack_t *stackp)
2085 {
2086 	zstack_t stack = *stackp;
2087 
2088 	*stackp = (zstack_t){ };
2089 	return stack;
2090 }
2091 
2092 /*!
2093  * @function zfree_nozero
2094  *
2095  * @abstract
2096  * Frees an element allocated with @c zalloc*, without zeroing it.
2097  *
2098  * @discussion
2099  * This is for the sake of networking only; no one else should use it.
2100  *
2101  * @param zone_id       the zone id to free the element to.
2102  * @param elem          the element to free
2103  */
2104 extern void zfree_nozero(
2105 	zone_id_t               zone_id,
2106 	void                   *elem __unsafe_indexable);
2107 #define zfree_nozero(zone_id, elem) ({ \
2108 	zone_id_t __zfree_zid = (zone_id); \
2109 	(zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
2110 })
2111 
2112 /*!
2113  * @function zalloc_n
2114  *
2115  * @abstract
2116  * Allocates a batch of elements from the specified zone.
2117  *
2118  * @discussion
2119  * This is for the sake of networking only; no one else should use it.
2120  *
2121  * @param zone_id       the zone id to allocate the element from.
2122  * @param count         how many elements to allocate (fewer might be returned)
2123  * @param flags         a set of @c zalloc_flags_t flags.
2124  */
2125 extern zstack_t zalloc_n(
2126 	zone_id_t               zone_id,
2127 	uint32_t                count,
2128 	zalloc_flags_t          flags);
2129 
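/*
 * Sketch (zone ID and consumer are illustrative): allocate a batch and drain
 * the returned stack; fewer than @c count elements may come back.
 *
 *	zstack_t stack = zalloc_n(ZONE_ID_MBUF_REF, 32, Z_NOWAIT);
 *
 *	while (!zstack_empty(stack)) {
 *		consume(zstack_pop(&stack));
 *	}
 */
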
2130 /*!
2131  * @function zfree_n
2132  *
2133  * @abstract
2134  * Batched variant of zfree(): frees a stack of elements.
2135  *
2136  * @param zone_id       the zone id to free the element to.
2137  * @param stack         a stack of elements to free.
2138  */
2139 extern void zfree_n(
2140 	zone_id_t               zone_id,
2141 	zstack_t                stack);
2142 #define zfree_n(zone_id, stack) ({ \
2143 	zone_id_t __zfree_zid = (zone_id); \
2144 	(zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2145 })
2146 
2147 /*!
2148  * @function zfree_nozero_n
2149  *
2150  * @abstract
2151  * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
2152  * them.
2153  *
2154  * @discussion
2155  * This is for the sake of networking only; no one else should use it.
2156  *
2157  * @param zone_id       the zone id to free the element to.
2158  * @param stack         a stack of elements to free.
2159  */
2160 extern void zfree_nozero_n(
2161 	zone_id_t               zone_id,
2162 	zstack_t                stack);
2163 #define zfree_nozero_n(zone_id, stack) ({ \
2164 	zone_id_t __zfree_zid = (zone_id); \
2165 	(zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2166 })
2167 
2168 #pragma mark XNU only: cached objects
2169 
2170 /*!
2171  * @typedef zone_cache_ops_t
2172  *
2173  * @brief
2174  * A set of callbacks used for a zcache (cache of composite objects).
2175  *
2176  * @field zc_op_alloc
2177  * The callback to "allocate" a cached object from scratch.
2178  *
2179  * @field zc_op_mark_valid
2180  * The callback that is called when a cached object is being reused,
2181  * will typically call @c zcache_mark_valid() on the various
2182  * sub-pieces of the composite cached object.
2183  *
2184  * @field zc_op_mark_invalid
2185  * The callback that is called when a composite object is being freed
2186  * to the cache. This will typically call @c zcache_mark_invalid()
2187  * on the various sub-pieces of the composite object.
2188  *
2189  * @field zc_op_free
2190  * The callback to "free" a composite object completely.
2191  */
2192 typedef const struct zone_cache_ops {
2193 	void         *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);
2194 	void         *(*zc_op_mark_valid)(zone_id_t, void *);
2195 	void         *(*zc_op_mark_invalid)(zone_id_t, void *);
2196 	void          (*zc_op_free)(zone_id_t, void *);
2197 } *zone_cache_ops_t;
2198 
2199 #if __has_ptrcheck
2200 static inline char *__bidi_indexable
2201 zcache_transpose_bounds(
2202 	char *__bidi_indexable pointer_with_bounds,
2203 	char *__unsafe_indexable unsafe_pointer)
2204 {
2205 	vm_offset_t offset_from_start = pointer_with_bounds - __ptr_lower_bound(pointer_with_bounds);
2206 	vm_offset_t offset_to_end = __ptr_upper_bound(pointer_with_bounds) - pointer_with_bounds;
2207 	vm_offset_t size = offset_from_start + offset_to_end;
2208 	return __unsafe_forge_bidi_indexable(char *, unsafe_pointer - offset_from_start, size)
2209 	       + offset_from_start;
2210 }
2211 #else
2212 static inline char *__header_indexable
2213 zcache_transpose_bounds(
2214 	char *__header_indexable pointer_with_bounds __unused,
2215 	char *__unsafe_indexable unsafe_pointer)
2216 {
2217 	return unsafe_pointer;
2218 }
2219 #endif // __has_ptrcheck
2220 
2221 /*!
2222  * @function zcache_mark_valid()
2223  *
2224  * @brief
2225  * Mark an element as "valid".
2226  *
2227  * @description
2228  * This function is used to integrate with KASAN or PGZ for a cache of
2229  * composite objects. It is typically called from the cache's
2230  * @c zc_op_mark_valid() callback.
2231  *
2232  * If neither PGZ nor KASAN is in use, this function is a no-op.
2233  * Otherwise the @c elem address might be updated.
2234  *
2235  * @param zone          the zone the element belongs to.
2236  * @param elem          the address of the element
2237  * @returns             the new address to correctly access @c elem.
2238  */
2239 extern void *__unsafe_indexable zcache_mark_valid(
2240 	zone_t                  zone,
2241 	void                    *elem __unsafe_indexable);
2242 
2243 static inline void *
2244 zcache_mark_valid_single(
2245 	zone_t                  zone,
2246 	void                    *elem)
2247 {
2248 	return __unsafe_forge_single(void *, zcache_mark_valid(zone, elem));
2249 }
2250 
2251 static inline void *__header_bidi_indexable
2252 zcache_mark_valid_indexable(
2253 	zone_t                  zone,
2254 	void                    *elem __header_bidi_indexable)
2255 {
2256 	return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_valid(zone, elem));
2257 }
2258 
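/*
 * Sketch of a @c zc_op_mark_valid() callback for a composite object whose
 * sub-buffer comes from another zone (names and fields hypothetical):
 *
 *	static void *
 *	example_mark_valid(zone_id_t zid, void *obj)
 *	{
 *		struct example *e;
 *
 *		e = zcache_mark_valid_single(zone_by_id(zid), obj);
 *		e->buf = zcache_mark_valid_single(example_buf_zone, e->buf);
 *		return e;
 *	}
 */
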
2259 /*!
2260  * @function zcache_mark_invalid()
2261  *
2262  * @brief
2263  * Mark an element as "invalid".
2264  *
2265  * @description
2266  * This function is used to integrate with KASAN or PGZ for a cache of
2267  * composite objects. It is typically called from the cache's
2268  * @c zc_op_mark_invalid() callback.
2269  *
2270  * This function performs validation that @c elem belongs
2271  * to the right zone and is properly "aligned", and should
2272  * never be elided under any configuration.
2273  *
2274  * @param zone          the zone the element belongs to.
2275  * @param elem          the address of the element
2276  * @returns             the new address to correctly access @c elem.
2277  */
2278 extern void *__unsafe_indexable zcache_mark_invalid(
2279 	zone_t                  zone,
2280 	void                    *elem __unsafe_indexable);
2281 
2282 static inline void *
2283 zcache_mark_invalid_single(
2284 	zone_t                  zone,
2285 	void                    *elem)
2286 {
2287 	return __unsafe_forge_single(void *, zcache_mark_invalid(zone, elem));
2288 }
2289 
2290 static inline void *__header_bidi_indexable
2291 zcache_mark_invalid_indexable(
2292 	zone_t                  zone,
2293 	void                    *elem __header_bidi_indexable)
2294 {
2295 	return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_invalid(zone, elem));
2296 }
2297 
2298 /*!
2299  * @macro zcache_alloc()
2300  *
2301  * @abstract
2302  * Allocates a composite object from a cache.
2303  *
2304  * @param zone_id       The proper @c ZONE_ID_* constant.
2305  * @param fl            a collection of @c zalloc_flags_t flags.
2306  *
2307  * @returns             NULL or the allocated element
2308  */
2309 #define zcache_alloc(zone_id, fl) \
2310 	__zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2311 
2312 /*!
2313  * @function zcache_alloc_n()
2314  *
2315  * @abstract
2316  * Allocates a stack of composite objects from a cache.
2317  *
2318  * @param zone_id       The proper @c ZONE_ID_* constant.
2319  * @param count         how many elements to allocate (fewer might be returned)
2320  * @param flags         a set of @c zalloc_flags_t flags.
2321  *
2322  * @returns             a stack of allocated composite objects (possibly empty).
2323  */
2324 extern zstack_t zcache_alloc_n(
2325 	zone_id_t               zone_id,
2326 	uint32_t                count,
2327 	zalloc_flags_t          flags,
2328 	zone_cache_ops_t        ops);
2329 #define zcache_alloc_n(zone_id, count, flags) \
2330 	(zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2331 
2332 
2333 
2334 /*!
2335  * @function zcache_free()
2336  *
2337  * @abstract
2338  * Frees a composite object previously allocated
2339  * with @c zcache_alloc() or @c zcache_alloc_n().
2340  *
2341  * @param zone_id       the zcache id to free the object to.
2342  * @param addr          the address to free
2343  * @param ops           the pointer to the zcache ops for this zcache.
2344  */
2345 extern void zcache_free(
2346 	zone_id_t               zone_id,
2347 	void                   *addr __unsafe_indexable,
2348 	zone_cache_ops_t        ops);
2349 #define zcache_free(zone_id, elem) \
2350 	(zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
2351 	    __zcache_##zone_id##_ops)
2352 
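/*
 * Sketch (the cache ID is hypothetical and must have been set up with
 * ZCACHE_DEFINE): a round trip of one composite object. Note that
 * zcache_free() also clears the variable holding the pointer.
 *
 *	struct example *e = zcache_alloc(ZONE_ID_EXAMPLE, Z_WAITOK);
 *
 *	// ... use e ...
 *	zcache_free(ZONE_ID_EXAMPLE, e);
 */
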
2353 /*!
2354  * @function zcache_free_n()
2355  *
2356  * @abstract
2357  * Frees a stack of composite objects previously allocated
2358  * with @c zcache_alloc() or @c zcache_alloc_n().
2359  *
2360  * @param zone_id       the zcache id to free the objects to.
2361  * @param stack         a stack of composite objects
2362  * @param ops           the pointer to the zcache ops for this zcache.
2363  */
2364 extern void zcache_free_n(
2365 	zone_id_t               zone_id,
2366 	zstack_t                stack,
2367 	zone_cache_ops_t        ops);
2368 #define zcache_free_n(zone_id, stack) \
2369 	(zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
2370 	    __zcache_##zone_id##_ops)
2371 
2372 
2373 /*!
2374  * @function zcache_drain()
2375  *
2376  * @abstract
2377  * Forces a zcache to be drained (have all its data structures freed
2378  * back to the original zones).
2379  *
2380  * @param zone_id       the zcache id to free the objects to.
2381  */
2382 extern void zcache_drain(
2383 	zone_id_t               zone_id);
2384 
2385 
2386 /*!
2387  * @macro ZCACHE_DECLARE
2388  *
2389  * @abstract
2390  * Declares the type associated with a zone cache ID.
2391  *
2392  * @param id            the name of the zone ID to associate a type with.
2393  * @param type_t        the type of elements in the zone.
2394  */
2395 #define ZCACHE_DECLARE(id, type_t) \
2396 	__ZONE_DECLARE_TYPE(id, type_t); \
2397 	__attribute__((visibility("hidden"))) \
2398 	extern const zone_cache_ops_t __zcache_##id##_ops
2399 
2400 
2401 /*!
2402  * @macro ZCACHE_DEFINE
2403  *
2404  * @abstract
2405  * Defines a zone cache for a given ID and type.
2406  *
2407  * @param zid           the zone ID to associate a type with.
2408  * @param name          the name for the zone
2409  * @param type_t        the type of elements in the zone.
2410  * @param size          the size of elements in the cache
2411  * @param ops           the ops for this zcache.
2412  */
2413 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2414 	ZCACHE_DECLARE(zid, type_t);                                            \
2415 	ZONE_DECLARE_ID(zid, type_t);                                           \
2416 	const zone_cache_ops_t __zcache_##zid##_ops = (ops);                    \
2417 	ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) {   \
2418 	        zcache_ops[zid] = (ops);                                        \
2419 	})
2420 
2421 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2422 
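/*
 * Hypothetical definition tying the ops to a reserved zone ID (the ID must be
 * below ZONE_ID__FIRST_DYNAMIC so that it fits in zcache_ops[]):
 *
 *	static const struct zone_cache_ops example_ops = {
 *		.zc_op_alloc        = example_alloc,
 *		.zc_op_mark_valid   = example_mark_valid,
 *		.zc_op_mark_invalid = example_mark_invalid,
 *		.zc_op_free         = example_free,
 *	};
 *	ZCACHE_DEFINE(ZONE_ID_EXAMPLE, "example.cache",
 *	    struct example, sizeof(struct example), &example_ops);
 */
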
2423 #pragma mark XNU only: PGZ support
2424 
2425 /*!
2426  * @function pgz_owned()
2427  *
2428  * @brief
2429  * Returns whether an address is PGZ owned.
2430  *
2431  * @param addr          The address to translate.
2432  * @returns             Whether it is PGZ owned.
2433  */
2434 #if CONFIG_PROB_GZALLOC
2435 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
2436 #else
2437 #define pgz_owned(addr) false
2438 #endif
2439 
2440 /*!
2441  * @function pgz_decode()
2442  *
2443  * @brief
2444  * Translates a PGZ protected virtual address to its unprotected
2445  * backing store.
2446  *
2447  * @discussion
2448  * This is exposed so that the VM can look up the vm_page_t for PGZ protected
2449  * elements, since the PGZ protected virtual addresses are maintained by PGZ
2450  * at the pmap level without the VM's involvement.
2451  *
2452  * "allow_invalid" schemes relying on sequestering also need this
2453  * to perform the locking attempts on the unprotected address.
2454  *
2455  * @param addr          The address to translate.
2456  * @param size          The object size.
2457  * @returns             The unprotected address or @c addr.
2458  */
2459 #if CONFIG_PROB_GZALLOC
2460 #define pgz_decode(addr, size) \
2461 	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
2462 #else
2463 #define pgz_decode(addr, size)  (addr)
2464 #endif
2465 
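/*
 * Sketch: translating a possibly PGZ-protected element address back to its
 * unprotected backing store (the element type is hypothetical):
 *
 *	elem = pgz_decode(elem, sizeof(struct example));
 */
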
2466 /*!
2467  * @function pgz_decode_allow_invalid()
2468  *
2469  * @brief
2470  * Translates a PGZ protected virtual address to its unprotected
2471  * backing store, but doesn't assert it is still allocated/valid.
2472  *
2473  * @discussion
2474  * "allow_invalid" schemes relying on sequestering also need this
2475  * to perform the locking attempts on the unprotected address.
2476  *
2477  * @param addr          The address to translate.
2478  * @param want_zid      The expected zone ID for the element.
2479  * @returns             The unprotected address or @c addr.
2480  */
2481 #if CONFIG_PROB_GZALLOC
2482 #define pgz_decode_allow_invalid(addr, want_zid) \
2483 	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
2484 #else
2485 #define pgz_decode_allow_invalid(addr, zid)  (addr)
2486 #endif
2487 
2488 #pragma mark XNU only: misc & implementation details
2489 
2490 struct zone_create_startup_spec {
2491 	zone_t                 *z_var;
2492 	const char             *z_name __unsafe_indexable;
2493 	vm_size_t               z_size;
2494 	zone_create_flags_t     z_flags;
2495 	zone_id_t               z_zid;
2496 	void                  (^z_setup)(zone_t);
2497 };
2498 
2499 extern void     zone_create_startup(
2500 	struct zone_create_startup_spec *spec);
2501 
2502 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2503 	static __startup_data struct zone_create_startup_spec \
2504 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2505 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2506 	    &__startup_zone_spec_ ## ns)
2507 
2508 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2509 	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2510 
2511 #define __zalloc_cast(namespace, expr) \
2512 	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2513 
2514 #if ZALLOC_TYPE_SAFE
2515 #define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
2516 #define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
2517 #endif /* ZALLOC_TYPE_SAFE */
2518 
2519 struct zone_view_startup_spec {
2520 	zone_view_t         zv_view;
2521 	union {
2522 		zone_kheap_id_t zv_heapid;
2523 		zone_t         *zv_zone;
2524 	};
2525 	vm_size_t           zv_size;
2526 };
2527 
2528 extern void zone_view_startup_init(
2529 	struct zone_view_startup_spec *spec);
2530 
2531 extern void zone_userspace_reboot_checks(void);
2532 
2533 #if VM_TAG_SIZECLASSES
2534 extern void __zone_site_register(
2535 	vm_allocation_site_t   *site);
2536 
2537 #define VM_ALLOC_SITE_TAG() ({ \
2538 	__PLACE_IN_SECTION("__DATA, __data")                                   \
2539 	static vm_allocation_site_t site = { .refcount = 2, };                 \
2540 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, __zone_site_register, &site);   \
2541 	site.tag;                                                              \
2542 })
2543 #else /* VM_TAG_SIZECLASSES */
2544 #define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
2545 #endif /* !VM_TAG_SIZECLASSES */
2546 
2547 static inline zalloc_flags_t
2548 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2549 {
2550 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2551 }
2552 
2553 #if DEBUG || DEVELOPMENT
2554 #  define ZPCPU_MANGLE_MASK     0xc0c0000000000000ul
2555 #else /* !(DEBUG || DEVELOPMENT) */
2556 #  define ZPCPU_MANGLE_MASK     0ul
2557 #endif /* !(DEBUG || DEVELOPMENT) */
2558 
2559 #define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_MASK)
2560 #define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_MASK)
2561 #define __zpcpu_addr(e)         ((vm_address_t)(e))
2562 #define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(ptr), e)
2563 #define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2564 
2565 /**
2566  * @macro __zpcpu_mangle_for_boot()
2567  *
2568  * @discussion
2569  * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2570  * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2571  * storage marked @c __startup_data and replace it with the proper allocation
2572  * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2573  *
2574  * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2575  * provides the proper mangling of the storage into a "fake" percpu pointer so
2576  * that accesses through @c zpercpu_get() functions properly.
2577  *
2578  * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2579  */
2580 #define __zpcpu_mangle_for_boot(ptr)  ({ \
2581 	assert(startup_phase < STARTUP_SUB_ZALLOC); \
2582 	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
2583 })
2584 
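/*
 * Sketch of the pattern this macro supports (names hypothetical): boot-time
 * static storage stands in for the per-CPU allocation until zalloc is up.
 *
 *	static uint64_t __startup_data early_store;
 *	static uint64_t *__zpercpu counter;
 *
 *	// before STARTUP_SUB_ZALLOC:
 *	counter = __zpcpu_mangle_for_boot(&early_store);
 *
 *	// at the end of STARTUP_SUB_ZALLOC, swap in a real allocation,
 *	// e.g. counter = zalloc_percpu_permanent_type(uint64_t);
 */
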
2585 extern unsigned zpercpu_count(void) __pure2;
2586 
2587 #if CONFIG_PROB_GZALLOC
2588 
2589 extern vm_offset_t __pgz_decode(
2590 	mach_vm_address_t       addr,
2591 	mach_vm_size_t          size);
2592 
2593 extern vm_offset_t __pgz_decode_allow_invalid(
2594 	vm_offset_t             offs,
2595 	zone_id_t               zid);
2596 
2597 #endif
2598 #if DEBUG || DEVELOPMENT
2599 extern size_t zone_pages_wired;
2600 extern size_t zone_guard_pages;
2601 #endif /* DEBUG || DEVELOPMENT */
2602 #if CONFIG_ZLEAKS
2603 extern uint32_t                 zleak_active;
2604 extern vm_size_t                zleak_max_zonemap_size;
2605 extern vm_size_t                zleak_per_zone_tracking_threshold;
2606 
2607 extern kern_return_t zleak_update_threshold(
2608 	vm_size_t              *arg,
2609 	uint64_t                value);
2610 #endif /* CONFIG_ZLEAKS */
2611 
2612 extern uint32_t                 zone_map_jetsam_limit;
2613 
2614 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2615 
2616 extern zone_t percpu_u64_zone;
2617 
2618 #pragma GCC visibility pop
2619 #endif /* XNU_KERNEL_PRIVATE */
2620 
2621 /*
2622  * This macro is currently used by AppleImage4 (rdar://83924635)
2623  */
2624 #define __zalloc_ptr_load_and_erase(elem) \
2625 	os_ptr_load_and_erase(elem)
2626 
2627 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2628 
2629 #endif  /* _KERN_ZALLOC_H_ */
2630 
2631 #endif  /* KERNEL_PRIVATE */
2632