xref: /xnu-11417.121.6/osfmk/kern/zalloc.h (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	zalloc.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	 1985
62  *
63  */
64 
65 #ifdef  KERNEL_PRIVATE
66 
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69 
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76 
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80 
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg)       __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86 
87 /*
88  * Enable this macro to force type safe zalloc/zalloc_ro/...
89  */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97 
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99 
/*!
 * @macro __zpercpu
 *
 * @abstract
 * Annotation that helps denoting a per-cpu pointer that requires usage of
 * @c zpercpu_*() for access.
 */
#define __zpercpu __unsafe_indexable

/*!
 * @typedef zone_id_t
 *
 * @abstract
 * The type for a zone ID.
 */
typedef uint16_t zone_id_t;
116 
117 /**
118  * @enum zone_create_flags_t
119  *
120  * @abstract
121  * Set of flags to pass to zone_create().
122  *
123  * @discussion
124  * Some kernel-wide policies affect all possible created zones.
125  * Explicit @c ZC_* win over such policies.
126  */
127 __options_decl(zone_create_flags_t, uint64_t, {
128 	/** The default value to pass to zone_create() */
129 	ZC_NONE                 = 0x00000000,
130 
131 	/** (obsolete) */
132 	ZC_SEQUESTER            = 0x00000001,
133 	/** (obsolete) */
134 	ZC_NOSEQUESTER          = 0x00000002,
135 
136 	/** Enable per-CPU zone caching for this zone */
137 	ZC_CACHING              = 0x00000010,
138 	/** Disable per-CPU zone caching for this zone */
139 	ZC_NOCACHING            = 0x00000020,
140 
141 	/** Allocate zone pages as Read-only **/
142 	ZC_READONLY             = 0x00800000,
143 
144 	/** Mark zone as a per-cpu zone */
145 	ZC_PERCPU               = 0x01000000,
146 
147 	/** Force the created zone to clear every allocation on free */
148 	ZC_ZFREE_CLEARMEM       = 0x02000000,
149 
150 	/** Mark zone as non collectable by zone_gc() */
151 	ZC_NOGC                 = 0x04000000,
152 
153 	/** Do not encrypt this zone during hibernation */
154 	ZC_NOENCRYPT            = 0x08000000,
155 
156 	/** Type requires alignment to be preserved */
157 	ZC_ALIGNMENT_REQUIRED   = 0x10000000,
158 
159 	/** Obsolete */
160 	ZC_NOGZALLOC            = 0x20000000,
161 
162 	/** Don't asynchronously replenish the zone via callouts */
163 	ZC_NOCALLOUT            = 0x40000000,
164 
165 	/** Can be zdestroy()ed, not default unlike zinit() */
166 	ZC_DESTRUCTIBLE         = 0x80000000,
167 
168 #ifdef XNU_KERNEL_PRIVATE
169 	/** This zone contains pure data meant to be shared */
170 	ZC_SHARED_DATA          = 0x0040000000000000,
171 
172 	/** This zone is a built object cache */
173 	ZC_OBJ_CACHE            = 0x0080000000000000,
174 
175 	/** Use guard pages in PGZ mode */
176 	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,
177 
178 	/** Zone doesn't support TBI tagging */
179 	ZC_NO_TBI_TAG             = 0x0200000000000000,
180 
181 	/** This zone will back a kalloc type */
182 	ZC_KALLOC_TYPE          = 0x0400000000000000,
183 
184 	/** Disable PGZ for this zone */
185 	ZC_NOPGZ                = 0x0800000000000000,
186 
187 	/** This zone contains pure data */
188 	ZC_DATA                 = 0x1000000000000000,
189 
190 	/** This zone belongs to the VM submap */
191 	ZC_VM                   = 0x2000000000000000,
192 
193 	/** Disable kasan quarantine for this zone */
194 	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,
195 
196 	/** Disable kasan redzones for this zone */
197 	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
198 #endif /* XNU_KERNEL_PRIVATE */
199 });
200 
/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit both a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct kalloc_type_view    *zov_kt_heap;
	struct zone_view           *zov_view;
	struct zone                *zov_zone;
#ifdef __cplusplus
	/* implicit conversions so C++ callers can pass any of the three kinds */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
/* in C, the transparent union lets callers pass any member type directly */
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
229 
230 /*!
231  * @enum zone_create_ro_id_t
232  *
233  * @abstract
234  * Zone creation IDs for external read only zones
235  *
236  * @discussion
237  * Kexts that desire to use the RO allocator should:
238  * 1. Add a zone creation id below
239  * 2. Add a corresponding ID to @c zone_reserved_id_t
240  * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
241  * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
242  * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
243  */
244 __enum_decl(zone_create_ro_id_t, zone_id_t, {
245 	ZC_RO_ID_SANDBOX,
246 	ZC_RO_ID_PROFILE,
247 	ZC_RO_ID_PROTOBOX,
248 	ZC_RO_ID_SB_FILTER,
249 	ZC_RO_ID_AMFI_OSENTITLEMENTS,
250 	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
251 });
252 
253 /*!
254  * @function zone_create
255  *
256  * @abstract
257  * Creates a zone with the specified parameters.
258  *
259  * @discussion
260  * A Zone is a slab allocator that returns objects of a given size very quickly.
261  *
262  * @param name          the name for the new zone.
263  * @param size          the size of the elements returned by this zone.
264  * @param flags         a set of @c zone_create_flags_t flags.
265  *
266  * @returns             the created zone, this call never fails.
267  */
268 extern zone_t   zone_create(
269 	const char             *name __unsafe_indexable,
270 	vm_size_t               size,
271 	zone_create_flags_t     flags);
272 
273 /*!
274  *
275  * @function zone_get_elem_size
276  *
277  * @abstract
278  * Get the intrinsic size of one element allocated by the given zone.
279  *
280  * @discussion
281  * All zones are created to allocate elements of a fixed size, but the size is
282  * not always a compile-time constant. @c zone_get_elem_size can be used to
283  * retrieve the size of elements allocated by this zone at runtime.
284  *
285  * @param zone			the zone to inspect
286  *
287  * @returns			the size of elements allocated by this zone
288  */
289 extern vm_size_t    zone_get_elem_size(zone_t zone);
290 
291 /*!
292  * @function zone_create_ro
293  *
294  * @abstract
295  * Creates a read only zone with the specified parameters from kexts
296  *
297  * @discussion
298  * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
299  * kexts. Do not use this API to create read only zones in xnu.
300  *
301  * @param name          the name for the new zone.
302  * @param size          the size of the elements returned by this zone.
303  * @param flags         a set of @c zone_create_flags_t flags.
304  * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
305  *
306  * @returns             the zone ID of the created zone, this call never fails.
307  */
308 extern zone_id_t   zone_create_ro(
309 	const char             *name __unsafe_indexable,
310 	vm_size_t               size,
311 	zone_create_flags_t     flags,
312 	zone_create_ro_id_t     zc_ro_id);
313 
314 /*!
315  * @function zdestroy
316  *
317  * @abstract
318  * Destroys a zone previously made with zone_create.
319  *
320  * @discussion
321  * Zones must have been made destructible for @c zdestroy() to be allowed,
322  * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
323  *
324  * @param zone          the zone to destroy.
325  */
326 extern void     zdestroy(
327 	zone_t          zone);
328 
329 /*!
330  * @function zone_require
331  *
332  * @abstract
333  * Requires for a given pointer to belong to the specified zone.
334  *
335  * @discussion
336  * The function panics if the check fails as it indicates that the kernel
337  * internals have been compromised.
338  *
339  * @param zone          the zone the address needs to belong to.
340  * @param addr          the element address to check.
341  */
342 extern void     zone_require(
343 	zone_t          zone,
344 	void           *addr __unsafe_indexable);
345 
346 /*!
347  * @function zone_require_ro
348  *
349  * @abstract
350  * Version of zone require intended for zones created with ZC_READONLY
351  *
352  * @discussion
353  * This check is not sufficient to fully trust the element.
354  *
355  * Another check of its content must be performed to prove
356  * that the element is "the right one", a typical technique
357  * for when the RO data structure is 1:1 with a mutable one,
358  * is a simple circularity check with a very strict lifetime
359  * (both the mutable and read-only data structures are made
360  * and destroyed as close as possible).
361  *
362  * @param zone_id       the zone id the address needs to belong to.
363  * @param elem_size     the element size for this zone.
364  * @param addr          the element address to check.
365  */
366 extern void     zone_require_ro(
367 	zone_id_t       zone_id,
368 	vm_size_t       elem_size,
369 	void           *addr __unsafe_indexable);
370 
371 /*!
372  * @enum zalloc_flags_t
373  *
374  * @brief
375  * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
376  *
377  * @discussion
378  * It is encouraged that any callsite passing flags uses exactly one of:
379  * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
380  * if nothing else was specified.
381  *
382  * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
383  * then @c Z_WAITOK is ignored.
384  *
385  * @const Z_WAITOK
386  * Passing this flag means that zalloc() will be allowed to sleep
387  * for memory to become available for this allocation. If the zone
388  * isn't exhaustible, zalloc(Z_WAITOK) never fails.
389  *
390  * If the zone is exhaustible, zalloc() might still fail if the zone
391  * is at its maximum allowed memory usage, unless Z_NOFAIL is passed,
392  * in which case zalloc() will block until an element is freed.
393  *
394  * @const Z_NOWAIT
395  * Passing this flag means that zalloc is not allowed to ever block.
396  *
397  * @const Z_NOPAGEWAIT
398  * Passing this flag means that zalloc is allowed to wait due to lock
399  * contention, but will not wait for the VM to wait for pages when
400  * under memory pressure.
401  *
402  * @const Z_ZERO
403  * Passing this flags means that the returned memory has been zeroed out.
404  *
405  * @const Z_NOFAIL
406  * Passing this flag means that the caller expects the allocation to always
407  * succeed. This will result in a panic if this assumption isn't correct.
408  *
409  * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT.
410  * For exhaustible zones, it forces the caller to wait until a zfree() happend
411  * if the zone has reached its maximum of allowed elements.
412  *
413  * @const Z_REALLOCF
414  * For the realloc family of functions,
415  * free the incoming memory on failure cases.
416  *
417  #if XNU_KERNEL_PRIVATE
418  * @const Z_SET_NOTEARLY
419  * Using this flag from external allocations APIs (kalloc_type/zalloc)
420  * allows the callsite to skip the early (shared) zone for that sizeclass and
421  * directly allocated from the requested zone.
422  * Using this flag from internal APIs (zalloc_ext) will skip the early
423  * zone only when a given threshold is exceeded. It will also set a flag
424  * to indicate that future allocations to the zone should directly go to
425  * the zone instead of the shared zone.
426  *
427  * @const Z_SPRAYQTN
428  * This flag tells the VM to allocate from the "spray quarantine" range when
429  * it services the allocation. For more details on what allocations qualify
430  * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
431  *
432  * @const Z_KALLOC_ARRAY
433  * Instead of returning a standard "pointer" return a pointer that encodes
434  * its size-class into the pointer itself (Only for kalloc, might limit
435  * the range of allocations that can be done).
436  *
437  * @const Z_FULLSIZE
438  * Used to indicate that the caller will use all available space in excess
439  * from the requested allocation size.
440  *
441  * @const Z_SKIP_KASAN
442  * Tell zalloc() not to do any kasan adjustments.
443  *
444  * @const Z_MAY_COPYINMAP
445  * This data allocation might be used with vm_map_copyin().
446  * This allows for those allocations to be associated with a proper VM object.
447  *
448  * @const Z_VM_TAG_BT_BIT
449  * Used to blame allocation accounting on the first kext
450  * found in the backtrace of the allocation.
451  *
452  * @const Z_NOZZC
453  * Used internally to mark allocations that will skip zero validation.
454  *
455  * @const Z_PCPU
456  * Used internally for the percpu paths.
457  *
458  * @const Z_VM_TAG_MASK
459  * Represents bits in which a vm_tag_t for the allocation can be passed.
460  * (used by kalloc for the zone tagging debugging feature).
461  #endif
462  */
463 __options_decl(zalloc_flags_t, uint32_t, {
464 	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
465 	Z_WAITOK        = 0x0000,
466 	Z_NOWAIT        = 0x0001,
467 	Z_NOPAGEWAIT    = 0x0002,
468 	Z_ZERO          = 0x0004,
469 	Z_REALLOCF      = 0x0008,
470 
471 #if XNU_KERNEL_PRIVATE
472 	Z_SET_NOTEARLY = 0x0040,
473 	Z_SPRAYQTN      = 0x0080,
474 	Z_KALLOC_ARRAY  = 0x0100,
475 #if KASAN_CLASSIC
476 	Z_FULLSIZE      = 0x0000,
477 #else
478 	Z_FULLSIZE      = 0x0200,
479 #endif
480 #if KASAN_CLASSIC
481 	Z_SKIP_KASAN    = 0x0400,
482 #else
483 	Z_SKIP_KASAN    = 0x0000,
484 #endif
485 	Z_MAY_COPYINMAP = 0x0800,
486 	Z_VM_TAG_BT_BIT = 0x1000,
487 	Z_PCPU          = 0x2000,
488 	Z_NOZZC         = 0x4000,
489 #endif /* XNU_KERNEL_PRIVATE */
490 	Z_NOFAIL        = 0x8000,
491 
492 	/* convenient c++ spellings */
493 	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
494 	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
495 	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL,
496 #if XNU_KERNEL_PRIVATE
497 	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
498 #endif
499 
500 	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
501 #if XNU_KERNEL_PRIVATE
502 	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
503 	/** used by kalloc to propagate vm tags for -zt */
504 	Z_VM_TAG_MASK   = 0xffff0000,
505 
506 #define Z_VM_TAG_SHIFT        16
507 #define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
508 #define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
509 #endif
510 });
511 
512 /*
513  * This type is used so that kalloc_internal has good calling conventions
514  * for callers who want to cheaply both know the allocated address
515  * and the actual size of the allocation.
516  */
517 struct kalloc_result {
518 	void         *addr __sized_by(size);
519 	vm_size_t     size;
520 };
521 
522 /*!
523  * @typedef zone_stats_t
524  *
525  * @abstract
526  * The opaque type for per-cpu zone stats that are accumulated per zone
527  * or per zone-view.
528  */
529 typedef struct zone_stats *__zpercpu zone_stats_t;
530 
531 /*!
532  * @typedef zone_view_t
533  *
534  * @abstract
535  * A view on a zone for accounting purposes.
536  *
537  * @discussion
538  * A zone view uses the zone it references for the allocations backing store,
539  * but does the allocation accounting at the view level.
540  *
541  * These accounting are surfaced by @b zprint(1) and similar tools,
542  * which allow for cheap but finer grained understanding of allocations
543  * without any fragmentation cost.
544  *
545  * Zone views are protected by the kernel lockdown and can't be initialized
546  * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
547  */
548 typedef struct zone_view *zone_view_t;
549 struct zone_view {
550 	zone_t          zv_zone;
551 	zone_stats_t    zv_stats;
552 	const char     *zv_name __unsafe_indexable;
553 	zone_view_t     zv_next;
554 };
555 
556 /*!
557  * @typedef kalloc_type_view_t
558  *
559  * @abstract
560  * The opaque type created at kalloc_type callsites to redirect calls to
561  * the right zone.
562  */
563 typedef struct kalloc_type_view *kalloc_type_view_t;
564 
565 #if XNU_KERNEL_PRIVATE
566 /*
567  * kalloc_type/kfree_type implementation functions
568  */
569 extern void *__unsafe_indexable kalloc_type_impl_internal(
570 	kalloc_type_view_t  kt_view,
571 	zalloc_flags_t      flags);
572 
573 extern void kfree_type_impl_internal(
574 	kalloc_type_view_t kt_view,
575 	void               *ptr __unsafe_indexable);
576 
577 static inline void *__unsafe_indexable
kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)578 kalloc_type_impl(
579 	kalloc_type_view_t      kt_view,
580 	zalloc_flags_t          flags)
581 {
582 	void *__unsafe_indexable addr = kalloc_type_impl_internal(kt_view, flags);
583 	if (flags & Z_NOFAIL) {
584 		__builtin_assume(addr != NULL);
585 	}
586 	return addr;
587 }
588 
589 #define kfree_type_impl(kt_view, ptr) \
590 	kfree_type_impl_internal(kt_view, (ptr))
591 
592 #else /* XNU_KERNEL_PRIVATE */
593 
594 extern void *__unsafe_indexable kalloc_type_impl(
595 	kalloc_type_view_t  kt_view,
596 	zalloc_flags_t      flags);
597 
598 static inline void *__unsafe_indexable
__kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)599 __kalloc_type_impl(
600 	kalloc_type_view_t  kt_view,
601 	zalloc_flags_t      flags)
602 {
603 	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
604 	if (flags & Z_NOFAIL) {
605 		__builtin_assume(addr != NULL);
606 	}
607 	return addr;
608 }
609 
610 #define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)
611 
612 extern void kfree_type_impl(
613 	kalloc_type_view_t  kt_view,
614 	void                *ptr __unsafe_indexable);
615 
616 #endif /* XNU_KERNEL_PRIVATE */
617 
618 /*!
619  * @function zalloc
620  *
621  * @abstract
622  * Allocates an element from a specified zone.
623  *
624  * @discussion
625  * If the zone isn't exhaustible and is expandable, this call never fails.
626  *
627  * @param zone          the zone or zone view to allocate from
628  *
629  * @returns             NULL or the allocated element
630  */
631 __attribute__((malloc))
632 extern void *__unsafe_indexable zalloc(
633 	zone_t          zone);
634 
635 __attribute__((malloc))
636 __attribute__((overloadable))
637 static inline void *__unsafe_indexable
zalloc(zone_view_t view)638 zalloc(zone_view_t view)
639 {
640 	return zalloc((zone_t)view);
641 }
642 
643 __attribute__((malloc))
644 __attribute__((overloadable))
645 static inline void *__unsafe_indexable
zalloc(kalloc_type_view_t kt_view)646 zalloc(kalloc_type_view_t kt_view)
647 {
648 	return (kalloc_type_impl)(kt_view, Z_WAITOK);
649 }
650 
651 /*!
652  * @function zalloc_noblock
653  *
654  * @abstract
655  * Allocates an element from a specified zone, but never blocks.
656  *
657  * @discussion
658  * This call is suitable for preemptible code, however allocation
659  * isn't allowed from interrupt context.
660  *
661  * @param zone          the zone or zone view to allocate from
662  *
663  * @returns             NULL or the allocated element
664  */
665 __attribute__((malloc))
666 extern void *__unsafe_indexable zalloc_noblock(
667 	zone_t          zone);
668 
669 __attribute__((malloc))
670 __attribute__((overloadable))
671 static inline void *__unsafe_indexable
zalloc_noblock(zone_view_t view)672 zalloc_noblock(zone_view_t view)
673 {
674 	return zalloc_noblock((zone_t)view);
675 }
676 
677 __attribute__((malloc))
678 __attribute__((overloadable))
679 static inline void *__unsafe_indexable
zalloc_noblock(kalloc_type_view_t kt_view)680 zalloc_noblock(kalloc_type_view_t kt_view)
681 {
682 	return (kalloc_type_impl)(kt_view, Z_NOWAIT);
683 }
684 
685 /*!
686  * @function zalloc_flags()
687  *
688  * @abstract
689  * Allocates an element from a specified zone, with flags.
690  *
691  * @param zone          the zone or zone view to allocate from
692  * @param flags         a collection of @c zalloc_flags_t.
693  *
694  * @returns             NULL or the allocated element
695  */
696 __attribute__((malloc))
697 extern void *__unsafe_indexable zalloc_flags(
698 	zone_t          zone,
699 	zalloc_flags_t  flags);
700 
701 __attribute__((malloc))
702 __attribute__((overloadable))
703 static inline void *__unsafe_indexable
__zalloc_flags(zone_t zone,zalloc_flags_t flags)704 __zalloc_flags(
705 	zone_t          zone,
706 	zalloc_flags_t  flags)
707 {
708 	void *__unsafe_indexable addr = (zalloc_flags)(zone, flags);
709 	if (flags & Z_NOFAIL) {
710 		__builtin_assume(addr != NULL);
711 	}
712 	return addr;
713 }
714 
715 __attribute__((malloc))
716 __attribute__((overloadable))
717 static inline void *__unsafe_indexable
__zalloc_flags(zone_view_t view,zalloc_flags_t flags)718 __zalloc_flags(
719 	zone_view_t     view,
720 	zalloc_flags_t  flags)
721 {
722 	return __zalloc_flags((zone_t)view, flags);
723 }
724 
725 __attribute__((malloc))
726 __attribute__((overloadable))
727 static inline void *__unsafe_indexable
__zalloc_flags(kalloc_type_view_t kt_view,zalloc_flags_t flags)728 __zalloc_flags(
729 	kalloc_type_view_t  kt_view,
730 	zalloc_flags_t      flags)
731 {
732 	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
733 	if (flags & Z_NOFAIL) {
734 		__builtin_assume(addr != NULL);
735 	}
736 	return addr;
737 }
738 
739 __attribute__((malloc))
740 static inline void *__header_indexable
zalloc_flags_buf(zone_t zone,zalloc_flags_t flags)741 zalloc_flags_buf(
742 	zone_t          zone,
743 	zalloc_flags_t  flags)
744 {
745 	void *__unsafe_indexable addr = __zalloc_flags(zone, flags);
746 	if (flags & Z_NOFAIL) {
747 		__builtin_assume(addr != NULL);
748 	}
749 	return __unsafe_forge_bidi_indexable(void *, addr, zone_get_elem_size(zone));
750 }
751 
752 #if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
753 #define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
754 #else
755 #define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
756 #endif
757 
758 /*!
759  * @macro zalloc_id
760  *
761  * @abstract
762  * Allocates an element from a specified zone ID, with flags.
763  *
764  * @param zid           The proper @c ZONE_ID_* constant.
765  * @param flags         a collection of @c zalloc_flags_t.
766  *
767  * @returns             NULL or the allocated element
768  */
769 __attribute__((malloc))
770 extern void *__unsafe_indexable zalloc_id(
771 	zone_id_t       zid,
772 	zalloc_flags_t  flags);
773 
774 __attribute__((malloc))
775 static inline void *__unsafe_indexable
__zalloc_id(zone_id_t zid,zalloc_flags_t flags)776 __zalloc_id(
777 	zone_id_t       zid,
778 	zalloc_flags_t  flags)
779 {
780 	void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
781 	if (flags & Z_NOFAIL) {
782 		__builtin_assume(addr != NULL);
783 	}
784 	return addr;
785 }
786 
787 #if XNU_KERNEL_PRIVATE
788 #define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
789 #else
790 #define zalloc_id(zid, fl) __zalloc_id(zid, fl)
791 #endif
792 
793 /*!
794  * @function zalloc_ro
795  *
796  * @abstract
797  * Allocates an element from a specified read-only zone.
798  *
799  * @param zone_id       the zone id to allocate from
800  * @param flags         a collection of @c zalloc_flags_t.
801  *
802  * @returns             NULL or the allocated element
803  */
804 __attribute__((malloc))
805 extern void *__unsafe_indexable zalloc_ro(
806 	zone_id_t       zone_id,
807 	zalloc_flags_t  flags);
808 
809 __attribute__((malloc))
810 static inline void *__unsafe_indexable
__zalloc_ro(zone_id_t zone_id,zalloc_flags_t flags)811 __zalloc_ro(
812 	zone_id_t       zone_id,
813 	zalloc_flags_t  flags)
814 {
815 	void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
816 	if (flags & Z_NOFAIL) {
817 		__builtin_assume(addr != NULL);
818 	}
819 	return addr;
820 }
821 
822 #if XNU_KERNEL_PRIVATE
823 #define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
824 #else
825 #define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
826 #endif
827 
828 /*!
829  * @function zalloc_ro_mut
830  *
831  * @abstract
832  * Modifies an element from a specified read-only zone.
833  *
834  * @discussion
835  * Modifying compiler-assisted authenticated pointers using this function will
836  * not result in a signed pointer being written.  The caller is expected to
837  * sign the value appropriately beforehand if they wish to do this.
838  *
839  * @param zone_id       the zone id to allocate from
840  * @param elem          element to be modified
841  * @param offset        offset from element
842  * @param new_data      pointer to new data
843  * @param new_data_size size of modification
844  *
845  */
846 extern void zalloc_ro_mut(
847 	zone_id_t       zone_id,
848 	void           *elem __unsafe_indexable,
849 	vm_offset_t     offset,
850 	const void     *new_data __sized_by(new_data_size),
851 	vm_size_t       new_data_size);
852 
853 /*!
854  * @function zalloc_ro_update_elem
855  *
856  * @abstract
857  * Update the value of an entire element allocated in the read only allocator.
858  *
859  * @param zone_id       the zone id to allocate from
860  * @param elem          element to be modified
861  * @param new_data      pointer to new data
862  *
863  */
864 #define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
865 	const typeof(*(elem)) *__new_data = (new_data);                        \
866 	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
867 })
868 
869 /*!
870  * @function zalloc_ro_update_field
871  *
872  * @abstract
873  * Update a single field of an element allocated in the read only allocator.
874  *
875  * @param zone_id       the zone id to allocate from
876  * @param elem          element to be modified
877  * @param field         the element field to be modified
878  * @param new_data      pointer to new data
879  *
880  */
881 #define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
882 	const typeof((elem)->field) *__value = (value);                        \
883 	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
884 	    __value, sizeof((elem)->field));                                   \
885 })
886 
887 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
888 
889 /*!
890  * @enum zro_atomic_op_t
891  *
892  * @brief
893  * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
894  * atomic operations.
895  *
896  * @discussion
897  * This enum provides all flavors of atomic operations supported in sizes 8,
898  * 16, 32, 64 bits.
899  *
900  * @const ZRO_ATOMIC_OR_*
901  * To perform an @s os_atomic_or
902  *
903  * @const ZRO_ATOMIC_XOR_*
904  * To perform an @s os_atomic_xor
905  *
906  * @const ZRO_ATOMIC_AND_*
907  * To perform an @s os_atomic_and
908  *
909  * @const ZRO_ATOMIC_ADD_*
910  * To perform an @s os_atomic_add
911  *
912  * @const ZRO_ATOMIC_XCHG_*
913  * To perform an @s os_atomic_xchg
914  *
915  */
916 __enum_decl(zro_atomic_op_t, uint32_t, {
917 	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
918 	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
919 	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
920 	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,
921 
922 	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
923 	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
924 	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
925 	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,
926 
927 	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
928 	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
929 	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
930 	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,
931 
932 	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
933 	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
934 	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
935 	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,
936 
937 	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
938 	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
939 	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
940 	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,
941 
942 	/* cconvenient spellings */
943 	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
944 	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
945 	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
946 	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
947 	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
948 });
949 
950 /*!
951  * @function zalloc_ro_mut_atomic
952  *
953  * @abstract
954  * Atomically update an offset in an element allocated in the read only
955  * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
956  *
957  * @param zone_id       the zone id to allocate from
958  * @param elem          element to be modified
959  * @param offset        offset in the element to be modified
960  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
961  * @param value         value for the atomic operation
962  *
963  */
964 extern uint64_t zalloc_ro_mut_atomic(
965 	zone_id_t       zone_id,
966 	void           *elem __unsafe_indexable,
967 	vm_offset_t     offset,
968 	zro_atomic_op_t op,
969 	uint64_t        value);
970 
971 /*!
972  * @macro zalloc_ro_update_field_atomic
973  *
974  * @abstract
975  * Atomically update a single field of an element allocated in the read only
976  * allocator.
977  *
978  * @param zone_id       the zone id to allocate from
979  * @param elem          element to be modified
980  * @param field         the element field to be modified
981  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
982  * @param value         value for the atomic operation
983  *
984  */
/*
 * The static_assert verifies that the field's size matches the operand
 * size encoded in the low nibble of @c op (see zro_atomic_op_t), so a
 * mismatched op/field pair fails at compile time.  The result of the
 * atomic operation is cast back to the field's base type.
 */
#define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
	const typeof((elem)->field) __value = (value);                         \
	static_assert(sizeof(__value) == (op & 0xf));                          \
	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
})
991 
992 /*!
993  * @function zalloc_ro_clear
994  *
995  * @abstract
996  * Zeroes an element from a specified read-only zone.
997  *
998  * @param zone_id       the zone id to allocate from
999  * @param elem          element to be modified
1000  * @param offset        offset from element
1001  * @param size          size of modification
1002  */
1003 extern void    zalloc_ro_clear(
1004 	zone_id_t       zone_id,
1005 	void           *elem __unsafe_indexable,
1006 	vm_offset_t     offset,
1007 	vm_size_t       size);
1008 
1009 /*!
1010  * @function zalloc_ro_clear_field
1011  *
1012  * @abstract
1013  * Zeroes the specified field of an element from a specified read-only zone.
1014  *
1015  * @param zone_id       the zone id to allocate from
1016  * @param elem          element to be modified
1017  * @param field         offset from element
1018  */
1019 #define zalloc_ro_clear_field(zone_id, elem, field) \
1020 	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
1021 	    sizeof((elem)->field))
1022 
1023 /*!
1024  * @function zfree_id()
1025  *
1026  * @abstract
1027  * Frees an element previously allocated with @c zalloc_id().
1028  *
1029  * @param zone_id       the zone id to free the element to.
1030  * @param addr          the address to free
1031  */
extern void     zfree_id(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
/*
 * The macro shadows the function so that the caller's pointer variable is
 * loaded and zeroed (os_ptr_load_and_erase) before the element is freed,
 * leaving no stale reference behind in the caller.
 */
#define zfree_id(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1039 
1040 
1041 /*!
1042  * @function zfree_ro()
1043  *
1044  * @abstract
1045  * Frees an element previously allocated with @c zalloc_ro().
1046  *
1047  * @param zone_id       the zone id to free the element to.
1048  * @param addr          the address to free
1049  */
extern void     zfree_ro(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
/*
 * Macro shadow: erases the caller's pointer (os_ptr_load_and_erase)
 * before handing the element to the real zfree_ro() function.
 */
#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1057 
1058 
1059 /*!
1060  * @function zfree
1061  *
1062  * @abstract
1063  * Frees an element allocated with @c zalloc*.
1064  *
1065  * @discussion
1066  * If the element being freed doesn't belong to the specified zone,
1067  * then this call will panic.
1068  *
1069  * @param zone          the zone or zone view to free the element to.
1070  * @param elem          the element to free
1071  */
1072 extern void     zfree(
1073 	zone_t          zone,
1074 	void           *elem __unsafe_indexable);
1075 
/* Overload: freeing to a zone view forwards to the zone_t variant. */
__attribute__((overloadable))
static inline void
zfree(
	zone_view_t     view,
	void           *elem __unsafe_indexable)
{
	zfree((zone_t)view, elem);
}
1084 
/* Overload: freeing to a kalloc type view forwards to kfree_type_impl(). */
__attribute__((overloadable))
static inline void
zfree(
	kalloc_type_view_t   kt_view,
	void                *elem __unsafe_indexable)
{
	return kfree_type_impl(kt_view, elem);
}
1093 
/*
 * Macro shadow over the zfree() overload set: erases the caller's
 * pointer (os_ptr_load_and_erase) before dispatching to the proper
 * overload based on the zone argument's type.
 */
#define zfree(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})
1098 
1099 
1100 /* deprecated KPIS */
1101 
1102 __zalloc_deprecated("use zone_create()")
1103 extern zone_t   zinit(
1104 	vm_size_t       size,           /* the size of an element */
1105 	vm_size_t       maxmem,         /* maximum memory to use */
1106 	vm_size_t       alloc,          /* allocation size */
1107 	const char      *name __unsafe_indexable);
1108 
1109 #pragma mark: implementation details
1110 
1111 #define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
1112 #define __ZONE_DECLARE_TYPE2(var, type_t) \
1113 	__attribute__((visibility("hidden"))) \
1114 	extern type_t *__single __zalloc__##var##__type_name
1115 
1116 #ifdef XNU_KERNEL_PRIVATE
1117 #pragma mark - XNU only interfaces
1118 
1119 #include <kern/cpu_number.h>
1120 
1121 #pragma GCC visibility push(hidden)
1122 
1123 #pragma mark XNU only: zalloc (extended)
1124 
1125 #define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
1126 #define ZALIGN_16               (sizeof(uint16_t) - 1)
1127 #define ZALIGN_32               (sizeof(uint32_t) - 1)
1128 #define ZALIGN_PTR              (sizeof(void *)   - 1)
1129 #define ZALIGN_64               (sizeof(uint64_t) - 1)
1130 #define ZALIGN(t)               (_Alignof(t)      - 1)
1131 
1132 
1133 /*!
1134  * @function zalloc_permanent_tag()
1135  *
1136  * @abstract
1137  * Allocates a permanent element from the permanent zone
1138  *
1139  * @discussion
1140  * Memory returned by this function is always 0-initialized.
1141  * Note that the size of this allocation can not be determined
1142  * by zone_element_size so it should not be used for copyio.
1143  *
1144  * @param size          the element size (must be smaller than PAGE_SIZE)
1145  * @param align_mask    the required alignment for this allocation
1146  * @param tag           the tag to use for allocations larger than a page.
1147  *
1148  * @returns             the allocated element
1149  */
1150 __attribute__((malloc))
1151 extern void *__sized_by(size) zalloc_permanent_tag(
1152 	vm_size_t       size,
1153 	vm_offset_t     align_mask,
1154 	vm_tag_t        tag)
1155 __attribute__((__diagnose_if__((align_mask & (align_mask + 1)),
1156     "align mask looks invalid", "error")));
1157 
1158 /*!
1159  * @function zalloc_permanent()
1160  *
1161  * @abstract
1162  * Allocates a permanent element from the permanent zone
1163  *
1164  * @discussion
1165  * Memory returned by this function is always 0-initialized.
1166  * Note that the size of this allocation can not be determined
1167  * by zone_element_size so it should not be used for copyio.
1168  *
1169  * @param size          the element size (must be smaller than PAGE_SIZE)
1170  * @param align_mask    the required alignment for this allocation
1171  *
1172  * @returns             the allocated element
1173  */
1174 #define zalloc_permanent(size, align) \
1175 	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)
1176 
1177 /*!
1178  * @function zalloc_permanent_type()
1179  *
1180  * @abstract
1181  * Allocates a permanent element of a given type with its natural alignment.
1182  *
1183  * @discussion
1184  * Memory returned by this function is always 0-initialized.
1185  *
1186  * @param type_t        the element type
1187  *
1188  * @returns             the allocated element
1189  */
1190 #define zalloc_permanent_type(type_t) \
1191 	__unsafe_forge_single(type_t *, \
1192 	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
1193 
1194 /*!
1195  * @function zalloc_first_proc_made()
1196  *
1197  * @abstract
1198  * Declare that the "early" allocation phase is done.
1199  */
1200 extern void zalloc_first_proc_made(void);
1201 /*!
1202  * @function zalloc_iokit_lockdown()
1203  *
1204  * @abstract
1205  * Declare that iokit matching has started.
1206  */
1207 extern void zalloc_iokit_lockdown(void);
1208 
1209 #pragma mark XNU only: per-cpu allocations
1210 
1211 /*!
1212  * @macro zpercpu_get_cpu()
1213  *
1214  * @abstract
1215  * Get a pointer to a specific CPU slot of a given per-cpu variable.
1216  *
1217  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1218  * @param cpu           the specified CPU number as returned by @c cpu_number()
1219  *
1220  * @returns             the per-CPU slot for @c ptr for the specified CPU.
1221  */
1222 #define zpercpu_get_cpu(ptr, cpu) \
1223 	__zpcpu_cast(ptr, __zpcpu_addr(ptr) + ptoa((unsigned)(cpu)))
1224 
1225 /*!
1226  * @macro zpercpu_get()
1227  *
1228  * @abstract
1229  * Get a pointer to the current CPU slot of a given per-cpu variable.
1230  *
1231  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1232  *
1233  * @returns             the per-CPU slot for @c ptr for the current CPU.
1234  */
1235 #define zpercpu_get(ptr) \
1236 	zpercpu_get_cpu(ptr, cpu_number())
1237 
1238 /*!
1239  * @macro zpercpu_foreach()
1240  *
1241  * @abstract
1242  * Enumerate all per-CPU slots by address.
1243  *
1244  * @param it            the name for the iterator
1245  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1246  */
1247 #define zpercpu_foreach(it, ptr) \
1248 	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
1249 	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
1250 	    it < __end_##it; it = __zpcpu_next(it))
1251 
1252 /*!
1253  * @macro zpercpu_foreach_cpu()
1254  *
1255  * @abstract
1256  * Enumerate all per-CPU slots by CPU slot number.
1257  *
1258  * @param cpu           the name for cpu number iterator.
1259  */
1260 #define zpercpu_foreach_cpu(cpu) \
1261 	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1262 
1263 /*!
1264  * @function zalloc_percpu()
1265  *
1266  * @abstract
1267  * Allocates an element from a per-cpu zone.
1268  *
1269  * @discussion
1270  * The returned pointer cannot be used directly and must be manipulated
1271  * through the @c zpercpu_get*() interfaces.
1272  *
1273  * @param zone_or_view  the zone or zone view to allocate from
1274  * @param flags         a collection of @c zalloc_flags_t.
1275  *
1276  * @returns             NULL or the allocated element
1277  */
1278 extern void *__zpercpu zalloc_percpu(
1279 	zone_or_view_t  zone_or_view,
1280 	zalloc_flags_t  flags);
1281 
/*
 * Inline wrapper for zalloc_percpu() that communicates the Z_NOFAIL
 * contract to the compiler, so callers' NULL checks can be elided.
 */
static inline void *__zpercpu
__zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
	if (flags & Z_NOFAIL) {
		/* Z_NOFAIL allocations cannot return NULL. */
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1295 
1296 /*!
1297  * @function zfree_percpu()
1298  *
1299  * @abstract
1300  * Frees an element previously allocated with @c zalloc_percpu().
1301  *
1302  * @param zone_or_view  the zone or zone view to free the element to.
1303  * @param addr          the address to free
1304  */
1305 extern void     zfree_percpu(
1306 	zone_or_view_t  zone_or_view,
1307 	void *__zpercpu addr);
1308 
1309 /*!
1310  * @function zalloc_percpu_permanent()
1311  *
1312  * @abstract
1313  * Allocates a permanent percpu-element from the permanent percpu zone.
1314  *
1315  * @discussion
1316  * Memory returned by this function is always 0-initialized.
1317  *
1318  * @param size          the element size (must be smaller than PAGE_SIZE)
1319  * @param align_mask    the required alignment for this allocation
1320  *
1321  * @returns             the allocated element
1322  */
1323 extern void *__zpercpu zalloc_percpu_permanent(
1324 	vm_size_t       size,
1325 	vm_offset_t     align_mask);
1326 
1327 /*!
1328  * @function zalloc_percpu_permanent_type()
1329  *
1330  * @abstract
1331  * Allocates a permanent percpu-element from the permanent percpu zone of a given
1332  * type with its natural alignment.
1333  *
1334  * @discussion
1335  * Memory returned by this function is always 0-initialized.
1336  *
1337  * @param type_t        the element type
1338  *
1339  * @returns             the allocated element
1340  */
1341 #define zalloc_percpu_permanent_type(type_t) \
1342 	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1343 
1344 
1345 #pragma mark XNU only: SMR support for zones
1346 
1347 struct smr;
1348 
1349 /*!
1350  * @typedef zone_smr_free_cb_t
1351  *
1352  * @brief
1353  * Type for the delayed free callback for SMR zones.
1354  *
1355  * @description
1356  * This function is called before an element is reused,
1357  * or when memory is returned to the system.
1358  *
1359  * This function MUST zero the element, and if no special
1360  * action is to be taken on free, then @c bzero() is a fine
1361  * callback to use.
1362  *
1363  * This function also must be preemption-disabled safe,
1364  * as it runs with preemption disabled.
1365  *
1366  *
1367  * Note that this function should only clean the fields
1368  * that must be preserved for stale SMR readers to see.
1369  * Any field that is accessed after element validation
1370  * such as a try-retain or acquiring a lock on it must
1371  * be cleaned up much earlier as they might hold onto
1372  * expensive resources.
1373  *
1374  * The suggested pattern for an SMR type using this facility,
1375  * is to have 2 functions:
1376  *
1377  * - one "retire" stage that tries to clean up as much from
1378  *   the element as possible, with great care to leave no dangling
1379  *   pointers around, as elements in this stage might linger
1380  *   in the allocator for a long time, and this could possibly
1381  *   be abused during UaF exploitation.
1382  *
1383  * - one "smr_free" function which cleans up whatever was left,
1384  *   and zeroes the rest of the element.
1385  *
1386  * <code>
1387  *     void
1388  *     type_retire(type_t elem)
1389  *     {
1390  *         // invalidating the element makes most fields
1391  *         // inaccessible to readers.
1392  *         type_mark_invalid(elem);
1393  *
1394  *         // do cleanups for things requiring a validity check
1395  *         kfree_type(some_type_t, elem->expensive_thing);
1396  *         type_remove_from_global_list(&elem->linkage);
1397  *
1398  *         zfree_smr(type_zone, elem);
1399  *     }
1400  *
1401  *     void
1402  *     type_smr_free(void *_elem)
1403  *     {
 *         type_t elem = _elem;
1405  *
1406  *         // cleanup fields that are used to "find" this element
1407  *         // and that SMR readers may access hazardously.
1408  *         lck_ticket_destroy(&elem->lock);
1409  *         kfree_data(elem->key, elem->keylen);
1410  *
1411  *         // compulsory: element must be zeroed fully
1412  *         bzero(elem, sizeof(*elem));
1413  *     }
1414  * </code>
1415  */
1416 typedef void (*zone_smr_free_cb_t)(void *, size_t);
1417 
1418 /*!
1419  * @function zone_enable_smr()
1420  *
1421  * @abstract
1422  * Enable SMR for a zone.
1423  *
1424  * @discussion
1425  * This can only be done once, and must be done before
1426  * the first allocation is made with this zone.
1427  *
1428  * @param zone          the zone to enable SMR for
1429  * @param smr           the smr domain to use
1430  * @param free_cb       the free callback to use
1431  */
1432 extern void     zone_enable_smr(
1433 	zone_t                  zone,
1434 	struct smr             *smr,
1435 	zone_smr_free_cb_t      free_cb);
1436 
1437 /*!
1438  * @function zone_id_enable_smr()
1439  *
1440  * @abstract
1441  * Enable SMR for a zone ID.
1442  *
1443  * @discussion
1444  * This can only be done once, and must be done before
1445  * the first allocation is made with this zone.
1446  *
1447  * @param zone_id       the zone to enable SMR for
1448  * @param smr           the smr domain to use
1449  * @param free_cb       the free callback to use
1450  */
/*
 * Assigning @c free_cb to @c __cb type-checks it against the zone's
 * declared element type (ZONE_DECLARE_ID) before the cast to the
 * generic zone_smr_free_cb_t signature.
 */
#define zone_id_enable_smr(zone_id, smr, free_cb)  ({ \
	void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t);      \
                                                                                \
	__cb = (free_cb);                                                       \
	zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb);    \
})
1457 
1458 /*!
1459  * @macro zalloc_smr()
1460  *
1461  * @abstract
1462  * Allocates an element from an SMR enabled zone
1463  *
1464  * @discussion
1465  * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
1466  *
1467  * @param zone          the zone to allocate from
1468  * @param flags         a collection of @c zalloc_flags_t.
1469  *
1470  * @returns             NULL or the allocated element
1471  */
1472 #define zalloc_smr(zone, flags) \
1473 	zalloc_flags(zone, flags)
1474 
1475 /*!
1476  * @macro zalloc_id_smr()
1477  *
1478  * @abstract
1479  * Allocates an element from a specified zone ID with SMR enabled.
1480  *
1481  * @param zid           The proper @c ZONE_ID_* constant.
1482  * @param flags         a collection of @c zalloc_flags_t.
1483  *
1484  * @returns             NULL or the allocated element
1485  */
1486 #define zalloc_id_smr(zid, flags) \
1487 	zalloc_id(zid, flags)
1488 
1489 /*!
1490  * @macro zfree_smr()
1491  *
1492  * @abstract
1493  * Frees an element previously allocated with @c zalloc_smr().
1494  *
1495  * @discussion
1496  * When zfree_smr() is called, then the element is not immediately zeroed,
1497  * and the "free" callback that has been registered with the zone will
1498  * run later (@see zone_smr_free_cb_t).
1499  *
1500  * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
1501  *
1502  *
1503  * It is guaranteed that the SMR timestamp associated with an element
1504  * will always be equal or greater than the stamp associated with
1505  * elements freed before it on the same thread.
1506  *
1507  * It means that when freeing multiple elements in a sequence, these
1508  * must be freed in topological order (parents before children).
1509  *
1510  * It is worth noting that calling zfree_smr() on several elements
1511  * in a given order doesn't necessarily mean they will be effectively
1512  * reused or cleaned up in that same order, only that their SMR clocks
1513  * will expire in that order.
1514  *
1515  *
1516  * @param zone          the zone to free the element to.
1517  * @param elem          the address to free
1518  */
extern void     zfree_smr(
	zone_t          zone,
	void           *elem __unsafe_indexable);
/*
 * Macro shadow: erases the caller's pointer (os_ptr_load_and_erase)
 * before the deferred SMR free; use zfree_smr_noclear() when the
 * algorithm requires the pointer to remain readable.
 */
#define zfree_smr(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})
1526 
1527 
1528 /*!
1529  * @function zfree_id_smr()
1530  *
1531  * @abstract
1532  * Frees an element previously allocated with @c zalloc_id_smr().
1533  *
1534  * @param zone_id       the zone id to free the element to.
1535  * @param addr          the address to free
1536  */
1537 extern void     zfree_id_smr(
1538 	zone_id_t       zone_id,
1539 	void           *addr __unsafe_indexable);
1540 #define zfree_id_smr(zid, elem) ({ \
1541 	zone_id_t __zfree_zid = (zid); \
1542 	(zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
1543 })
1544 
1545 /*!
1546  * @macro zfree_smr_noclear()
1547  *
1548  * @abstract
1549  * Frees an element previously allocated with @c zalloc_smr().
1550  *
1551  * @discussion
1552  * This variant doesn't clear the pointer passed as an argument,
1553  * as it is often required for SMR algorithms to function correctly
1554  * to leave pointers "dangling" to an extent.
1555  *
1556  * However it expects the field in question to be an SMR_POINTER()
1557  * struct.
1558  *
1559  * @param zone          the zone to free the element to.
1560  * @param elem          the address to free
1561  */
1562 #define zfree_smr_noclear(zone, elem) \
1563 	(zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1564 
1565 /*!
1566  * @macro zfree_id_smr_noclear()
1567  *
1568  * @abstract
1569  * Frees an element previously allocated with @c zalloc_id_smr().
1570  *
1571  * @discussion
1572  * This variant doesn't clear the pointer passed as an argument,
1573  * as it is often required for SMR algorithms to function correctly
1574  * to leave pointers "dangling" to an extent.
1575  *
1576  * However it expects the field in question to be an SMR_POINTER()
1577  * struct.
1578  *
1579  * @param zone          the zone to free the element to.
1580  * @param elem          the address to free
1581  */
1582 #define zfree_id_smr_noclear(zone, elem) \
1583 	(zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1584 
1585 
1586 #pragma mark XNU only: zone creation (extended)
1587 
1588 /*!
1589  * @enum zone_reserved_id_t
1590  *
1591  * @abstract
1592  * Well known pre-registered zones, allowing use of zone_id_require()
1593  *
1594  * @discussion
1595  * @c ZONE_ID__* aren't real zone IDs.
1596  *
1597  * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1598  * easy a value to produce (by malice or accident).
1599  *
1600  * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1601  * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
1602  * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creations IDs
1603  * listed in @c zone_create_ro_id_t.
1604  *
1605  * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1606  * @c zone_create().
1607  */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,                  /* reserved: zone ID 0 is never valid */

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	/* read-only zones, bracketed by ZONE_ID__FIRST_RO / ZONE_ID__LAST_RO */
	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	/* external read-only zones, mapping 1:1 to zone_create_ro_id_t */
	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,
	ZONE_ID_AMFI_OSENTITLEMENTS,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_KMSG,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_PROC_TASK,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

#if !CONFIG_MBUF_MCACHE
	ZONE_ID_MBUF_REF,
	ZONE_ID_MBUF,
	ZONE_ID_CLUSTER_2K,
	ZONE_ID_CLUSTER_4K,
	ZONE_ID_CLUSTER_16K,
	ZONE_ID_MBUF_CLUSTER_2K,
	ZONE_ID_MBUF_CLUSTER_4K,
	ZONE_ID_MBUF_CLUSTER_16K,
#endif /* !CONFIG_MBUF_MCACHE */

	/* first ID available to zone_create() */
	ZONE_ID__FIRST_DYNAMIC,
});
1662 
1663 /*!
1664  * @const ZONE_ID_ANY
1665  * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1666  * Zone ID.
1667  */
1668 #define ZONE_ID_ANY ((zone_id_t)-1)
1669 
1670 /*!
1671  * @const ZONE_ID_INVALID
1672  * An invalid zone_id_t that corresponds to nothing.
1673  */
1674 #define ZONE_ID_INVALID ((zone_id_t)-2)
1675 
/*!
1677  * @function zone_by_id
1678  *
1679  * @param zid           the specified zone ID.
1680  * @returns             the zone with that ID.
1681  */
1682 zone_t zone_by_id(
1683 	size_t                  zid) __pure2;
1684 
/*!
1686  * @function zone_name
1687  *
1688  * @param zone          the specified zone
1689  * @returns             the name of the specified zone.
1690  */
1691 const char *__unsafe_indexable zone_name(
1692 	zone_t                  zone);
1693 
/*!
1695  * @function zone_heap_name
1696  *
1697  * @param zone          the specified zone
1698  * @returns             the name of the heap this zone is part of, or "".
1699  */
1700 const char *__unsafe_indexable zone_heap_name(
1701 	zone_t                  zone);
1702 
1703 /*!
1704  * @function zone_create_ext
1705  *
1706  * @abstract
1707  * Creates a zone with the specified parameters.
1708  *
1709  * @discussion
1710  * This is an extended version of @c zone_create().
1711  *
1712  * @param name          the name for the new zone.
1713  * @param size          the size of the elements returned by this zone.
1714  * @param flags         a set of @c zone_create_flags_t flags.
1715  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1716  *
1717  * @param extra_setup   a block that can perform non trivial initialization
1718  *                      on the zone before it is marked valid.
1719  *                      This block can call advanced setups like:
1720  *                      - zone_set_exhaustible()
1721  *
1722  * @returns             the created zone, this call never fails.
1723  */
1724 extern zone_t   zone_create_ext(
1725 	const char             *name __unsafe_indexable,
1726 	vm_size_t               size,
1727 	zone_create_flags_t     flags,
1728 	zone_id_t               desired_zid,
1729 	void                  (^extra_setup)(zone_t));
1730 
1731 /*!
1732  * @macro ZONE_DECLARE
1733  *
1734  * @abstract
1735  * Declares a zone variable and its associated type.
1736  *
1737  * @param var           the name of the variable to declare.
1738  * @param type_t        the type of elements in the zone.
1739  */
1740 #define ZONE_DECLARE(var, type_t) \
1741 	extern zone_t var; \
1742 	__ZONE_DECLARE_TYPE(var, type_t)
1743 
1744 /*!
1745  * @macro ZONE_DECLARE_ID
1746  *
1747  * @abstract
1748  * Declares the type associated with a zone ID.
1749  *
1750  * @param id            the name of zone ID to associate a type with.
1751  * @param type_t        the type of elements in the zone.
1752  */
1753 #define ZONE_DECLARE_ID(id, type_t) \
1754 	__ZONE_DECLARE_TYPE(id, type_t)
1755 
1756 /*!
1757  * @macro ZONE_DEFINE
1758  *
1759  * @abstract
1760  * Declares a zone variable to automatically initialize with the specified
1761  * parameters.
1762  *
1763  * @discussion
1764  * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1765  *
1766  * @param var           the name of the variable to declare.
1767  * @param name          the name for the zone
1768  * @param size          the size of the elements returned by this zone.
1769  * @param flags         a set of @c zone_create_flags_t flags.
1770  */
1771 #define ZONE_DEFINE(var, name, size, flags) \
1772 	SECURITY_READ_ONLY_LATE(zone_t) var; \
1773 	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
1774 	static __startup_data struct zone_create_startup_spec \
1775 	__startup_zone_spec_ ## var = { &var, name, size, flags, \
1776 	    ZONE_ID_ANY, NULL }; \
1777 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1778 	    &__startup_zone_spec_ ## var)
1779 
1780 /*!
1781  * @macro ZONE_DEFINE_TYPE
1782  *
1783  * @abstract
1784  * Defines a zone variable to automatically initialize with the specified
1785  * parameters, associated with a particular type.
1786  *
1787  * @param var           the name of the variable to declare.
1788  * @param name          the name for the zone
1789  * @param type_t        the type of elements in the zone.
1790  * @param flags         a set of @c zone_create_flags_t flags.
1791  */
1792 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1793 	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1794 	__ZONE_DECLARE_TYPE(var, type_t)
1795 
1796 /*!
1797  * @macro ZONE_DEFINE_ID
1798  *
1799  * @abstract
1800  * Initializes a given zone automatically during startup with the specified
1801  * parameters.
1802  *
1803  * @param zid           a @c zone_reserved_id_t value.
1804  * @param name          the name for the zone
1805  * @param type_t        the type of elements in the zone.
1806  * @param flags         a set of @c zone_create_flags_t flags.
1807  */
1808 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1809 	ZONE_DECLARE_ID(zid, type_t); \
1810 	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1811 
1812 /*!
1813  * @macro ZONE_INIT
1814  *
1815  * @abstract
1816  * Initializes a given zone automatically during startup with the specified
1817  * parameters.
1818  *
1819  * @param var           the name of the variable to initialize.
1820  * @param name          the name for the zone
1821  * @param size          the size of the elements returned by this zone.
1822  * @param flags         a set of @c zone_create_flags_t flags.
1823  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1824  * @param extra_setup   a block that can perform non trivial initialization
1825  *                      (@see @c zone_create_ext()).
1826  */
1827 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1828 	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1829 
1830 /*!
1831  * @function zone_id_require
1832  *
1833  * @abstract
1834  * Requires for a given pointer to belong to the specified zone, by ID and size.
1835  *
1836  * @discussion
1837  * The function panics if the check fails as it indicates that the kernel
1838  * internals have been compromised.
1839  *
1840  * This is a variant of @c zone_require() which:
1841  * - isn't sensitive to @c zone_t::elem_size being compromised,
1842  * - is slightly faster as it saves one load and a multiplication.
1843  *
1844  * @param zone_id       the zone ID the address needs to belong to.
1845  * @param elem_size     the size of elements for this zone.
1846  * @param addr          the element address to check.
1847  */
1848 extern void     zone_id_require(
1849 	zone_id_t               zone_id,
1850 	vm_size_t               elem_size,
1851 	void                   *addr __unsafe_indexable);
1852 
1853 /*!
1854  * @function zone_id_require_aligned
1855  *
1856  * @abstract
1857  * Requires for a given pointer to belong to the specified zone, by ID and size.
1858  *
1859  * @discussion
1860  * Similar to @c zone_id_require() but does more checks such as whether the
1861  * element is properly aligned.
1862  *
1863  * @param zone_id       the zone ID the address needs to belong to.
1864  * @param addr          the element address to check.
1865  */
1866 extern void     zone_id_require_aligned(
1867 	zone_id_t               zone_id,
1868 	void                   *addr __unsafe_indexable);
1869 
1870 /* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
1871 extern void     zone_set_exhaustible(
1872 	zone_t                  zone,
1873 	vm_size_t               max_elements,
1874 	bool                    exhausts_by_design);
1875 
1876 /*!
1877  * @function zone_raise_reserve()
1878  *
1879  * @brief
1880  * Used to raise the reserve on a zone.
1881  *
1882  * @discussion
1883  * Can be called from any context (zone_create_ext() setup hook or after).
1884  */
1885 extern void     zone_raise_reserve(
1886 	zone_or_view_t          zone_or_view,
1887 	uint16_t                min_elements);
1888 
1889 /*!
1890  * @function zone_fill_initially
1891  *
1892  * @brief
1893  * Initially fill a non collectable zone to have the specified amount of
1894  * elements.
1895  *
1896  * @discussion
1897  * This function must be called on a non collectable permanent zone before it
1898  * has been used yet.
1899  *
1900  * @param zone          The zone to fill.
1901  * @param nelems        The number of elements to be able to hold.
1902  */
1903 extern void     zone_fill_initially(
1904 	zone_t                  zone,
1905 	vm_size_t               nelems);
1906 
1907 /*!
1908  * @function zone_drain()
1909  *
1910  * @abstract
1911  * Forces a zone to be drained (have all its data structures freed
1912  * back to its data store, and empty pages returned to the system).
1913  *
1914  * @param zone          the zone id to free the objects to.
1915  */
1916 extern void zone_drain(
1917 	zone_t                  zone);
1918 
1919 /*!
1920  * @struct zone_basic_stats
1921  *
1922  * @abstract
1923  * Used to report basic statistics about a zone.
1924  *
1925  * @field zbs_avail     the number of elements in a zone.
1926  * @field zbs_alloc     the number of allocated elements in a zone.
1927  * @field zbs_free      the number of free elements in a zone.
1928  * @field zbs_cached    the number of free elements in the per-CPU caches.
1929  *                      (included in zbs_free).
1930  * @field zbs_alloc_fail
1931  *                      the number of allocation failures.
1932  */
struct zone_basic_stats {
	uint64_t        zbs_avail;      /* total number of elements in the zone */
	uint64_t        zbs_alloc;      /* number of allocated elements */
	uint64_t        zbs_free;       /* number of free elements (includes cached) */
	uint64_t        zbs_cached;     /* free elements held in per-CPU caches */
	uint64_t        zbs_alloc_fail; /* number of allocation failures */
};
1940 
1941 /*!
1942  * @function zone_get_stats
1943  *
1944  * @abstract
 * Retrieves statistics about a zone, including its per-CPU caches.
1946  *
1947  * @param zone          the zone to collect stats from.
1948  * @param stats         the statistics to fill.
1949  */
1950 extern void zone_get_stats(
1951 	zone_t                  zone,
1952 	struct zone_basic_stats *stats);
1953 
1954 
1955 /*!
1956  * @typedef zone_exhausted_cb_t
1957  *
1958  * @brief
1959  * The callback type for the ZONE_EXHAUSTED event.
1960  */
1961 typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone, bool exhausted);
1962 
1963 /*!
1964  * @brief
 * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits its
1966  * wiring limit.
1967  *
1968  * @discussion
1969  * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
1970  * performing zone expansion and no significant amount of work can be performed
1971  * from this context.
1972  *
1973  * In particular, those callbacks cannot allocate any memory, it is expected
1974  * that they will filter if the zone is of interest, and wake up another thread
1975  * to perform the actual work (for example via thread call).
1976  */
1977 EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
1978 
1979 
1980 #pragma mark XNU only: zone views
1981 
1982 /*!
1983  * @enum zone_kheap_id_t
1984  *
1985  * @brief
1986  * Enumerate a particular kalloc heap.
1987  *
1988  * @discussion
1989  * More documentation about heaps is available in @c <kern/kalloc.h>.
1990  *
1991  * @const KHEAP_ID_NONE
1992  * This value denotes regular zones, not used by kalloc.
1993  *
1994  * @const KHEAP_ID_EARLY
1995  * Indicates zones part of the KHEAP_EARLY heap.
1996  *
1997  * @const KHEAP_ID_DATA_BUFFERS
1998  * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
1999  *
2000  * @const KHEAP_ID_DATA_SHARED
2001  * Indicates zones part of the KHEAP_DATA_SHARED heap.
2002  *
2003  * @const KHEAP_ID_KT_VAR
2004  * Indicates zones part of the KHEAP_KT_VAR heap.
2005  */
__enum_decl(zone_kheap_id_t, uint8_t, {
	KHEAP_ID_NONE,
	KHEAP_ID_EARLY,
	KHEAP_ID_DATA_BUFFERS,
	KHEAP_ID_DATA_SHARED,
	KHEAP_ID_KT_VAR,
/* Number of kalloc heap IDs; keep in sync with the last enumerator above. */
#define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
});
2014 
2015 static inline bool
zone_is_data_kheap(zone_kheap_id_t kheap_id)2016 zone_is_data_kheap(zone_kheap_id_t kheap_id)
2017 {
2018 	return kheap_id == KHEAP_ID_DATA_BUFFERS || kheap_id == KHEAP_ID_DATA_SHARED;
2019 }
2020 
2021 /*!
2022  * @macro ZONE_VIEW_DECLARE
2023  *
2024  * @abstract
2025  * (optionally) declares a zone view (in a header).
2026  *
2027  * @param var           the name for the zone view.
2028  */
2029 #define ZONE_VIEW_DECLARE(var) \
2030 	extern struct zone_view var[1]
2031 
2032 /*!
2033  * @macro ZONE_VIEW_DEFINE
2034  *
2035  * @abstract
2036  * Defines a given zone view and what it points to.
2037  *
2038  * @discussion
2039  * Zone views can either share a pre-existing zone,
2040  * or perform a lookup into a kalloc heap for the zone
2041  * backing the bucket of the proper size.
2042  *
2043  * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
2044  * as the last rank. If views on zones are created, these must have been
2045  * created before this stage.
2046  *
2047  * This macro should not be used to create zone views from default
2048  * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
2049  *
2050  * @param var           the name for the zone view.
2051  * @param name          a string describing the zone view.
2052  * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
2053  * @param size          the element size to be allocated from this view.
2054  */
2055 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
2056 	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
2057 	    .zv_name = (name), \
2058 	} }; \
2059 	static __startup_data struct zone_view_startup_spec \
2060 	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
2061 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_view_startup_init, \
2062 	    &__startup_zone_view_spec_ ## var)
2063 
2064 
2065 #pragma mark XNU only: batched allocations
2066 
2067 /*!
2068  * @typedef zstack_t
2069  *
2070  * @brief
2071  * A stack of allocated elements chained with delta encoding.
2072  *
2073  * @discussion
2074  * Some batch allocation interfaces interact with the data heap
2075  * where leaking kernel pointers is not acceptable. This is why
2076  * element offsets are used instead.
2077  */
2078 typedef struct zstack {
2079 	vm_offset_t     z_head;
2080 	uint32_t        z_count;
2081 } zstack_t;
2082 
2083 /*!
2084  * @function zstack_push
2085  *
2086  * @brief
2087  * Push a given element onto a zstack.
2088  */
2089 extern void zstack_push(
2090 	zstack_t               *stack,
2091 	void                   *elem);
2092 
2093 /*!
2094  * @function zstack_pop
2095  *
2096  * @brief
2097  * Pops an element from a zstack, the caller must check it's not empty.
2098  */
2099 void *zstack_pop(
2100 	zstack_t               *stack);
2101 
/*!
 * @function zstack_count
 *
 * @brief
 * Returns the number of elements on a stack.
 */
static inline uint32_t
zstack_count(zstack_t stack)
{
	/* z_count tracks how many elements are chained from z_head. */
	return stack.z_count;
}
2113 
2114 /*!
2115  * @function zstack_empty
2116  *
2117  * @brief
2118  * Returns whether a stack is empty.
2119  */
2120 static inline bool
zstack_empty(zstack_t stack)2121 zstack_empty(zstack_t stack)
2122 {
2123 	return zstack_count(stack) == 0;
2124 }
2125 
2126 static inline zstack_t
zstack_load_and_erase(zstack_t * stackp)2127 zstack_load_and_erase(zstack_t *stackp)
2128 {
2129 	zstack_t stack = *stackp;
2130 
2131 	*stackp = (zstack_t){ };
2132 	return stack;
2133 }
2134 
2135 /*!
2136  * @function zfree_nozero
2137  *
2138  * @abstract
2139  * Frees an element allocated with @c zalloc*, without zeroing it.
2140  *
2141  * @discussion
2142  * This is for the sake of networking only, no one else should use this.
2143  *
2144  * @param zone_id       the zone id to free the element to.
2145  * @param elem          the element to free
2146  */
2147 extern void zfree_nozero(
2148 	zone_id_t               zone_id,
2149 	void                   *elem __unsafe_indexable);
2150 #define zfree_nozero(zone_id, elem) ({ \
2151 	zone_id_t __zfree_zid = (zone_id); \
2152 	(zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
2153 })
2154 
2155 /*!
2156  * @function zalloc_n
2157  *
2158  * @abstract
2159  * Allocates a batch of elements from the specified zone.
2160  *
2161  * @discussion
2162  * This is for the sake of networking only, no one else should use this.
2163  *
2164  * @param zone_id       the zone id to allocate the element from.
2165  * @param count         how many elements to allocate (less might be returned)
 * @param flags         a set of @c zalloc_flags_t flags.
2167  */
2168 extern zstack_t zalloc_n(
2169 	zone_id_t               zone_id,
2170 	uint32_t                count,
2171 	zalloc_flags_t          flags);
2172 
2173 /*!
2174  * @function zfree_n
2175  *
2176  * @abstract
2177  * Batched variant of zfree(): frees a stack of elements.
2178  *
2179  * @param zone_id       the zone id to free the element to.
2180  * @param stack         a stack of elements to free.
2181  */
2182 extern void zfree_n(
2183 	zone_id_t               zone_id,
2184 	zstack_t                stack);
2185 #define zfree_n(zone_id, stack) ({ \
2186 	zone_id_t __zfree_zid = (zone_id); \
2187 	(zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2188 })
2189 
2190 /*!
2191  * @function zfree_nozero_n
2192  *
2193  * @abstract
2194  * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
2195  * them.
2196  *
2197  * @discussion
2198  * This is for the sake of networking only, no one else should use this.
2199  *
2200  * @param zone_id       the zone id to free the element to.
2201  * @param stack         a stack of elements to free.
2202  */
2203 extern void zfree_nozero_n(
2204 	zone_id_t               zone_id,
2205 	zstack_t                stack);
2206 #define zfree_nozero_n(zone_id, stack) ({ \
2207 	zone_id_t __zfree_zid = (zone_id); \
2208 	(zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2209 })
2210 
2211 #pragma mark XNU only: cached objects
2212 
2213 /*!
2214  * @typedef zone_cache_ops_t
2215  *
2216  * @brief
2217  * A set of callbacks used for a zcache (cache of composite objects).
2218  *
2219  * @field zc_op_alloc
2220  * The callback to "allocate" a cached object from scratch.
2221  *
2222  * @field zc_op_mark_valid
2223  * The callback that is called when a cached object is being reused,
2224  * will typically call @c zcache_mark_valid() on the various
2225  * sub-pieces of the composite cached object.
2226  *
2227  * @field zc_op_mark_invalid
2228  * The callback that is called when a composite object is being freed
2229  * to the cache. This will typically call @c zcache_mark_invalid()
2230  * on the various sub-pieces of the composite object.
2231  *
2232  * @field zc_op_free
2233  * The callback to "free" a composite object completely.
2234  */
typedef const struct zone_cache_ops {
	void         *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);        /* allocate a fresh object */
	void         *(*zc_op_mark_valid)(zone_id_t, void *);           /* object leaves the cache */
	void         *(*zc_op_mark_invalid)(zone_id_t, void *);         /* object returns to the cache */
	void          (*zc_op_free)(zone_id_t, void *);                 /* free an object completely */
} *zone_cache_ops_t;
2241 
#if __has_ptrcheck
/*
 * Transposes the -fbounds-safety bounds of @c pointer_with_bounds onto
 * @c unsafe_pointer: the returned pointer addresses @c unsafe_pointer's
 * storage but carries the same lower/upper bound distances as the
 * bounded pointer.
 */
static inline char *__bidi_indexable
zcache_transpose_bounds(
	char *__bidi_indexable pointer_with_bounds,
	char *__unsafe_indexable unsafe_pointer)
{
	/* Distance from the lower bound up to the pointer itself. */
	vm_offset_t offset_from_start = pointer_with_bounds - __ptr_lower_bound(pointer_with_bounds);
	/* Distance from the pointer up to the upper bound. */
	vm_offset_t offset_to_end = __ptr_upper_bound(pointer_with_bounds) - pointer_with_bounds;
	vm_offset_t size = offset_from_start + offset_to_end;
	/* Forge identically-sized bounds around the unsafe pointer's storage. */
	return __unsafe_forge_bidi_indexable(char *, unsafe_pointer - offset_from_start, size)
	       + offset_from_start;
}
#else
/* Without pointer checking there are no bounds to transpose. */
static inline char *__header_indexable
zcache_transpose_bounds(
	char *__header_indexable pointer_with_bounds __unused,
	char *__unsafe_indexable unsafe_pointer)
{
	return unsafe_pointer;
}
#endif // __has_ptrcheck
2263 
2264 /*!
2265  * @function zcache_mark_valid()
2266  *
2267  * @brief
2268  * Mark an element as "valid".
2269  *
2270  * @description
2271  * This function is used to be able to integrate with KASAN or PGZ
2272  * for a cache of composite objects. It typically is a function
2273  * called in their @c zc_op_mark_valid() callback.
2274  *
2275  * If PGZ or KASAN isn't in use, then this callback is a no-op.
2276  * Otherwise the @c elem address might be updated.
2277  *
2278  * @param zone          the zone the element belongs to.
2279  * @param elem          the address of the element
2280  * @returns             the new address to correctly access @c elem.
2281  */
2282 extern void *__unsafe_indexable zcache_mark_valid(
2283 	zone_t                  zone,
2284 	void                    *elem __unsafe_indexable);
2285 
/* Variant of zcache_mark_valid() returning a __single-qualified pointer. */
static inline void *
zcache_mark_valid_single(
	zone_t                  zone,
	void                    *elem)
{
	return __unsafe_forge_single(void *, zcache_mark_valid(zone, elem));
}
2293 
/*
 * Variant of zcache_mark_valid() that preserves the bounds of @c elem
 * on the (possibly translated) returned address.
 */
static inline void *__header_bidi_indexable
zcache_mark_valid_indexable(
	zone_t                  zone,
	void                    *elem __header_bidi_indexable)
{
	return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_valid(zone, elem));
}
2301 
2302 /*!
2303  * @function zcache_mark_invalid()
2304  *
2305  * @brief
2306  * Mark an element as "invalid".
2307  *
2308  * @description
2309  * This function is used to be able to integrate with KASAN or PGZ
2310  * for a cache of composite objects. It typically is a function
2311  * called in their @c zc_op_mark_invalid() callback.
2312  *
2313  * This function performs validation that @c elem belongs
2314  * to the right zone and is properly "aligned", and should
2315  * never be elided under any configuration.
2316  *
2317  * @param zone          the zone the element belongs to.
2318  * @param elem          the address of the element
2319  * @returns             the new address to correctly access @c elem.
2320  */
2321 extern void *__unsafe_indexable zcache_mark_invalid(
2322 	zone_t                  zone,
2323 	void                    *elem __unsafe_indexable);
2324 
/* Variant of zcache_mark_invalid() returning a __single-qualified pointer. */
static inline void *
zcache_mark_invalid_single(
	zone_t                  zone,
	void                    *elem)
{
	return __unsafe_forge_single(void *, zcache_mark_invalid(zone, elem));
}
2332 
/*
 * Variant of zcache_mark_invalid() that preserves the bounds of @c elem
 * on the (possibly translated) returned address.
 */
static inline void *__header_bidi_indexable
zcache_mark_invalid_indexable(
	zone_t                  zone,
	void                    *elem __header_bidi_indexable)
{
	return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_invalid(zone, elem));
}
2340 
2341 /*!
2342  * @macro zcache_alloc()
2343  *
2344  * @abstract
2345  * Allocates a composite object from a cache.
2346  *
2347  * @param zone_id       The proper @c ZONE_ID_* constant.
 * @param fl            a collection of @c zalloc_flags_t.
2349  *
2350  * @returns             NULL or the allocated element
2351  */
2352 #define zcache_alloc(zone_id, fl) \
2353 	__zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2354 
2355 /*!
2356  * @function zcache_alloc_n()
2357  *
2358  * @abstract
2359  * Allocates a stack of composite objects from a cache.
2360  *
2361  * @param zone_id       The proper @c ZONE_ID_* constant.
2362  * @param count         how many elements to allocate (less might be returned)
 * @param flags         a set of @c zalloc_flags_t flags.
2364  *
 * @returns             a stack of up to @c count allocated composite objects
 *                      (possibly empty on failure).
2366  */
2367 extern zstack_t zcache_alloc_n(
2368 	zone_id_t               zone_id,
2369 	uint32_t                count,
2370 	zalloc_flags_t          flags,
2371 	zone_cache_ops_t        ops);
2372 #define zcache_alloc_n(zone_id, count, flags) \
2373 	(zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2374 
2375 
2376 
2377 /*!
2378  * @function zcache_free()
2379  *
2380  * @abstract
2381  * Frees a composite object previously allocated
2382  * with @c zcache_alloc() or @c zcache_alloc_n().
2383  *
2384  * @param zone_id       the zcache id to free the object to.
2385  * @param addr          the address to free
2386  * @param ops           the pointer to the zcache ops for this zcache.
2387  */
2388 extern void zcache_free(
2389 	zone_id_t               zone_id,
2390 	void                   *addr __unsafe_indexable,
2391 	zone_cache_ops_t        ops);
2392 #define zcache_free(zone_id, elem) \
2393 	(zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
2394 	    __zcache_##zone_id##_ops)
2395 
2396 /*!
2397  * @function zcache_free_n()
2398  *
2399  * @abstract
2400  * Frees a stack of composite objects previously allocated
2401  * with @c zcache_alloc() or @c zcache_alloc_n().
2402  *
2403  * @param zone_id       the zcache id to free the objects to.
2404  * @param stack         a stack of composite objects
2405  * @param ops           the pointer to the zcache ops for this zcache.
2406  */
2407 extern void zcache_free_n(
2408 	zone_id_t               zone_id,
2409 	zstack_t                stack,
2410 	zone_cache_ops_t        ops);
2411 #define zcache_free_n(zone_id, stack) \
2412 	(zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
2413 	    __zcache_##zone_id##_ops)
2414 
2415 
2416 /*!
2417  * @function zcache_drain()
2418  *
2419  * @abstract
2420  * Forces a zcache to be drained (have all its data structures freed
2421  * back to the original zones).
2422  *
2423  * @param zone_id       the zcache id to free the objects to.
2424  */
2425 extern void zcache_drain(
2426 	zone_id_t               zone_id);
2427 
2428 
2429 /*!
2430  * @macro ZCACHE_DECLARE
2431  *
2432  * @abstract
2433  * Declares the type associated with a zone cache ID.
2434  *
2435  * @param id            the name of zone ID to associate a type with.
2436  * @param type_t        the type of elements in the zone.
2437  */
2438 #define ZCACHE_DECLARE(id, type_t) \
2439 	__ZONE_DECLARE_TYPE(id, type_t); \
2440 	__attribute__((visibility("hidden"))) \
2441 	extern const zone_cache_ops_t __zcache_##id##_ops
2442 
2443 
2444 /*!
2445  * @macro ZCACHE_DEFINE
2446  *
2447  * @abstract
2448  * Defines a zone cache for a given ID and type.
2449  *
2450  * @param zone_id       the name of zone ID to associate a type with.
2451  * @param name          the name for the zone
2452  * @param type_t        the type of elements in the zone.
2453  * @param size          the size of elements in the cache
2454  * @param ops           the ops for this zcache.
2455  */
2456 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2457 	ZCACHE_DECLARE(zid, type_t);                                            \
2458 	ZONE_DECLARE_ID(zid, type_t);                                           \
2459 	const zone_cache_ops_t __zcache_##zid##_ops = (ops);                    \
2460 	ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) {   \
2461 	        zcache_ops[zid] = (ops);                                        \
2462 	})
2463 
2464 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2465 
2466 #pragma mark XNU only: PGZ support
2467 
2468 /*!
2469  * @function pgz_owned()
2470  *
2471  * @brief
2472  * Returns whether an address is PGZ owned.
2473  *
2474  * @param addr          The address to translate.
2475  * @returns             Whether it is PGZ owned
2476  */
2477 #if CONFIG_PROB_GZALLOC
2478 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
2479 #else
2480 #define pgz_owned(addr) false
2481 #endif
2482 
2483 /*!
2484  * @function pgz_decode()
2485  *
2486  * @brief
2487  * Translates a PGZ protected virtual address to its unprotected
2488  * backing store.
2489  *
2490  * @discussion
2491  * This is exposed so that the VM can lookup the vm_page_t for PGZ protected
2492  * elements since the PGZ protected virtual addresses are maintained by PGZ
 * at the pmap level without the VM involvement.
2494  *
2495  * "allow_invalid" schemes relying on sequestering also need this
2496  * to perform the locking attempts on the unprotected address.
2497  *
2498  * @param addr          The address to translate.
2499  * @param size          The object size.
2500  * @returns             The unprotected address or @c addr.
2501  */
2502 #if CONFIG_PROB_GZALLOC
2503 #define pgz_decode(addr, size) \
2504 	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
2505 #else
2506 #define pgz_decode(addr, size)  (addr)
2507 #endif
2508 
2509 /*!
2510  * @function pgz_decode_allow_invalid()
2511  *
2512  * @brief
2513  * Translates a PGZ protected virtual address to its unprotected
2514  * backing store, but doesn't assert it is still allocated/valid.
2515  *
2516  * @discussion
2517  * "allow_invalid" schemes relying on sequestering also need this
2518  * to perform the locking attempts on the unprotected address.
2519  *
2520  * @param addr          The address to translate.
2521  * @param want_zid      The expected zone ID for the element.
2522  * @returns             The unprotected address or @c addr.
2523  */
2524 #if CONFIG_PROB_GZALLOC
2525 #define pgz_decode_allow_invalid(addr, want_zid) \
2526 	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
2527 #else
2528 #define pgz_decode_allow_invalid(addr, zid)  (addr)
2529 #endif
2530 
2531 #pragma mark XNU only: misc & implementation details
2532 
/* Arguments recorded at compile time for zone_create_startup(). */
struct zone_create_startup_spec {
	zone_t                 *z_var;     /* where the created zone is stored — see __ZONE_INIT1 */
	const char             *z_name __unsafe_indexable; /* zone name string */
	vm_size_t               z_size;    /* element size */
	zone_create_flags_t     z_flags;   /* zone creation flags */
	zone_id_t               z_zid;     /* requested zone ID */
	void                  (^z_setup)(zone_t); /* optional setup hook block */
};
2541 
2542 extern void     zone_create_startup(
2543 	struct zone_create_startup_spec *spec);
2544 
2545 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2546 	static __startup_data struct zone_create_startup_spec \
2547 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2548 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2549 	    &__startup_zone_spec_ ## ns)
2550 
2551 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2552 	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2553 
2554 #define __zalloc_cast(namespace, expr) \
2555 	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2556 
2557 #if ZALLOC_TYPE_SAFE
2558 #define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
2559 #define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
2560 #endif /* !ZALLOC_TYPE_SAFE */
2561 
/* Arguments recorded at compile time for zone_view_startup_init() — see ZONE_VIEW_DEFINE. */
struct zone_view_startup_spec {
	zone_view_t         zv_view;       /* the view being initialized */
	union {
		zone_kheap_id_t zv_heapid; /* kalloc heap to look the zone up in, or ... */
		zone_t         *zv_zone;   /* ... the pre-existing zone to share */
	};
	vm_size_t           zv_size;       /* element size used for the heap lookup */
};
2570 
2571 extern void zone_view_startup_init(
2572 	struct zone_view_startup_spec *spec);
2573 
2574 extern void zone_userspace_reboot_checks(void);
2575 
2576 #if VM_TAG_SIZECLASSES
2577 extern void __zone_site_register(
2578 	vm_allocation_site_t   *site);
2579 
2580 #define VM_ALLOC_SITE_TAG() ({ \
2581 	__PLACE_IN_SECTION("__DATA, __data")                                   \
2582 	static vm_allocation_site_t site = { .refcount = 2, };                 \
2583 	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, __zone_site_register, &site);   \
2584 	site.tag;                                                              \
2585 })
2586 #else /* VM_TAG_SIZECLASSES */
2587 #define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
2588 #endif /* !VM_TAG_SIZECLASSES */
2589 
2590 static inline zalloc_flags_t
__zone_flags_mix_tag(zalloc_flags_t flags,vm_tag_t tag)2591 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2592 {
2593 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2594 }
2595 
2596 #define __zpcpu_addr(e)         ((vm_address_t)__unsafe_forge_single(void *, e))
2597 #define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(*(ptr)) *, e)
2598 #define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2599 
2600 /**
2601  * @macro __zpcpu_mangle_for_boot()
2602  *
2603  * @discussion
2604  * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2605  * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2606  * storage marked @c __startup_data and replace it with the proper allocation
2607  * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2608  *
2609  * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2610  * provides the proper mangling of the storage into a "fake" percpu pointer so
2611  * that accesses through @c zpercpu_get() functions properly.
2612  *
2613  * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2614  */
2615 #define __zpcpu_mangle_for_boot(ptr)  ({ \
2616 	assert(startup_phase < STARTUP_SUB_ZALLOC); \
2617 	__zpcpu_cast(ptr, __zpcpu_addr(ptr) - ptoa(cpu_number())); \
2618 })
2619 
2620 extern unsigned zpercpu_count(void) __pure2;
2621 
2622 #if CONFIG_PROB_GZALLOC
2623 
2624 extern vm_offset_t __pgz_decode(
2625 	mach_vm_address_t       addr,
2626 	mach_vm_size_t          size);
2627 
2628 extern vm_offset_t __pgz_decode_allow_invalid(
2629 	vm_offset_t             offs,
2630 	zone_id_t               zid);
2631 
2632 #endif
2633 #if DEBUG || DEVELOPMENT
/* zone_max_zones is here (but not zalloc_internal.h) for the BSD kernel */
2635 extern unsigned int zone_max_zones(void);
2636 
2637 extern size_t zone_pages_wired;
2638 extern size_t zone_guard_pages;
2639 #endif /* DEBUG || DEVELOPMENT */
2640 #if CONFIG_ZLEAKS
2641 extern uint32_t                 zleak_active;
2642 extern vm_size_t                zleak_max_zonemap_size;
2643 extern vm_size_t                zleak_per_zone_tracking_threshold;
2644 
2645 extern kern_return_t zleak_update_threshold(
2646 	vm_size_t              *arg,
2647 	uint64_t                value);
2648 #endif /* CONFIG_ZLEAKS */
2649 
2650 extern uint32_t                 zone_map_jetsam_limit;
2651 
2652 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2653 
2654 extern zone_t percpu_u64_zone;
2655 
2656 /*!
2657  * @function mach_memory_info_sample
2658  *
2659  * @abstract
2660  * Helper function for mach_memory_info() (MACH) and memorystatus_collect_jetsam_snapshot_zprint() (BSD)
2661  * to collect wired memory information.
2662  *
2663  * @param names array with `*zonesCnt` elements.
2664  * @param info array with `*zonesCnt` elements.
2665  * @param coalesce array with `*zonesCnt` elements, must be set if `redact_info` is true.
2666  * @param zonesCnt set to the allocated count of the above, and on return will be the actual count.
2667  * @param memoryInfo optional, if set must have at least `vm_page_diagnose_estimate()` elements.
2668  * @param memoryInfoCnt optional, if set must be the count of memoryInfo, otherwise if set to 0 then on return will be `vm_page_diagnose_estimate()`.
2669  * @param redact_info if true sensitive information about zone allocations will be removed.
2670  */
2671 extern kern_return_t
2672 mach_memory_info_sample(
2673 	mach_zone_name_t *names,
2674 	mach_zone_info_t *info,
2675 	int              *coalesce,
2676 	unsigned int     *zonesCnt,
2677 	mach_memory_info_t *memoryInfo,
2678 	unsigned int       memoryInfoCnt,
2679 	bool               redact_info);
2680 
2681 extern void     zone_gc_trim(void);
2682 extern void     zone_gc_drain(void);
2683 
2684 #pragma GCC visibility pop
2685 #endif /* XNU_KERNEL_PRIVATE */
2686 
2687 /*
2688  * This macro is currently used by AppleImage4 (rdar://83924635)
2689  */
2690 #define __zalloc_ptr_load_and_erase(elem) \
2691 	os_ptr_load_and_erase(elem)
2692 
2693 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2694 
2695 #endif  /* _KERN_ZALLOC_H_ */
2696 
2697 #endif  /* KERNEL_PRIVATE */
2698