xref: /xnu-8796.101.5/osfmk/kern/zalloc.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	zalloc.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	 1985
62  *
63  */
64 
65 #ifdef  KERNEL_PRIVATE
66 
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69 
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76 
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80 
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg)       __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86 
87 /*
88  * Enable this macro to force type safe zalloc/zalloc_ro/...
89  */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97 
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99 
100 /*!
101  * @macro __zpercpu
102  *
103  * @abstract
104  * Annotation that helps denoting a per-cpu pointer that requires usage of
105  * @c zpercpu_*() for access.
106  */
107 #define __zpercpu __unsafe_indexable
108 
109 /*!
110  * @typedef zone_id_t
111  *
112  * @abstract
113  * The type for a zone ID.
114  */
115 typedef uint16_t zone_id_t;
116 
117 /**
118  * @enum zone_create_flags_t
119  *
120  * @abstract
121  * Set of flags to pass to zone_create().
122  *
123  * @discussion
124  * Some kernel-wide policies affect all possible created zones.
125  * Explicit @c ZC_* win over such policies.
126  */
127 __options_decl(zone_create_flags_t, uint64_t, {
128 	/** The default value to pass to zone_create() */
129 	ZC_NONE                 = 0x00000000,
130 
131 	/** (obsolete) */
132 	ZC_SEQUESTER            = 0x00000001,
133 	/** (obsolete) */
134 	ZC_NOSEQUESTER          = 0x00000002,
135 
136 	/** Enable per-CPU zone caching for this zone */
137 	ZC_CACHING              = 0x00000010,
138 	/** Disable per-CPU zone caching for this zone */
139 	ZC_NOCACHING            = 0x00000020,
140 
141 	/** Allocate zone pages as Read-only **/
142 	ZC_READONLY             = 0x00800000,
143 
144 	/** Mark zone as a per-cpu zone */
145 	ZC_PERCPU               = 0x01000000,
146 
147 	/** Force the created zone to clear every allocation on free */
148 	ZC_ZFREE_CLEARMEM       = 0x02000000,
149 
150 	/** Mark zone as non collectable by zone_gc() */
151 	ZC_NOGC                 = 0x04000000,
152 
153 	/** Do not encrypt this zone during hibernation */
154 	ZC_NOENCRYPT            = 0x08000000,
155 
156 	/** Type requires alignment to be preserved */
157 	ZC_ALIGNMENT_REQUIRED   = 0x10000000,
158 
159 	/** Obsolete */
160 	ZC_NOGZALLOC            = 0x20000000,
161 
162 	/** Don't asynchronously replenish the zone via callouts */
163 	ZC_NOCALLOUT            = 0x40000000,
164 
165 	/** Can be zdestroy()ed, not default unlike zinit() */
166 	ZC_DESTRUCTIBLE         = 0x80000000,
167 
168 #ifdef XNU_KERNEL_PRIVATE
169 	/** This zone is a built object cache */
170 	ZC_OBJ_CACHE            = 0x0080000000000000,
171 
172 	/** Use guard pages in PGZ mode */
173 	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,
174 
175 	/** Zone doesn't support TBI tagging */
176 	ZC_NOTBITAG             = 0x0200000000000000,
177 
178 	/** This zone will back a kalloc type */
179 	ZC_KALLOC_TYPE          = 0x0400000000000000,
180 
181 	/** Disable PGZ for this zone */
182 	ZC_NOPGZ                = 0x0800000000000000,
183 
184 	/** This zone contains pure data */
185 	ZC_DATA                 = 0x1000000000000000,
186 
187 	/** This zone belongs to the VM submap */
188 	ZC_VM                   = 0x2000000000000000,
189 
190 	/** Disable kasan quarantine for this zone */
191 	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,
192 
193 	/** Disable kasan redzones for this zone */
194 	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
195 #endif /* XNU_KERNEL_PRIVATE */
196 });
197 
/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit both a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct kalloc_type_view    *zov_kt_heap;
	struct zone_view           *zov_view;
	struct zone                *zov_zone;
#ifdef __cplusplus
	/* C++ gets implicit conversions via converting constructors. */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
/* In C, transparent_union lets callers pass any member type directly. */
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
226 
227 /*!
228  * @enum zone_create_ro_id_t
229  *
230  * @abstract
231  * Zone creation IDs for external read only zones
232  *
233  * @discussion
234  * Kexts that desire to use the RO allocator should:
235  * 1. Add a zone creation id below
236  * 2. Add a corresponding ID to @c zone_reserved_id_t
237  * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
238  * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
239  * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
240  */
241 __enum_decl(zone_create_ro_id_t, zone_id_t, {
242 	ZC_RO_ID_SANDBOX,
243 	ZC_RO_ID_PROFILE,
244 	ZC_RO_ID_PROTOBOX,
245 	ZC_RO_ID_SB_FILTER,
246 	ZC_RO_ID_AMFI_OSENTITLEMENTS,
247 	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
248 });
249 
250 /*!
251  * @function zone_create
252  *
253  * @abstract
254  * Creates a zone with the specified parameters.
255  *
256  * @discussion
257  * A Zone is a slab allocator that returns objects of a given size very quickly.
258  *
259  * @param name          the name for the new zone.
260  * @param size          the size of the elements returned by this zone.
261  * @param flags         a set of @c zone_create_flags_t flags.
262  *
263  * @returns             the created zone, this call never fails.
264  */
265 extern zone_t   zone_create(
266 	const char             *name __unsafe_indexable,
267 	vm_size_t               size,
268 	zone_create_flags_t     flags);
269 
270 /*!
271  * @function zone_create_ro
272  *
273  * @abstract
274  * Creates a read only zone with the specified parameters from kexts
275  *
276  * @discussion
277  * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
278  * kexts. Do not use this API to create read only zones in xnu.
279  *
280  * @param name          the name for the new zone.
281  * @param size          the size of the elements returned by this zone.
282  * @param flags         a set of @c zone_create_flags_t flags.
283  * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
284  *
285  * @returns             the zone ID of the created zone, this call never fails.
286  */
287 extern zone_id_t   zone_create_ro(
288 	const char             *name __unsafe_indexable,
289 	vm_size_t               size,
290 	zone_create_flags_t     flags,
291 	zone_create_ro_id_t     zc_ro_id);
292 
293 /*!
294  * @function zdestroy
295  *
296  * @abstract
297  * Destroys a zone previously made with zone_create.
298  *
299  * @discussion
300  * Zones must have been made destructible for @c zdestroy() to be allowed,
301  * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
302  *
303  * @param zone          the zone to destroy.
304  */
305 extern void     zdestroy(
306 	zone_t          zone);
307 
308 /*!
309  * @function zone_require
310  *
311  * @abstract
312  * Requires for a given pointer to belong to the specified zone.
313  *
314  * @discussion
315  * The function panics if the check fails as it indicates that the kernel
316  * internals have been compromised.
317  *
318  * @param zone          the zone the address needs to belong to.
319  * @param addr          the element address to check.
320  */
321 extern void     zone_require(
322 	zone_t          zone,
323 	void           *addr __unsafe_indexable);
324 
325 /*!
326  * @function zone_require_ro
327  *
328  * @abstract
329  * Version of zone require intended for zones created with ZC_READONLY
330  *
331  * @discussion
332  * This check is not sufficient to fully trust the element.
333  *
334  * Another check of its content must be performed to prove
335  * that the element is "the right one", a typical technique
336  * for when the RO data structure is 1:1 with a mutable one,
337  * is a simple circularity check with a very strict lifetime
338  * (both the mutable and read-only data structures are made
339  * and destroyed as close as possible).
340  *
341  * @param zone_id       the zone id the address needs to belong to.
342  * @param elem_size     the element size for this zone.
343  * @param addr          the element address to check.
344  */
345 extern void     zone_require_ro(
346 	zone_id_t       zone_id,
347 	vm_size_t       elem_size,
348 	void           *addr __unsafe_indexable);
349 
350 /*!
351  * @enum zalloc_flags_t
352  *
353  * @brief
354  * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
355  *
356  * @discussion
357  * It is encouraged that any callsite passing flags uses exactly one of:
358  * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
359  * if nothing else was specified.
360  *
361  * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
362  * then @c Z_WAITOK is ignored.
363  *
364  * @const Z_WAITOK
365  * Means that it's OK for zalloc() to block to wait for memory,
366  * when Z_WAITOK is passed, zalloc will never return NULL.
367  *
368  * @const Z_NOWAIT
369  * Passing this flag means that zalloc is not allowed to ever block.
370  *
371  * @const Z_NOPAGEWAIT
372  * Passing this flag means that zalloc is allowed to wait due to lock
373  * contention, but will not wait for the VM to wait for pages when
374  * under memory pressure.
375  *
376  * @const Z_ZERO
377  * Passing this flags means that the returned memory has been zeroed out.
378  *
379  * @const Z_NOFAIL
380  * Passing this flag means that the caller expects the allocation to always
381  * succeed. This will result in a panic if this assumption isn't correct.
382  *
383  * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
384  * be used on exhaustible zones.
385  *
386  * @const Z_REALLOCF
387  * For the realloc family of functions,
388  * free the incoming memory on failure cases.
389  *
390  #if XNU_KERNEL_PRIVATE
391  * @const Z_SPRAYQTN
392  * This flag tells the VM to allocate from the "spray quarantine" range when
393  * it services the allocation. For more details on what allocations qualify
394  * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
395  *
396  * @const Z_KALLOC_ARRAY
397  * Instead of returning a standard "pointer" return a pointer that encodes
398  * its size-class into the pointer itself (Only for kalloc, might limit
399  * the range of allocations that can be done).
400  *
401  * @const Z_FULLSIZE
402  * Used to indicate that the caller will use all available space in excess
403  * from the requested allocation size.
404  *
405  * @const Z_SKIP_KASAN
406  * Tell zalloc() not to do any kasan adjustments.
407  *
408  * @const Z_MAY_COPYINMAP
409  * This data allocation might be used with vm_map_copyin().
410  * This allows for those allocations to be associated with a proper VM object.
411  *
412  * @const Z_VM_TAG_BT_BIT
413  * Used to blame allocation accounting on the first kext
414  * found in the backtrace of the allocation.
415  *
416  * @const Z_NOZZC
417  * Used internally to mark allocations that will skip zero validation.
418  *
419  * @const Z_PCPU
420  * Used internally for the percpu paths.
421  *
422  * @const Z_VM_TAG_MASK
423  * Represents bits in which a vm_tag_t for the allocation can be passed.
424  * (used by kalloc for the zone tagging debugging feature).
425  #endif
426  */
427 __options_decl(zalloc_flags_t, uint32_t, {
428 	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
429 	Z_WAITOK        = 0x0000,
430 	Z_NOWAIT        = 0x0001,
431 	Z_NOPAGEWAIT    = 0x0002,
432 	Z_ZERO          = 0x0004,
433 	Z_REALLOCF      = 0x0008,
434 
435 #if XNU_KERNEL_PRIVATE
436 	Z_SPRAYQTN      = 0x0080,
437 	Z_KALLOC_ARRAY  = 0x0100,
438 #if KASAN_CLASSIC
439 	Z_FULLSIZE      = 0x0000,
440 #else
441 	Z_FULLSIZE      = 0x0200,
442 #endif
443 #if KASAN
444 	Z_SKIP_KASAN    = 0x0400,
445 #else
446 	Z_SKIP_KASAN    = 0x0000,
447 #endif
448 	Z_MAY_COPYINMAP = 0x0800,
449 	Z_VM_TAG_BT_BIT = 0x1000,
450 	Z_PCPU          = 0x2000,
451 	Z_NOZZC         = 0x4000,
452 #endif /* XNU_KERNEL_PRIVATE */
453 	Z_NOFAIL        = 0x8000,
454 
455 	/* convenient c++ spellings */
456 	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
457 	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
458 	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL,
459 #if XNU_KERNEL_PRIVATE
460 	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
461 #endif
462 
463 	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
464 #if XNU_KERNEL_PRIVATE
465 	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
466 	/** used by kalloc to propagate vm tags for -zt */
467 	Z_VM_TAG_MASK   = 0xffff0000,
468 
469 #define Z_VM_TAG_SHIFT        16
470 #define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
471 #define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
472 #endif
473 });
474 
475 /*
476  * This type is used so that kalloc_internal has good calling conventions
477  * for callers who want to cheaply both know the allocated address
478  * and the actual size of the allocation.
479  */
480 struct kalloc_result {
481 	void         *addr __sized_by(size);
482 	vm_size_t     size;
483 };
484 
485 /*!
486  * @function zalloc
487  *
488  * @abstract
489  * Allocates an element from a specified zone.
490  *
491  * @discussion
492  * If the zone isn't exhaustible and is expandable, this call never fails.
493  *
494  * @param zone_or_view  the zone or zone view to allocate from
495  *
496  * @returns             NULL or the allocated element
497  */
498 __attribute__((malloc))
499 extern void *__unsafe_indexable zalloc(
500 	zone_or_view_t  zone_or_view);
501 
502 /*!
503  * @function zalloc_noblock
504  *
505  * @abstract
506  * Allocates an element from a specified zone, but never blocks.
507  *
508  * @discussion
509  * This call is suitable for preemptible code, however allocation
510  * isn't allowed from interrupt context.
511  *
512  * @param zone_or_view  the zone or zone view to allocate from
513  *
514  * @returns             NULL or the allocated element
515  */
516 __attribute__((malloc))
517 extern void *__unsafe_indexable zalloc_noblock(
518 	zone_or_view_t  zone_or_view);
519 
520 /*!
521  * @function zalloc_flags()
522  *
523  * @abstract
524  * Allocates an element from a specified zone, with flags.
525  *
526  * @param zone_or_view  the zone or zone view to allocate from
527  * @param flags         a collection of @c zalloc_flags_t.
528  *
529  * @returns             NULL or the allocated element
530  */
531 __attribute__((malloc))
532 extern void *__unsafe_indexable zalloc_flags(
533 	zone_or_view_t  zone_or_view,
534 	zalloc_flags_t  flags);
535 
536 __attribute__((malloc))
537 static inline void *__unsafe_indexable
__zalloc_flags(zone_or_view_t zone_or_view,zalloc_flags_t flags)538 __zalloc_flags(
539 	zone_or_view_t  zone_or_view,
540 	zalloc_flags_t  flags)
541 {
542 	void *__unsafe_indexable addr = (zalloc_flags)(zone_or_view, flags);
543 	if (flags & Z_NOFAIL) {
544 		__builtin_assume(addr != NULL);
545 	}
546 	return addr;
547 }
548 
549 #if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
550 #define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
551 #else
552 #define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
553 #endif
554 
555 /*!
556  * @macro zalloc_id
557  *
558  * @abstract
559  * Allocates an element from a specified zone ID, with flags.
560  *
561  * @param zid           The proper @c ZONE_ID_* constant.
562  * @param flags         a collection of @c zalloc_flags_t.
563  *
564  * @returns             NULL or the allocated element
565  */
566 __attribute__((malloc))
567 extern void *__unsafe_indexable zalloc_id(
568 	zone_id_t       zid,
569 	zalloc_flags_t  flags);
570 
571 __attribute__((malloc))
572 static inline void *__unsafe_indexable
__zalloc_id(zone_id_t zid,zalloc_flags_t flags)573 __zalloc_id(
574 	zone_id_t       zid,
575 	zalloc_flags_t  flags)
576 {
577 	void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
578 	if (flags & Z_NOFAIL) {
579 		__builtin_assume(addr != NULL);
580 	}
581 	return addr;
582 }
583 
584 #if XNU_KERNEL_PRIVATE
585 #define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
586 #else
587 #define zalloc_id(zid, fl) __zalloc_id(zid, fl)
588 #endif
589 
590 /*!
591  * @function zalloc_ro
592  *
593  * @abstract
594  * Allocates an element from a specified read-only zone.
595  *
596  * @param zone_id       the zone id to allocate from
597  * @param flags         a collection of @c zalloc_flags_t.
598  *
599  * @returns             NULL or the allocated element
600  */
601 __attribute__((malloc))
602 extern void *__unsafe_indexable zalloc_ro(
603 	zone_id_t       zone_id,
604 	zalloc_flags_t  flags);
605 
606 __attribute__((malloc))
607 static inline void *__unsafe_indexable
__zalloc_ro(zone_id_t zone_id,zalloc_flags_t flags)608 __zalloc_ro(
609 	zone_id_t       zone_id,
610 	zalloc_flags_t  flags)
611 {
612 	void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
613 	if (flags & Z_NOFAIL) {
614 		__builtin_assume(addr != NULL);
615 	}
616 	return addr;
617 }
618 
619 #if XNU_KERNEL_PRIVATE
620 #define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
621 #else
622 #define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
623 #endif
624 
625 /*!
626  * @function zalloc_ro_mut
627  *
628  * @abstract
629  * Modifies an element from a specified read-only zone.
630  *
631  * @discussion
632  * Modifying compiler-assisted authenticated pointers using this function will
633  * not result in a signed pointer being written.  The caller is expected to
634  * sign the value appropriately beforehand if they wish to do this.
635  *
636  * @param zone_id       the zone id to allocate from
637  * @param elem          element to be modified
638  * @param offset        offset from element
639  * @param new_data      pointer to new data
640  * @param new_data_size size of modification
641  *
642  */
643 extern void zalloc_ro_mut(
644 	zone_id_t       zone_id,
645 	void           *elem __unsafe_indexable,
646 	vm_offset_t     offset,
647 	const void     *new_data __sized_by(new_data_size),
648 	vm_size_t       new_data_size);
649 
650 /*!
651  * @function zalloc_ro_update_elem
652  *
653  * @abstract
654  * Update the value of an entire element allocated in the read only allocator.
655  *
656  * @param zone_id       the zone id to allocate from
657  * @param elem          element to be modified
658  * @param new_data      pointer to new data
659  *
660  */
661 #define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
662 	const typeof(*(elem)) *__new_data = (new_data);                        \
663 	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
664 })
665 
666 /*!
667  * @function zalloc_ro_update_field
668  *
669  * @abstract
670  * Update a single field of an element allocated in the read only allocator.
671  *
672  * @param zone_id       the zone id to allocate from
673  * @param elem          element to be modified
674  * @param field         the element field to be modified
675  * @param new_data      pointer to new data
676  *
677  */
678 #define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
679 	const typeof((elem)->field) *__value = (value);                        \
680 	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
681 	    __value, sizeof((elem)->field));                                   \
682 })
683 
684 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
685 
686 /*!
687  * @enum zro_atomic_op_t
688  *
689  * @brief
690  * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
691  * atomic operations.
692  *
693  * @discussion
694  * This enum provides all flavors of atomic operations supported in sizes 8,
695  * 16, 32, 64 bits.
696  *
697  * @const ZRO_ATOMIC_OR_*
698  * To perform an @s os_atomic_or
699  *
700  * @const ZRO_ATOMIC_XOR_*
701  * To perform an @s os_atomic_xor
702  *
703  * @const ZRO_ATOMIC_AND_*
704  * To perform an @s os_atomic_and
705  *
706  * @const ZRO_ATOMIC_ADD_*
707  * To perform an @s os_atomic_add
708  *
709  * @const ZRO_ATOMIC_XCHG_*
710  * To perform an @s os_atomic_xchg
711  *
712  */
713 __enum_decl(zro_atomic_op_t, uint32_t, {
714 	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
715 	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
716 	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
717 	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,
718 
719 	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
720 	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
721 	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
722 	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,
723 
724 	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
725 	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
726 	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
727 	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,
728 
729 	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
730 	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
731 	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
732 	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,
733 
734 	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
735 	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
736 	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
737 	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,
738 
739 	/* cconvenient spellings */
740 	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
741 	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
742 	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
743 	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
744 	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
745 });
746 
747 /*!
748  * @function zalloc_ro_mut_atomic
749  *
750  * @abstract
751  * Atomically update an offset in an element allocated in the read only
752  * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
753  *
754  * @param zone_id       the zone id to allocate from
755  * @param elem          element to be modified
756  * @param offset        offset in the element to be modified
757  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
758  * @param value         value for the atomic operation
759  *
760  */
761 extern uint64_t zalloc_ro_mut_atomic(
762 	zone_id_t       zone_id,
763 	void           *elem __unsafe_indexable,
764 	vm_offset_t     offset,
765 	zro_atomic_op_t op,
766 	uint64_t        value);
767 
768 /*!
769  * @macro zalloc_ro_update_field_atomic
770  *
771  * @abstract
772  * Atomically update a single field of an element allocated in the read only
773  * allocator.
774  *
775  * @param zone_id       the zone id to allocate from
776  * @param elem          element to be modified
777  * @param field         the element field to be modified
778  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
779  * @param value         value for the atomic operation
780  *
781  */
782 #define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
783 	const typeof((elem)->field) __value = (value);                         \
784 	static_assert(sizeof(__value) == (op & 0xf));                          \
785 	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
786 	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
787 })
788 
789 /*!
790  * @function zalloc_ro_clear
791  *
792  * @abstract
793  * Zeroes an element from a specified read-only zone.
794  *
795  * @param zone_id       the zone id to allocate from
796  * @param elem          element to be modified
797  * @param offset        offset from element
798  * @param size          size of modification
799  */
800 extern void    zalloc_ro_clear(
801 	zone_id_t       zone_id,
802 	void           *elem __unsafe_indexable,
803 	vm_offset_t     offset,
804 	vm_size_t       size);
805 
806 /*!
807  * @function zalloc_ro_clear_field
808  *
809  * @abstract
810  * Zeroes the specified field of an element from a specified read-only zone.
811  *
812  * @param zone_id       the zone id to allocate from
813  * @param elem          element to be modified
814  * @param field         offset from element
815  */
816 #define zalloc_ro_clear_field(zone_id, elem, field) \
817 	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
818 	    sizeof((elem)->field))
819 
820 /*!
821  * @function zfree_id()
822  *
823  * @abstract
824  * Frees an element previously allocated with @c zalloc_id().
825  *
826  * @param zone_id       the zone id to free the element to.
827  * @param addr          the address to free
828  */
829 extern void     zfree_id(
830 	zone_id_t       zone_id,
831 	void           *addr __unsafe_indexable);
832 #define zfree_id(zid, elem) ({ \
833 	zone_id_t __zfree_zid = (zid); \
834 	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
835 })
836 
837 
838 /*!
839  * @function zfree_ro()
840  *
841  * @abstract
842  * Frees an element previously allocated with @c zalloc_ro().
843  *
844  * @param zone_id       the zone id to free the element to.
845  * @param addr          the address to free
846  */
847 extern void     zfree_ro(
848 	zone_id_t       zone_id,
849 	void           *addr __unsafe_indexable);
850 #define zfree_ro(zid, elem) ({ \
851 	zone_id_t __zfree_zid = (zid); \
852 	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
853 })
854 
855 
856 /*!
857  * @function zfree
858  *
859  * @abstract
860  * Frees an element allocated with @c zalloc*.
861  *
862  * @discussion
863  * If the element being freed doesn't belong to the specified zone,
864  * then this call will panic.
865  *
866  * @param zone_or_view  the zone or zone view to free the element to.
867  * @param elem          the element to free
868  */
869 extern void     zfree(
870 	zone_or_view_t  zone_or_view,
871 	void            *elem __unsafe_indexable);
872 #define zfree(zone, elem) ({ \
873 	__auto_type __zfree_zone = (zone); \
874 	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
875 })
876 
877 
878 /* deprecated KPIS */
879 
880 __zalloc_deprecated("use zone_create()")
881 extern zone_t   zinit(
882 	vm_size_t       size,           /* the size of an element */
883 	vm_size_t       maxmem,         /* maximum memory to use */
884 	vm_size_t       alloc,          /* allocation size */
885 	const char      *name __unsafe_indexable);
886 
887 
888 #pragma mark: zone views
889 
890 /*!
891  * @typedef zone_stats_t
892  *
893  * @abstract
894  * The opaque type for per-cpu zone stats that are accumulated per zone
895  * or per zone-view.
896  */
897 typedef struct zone_stats *__zpercpu zone_stats_t;
898 
899 /*!
900  * @typedef zone_view_t
901  *
902  * @abstract
903  * A view on a zone for accounting purposes.
904  *
905  * @discussion
906  * A zone view uses the zone it references for the allocations backing store,
907  * but does the allocation accounting at the view level.
908  *
909  * These accounting are surfaced by @b zprint(1) and similar tools,
910  * which allow for cheap but finer grained understanding of allocations
911  * without any fragmentation cost.
912  *
913  * Zone views are protected by the kernel lockdown and can't be initialized
914  * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
915  */
916 typedef struct zone_view *zone_view_t;
917 struct zone_view {
918 	zone_t          zv_zone;
919 	zone_stats_t    zv_stats;
920 	const char     *zv_name __unsafe_indexable;
921 	zone_view_t     zv_next;
922 };
923 
924 
925 #pragma mark: implementation details
926 
927 #define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
928 #define __ZONE_DECLARE_TYPE2(var, type_t) \
929 	__attribute__((visibility("hidden"))) \
930 	extern type_t *__single __zalloc__##var##__type_name
931 
932 #ifdef XNU_KERNEL_PRIVATE
933 #pragma mark - XNU only interfaces
934 
935 #include <kern/cpu_number.h>
936 
937 #pragma GCC visibility push(hidden)
938 
939 #pragma mark XNU only: zalloc (extended)
940 
/*
 * Alignment masks (natural alignment minus one), suitable for the
 * align_mask argument of zalloc_permanent*().
 */
#define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
#define ZALIGN_16               (sizeof(uint16_t) - 1)
#define ZALIGN_32               (sizeof(uint32_t) - 1)
#define ZALIGN_PTR              (sizeof(void *)   - 1)
#define ZALIGN_64               (sizeof(uint64_t) - 1)
#define ZALIGN(t)               (_Alignof(t)      - 1)
947 
948 
/*!
 * @function zalloc_permanent_tag()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation,
 *                      expressed as a mask (alignment - 1, see ZALIGN*)
 * @param tag           the tag to use for allocations larger than a page.
 *
 * @returns             the allocated element
 */
__attribute__((malloc))
extern void *__sized_by(size) zalloc_permanent_tag(
	vm_size_t       size,
	vm_offset_t     align_mask,
	vm_tag_t        tag);

/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align         the required alignment for this allocation,
 *                      expressed as a mask (alignment - 1, see ZALIGN*)
 *
 * @returns             the allocated element
 */
#define zalloc_permanent(size, align) \
	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	__unsafe_forge_single(type_t *, \
	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))

/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void zalloc_first_proc_made(void);
1015 
1016 #pragma mark XNU only: per-cpu allocations
1017 
/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @discussion
 * Per-CPU slots are laid out one page apart: slot @c cpu lives
 * @c ptoa(cpu) bytes past the demangled base pointer.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)(cpu)))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for cpu number iterator
 *                      (ranges over [0, zpercpu_count())).
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1069 
/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void *__zpercpu zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

/*
 * Wrapper installed by the zalloc_percpu() macro below: when Z_NOFAIL is
 * passed, it lets the compiler assume the result is non-NULL, so callers
 * need no redundant NULL checks.
 */
static inline void *__zpercpu
__zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1102 
/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_percpu(
	zone_or_view_t  zone_or_view,
	void *__zpercpu addr);

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * The result must be accessed through the @c zpercpu_get*() interfaces.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation,
 *                      expressed as a mask (alignment - 1, see ZALIGN*)
 *
 * @returns             the allocated element
 */
extern void *__zpercpu zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a given
 * type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1150 
1151 
1152 #pragma mark XNU only: SMR support for zones
1153 
struct smr;

/*!
 * @typedef zone_smr_free_cb_t
 *
 * @brief
 * Type for the delayed free callback for SMR zones.
 *
 * @discussion
 * This function is called before an element is reused,
 * or when memory is returned to the system.
 *
 * This function MUST zero the element, and if no special
 * action is to be taken on free, then @c bzero() is a fine
 * callback to use.
 *
 * This function also must be preemption-disabled safe,
 * as it runs with preemption disabled.
 *
 *
 * Note that this function should only clean the fields
 * that must be preserved for stale SMR readers to see.
 * Any field that is accessed after element validation
 * such as a try-retain or acquiring a lock on it must
 * be cleaned up much earlier as they might hold onto
 * expensive resources.
 *
 * The suggested pattern for an SMR type using this facility,
 * is to have 2 functions:
 *
 * - one "retire" stage that tries to clean up as much from
 *   the element as possible, with great care to leave no dangling
 *   pointers around, as elements in this stage might linger
 *   in the allocator for a long time, and this could possibly
 *   be abused during UaF exploitation.
 *
 * - one "smr_free" function which cleans up whatever was left,
 *   and zeroes the rest of the element.
 *
 * <code>
 *     void
 *     type_retire(type_t elem)
 *     {
 *         // invalidating the element makes most fields
 *         // inaccessible to readers.
 *         type_mark_invalid(elem);
 *
 *         // do cleanups for things requiring a validity check
 *         kfree_type(some_type_t, elem->expensive_thing);
 *         type_remove_from_global_list(&elem->linkage);
 *
 *         zfree_smr(type_zone, elem);
 *     }
 *
 *     void
 *     type_smr_free(void *_elem)
 *     {
 *         type_t elem = _elem;
 *
 *         // cleanup fields that are used to "find" this element
 *         // and that SMR readers may access hazardously.
 *         lck_ticket_destroy(&elem->lock);
 *         kfree_data(elem->key, elem->keylen);
 *
 *         // compulsory: element must be zeroed fully
 *         bzero(elem, sizeof(*elem));
 *     }
 * </code>
 */
typedef void (*zone_smr_free_cb_t)(void *, size_t); /* (element, element size) */
1224 
/*!
 * @function zone_enable_smr()
 *
 * @abstract
 * Enable SMR for a zone.
 *
 * @discussion
 * This can only be done once, and must be done before
 * the first allocation is made with this zone.
 *
 * @param zone          the zone to enable SMR for
 * @param smr           the smr domain to use
 * @param free_cb       the free callback to use
 */
extern void     zone_enable_smr(
	zone_t                  zone,
	struct smr             *smr,
	zone_smr_free_cb_t      free_cb);

/*!
 * @function zone_id_enable_smr()
 *
 * @abstract
 * Enable SMR for a zone ID.
 *
 * @discussion
 * This can only be done once, and must be done before
 * the first allocation is made with this zone.
 *
 * The typed local @c __cb forces @c free_cb to take a pointer to the
 * element type declared for this zone ID (see ZONE_DECLARE_ID()).
 *
 * @param zone_id       the zone to enable SMR for
 * @param smr           the smr domain to use
 * @param free_cb       the free callback to use
 */
#define zone_id_enable_smr(zone_id, smr, free_cb)  ({ \
	void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t);      \
                                                                                \
	__cb = (free_cb);                                                       \
	zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb);    \
})
1264 
/*!
 * @macro zalloc_smr()
 *
 * @abstract
 * Allocates an element from an SMR enabled zone
 *
 * @discussion
 * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
 *
 * @param zone          the zone to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
#define zalloc_smr(zone, flags) \
	zalloc_flags(zone, flags)

/*!
 * @macro zalloc_id_smr()
 *
 * @abstract
 * Allocates an element from a specified zone ID with SMR enabled.
 *
 * @param zid           The proper @c ZONE_ID_* constant.
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
#define zalloc_id_smr(zid, flags) \
	zalloc_id(zid, flags)
1295 
/*!
 * @macro zfree_smr()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_smr().
 *
 * @discussion
 * When zfree_smr() is called, then the element is not immediately zeroed,
 * and the "free" callback that has been registered with the zone will
 * run later (@see zone_smr_free_cb_t).
 *
 * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
 *
 *
 * It is guaranteed that the SMR timestamp associated with an element
 * will always be equal or greater than the stamp associated with
 * elements freed before it on the same thread.
 *
 * It means that when freeing multiple elements in a sequence, these
 * must be freed in topological order (parents before children).
 *
 * It is worth noting that calling zfree_smr() on several elements
 * in a given order doesn't necessarily mean they will be effectively
 * reused or cleaned up in that same order, only that their SMR clocks
 * will expire in that order.
 *
 * The wrapping macro also erases the caller's pointer variable
 * (via os_ptr_load_and_erase) to defend against use-after-free.
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
extern void     zfree_smr(
	zone_t          zone,
	void           *elem __unsafe_indexable);
#define zfree_smr(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree_id_smr()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id_smr().
 *
 * @discussion
 * Like zfree_smr(), the wrapping macro erases the caller's pointer.
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_id_smr(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_id_smr(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1351 
/*!
 * @macro zfree_smr_noclear()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_smr().
 *
 * @discussion
 * This variant doesn't clear the pointer passed as an argument,
 * as it is often required for SMR algorithms to function correctly
 * to leave pointers "dangling" to an extent.
 *
 * However it expects the field in question to be an SMR_POINTER()
 * struct.
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
#define zfree_smr_noclear(zone, elem) \
	(zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))

/*!
 * @macro zfree_id_smr_noclear()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id_smr().
 *
 * @discussion
 * This variant doesn't clear the pointer passed as an argument,
 * as it is often required for SMR algorithms to function correctly
 * to leave pointers "dangling" to an extent.
 *
 * However it expects the field in question to be an SMR_POINTER()
 * struct.
 *
 * @param zone          the zone id to free the element to.
 * @param elem          the address to free
 */
#define zfree_id_smr_noclear(zone, elem) \
	(zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1391 
1392 
1393 #pragma mark XNU only: zone creation (extended)
1394 
/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require()
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creations IDs
 * listed in @c zone_create_ro_id_t.
 *
 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
 * @c zone_create().
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	/* read-only zones: [ZONE_ID__FIRST_RO, ZONE_ID__LAST_RO] */
	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	/* external read-only zones (1:1 with zone_create_ro_id_t) */
	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,
	ZONE_ID_AMFI_OSENTITLEMENTS,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_PROC_TASK,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

	ZONE_ID__FIRST_DYNAMIC,
});
1457 
/*!
 * @const ZONE_ID_ANY
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * Zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)

/*!
 * @const ZONE_ID_INVALID
 * An invalid zone_id_t that corresponds to nothing.
 */
#define ZONE_ID_INVALID ((zone_id_t)-2)

/*!
 * @function zone_by_id
 *
 * @param zid           the specified zone ID.
 *                      (NOTE(review): taken as @c size_t rather than
 *                      @c zone_id_t — confirm the wider type is intended.)
 * @returns             the zone with that ID.
 */
zone_t zone_by_id(
	size_t                  zid) __pure2;

/*!
 * @function zone_name
 *
 * @param zone          the specified zone
 * @returns             the name of the specified zone.
 */
const char *__unsafe_indexable zone_name(
	zone_t                  zone);

/*!
 * @function zone_heap_name
 *
 * @param zone          the specified zone
 * @returns             the name of the heap this zone is part of, or "".
 */
const char *__unsafe_indexable zone_heap_name(
	zone_t                  zone);
1497 
/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup   a block that can perform non trivial initialization
 *                      on the zone before it is marked valid.
 *                      This block can call advanced setups like:
 *                      - zone_set_exhaustible()
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t   zone_create_ext(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));
1525 
/*!
 * @macro ZONE_DECLARE
 *
 * @abstract
 * Declares a zone variable and its associated type.
 *
 * @discussion
 * The type association is what lets typed interfaces such as
 * @c zone_id_enable_smr() type-check their callbacks.
 *
 * @param var           the name of the variable to declare.
 * @param type_t        the type of elements in the zone.
 */
#define ZONE_DECLARE(var, type_t) \
	extern zone_t var; \
	__ZONE_DECLARE_TYPE(var, type_t)

/*!
 * @macro ZONE_DECLARE_ID
 *
 * @abstract
 * Declares the type associated with a zone ID.
 *
 * @param id            the name of zone ID to associate a type with.
 * @param type_t        the type of elements in the zone.
 */
#define ZONE_DECLARE_ID(id, type_t) \
	__ZONE_DECLARE_TYPE(id, type_t)
1550 
/*!
 * @macro ZONE_DEFINE
 *
 * @abstract
 * Declares a zone variable to automatically initialize with the specified
 * parameters.
 *
 * @discussion
 * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
 *
 * Zones defined this way must not be destructible
 * (enforced by the static_assert on ZC_DESTRUCTIBLE).
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
	    &__startup_zone_spec_ ## var)

/*!
 * @macro ZONE_DEFINE_TYPE
 *
 * @abstract
 * Defines a zone variable to automatically initialize with the specified
 * parameters, associated with a particular type.
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param type_t        the type of elements in the zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
	__ZONE_DECLARE_TYPE(var, type_t)

/*!
 * @macro ZONE_DEFINE_ID
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @param zid           a @c zone_reserved_id_t value.
 * @param name          the name for the zone
 * @param type_t        the type of elements in the zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE_ID(zid, name, type_t, flags) \
	ZONE_DECLARE_ID(zid, type_t); \
	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1606 
/*!
 * @macro ZONE_INIT
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @discussion
 * @c __LINE__ is used to generate a unique startup spec per call site.
 *
 * @param var           the name of the variable to initialize.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 * @param extra_setup   a block that can perform non trivial initialization
 *                      (@see @c zone_create_ext()).
 */
#define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1624 
/*!
 * @function zone_id_require
 *
 * @abstract
 * Requires for a given pointer to belong to the specified zone, by ID and size.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * This is a variant of @c zone_require() which:
 * - isn't sensitive to @c zone_t::elem_size being compromised,
 * - is slightly faster as it saves one load and a multiplication.
 *
 * @param zone_id       the zone ID the address needs to belong to.
 * @param elem_size     the size of elements for this zone.
 * @param addr          the element address to check.
 */
extern void     zone_id_require(
	zone_id_t               zone_id,
	vm_size_t               elem_size,
	void                   *addr __unsafe_indexable);

/*!
 * @function zone_set_exhaustible
 *
 * @abstract
 * Make a zone exhaustible, to be called from the zone_create_ext()
 * setup hook.
 *
 * @param zone          the zone to make exhaustible.
 * @param max_elements  the maximum number of elements for the zone.
 */
extern void     zone_set_exhaustible(
	zone_t                  zone,
	vm_size_t               max_elements);

/*!
 * @function zone_raise_reserve()
 *
 * @brief
 * Used to raise the reserve on a zone.
 *
 * @discussion
 * Can be called from any context (zone_create_ext() setup hook or after).
 */
extern void     zone_raise_reserve(
	zone_or_view_t          zone_or_view,
	uint16_t                min_elements);
1665 
/*!
 * @function zone_fill_initially
 *
 * @brief
 * Initially fill a non collectable zone to have the specified amount of
 * elements.
 *
 * @discussion
 * This function must be called on a non collectable permanent zone before it
 * has been used yet.
 *
 * @param zone          The zone to fill.
 * @param nelems        The number of elements to be able to hold.
 */
extern void     zone_fill_initially(
	zone_t                  zone,
	vm_size_t               nelems);

/*!
 * @function zone_drain()
 *
 * @abstract
 * Forces a zone to be drained (have all its data structures freed
 * back to its data store, and empty pages returned to the system).
 *
 * @param zone          the zone to free the objects to.
 */
extern void zone_drain(
	zone_t                  zone);
1695 
/*!
 * @struct zone_basic_stats
 *
 * @abstract
 * Used to report basic statistics about a zone.
 *
 * @field zbs_avail     the number of elements in a zone.
 * @field zbs_alloc     the number of allocated elements in a zone.
 * @field zbs_free      the number of free elements in a zone.
 * @field zbs_cached    the number of free elements in the per-CPU caches.
 *                      (included in zbs_free).
 * @field zbs_alloc_fail
 *                      the number of allocation failures.
 */
struct zone_basic_stats {
	uint64_t        zbs_avail;
	uint64_t        zbs_alloc;
	uint64_t        zbs_free;
	uint64_t        zbs_cached;
	uint64_t        zbs_alloc_fail;
};

/*!
 * @function zone_get_stats
 *
 * @abstract
 * Retrieves statistics about zones, including its per-CPU caches.
 *
 * @param zone          the zone to collect stats from.
 * @param stats         the statistics to fill.
 */
extern void zone_get_stats(
	zone_t                  zone,
	struct zone_basic_stats *stats);
1730 
1731 
/*!
 * @typedef zone_exhausted_cb_t
 *
 * @brief
 * The callback type for the ZONE_EXHAUSTED event.
 */
typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone);

/*!
 * @brief
 * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits its
 * wiring limit.
 *
 * @discussion
 * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
 * performing zone expansion and no significant amount of work can be performed
 * from this context.
 *
 * In particular, those callbacks cannot allocate any memory, it is expected
 * that they will filter if the zone is of interest, and wake up another thread
 * to perform the actual work (for example via thread call).
 */
EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
1755 
1756 
1757 #pragma mark XNU only: zone views
1758 
/*!
 * @enum zone_kheap_id_t
 *
 * @brief
 * Enumerate a particular kalloc heap.
 *
 * @discussion
 * More documentation about heaps is available in @c <kern/kalloc.h>.
 *
 * @const KHEAP_ID_NONE
 * This value denotes regular zones, not used by kalloc.
 *
 * @const KHEAP_ID_DEFAULT
 * Indicates zones part of the KHEAP_DEFAULT heap.
 *
 * @const KHEAP_ID_DATA_BUFFERS
 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
 *
 * @const KHEAP_ID_KT_VAR
 * Indicates zones part of the KHEAP_KT_VAR heap.
 */
__enum_decl(zone_kheap_id_t, uint8_t, {
	KHEAP_ID_NONE,
	KHEAP_ID_DEFAULT,
	KHEAP_ID_DATA_BUFFERS,
	KHEAP_ID_KT_VAR,

/* number of heap ids (keep in sync with the last enumerator above) */
#define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
});
1788 
/*!
 * @macro ZONE_VIEW_DECLARE
 *
 * @abstract
 * (optionally) declares a zone view (in a header).
 *
 * @param var           the name for the zone view.
 */
#define ZONE_VIEW_DECLARE(var) \
	extern struct zone_view var[1]

/*!
 * @macro ZONE_VIEW_DEFINE
 *
 * @abstract
 * Defines a given zone view and what it points to.
 *
 * @discussion
 * Zone views can either share a pre-existing zone,
 * or perform a lookup into a kalloc heap for the zone
 * backing the bucket of the proper size.
 *
 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
 * as the last rank. If views on zones are created, these must have been
 * created before this stage.
 *
 * This macro should not be used to create zone views from default
 * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
 *
 * @param var           the name for the zone view.
 * @param name          a string describing the zone view.
 * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
 * @param size          the element size to be allocated from this view.
 */
#define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
	    .zv_name = (name), \
	} }; \
	static __startup_data struct zone_view_startup_spec \
	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
	    &__startup_zone_view_spec_ ## var)
1831 
1832 
1833 #pragma mark XNU only: batched allocations
1834 
/*!
 * @typedef zstack_t
 *
 * @brief
 * A stack of allocated elements chained with delta encoding.
 *
 * @discussion
 * Some batch allocation interfaces interact with the data heap
 * where leaking kernel pointers is not acceptable. This is why
 * element offsets are used instead.
 */
typedef struct zstack {
	vm_offset_t     z_head;         /* delta-encoded head of the chain */
	uint32_t        z_count;        /* number of elements on the stack */
} zstack_t;
1850 
/*!
 * @function zstack_push
 *
 * @brief
 * Push a given element onto a zstack.
 *
 * @param stack         the stack to push onto.
 * @param elem          the element to push.
 */
extern void zstack_push(
	zstack_t               *stack,
	void                   *elem __unsafe_indexable);

/*!
 * @function zstack_pop
 *
 * @brief
 * Pops an element from a zstack, the caller must check it's not empty.
 *
 * @param stack         the stack to pop from (must be non-empty,
 *                      see zstack_empty()).
 */
void *__unsafe_indexable zstack_pop(
	zstack_t               *stack);
1869 
/*!
 * @function zstack_count
 *
 * @brief
 * Returns the number of elements currently on the stack.
 */
static inline uint32_t
zstack_count(zstack_t stack)
{
	return stack.z_count;
}
1881 
1882 /*!
1883  * @function zstack_empty
1884  *
1885  * @brief
1886  * Returns whether a stack is empty.
1887  */
1888 static inline bool
zstack_empty(zstack_t stack)1889 zstack_empty(zstack_t stack)
1890 {
1891 	return zstack_count(stack) == 0;
1892 }
1893 
1894 static inline zstack_t
zstack_load_and_erase(zstack_t * stackp)1895 zstack_load_and_erase(zstack_t *stackp)
1896 {
1897 	zstack_t stack = *stackp;
1898 
1899 	*stackp = (zstack_t){ };
1900 	return stack;
1901 }
1902 
/*!
 * @function zfree_nozero
 *
 * @abstract
 * Frees an element allocated with @c zalloc*, without zeroing it.
 *
 * @discussion
 * This is for the sake of networking only, no one else should use this.
 * The wrapping macro erases the caller's pointer variable
 * (via os_ptr_load_and_erase).
 *
 * @param zone_id       the zone id to free the element to.
 * @param elem          the element to free
 */
extern void zfree_nozero(
	zone_id_t               zone_id,
	void                   *elem __unsafe_indexable);
#define zfree_nozero(zone_id, elem) ({ \
	zone_id_t __zfree_zid = (zone_id); \
	(zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1922 
1923 /*!
1924  * @function zalloc_n
1925  *
1926  * @abstract
1927  * Allocates a batch of elements from the specified zone.
1928  *
1929  * @discussion
1930  * This is for the sake of networking only, no one else should use this.
1931  *
1932  * @param zone_id       the zone id to allocate the element from.
1933  * @param count         how many elements to allocate (less might be returned)
 * @param flags         a set of @c zalloc_flags_t flags.
1935  */
1936 extern zstack_t zalloc_n(
1937 	zone_id_t               zone_id,
1938 	uint32_t                count,
1939 	zalloc_flags_t          flags);
1940 
1941 /*!
1942  * @function zfree_n
1943  *
1944  * @abstract
1945  * Batched variant of zfree(): frees a stack of elements.
1946  *
1947  * @param zone_id       the zone id to free the element to.
1948  * @param stack         a stack of elements to free.
1949  */
1950 extern void zfree_n(
1951 	zone_id_t               zone_id,
1952 	zstack_t                stack);
1953 #define zfree_n(zone_id, stack) ({ \
1954 	zone_id_t __zfree_zid = (zone_id); \
1955 	(zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
1956 })
1957 
1958 /*!
1959  * @function zfree_nozero_n
1960  *
1961  * @abstract
1962  * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
1963  * them.
1964  *
1965  * @discussion
1966  * This is for the sake of networking only, no one else should use this.
1967  *
1968  * @param zone_id       the zone id to free the element to.
1969  * @param stack         a stack of elements to free.
1970  */
1971 extern void zfree_nozero_n(
1972 	zone_id_t               zone_id,
1973 	zstack_t                stack);
1974 #define zfree_nozero_n(zone_id, stack) ({ \
1975 	zone_id_t __zfree_zid = (zone_id); \
1976 	(zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
1977 })
1978 
1979 #pragma mark XNU only: cached objects
1980 
1981 /*!
1982  * @typedef zone_cache_ops_t
1983  *
1984  * @brief
1985  * A set of callbacks used for a zcache (cache of composite objects).
1986  *
1987  * @field zc_op_alloc
1988  * The callback to "allocate" a cached object from scratch.
1989  *
1990  * @field zc_op_mark_valid
1991  * The callback that is called when a cached object is being reused,
1992  * will typically call @c zcache_mark_valid() on the various
1993  * sub-pieces of the composite cached object.
1994  *
1995  * @field zc_op_mark_invalid
1996  * The callback that is called when a composite object is being freed
1997  * to the cache. This will typically call @c zcache_mark_invalid()
1998  * on the various sub-pieces of the composite object.
1999  *
2000  * @field zc_op_free
2001  * The callback to "free" a composite object completely.
2002  */
2003 typedef const struct zone_cache_ops {
2004 	void         *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);
2005 	void         *(*zc_op_mark_valid)(zone_id_t, void *);
2006 	void         *(*zc_op_mark_invalid)(zone_id_t, void *);
2007 	void          (*zc_op_free)(zone_id_t, void *);
2008 } *zone_cache_ops_t;
2009 
2010 
2011 /*!
2012  * @function zcache_mark_valid()
2013  *
2014  * @brief
2015  * Mark an element as "valid".
2016  *
 * @discussion
2018  * This function is used to be able to integrate with KASAN or PGZ
2019  * for a cache of composite objects. It typically is a function
2020  * called in their @c zc_op_mark_valid() callback.
2021  *
2022  * If PGZ or KASAN isn't in use, then this callback is a no-op.
2023  * Otherwise the @c elem address might be updated.
2024  *
2025  * @param zone          the zone the element belongs to.
2026  * @param elem          the address of the element
2027  * @returns             the new address to correctly access @c elem.
2028  */
2029 extern vm_offset_t zcache_mark_valid(
2030 	zone_t                  zone,
2031 	vm_offset_t             elem);
2032 #define zcache_mark_valid(zone, e) \
2033 	((typeof(e))((zcache_mark_valid)(zone, (vm_offset_t)(e))))
2034 
2035 /*!
2036  * @function zcache_mark_invalid()
2037  *
2038  * @brief
2039  * Mark an element as "invalid".
2040  *
 * @discussion
2042  * This function is used to be able to integrate with KASAN or PGZ
2043  * for a cache of composite objects. It typically is a function
2044  * called in their @c zc_op_mark_invalid() callback.
2045  *
2046  * This function performs validation that @c elem belongs
2047  * to the right zone and is properly "aligned", and should
2048  * never be elided under any configuration.
2049  *
2050  * @param zone          the zone the element belongs to.
2051  * @param elem          the address of the element
2052  * @returns             the new address to correctly access @c elem.
2053  */
2054 extern vm_offset_t zcache_mark_invalid(
2055 	zone_t                  zone,
2056 	vm_offset_t             elem);
2057 #define zcache_mark_invalid(zone, e) \
2058 	((typeof(e))(zcache_mark_invalid)(zone, (vm_offset_t)(e)))
2059 
2060 
2061 /*!
2062  * @macro zcache_alloc()
2063  *
2064  * @abstract
2065  * Allocates a composite object from a cache.
2066  *
2067  * @param zone_id       The proper @c ZONE_ID_* constant.
2068  * @param flags         a collection of @c zalloc_flags_t.
2069  *
2070  * @returns             NULL or the allocated element
2071  */
2072 #define zcache_alloc(zone_id, fl) \
2073 	__zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2074 
2075 /*!
2076  * @function zcache_alloc_n()
2077  *
2078  * @abstract
2079  * Allocates a stack of composite objects from a cache.
2080  *
2081  * @param zone_id       The proper @c ZONE_ID_* constant.
2082  * @param count         how many elements to allocate (less might be returned)
 * @param flags         a set of @c zalloc_flags_t flags.
2084  *
2085  * @returns             NULL or the allocated composite object
2086  */
2087 extern zstack_t zcache_alloc_n(
2088 	zone_id_t               zone_id,
2089 	uint32_t                count,
2090 	zalloc_flags_t          flags,
2091 	zone_cache_ops_t        ops);
2092 #define zcache_alloc_n(zone_id, count, flags) \
2093 	(zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2094 
2095 
2096 
2097 /*!
2098  * @function zcache_free()
2099  *
2100  * @abstract
2101  * Frees a composite object previously allocated
2102  * with @c zcache_alloc() or @c zcache_alloc_n().
2103  *
2104  * @param zone_id       the zcache id to free the object to.
2105  * @param addr          the address to free
2106  * @param ops           the pointer to the zcache ops for this zcache.
2107  */
2108 extern void zcache_free(
2109 	zone_id_t               zone_id,
2110 	void                   *addr __unsafe_indexable,
2111 	zone_cache_ops_t        ops);
2112 #define zcache_free(zone_id, elem) \
2113 	(zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
2114 	    __zcache_##zone_id##_ops)
2115 
2116 /*!
2117  * @function zcache_free_n()
2118  *
2119  * @abstract
2120  * Frees a stack of composite objects previously allocated
2121  * with @c zcache_alloc() or @c zcache_alloc_n().
2122  *
2123  * @param zone_id       the zcache id to free the objects to.
2124  * @param stack         a stack of composite objects
2125  * @param ops           the pointer to the zcache ops for this zcache.
2126  */
2127 extern void zcache_free_n(
2128 	zone_id_t               zone_id,
2129 	zstack_t                stack,
2130 	zone_cache_ops_t        ops);
2131 #define zcache_free_n(zone_id, stack) \
2132 	(zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
2133 	    __zcache_##zone_id##_ops)
2134 
2135 
2136 /*!
2137  * @function zcache_drain()
2138  *
2139  * @abstract
2140  * Forces a zcache to be drained (have all its data structures freed
2141  * back to the original zones).
2142  *
2143  * @param zone_id       the zcache id to free the objects to.
2144  */
2145 extern void zcache_drain(
2146 	zone_id_t               zone_id);
2147 
2148 
2149 /*!
2150  * @macro ZCACHE_DECLARE
2151  *
2152  * @abstract
2153  * Declares the type associated with a zone cache ID.
2154  *
2155  * @param id            the name of zone ID to associate a type with.
2156  * @param type_t        the type of elements in the zone.
2157  */
2158 #define ZCACHE_DECLARE(id, type_t) \
2159 	__ZONE_DECLARE_TYPE(id, type_t); \
2160 	__attribute__((visibility("hidden"))) \
2161 	extern const zone_cache_ops_t __zcache_##id##_ops
2162 
2163 
2164 /*!
2165  * @macro ZCACHE_DEFINE
2166  *
2167  * @abstract
2168  * Defines a zone cache for a given ID and type.
2169  *
 * @param zid           the name of zone ID to associate a type with.
2171  * @param name          the name for the zone
2172  * @param type_t        the type of elements in the zone.
2173  * @param size          the size of elements in the cache
2174  * @param ops           the ops for this zcache.
2175  */
2176 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2177 	ZCACHE_DECLARE(zid, type_t);                                            \
2178 	ZONE_DECLARE_ID(zid, type_t);                                           \
2179 	const zone_cache_ops_t __zcache_##zid##_ops = (ops);                    \
2180 	ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) {   \
2181 	        zcache_ops[zid] = (ops);                                        \
2182 	})
2183 
2184 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2185 
2186 #pragma mark XNU only: PGZ support
2187 
2188 /*!
2189  * @function pgz_owned()
2190  *
2191  * @brief
2192  * Returns whether an address is PGZ owned.
2193  *
2194  * @param addr          The address to translate.
2195  * @returns             Whether it is PGZ owned
2196  */
2197 #if CONFIG_PROB_GZALLOC
2198 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
2199 #else
2200 #define pgz_owned(addr) false
2201 #endif
2202 
2203 /*!
2204  * @function pgz_decode()
2205  *
2206  * @brief
2207  * Translates a PGZ protected virtual address to its unprotected
2208  * backing store.
2209  *
2210  * @discussion
2211  * This is exposed so that the VM can lookup the vm_page_t for PGZ protected
2212  * elements since the PGZ protected virtual addresses are maintained by PGZ
 * at the pmap level without the VM involvement.
2214  *
2215  * "allow_invalid" schemes relying on sequestering also need this
2216  * to perform the locking attempts on the unprotected address.
2217  *
2218  * @param addr          The address to translate.
2219  * @param size          The object size.
2220  * @returns             The unprotected address or @c addr.
2221  */
2222 #if CONFIG_PROB_GZALLOC
2223 #define pgz_decode(addr, size) \
2224 	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
2225 #else
2226 #define pgz_decode(addr, size)  (addr)
2227 #endif
2228 
2229 /*!
2230  * @function pgz_decode_allow_invalid()
2231  *
2232  * @brief
2233  * Translates a PGZ protected virtual address to its unprotected
2234  * backing store, but doesn't assert it is still allocated/valid.
2235  *
2236  * @discussion
2237  * "allow_invalid" schemes relying on sequestering also need this
2238  * to perform the locking attempts on the unprotected address.
2239  *
2240  * @param addr          The address to translate.
2241  * @param want_zid      The expected zone ID for the element.
2242  * @returns             The unprotected address or @c addr.
2243  */
2244 #if CONFIG_PROB_GZALLOC
2245 #define pgz_decode_allow_invalid(addr, want_zid) \
2246 	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
2247 #else
2248 #define pgz_decode_allow_invalid(addr, zid)  (addr)
2249 #endif
2250 
2251 #pragma mark XNU only: misc & implementation details
2252 
2253 struct zone_create_startup_spec {
2254 	zone_t                 *z_var;
2255 	const char             *z_name __unsafe_indexable;
2256 	vm_size_t               z_size;
2257 	zone_create_flags_t     z_flags;
2258 	zone_id_t               z_zid;
2259 	void                  (^z_setup)(zone_t);
2260 };
2261 
2262 extern void     zone_create_startup(
2263 	struct zone_create_startup_spec *spec);
2264 
2265 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2266 	static __startup_data struct zone_create_startup_spec \
2267 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2268 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2269 	    &__startup_zone_spec_ ## ns)
2270 
2271 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2272 	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2273 
2274 #define __zalloc_cast(namespace, expr) \
2275 	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2276 
2277 #if ZALLOC_TYPE_SAFE
2278 #define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
2279 #define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
2280 #endif /* !ZALLOC_TYPE_SAFE */
2281 
2282 struct zone_view_startup_spec {
2283 	zone_view_t         zv_view;
2284 	union {
2285 		zone_kheap_id_t zv_heapid;
2286 		zone_t         *zv_zone;
2287 	};
2288 	vm_size_t           zv_size;
2289 };
2290 
2291 extern void zone_view_startup_init(
2292 	struct zone_view_startup_spec *spec);
2293 
2294 extern void zone_userspace_reboot_checks(void);
2295 
2296 #if VM_TAG_SIZECLASSES
2297 extern void __zone_site_register(
2298 	vm_allocation_site_t   *site);
2299 
2300 #define VM_ALLOC_SITE_TAG() ({ \
2301 	__PLACE_IN_SECTION("__DATA, __data")                                   \
2302 	static vm_allocation_site_t site = { .refcount = 2, };                 \
2303 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __zone_site_register, &site);   \
2304 	site.tag;                                                              \
2305 })
2306 #else /* VM_TAG_SIZECLASSES */
2307 #define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
2308 #endif /* !VM_TAG_SIZECLASSES */
2309 
2310 static inline zalloc_flags_t
__zone_flags_mix_tag(zalloc_flags_t flags,vm_tag_t tag)2311 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2312 {
2313 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2314 }
2315 
2316 #if DEBUG || DEVELOPMENT
2317 #    define ZPCPU_MANGLE_BIT    (1ul << 63)
2318 #else /* !(DEBUG || DEVELOPMENT) */
2319 #  define ZPCPU_MANGLE_BIT      0ul
2320 #endif /* !(DEBUG || DEVELOPMENT) */
2321 
2322 #define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
2323 #define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
2324 #define __zpcpu_addr(e)         ((vm_address_t)(e))
2325 #define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(ptr), e)
2326 #define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2327 
2328 /**
2329  * @macro __zpcpu_mangle_for_boot()
2330  *
2331  * @discussion
2332  * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2333  * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2334  * storage marked @c __startup_data and replace it with the proper allocation
2335  * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2336  *
2337  * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2338  * provides the proper mangling of the storage into a "fake" percpu pointer so
2339  * that accesses through @c zpercpu_get() functions properly.
2340  *
2341  * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2342  */
2343 #define __zpcpu_mangle_for_boot(ptr)  ({ \
2344 	assert(startup_phase < STARTUP_SUB_ZALLOC); \
2345 	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
2346 })
2347 
2348 extern unsigned zpercpu_count(void) __pure2;
2349 
2350 #if CONFIG_PROB_GZALLOC
2351 
2352 extern vm_offset_t __pgz_decode(
2353 	mach_vm_address_t       addr,
2354 	mach_vm_size_t          size);
2355 
2356 extern vm_offset_t __pgz_decode_allow_invalid(
2357 	vm_offset_t             offs,
2358 	zone_id_t               zid);
2359 
2360 #endif
2361 #if DEBUG || DEVELOPMENT
2362 extern size_t zone_pages_wired;
2363 extern size_t zone_guard_pages;
2364 #endif /* DEBUG || DEVELOPMENT */
2365 #if CONFIG_ZLEAKS
2366 extern uint32_t                 zleak_active;
2367 extern vm_size_t                zleak_max_zonemap_size;
2368 extern vm_size_t                zleak_global_tracking_threshold;
2369 extern vm_size_t                zleak_per_zone_tracking_threshold;
2370 
2371 extern kern_return_t zleak_update_threshold(
2372 	vm_size_t              *arg,
2373 	uint64_t                value);
2374 #endif /* CONFIG_ZLEAKS */
2375 
2376 extern uint32_t                 zone_map_jetsam_limit;
2377 
2378 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2379 
2380 extern zone_t percpu_u64_zone;
2381 
2382 #pragma GCC visibility pop
2383 #endif /* XNU_KERNEL_PRIVATE */
2384 
2385 /*
2386  * This macro is currently used by AppleImage4 (rdar://83924635)
2387  */
2388 #define __zalloc_ptr_load_and_erase(elem) \
2389 	os_ptr_load_and_erase(elem)
2390 
2391 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2392 
2393 #endif  /* _KERN_ZALLOC_H_ */
2394 
2395 #endif  /* KERNEL_PRIVATE */
2396