1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	zalloc.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	 1985
62  *
63  */
64 
65 #ifdef  KERNEL_PRIVATE
66 
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69 
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76 
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80 
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg)       __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86 
87 /*
88  * Enable this macro to force type safe zalloc/zalloc_ro/...
89  */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97 
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99 
100 /*!
101  * @macro __zpercpu
102  *
103  * @abstract
104  * Annotation that helps denote a per-cpu pointer that requires usage of
105  * @c zpercpu_*() for access.
106  */
107 #define __zpercpu __unsafe_indexable
108 
109 /*!
110  * @typedef zone_id_t
111  *
112  * @abstract
113  * The type for a zone ID.
114  */
115 typedef uint16_t zone_id_t;
116 
117 /**
118  * @enum zone_create_flags_t
119  *
120  * @abstract
121  * Set of flags to pass to zone_create().
122  *
123  * @discussion
124  * Some kernel-wide policies affect all possible created zones.
125  * Explicit @c ZC_* flags win over such policies.
126  */
127 __options_decl(zone_create_flags_t, uint64_t, {
128 	/** The default value to pass to zone_create() */
129 	ZC_NONE                 = 0x00000000,
130 
131 	/** Force the created zone to use VA sequestering */
132 	ZC_SEQUESTER            = 0x00000001,
133 	/** Force the created zone @b NOT to use VA sequestering */
134 	ZC_NOSEQUESTER          = 0x00000002,
135 
136 	/** Enable per-CPU zone caching for this zone */
137 	ZC_CACHING              = 0x00000010,
138 	/** Disable per-CPU zone caching for this zone */
139 	ZC_NOCACHING            = 0x00000020,
140 
141 	/** Allocate zone pages as Read-only */
142 	ZC_READONLY             = 0x00800000,
143 
144 	/** Mark zone as a per-cpu zone */
145 	ZC_PERCPU               = 0x01000000,
146 
147 	/** Force the created zone to clear every allocation on free */
148 	ZC_ZFREE_CLEARMEM       = 0x02000000,
149 
150 	/** Mark zone as non collectable by zone_gc() */
151 	ZC_NOGC                 = 0x04000000,
152 
153 	/** Do not encrypt this zone during hibernation */
154 	ZC_NOENCRYPT            = 0x08000000,
155 
156 	/** Type requires alignment to be preserved */
157 	ZC_ALIGNMENT_REQUIRED   = 0x10000000,
158 
159 	/** Obsolete */
160 	ZC_NOGZALLOC            = 0x20000000,
161 
162 	/** Don't asynchronously replenish the zone via callouts */
163 	ZC_NOCALLOUT            = 0x40000000,
164 
165 	/** Can be zdestroy()ed; unlike with zinit(), this is not the default */
166 	ZC_DESTRUCTIBLE         = 0x80000000,
167 
168 #ifdef XNU_KERNEL_PRIVATE
169 	/** Use guard pages in PGZ mode */
170 	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,
171 
172 	/** Zone doesn't support TBI tagging */
173 	ZC_NOTBITAG             = 0x0200000000000000,
174 
175 	/** This zone will back a kalloc type */
176 	ZC_KALLOC_TYPE          = 0x0400000000000000,
177 
178 	/** This zone will back a kalloc heap */
179 	ZC_KALLOC_HEAP          = 0x0800000000000000,
180 
181 	/* unused                 0x1000000000000000, */
182 
183 	/** This zone belongs to the VM submap */
184 	ZC_VM                   = 0x2000000000000000,
185 #if __LP64__
186 #define ZC_VM_LP64 ZC_VM
187 #else
188 #define ZC_VM_LP64 ZC_NONE
189 #endif
190 
191 	/** Disable kasan quarantine for this zone */
192 	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,
193 
194 	/** Disable kasan redzones for this zone */
195 	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
196 #endif /* XNU_KERNEL_PRIVATE */
197 });
198 
199 /*!
200  * @union zone_or_view
201  *
202  * @abstract
203  * A type used for calls that accept either a zone or a zone view.
204  *
205  * @discussion
206  * @c zalloc() and @c zfree() and their variants can act on both
207  * zones and zone views.
208  */
209 union zone_or_view {
210 	struct zone_view           *zov_view;
211 	struct zone                *zov_zone;
212 	struct kalloc_type_view    *zov_kt_heap;
213 #ifdef __cplusplus
214 	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
215 	}
216 	inline zone_or_view(struct zone *z) : zov_zone(z) {
217 	}
218 	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
219 	}
220 #endif
221 };
222 #ifdef __cplusplus
223 typedef union zone_or_view zone_or_view_t;
224 #else
225 typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
226 #endif
227 
228 /*!
229  * @enum zone_create_ro_id_t
230  *
231  * @abstract
232  * Zone creation IDs for external read only zones
233  *
234  * @discussion
235  * Kexts that desire to use the RO allocator should:
236  * 1. Add a zone creation id below
237  * 2. Add a corresponding ID to @c zone_reserved_id_t
238  * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
239  * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
240  * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
241  */
242 __enum_decl(zone_create_ro_id_t, zone_id_t, {
243 	ZC_RO_ID_SANDBOX,
244 	ZC_RO_ID_PROFILE,
245 	ZC_RO_ID_PROTOBOX,
246 	ZC_RO_ID_SB_FILTER,
247 	ZC_RO_ID_AMFI_OSENTITLEMENTS,
248 	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
249 });
250 
251 /*!
252  * @function zone_create
253  *
254  * @abstract
255  * Creates a zone with the specified parameters.
256  *
257  * @discussion
258  * A Zone is a slab allocator that returns objects of a given size very quickly.
259  *
260  * @param name          the name for the new zone.
261  * @param size          the size of the elements returned by this zone.
262  * @param flags         a set of @c zone_create_flags_t flags.
263  *
264  * @returns             the created zone; this call never fails.
265  */
266 extern zone_t   zone_create(
267 	const char             *name __unsafe_indexable,
268 	vm_size_t               size,
269 	zone_create_flags_t     flags);
270 
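/*
 * Usage sketch (illustrative only; "struct widget" and the identifiers
 * below are hypothetical): create a zone for a fixed-size type, then
 * allocate and free elements from it.
 *
 *	static SECURITY_READ_ONLY_LATE(zone_t) widget_zone;
 *
 *	void
 *	widget_subsystem_init(void)
 *	{
 *		widget_zone = zone_create("widgets", sizeof(struct widget),
 *		    ZC_ZFREE_CLEARMEM);
 *	}
 *
 *	struct widget *
 *	widget_alloc(void)
 *	{
 *		// never fails for ordinary (non-exhaustible, expandable) zones
 *		return zalloc(widget_zone);
 *	}
 *
 *	void
 *	widget_destroy(struct widget *w)
 *	{
 *		zfree(widget_zone, w);  // the zfree() macro also resets 'w' to NULL
 *	}
 */
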
271 /*!
272  * @function zone_create_ro
273  *
274  * @abstract
275  * Creates a read-only zone with the specified parameters, for use by kexts.
276  *
277  * @discussion
278  * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
279  * kexts. Do not use this API to create read only zones in xnu.
280  *
281  * @param name          the name for the new zone.
282  * @param size          the size of the elements returned by this zone.
283  * @param flags         a set of @c zone_create_flags_t flags.
284  * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
285  *
286  * @returns             the zone ID of the created zone; this call never fails.
287  */
288 extern zone_id_t   zone_create_ro(
289 	const char             *name __unsafe_indexable,
290 	vm_size_t               size,
291 	zone_create_flags_t     flags,
292 	zone_create_ro_id_t     zc_ro_id);
293 
294 /*!
295  * @function zdestroy
296  *
297  * @abstract
298  * Destroys a zone previously made with zone_create.
299  *
300  * @discussion
301  * Zones must have been made destructible for @c zdestroy() to be allowed,
302  * by passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
303  *
304  * @param zone          the zone to destroy.
305  */
306 extern void     zdestroy(
307 	zone_t          zone);
308 
309 /*!
310  * @function zone_require
311  *
312  * @abstract
313  * Requires that a given pointer belong to the specified zone.
314  *
315  * @discussion
316  * The function panics if the check fails as it indicates that the kernel
317  * internals have been compromised.
318  *
319  * @param zone          the zone the address needs to belong to.
320  * @param addr          the element address to check.
321  */
322 extern void     zone_require(
323 	zone_t          zone,
324 	void           *addr __unsafe_indexable);
325 
326 /*!
327  * @function zone_require_ro
328  *
329  * @abstract
330  * Version of @c zone_require() intended for zones created with @c ZC_READONLY.
331  *
332  * @discussion
333  * This check is not sufficient to fully trust the element.
334  *
335  * Another check of its content must be performed to prove
336  * that the element is "the right one". A typical technique,
337  * when the RO data structure is 1:1 with a mutable one,
338  * is a simple circularity check with a very strict lifetime
339  * (both the mutable and read-only data structures are made
340  * and destroyed as close together as possible).
341  *
342  * @param zone_id       the zone id the address needs to belong to.
343  * @param elem_size     the element size for this zone.
344  * @param addr          the element address to check.
345  */
346 extern void     zone_require_ro(
347 	zone_id_t       zone_id,
348 	vm_size_t       elem_size,
349 	void           *addr __unsafe_indexable);
350 
351 /*!
352  * @enum zalloc_flags_t
353  *
354  * @brief
355  * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
356  *
357  * @discussion
358  * Callsites passing flags are encouraged to use exactly one of
359  * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT; the default is @c Z_WAITOK
360  * when nothing else is specified.
361  *
362  * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
363  * then @c Z_WAITOK is ignored.
364  *
365  * @const Z_WAITOK
366  * Means that it's OK for zalloc() to block to wait for memory;
367  * when Z_WAITOK is passed, zalloc will never return NULL.
368  *
369  * @const Z_NOWAIT
370  * Passing this flag means that zalloc is not allowed to ever block.
371  *
372  * @const Z_NOPAGEWAIT
373  * Passing this flag means that zalloc is allowed to wait due to lock
374  * contention, but will not wait on the VM for pages when
375  * under memory pressure.
376  *
377  * @const Z_ZERO
378  * Passing this flag means that the returned memory has been zeroed out.
379  *
380  * @const Z_NOFAIL
381  * Passing this flag means that the caller expects the allocation to always
382  * succeed. This will result in a panic if this assumption isn't correct.
383  *
384  * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
385  * be used on exhaustible zones.
386  *
387  * @const Z_REALLOCF
388  * For the realloc family of functions,
389  * free the incoming memory on failure cases.
390  *
391  #if XNU_KERNEL_PRIVATE
392  * @const Z_KALLOC_ARRAY
393  * Instead of returning a standard pointer, return a pointer that encodes
394  * its size-class in the pointer itself (only for kalloc; this might limit
395  * the range of allocations that can be done).
396  *
397  * @const Z_MAY_COPYINMAP
398  * This data allocation might be used with vm_map_copyin().
399  * This allows for those allocations to be associated with a proper VM object.
400  *
401  * @const Z_FULLSIZE
402  * Used to indicate that the caller will use all available space in excess
403  * from the requested allocation size.
404  *
405  * @const Z_SKIP_KASAN
406  * Tell zalloc() not to do any kasan adjustments.
407  *
408  * @const Z_PGZ
409  * Used by zalloc internally to denote an allocation that we will try
410  * to guard with PGZ.
411  *
412  * @const Z_VM_TAG_BT_BIT
413  * Used to blame allocation accounting on the first kext
414  * found in the backtrace of the allocation.
415  *
416  * @const Z_NOZZC
417  * Used internally to mark allocations that will skip zero validation.
418  *
419  * @const Z_PCPU
420  * Used internally for the percpu paths.
421  *
422  * @const Z_VM_TAG_MASK
423  * Represents bits in which a vm_tag_t for the allocation can be passed.
424  * (used by kalloc for the zone tagging debugging feature).
425  #endif
426  */
427 __options_decl(zalloc_flags_t, uint32_t, {
428 	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
429 	Z_WAITOK        = 0x0000,
430 	Z_NOWAIT        = 0x0001,
431 	Z_NOPAGEWAIT    = 0x0002,
432 	Z_ZERO          = 0x0004,
433 	Z_REALLOCF      = 0x0008,
434 
435 #if XNU_KERNEL_PRIVATE
436 	Z_KALLOC_ARRAY  = 0x0080,
437 	Z_MAY_COPYINMAP = 0x0100,
438 	Z_FULLSIZE      = 0x0200,
439 #if KASAN
440 	Z_SKIP_KASAN    = 0x0400,
441 #else
442 	Z_SKIP_KASAN    = 0x0000,
443 #endif
444 	Z_PGZ           = 0x0800,
445 	Z_VM_TAG_BT_BIT = 0x1000,
446 	Z_PCPU          = 0x2000,
447 	Z_NOZZC         = 0x4000,
448 #endif /* XNU_KERNEL_PRIVATE */
449 	Z_NOFAIL        = 0x8000,
450 
451 	/* convenient c++ spellings */
452 	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
453 	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
454 	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL, /* convenient spelling for c++ */
455 
456 	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
457 #if XNU_KERNEL_PRIVATE
458 	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
459 	/** used by kalloc to propagate vm tags for -zt */
460 	Z_VM_TAG_MASK   = 0xffff0000,
461 
462 #define Z_VM_TAG_SHIFT        16
463 #define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
464 #define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
465 #endif
466 });
467 
468 /*
469  * This type is used so that kalloc_internal has good calling conventions
470  * for callers who want to cheaply know both the allocated address
471  * and the actual size of the allocation.
472  */
473 struct kalloc_result {
474 	void         *addr __sized_by(size);
475 	vm_size_t     size;
476 };
477 
478 /*!
479  * @function zalloc
480  *
481  * @abstract
482  * Allocates an element from a specified zone.
483  *
484  * @discussion
485  * If the zone isn't exhaustible and is expandable, this call never fails.
486  *
487  * @param zone_or_view  the zone or zone view to allocate from
488  *
489  * @returns             NULL or the allocated element
490  */
491 __attribute__((malloc))
492 extern void *__unsafe_indexable zalloc(
493 	zone_or_view_t  zone_or_view);
494 
495 /*!
496  * @function zalloc_noblock
497  *
498  * @abstract
499  * Allocates an element from a specified zone, but never blocks.
500  *
501  * @discussion
502  * This call is suitable for preemptible code, however allocation
503  * isn't allowed from interrupt context.
504  *
505  * @param zone_or_view  the zone or zone view to allocate from
506  *
507  * @returns             NULL or the allocated element
508  */
509 __attribute__((malloc))
510 extern void *__unsafe_indexable zalloc_noblock(
511 	zone_or_view_t  zone_or_view);
512 
513 /*!
514  * @function zalloc_flags()
515  *
516  * @abstract
517  * Allocates an element from a specified zone, with flags.
518  *
519  * @param zone_or_view  the zone or zone view to allocate from
520  * @param flags         a collection of @c zalloc_flags_t.
521  *
522  * @returns             NULL or the allocated element
523  */
524 __attribute__((malloc))
525 extern void *__unsafe_indexable zalloc_flags(
526 	zone_or_view_t  zone_or_view,
527 	zalloc_flags_t  flags);
528 
529 __attribute__((malloc))
530 static inline void *__unsafe_indexable
531 __zalloc_flags(
532 	zone_or_view_t  zone_or_view,
533 	zalloc_flags_t  flags)
534 {
535 	void *__unsafe_indexable addr = (zalloc_flags)(zone_or_view, flags);
536 	if (flags & Z_NOFAIL) {
537 		__builtin_assume(addr != NULL);
538 	}
539 	return addr;
540 }
541 
542 #ifndef XNU_KERNEL_PRIVATE
543 #define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
544 #endif
545 
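/*
 * Usage sketch (illustrative only; "widget_zone" and "struct widget" are
 * the hypothetical names used in the zone_create() sketch above): how the
 * blocking and zeroing flags combine.
 *
 *	// may block; with Z_WAITOK the result is never NULL, and Z_ZERO
 *	// guarantees the memory is zeroed
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *
 *	// never blocks, so the result must be checked
 *	struct widget *maybe = zalloc_flags(widget_zone, Z_NOWAIT);
 *	if (maybe == NULL) {
 *		// handle the allocation failure
 *	}
 */
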
546 /*!
547  * @macro zalloc_id
548  *
549  * @abstract
550  * Allocates an element from a specified zone ID, with flags.
551  *
552  * @param zid           The proper @c ZONE_ID_* constant.
553  * @param flags         a collection of @c zalloc_flags_t.
554  *
555  * @returns             NULL or the allocated element
556  */
557 __attribute__((malloc))
558 extern void *__unsafe_indexable zalloc_id(
559 	zone_id_t       zid,
560 	zalloc_flags_t  flags);
561 
562 __attribute__((malloc))
563 static inline void *__unsafe_indexable
564 __zalloc_id(
565 	zone_id_t       zid,
566 	zalloc_flags_t  flags)
567 {
568 	void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
569 	if (flags & Z_NOFAIL) {
570 		__builtin_assume(addr != NULL);
571 	}
572 	return addr;
573 }
574 
575 #ifndef XNU_KERNEL_PRIVATE
576 #define zalloc_id(zid, fl) __zalloc_id(zid, fl)
577 #endif
578 
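/*
 * Usage sketch (illustrative only; ZONE_ID_EXAMPLE and "struct example" are
 * hypothetical, standing in for one of the well known ZONE_ID_* constants of
 * zone_reserved_id_t declared later in this header):
 *
 *	struct example *e = zalloc_id(ZONE_ID_EXAMPLE, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree_id(ZONE_ID_EXAMPLE, e);
 */
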
579 /*!
580  * @function zalloc_ro
581  *
582  * @abstract
583  * Allocates an element from a specified read-only zone.
584  *
585  * @param zone_id       the zone id to allocate from
586  * @param flags         a collection of @c zalloc_flags_t.
587  *
588  * @returns             NULL or the allocated element
589  */
590 __attribute__((malloc))
591 extern void *__unsafe_indexable zalloc_ro(
592 	zone_id_t       zone_id,
593 	zalloc_flags_t  flags);
594 
595 __attribute__((malloc))
596 static inline void *__unsafe_indexable
597 __zalloc_ro(
598 	zone_id_t       zone_id,
599 	zalloc_flags_t  flags)
600 {
601 	void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
602 	if (flags & Z_NOFAIL) {
603 		__builtin_assume(addr != NULL);
604 	}
605 	return addr;
606 }
607 
608 #ifndef XNU_KERNEL_PRIVATE
609 #define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
610 #endif
611 
612 /*!
613  * @function zalloc_ro_mut
614  *
615  * @abstract
616  * Modifies an element from a specified read-only zone.
617  *
618  * @discussion
619  * Modifying compiler-assisted authenticated pointers using this function will
620  * not result in a signed pointer being written.  The caller is expected to
621  * sign the value appropriately beforehand if they wish to do this.
622  *
623  * @param zone_id       the zone id to allocate from
624  * @param elem          element to be modified
625  * @param offset        offset from element
626  * @param new_data      pointer to new data
627  * @param new_data_size size of modification
628  *
629  */
630 extern void zalloc_ro_mut(
631 	zone_id_t       zone_id,
632 	void           *elem __unsafe_indexable,
633 	vm_offset_t     offset,
634 	const void     *new_data __sized_by(new_data_size),
635 	vm_size_t       new_data_size);
636 
637 /*!
638  * @function zalloc_ro_update_elem
639  *
640  * @abstract
641  * Update the value of an entire element allocated in the read only allocator.
642  *
643  * @param zone_id       the zone id to allocate from
644  * @param elem          element to be modified
645  * @param new_data      pointer to new data
646  *
647  */
648 #define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
649 	const typeof(*(elem)) *__new_data = (new_data);                        \
650 	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
651 })
652 
653 /*!
654  * @function zalloc_ro_update_field
655  *
656  * @abstract
657  * Update a single field of an element allocated in the read only allocator.
658  *
659  * @param zone_id       the zone id to allocate from
660  * @param elem          element to be modified
661  * @param field         the element field to be modified
662  * @param new_data      pointer to new data
663  *
664  */
665 #define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
666 	const typeof((elem)->field) *__value = (value);                        \
667 	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
668 	    __value, sizeof((elem)->field));                                   \
669 })
670 
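/*
 * Usage sketch (illustrative only; "struct widget_ro", its "owner" field,
 * "new_owner" and the choice of creation ID are hypothetical; see
 * zone_create_ro_id_t above for the real creation IDs a kext must add):
 *
 *	SECURITY_READ_ONLY_LATE(zone_id_t) widget_ro_zid;
 *
 *	widget_ro_zid = zone_create_ro("widget.ro", sizeof(struct widget_ro),
 *	    ZC_NONE, ZC_RO_ID_SANDBOX);
 *
 *	struct widget_ro *w = zalloc_ro(widget_ro_zid, Z_WAITOK | Z_ZERO);
 *
 *	// elements live in read-only memory: direct stores are not possible,
 *	// all mutations go through zalloc_ro_mut()/zalloc_ro_update_*()
 *	zalloc_ro_update_field(widget_ro_zid, w, owner, &new_owner);
 *
 *	zfree_ro(widget_ro_zid, w);
 */
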
671 #if __LP64__
672 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
673 #else
674 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_32
675 #endif
676 
677 /*!
678  * @enum zro_atomic_op_t
679  *
680  * @brief
681  * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
682  * atomic operations.
683  *
684  * @discussion
685  * This enum provides all flavors of atomic operations supported in sizes 8,
686  * 16, 32, 64 bits.
687  *
688  * @const ZRO_ATOMIC_OR_*
689  * To perform an @c os_atomic_or
690  *
691  * @const ZRO_ATOMIC_XOR_*
692  * To perform an @c os_atomic_xor
693  *
694  * @const ZRO_ATOMIC_AND_*
695  * To perform an @c os_atomic_and
696  *
697  * @const ZRO_ATOMIC_ADD_*
698  * To perform an @c os_atomic_add
699  *
700  * @const ZRO_ATOMIC_XCHG_*
701  * To perform an @c os_atomic_xchg
702  *
703  */
704 __enum_decl(zro_atomic_op_t, uint32_t, {
705 	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
706 	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
707 	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
708 	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,
709 
710 	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
711 	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
712 	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
713 	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,
714 
715 	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
716 	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
717 	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
718 	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,
719 
720 	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
721 	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
722 	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
723 	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,
724 
725 	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
726 	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
727 	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
728 	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,
729 
730 	/* convenient spellings */
731 	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
732 	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
733 	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
734 	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
735 	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
736 });
737 
738 /*!
739  * @function zalloc_ro_mut_atomic
740  *
741  * @abstract
742  * Atomically update an offset in an element allocated in the read only
743  * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
744  *
745  * @param zone_id       the zone id to allocate from
746  * @param elem          element to be modified
747  * @param offset        offset in the element to be modified
748  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
749  * @param value         value for the atomic operation
750  *
751  */
752 extern uint64_t zalloc_ro_mut_atomic(
753 	zone_id_t       zone_id,
754 	void           *elem __unsafe_indexable,
755 	vm_offset_t     offset,
756 	zro_atomic_op_t op,
757 	uint64_t        value);
758 
759 /*!
760  * @macro zalloc_ro_update_field_atomic
761  *
762  * @abstract
763  * Atomically update a single field of an element allocated in the read only
764  * allocator.
765  *
766  * @param zone_id       the zone id to allocate from
767  * @param elem          element to be modified
768  * @param field         the element field to be modified
769  * @param op            atomic operation to perform (see @c zro_atomic_op_t)
770  * @param value         value for the atomic operation
771  *
772  */
773 #define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
774 	const typeof((elem)->field) __value = (value);                         \
775 	static_assert(sizeof(__value) == (op & 0xf));                          \
776 	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
777 	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
778 })
779 
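/*
 * Usage sketch (illustrative only; "refcount" is a hypothetical uint32_t
 * field of the read-only element from the sketch above): the ZRO_ATOMIC_*
 * size suffix must match the field size, which the macro statically asserts.
 *
 *	zalloc_ro_update_field_atomic(widget_ro_zid, w, refcount,
 *	    ZRO_ATOMIC_ADD_32, 1);
 */
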
780 /*!
781  * @function zalloc_ro_clear
782  *
783  * @abstract
784  * Zeroes an element from a specified read-only zone.
785  *
786  * @param zone_id       the zone id to allocate from
787  * @param elem          element to be modified
788  * @param offset        offset from element
789  * @param size          size of modification
790  */
791 extern void    zalloc_ro_clear(
792 	zone_id_t       zone_id,
793 	void           *elem __unsafe_indexable,
794 	vm_offset_t     offset,
795 	vm_size_t       size);
796 
797 /*!
798  * @function zalloc_ro_clear_field
799  *
800  * @abstract
801  * Zeroes the specified field of an element from a specified read-only zone.
802  *
803  * @param zone_id       the zone id to allocate from
804  * @param elem          element to be modified
805  * @param field         the element field to be zeroed
806  */
807 #define zalloc_ro_clear_field(zone_id, elem, field) \
808 	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
809 	    sizeof((elem)->field))
810 
811 /*!
812  * @function zfree_id()
813  *
814  * @abstract
815  * Frees an element previously allocated with @c zalloc_id().
816  *
817  * @param zone_id       the zone id to free the element to.
818  * @param addr          the address to free
819  */
820 extern void     zfree_id(
821 	zone_id_t       zone_id,
822 	void           *addr __unsafe_indexable);
823 
824 /*!
825  * @function zfree_ro()
826  *
827  * @abstract
828  * Frees an element previously allocated with @c zalloc_ro().
829  *
830  * @param zone_id       the zone id to free the element to.
831  * @param addr          the address to free
832  */
833 extern void     zfree_ro(
834 	zone_id_t       zone_id,
835 	void           *addr __unsafe_indexable);
836 
837 /*!
838  * @function zfree
839  *
840  * @abstract
841  * Frees an element allocated with @c zalloc*.
842  *
843  * @discussion
844  * If the element being freed doesn't belong to the specified zone,
845  * then this call will panic.
846  *
847  * @param zone_or_view  the zone or zone view to free the element to.
848  * @param elem          the element to free
849  */
850 extern void     zfree(
851 	zone_or_view_t  zone_or_view,
852 	void            *elem __unsafe_indexable);
853 
854 /*
855  * This macro sets "elem" to NULL on free.
856  *
857  * Note: any value passed to zfree*() might live inside the element being freed;
858  *       temporaries must be taken, and the pointer must be reset prior to the free.
859  */
860 #define zfree(zone, elem) ({ \
861 	__auto_type __zfree_zone = (zone); \
862 	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
863 })
864 
865 #define zfree_id(zid, elem) ({ \
866 	zone_id_t __zfree_zid = (zid); \
867 	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
868 })
869 
870 #define zfree_ro(zid, elem) ({ \
871 	zone_id_t __zfree_zid = (zid); \
872 	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
873 })
874 
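/*
 * Usage sketch (illustrative only, reusing the hypothetical "widget_zone"):
 * the zfree*() macros load-and-erase their pointer argument, so the caller's
 * variable reads as NULL after the call.
 *
 *	struct widget *w = zalloc(widget_zone);
 *	...
 *	zfree(widget_zone, w);
 *	assert(w == NULL);
 */
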
875 /* deprecated KPIs */
876 
877 __zalloc_deprecated("use zone_create()")
878 extern zone_t   zinit(
879 	vm_size_t       size,           /* the size of an element */
880 	vm_size_t       maxmem,         /* maximum memory to use */
881 	vm_size_t       alloc,          /* allocation size */
882 	const char      *name __unsafe_indexable);
883 
884 
885 #pragma mark: zone views
886 /*!
887  * @typedef zone_stats_t
888  *
889  * @abstract
890  * The opaque type for per-cpu zone stats that are accumulated per zone
891  * or per zone-view.
892  */
893 typedef struct zone_stats *__zpercpu zone_stats_t;
894 
895 /*!
896  * @typedef zone_view_t
897  *
898  * @abstract
899  * A view on a zone for accounting purposes.
900  *
901  * @discussion
902  * A zone view uses the zone it references as the backing store for allocations,
903  * but does the allocation accounting at the view level.
904  *
905  * This accounting is surfaced by @b zprint(1) and similar tools,
906  * which allow for a cheap but finer-grained understanding of allocations
907  * without any fragmentation cost.
908  *
909  * Zone views are protected by the kernel lockdown and can't be initialized
910  * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
911  */
912 typedef struct zone_view *zone_view_t;
913 struct zone_view {
914 	zone_t          zv_zone;
915 	zone_stats_t    zv_stats;
916 	const char     *zv_name __unsafe_indexable;
917 	zone_view_t     zv_next;
918 };
919 
920 #ifdef XNU_KERNEL_PRIVATE
921 /*!
922  * @enum zone_kheap_id_t
923  *
924  * @brief
925  * Enumerate a particular kalloc heap.
926  *
927  * @discussion
928  * More documentation about heaps is available in @c <kern/kalloc.h>.
929  *
930  * @const KHEAP_ID_NONE
931  * This value denotes regular zones, not used by kalloc.
932  *
933  * @const KHEAP_ID_DEFAULT
934  * Indicates zones part of the KHEAP_DEFAULT heap.
935  *
936  * @const KHEAP_ID_DATA_BUFFERS
937  * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
938  *
939  * @const KHEAP_ID_KT_VAR
940  * Indicates zones part of the KHEAP_KT_VAR heap.
941  */
942 __enum_decl(zone_kheap_id_t, uint32_t, {
943 	KHEAP_ID_NONE,
944 	KHEAP_ID_DEFAULT,
945 	KHEAP_ID_DATA_BUFFERS,
946 	KHEAP_ID_KT_VAR,
947 
948 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
949 });
950 
951 /*!
952  * @macro ZONE_VIEW_DECLARE
953  *
954  * @abstract
955  * (optionally) declares a zone view (in a header).
956  *
957  * @param var           the name for the zone view.
958  */
959 #define ZONE_VIEW_DECLARE(var) \
960 	extern struct zone_view var[1]
961 
962 /*!
963  * @macro ZONE_VIEW_DEFINE
964  *
965  * @abstract
966  * Defines a given zone view and what it points to.
967  *
968  * @discussion
969  * Zone views can either share a pre-existing zone,
970  * or perform a lookup into a kalloc heap for the zone
971  * backing the bucket of the proper size.
972  *
973  * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
974  * as the last rank. If views on zones are created, these must have been
975  * created before this stage.
976  *
977  * This macro should not be used to create zone views from the default
978  * kalloc heap; KALLOC_TYPE_DEFINE should be used instead.
979  *
980  * @param var           the name for the zone view.
981  * @param name          a string describing the zone view.
982  * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
983  * @param size          the element size to be allocated from this view.
984  */
985 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
986 	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
987 	    .zv_name = name, \
988 	} }; \
989 	static __startup_data struct zone_view_startup_spec \
990 	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
991 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
992 	    &__startup_zone_view_spec_ ## var)
993 
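/*
 * Usage sketch (illustrative only; the view name and size are hypothetical):
 * a view over the data-buffers heap, so that these 64-byte allocations are
 * reported under their own name by zprint(1) while sharing the heap's zones.
 *
 *	ZONE_VIEW_DEFINE(ZV_EXAMPLE_BUF, "example.buffers",
 *	    KHEAP_ID_DATA_BUFFERS, 64);
 *
 *	void *buf = zalloc_flags(ZV_EXAMPLE_BUF, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(ZV_EXAMPLE_BUF, buf);
 */
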
994 #endif /* XNU_KERNEL_PRIVATE */
995 
996 
997 #ifdef XNU_KERNEL_PRIVATE
998 #pragma mark - XNU only interfaces
999 
1000 #include <kern/cpu_number.h>
1001 
1002 #pragma GCC visibility push(hidden)
1003 
1004 #pragma mark XNU only: zalloc (extended)
1005 
1006 #define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
1007 #define ZALIGN_16               (sizeof(uint16_t) - 1)
1008 #define ZALIGN_32               (sizeof(uint32_t) - 1)
1009 #define ZALIGN_PTR              (sizeof(void *)   - 1)
1010 #define ZALIGN_64               (sizeof(uint64_t) - 1)
1011 #define ZALIGN(t)               (_Alignof(t)      - 1)
1012 
1013 
1014 /*!
1015  * @function zalloc_permanent_tag()
1016  *
1017  * @abstract
1018  * Allocates a permanent element from the permanent zone
1019  *
1020  * @discussion
1021  * Memory returned by this function is always 0-initialized.
1022  * Note that the size of this allocation cannot be determined
1023  * by zone_element_size, so it should not be used for copyio.
1024  *
1025  * @param size          the element size (must be smaller than PAGE_SIZE)
1026  * @param align_mask    the required alignment for this allocation
1027  * @param tag           the tag to use for allocations larger than a page.
1028  *
1029  * @returns             the allocated element
1030  */
1031 __attribute__((malloc))
1032 extern void *__sized_by(size) zalloc_permanent_tag(
1033 	vm_size_t       size,
1034 	vm_offset_t     align_mask,
1035 	vm_tag_t        tag);
1036 
1037 /*!
1038  * @function zalloc_permanent()
1039  *
1040  * @abstract
1041  * Allocates a permanent element from the permanent zone
1042  *
1043  * @discussion
1044  * Memory returned by this function is always 0-initialized.
1045  * Note that the size of this allocation cannot be determined
1046  * by zone_element_size, so it should not be used for copyio.
1047  *
1048  * @param size          the element size (must be smaller than PAGE_SIZE)
1049  * @param align_mask    the required alignment for this allocation
1050  *
1051  * @returns             the allocated element
1052  */
1053 #define zalloc_permanent(size, align) \
1054 	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)
1055 
1056 /*!
1057  * @function zalloc_permanent_type()
1058  *
1059  * @abstract
1060  * Allocates a permanent element of a given type with its natural alignment.
1061  *
1062  * @discussion
1063  * Memory returned by this function is always 0-initialized.
1064  *
1065  * @param type_t        the element type
1066  *
1067  * @returns             the allocated element
1068  */
1069 #define zalloc_permanent_type(type_t) \
1070 	__unsafe_forge_single(type_t *, \
1071 	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
1072 
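/*
 * Usage sketch (illustrative only; "struct boot_config" is hypothetical):
 * a boot-time allocation that is zero-initialized and never freed; there is
 * no matching free operation for permanent memory.
 *
 *	static struct boot_config *config;
 *
 *	config = zalloc_permanent_type(struct boot_config);
 */
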
1073 /*!
1074  * @function zalloc_first_proc_made()
1075  *
1076  * @abstract
1077  * Declares that the "early" allocation phase is done.
1078  */
1079 extern void
1080 zalloc_first_proc_made(void);
1081 
1082 #pragma mark XNU only: per-cpu allocations
1083 
1084 /*!
1085  * @macro zpercpu_get_cpu()
1086  *
1087  * @abstract
1088  * Get a pointer to a specific CPU slot of a given per-cpu variable.
1089  *
1090  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1091  * @param cpu           the specified CPU number as returned by @c cpu_number()
1092  *
1093  * @returns             the per-CPU slot for @c ptr for the specified CPU.
1094  */
1095 #define zpercpu_get_cpu(ptr, cpu) \
1096 	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))
1097 
1098 /*!
1099  * @macro zpercpu_get()
1100  *
1101  * @abstract
1102  * Get a pointer to the current CPU slot of a given per-cpu variable.
1103  *
1104  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1105  *
1106  * @returns             the per-CPU slot for @c ptr for the current CPU.
1107  */
1108 #define zpercpu_get(ptr) \
1109 	zpercpu_get_cpu(ptr, cpu_number())
1110 
1111 /*!
1112  * @macro zpercpu_foreach()
1113  *
1114  * @abstract
1115  * Enumerate all per-CPU slots by address.
1116  *
1117  * @param it            the name for the iterator
1118  * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
1119  */
1120 #define zpercpu_foreach(it, ptr) \
1121 	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
1122 	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
1123 	    it < __end_##it; it = __zpcpu_next(it))
1124 
1125 /*!
1126  * @macro zpercpu_foreach_cpu()
1127  *
1128  * @abstract
1129  * Enumerate all per-CPU slots by CPU slot number.
1130  *
1131  * @param cpu           the name for cpu number iterator.
1132  */
1133 #define zpercpu_foreach_cpu(cpu) \
1134 	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1135 
1136 /*!
1137  * @function zalloc_percpu()
1138  *
1139  * @abstract
1140  * Allocates an element from a per-cpu zone.
1141  *
1142  * @discussion
1143  * The returned pointer cannot be used directly and must be manipulated
1144  * through the @c zpercpu_get*() interfaces.
1145  *
1146  * @param zone_or_view  the zone or zone view to allocate from
1147  * @param flags         a collection of @c zalloc_flags_t.
1148  *
1149  * @returns             NULL or the allocated element
1150  */
1151 extern void *__zpercpu zalloc_percpu(
1152 	zone_or_view_t  zone_or_view,
1153 	zalloc_flags_t  flags);
1154 
1155 static inline void *__zpercpu
1156 __zalloc_percpu(
1157 	zone_or_view_t  zone_or_view,
1158 	zalloc_flags_t  flags)
1159 {
1160 	void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
1161 	if (flags & Z_NOFAIL) {
1162 		__builtin_assume(addr != NULL);
1163 	}
1164 	return addr;
1165 }
1166 
1167 #define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1168 
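/*
 * Usage sketch (illustrative only; "counter" is hypothetical, and
 * percpu_u64_zone is the zone declared near the end of this header):
 * a per-CPU 64-bit counter bumped locally and summed across all CPUs.
 *
 *	uint64_t *__zpercpu counter;
 *
 *	counter = zalloc_percpu(percpu_u64_zone, Z_WAITOK | Z_ZERO);
 *
 *	// update the slot of the CPU we are currently running on
 *	(*zpercpu_get(counter))++;
 *
 *	// aggregate every CPU's slot
 *	uint64_t total = 0;
 *	zpercpu_foreach(it, counter) {
 *		total += *it;
 *	}
 *
 *	zfree_percpu(percpu_u64_zone, counter);
 */
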
1169 /*!
1170  * @function zfree_percpu()
1171  *
1172  * @abstract
1173  * Frees an element previously allocated with @c zalloc_percpu().
1174  *
1175  * @param zone_or_view  the zone or zone view to free the element to.
1176  * @param addr          the address to free
1177  */
1178 extern void     zfree_percpu(
1179 	zone_or_view_t  zone_or_view,
1180 	void *__zpercpu addr);
1181 
1182 /*!
1183  * @function zalloc_percpu_permanent()
1184  *
1185  * @abstract
1186  * Allocates a permanent percpu-element from the permanent percpu zone.
1187  *
1188  * @discussion
1189  * Memory returned by this function is always 0-initialized.
1190  *
1191  * @param size          the element size (must be smaller than PAGE_SIZE)
1192  * @param align_mask    the required alignment for this allocation
1193  *
1194  * @returns             the allocated element
1195  */
1196 extern void *__zpercpu zalloc_percpu_permanent(
1197 	vm_size_t       size,
1198 	vm_offset_t     align_mask);
1199 
1200 /*!
1201  * @function zalloc_percpu_permanent_type()
1202  *
1203  * @abstract
1204  * Allocates a permanent percpu-element from the permanent percpu zone of a given
1205  * type with its natural alignment.
1206  *
1207  * @discussion
1208  * Memory returned by this function is always 0-initialized.
1209  *
1210  * @param type_t        the element type
1211  *
1212  * @returns             the allocated element
1213  */
1214 #define zalloc_percpu_permanent_type(type_t) \
1215 	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1216 
1217 
1218 #pragma mark XNU only: zone creation (extended)
1219 
1220 /*!
1221  * @enum zone_reserved_id_t
1222  *
1223  * @abstract
1224  * Well known pre-registered zones, allowing use of zone_id_require()
1225  *
1226  * @discussion
1227  * @c ZONE_ID__* aren't real zone IDs.
1228  *
1229  * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1230  * easy a value to produce (by malice or accident).
1231  *
1232  * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1233  * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
1234  * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creation IDs
1235  * listed in @c zone_create_ro_id_t.
1236  *
1237  * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1238  * @c zone_create().
1239  */
1240 __enum_decl(zone_reserved_id_t, zone_id_t, {
1241 	ZONE_ID__ZERO,
1242 
1243 	ZONE_ID_PERMANENT,
1244 	ZONE_ID_PERCPU_PERMANENT,
1245 
1246 	ZONE_ID_THREAD_RO,
1247 	ZONE_ID_MAC_LABEL,
1248 	ZONE_ID_PROC_RO,
1249 	ZONE_ID_PROC_SIGACTS_RO,
1250 	ZONE_ID_KAUTH_CRED,
1251 	ZONE_ID_CS_BLOB,
1252 
1253 	ZONE_ID_SANDBOX_RO,
1254 	ZONE_ID_PROFILE_RO,
1255 	ZONE_ID_PROTOBOX,
1256 	ZONE_ID_SB_FILTER,
1257 	ZONE_ID_AMFI_OSENTITLEMENTS,
1258 
1259 	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
1260 	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
1261 	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
1262 	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,
1263 
1264 	ZONE_ID_PMAP,
1265 	ZONE_ID_VM_MAP,
1266 	ZONE_ID_VM_MAP_ENTRY,
1267 	ZONE_ID_VM_MAP_HOLES,
1268 	ZONE_ID_VM_MAP_COPY,
1269 	ZONE_ID_VM_PAGES,
1270 	ZONE_ID_IPC_PORT,
1271 	ZONE_ID_IPC_PORT_SET,
1272 	ZONE_ID_IPC_VOUCHERS,
1273 	ZONE_ID_PROC_TASK,
1274 	ZONE_ID_THREAD,
1275 	ZONE_ID_TURNSTILE,
1276 	ZONE_ID_SEMAPHORE,
1277 	ZONE_ID_SELECT_SET,
1278 	ZONE_ID_FILEPROC,
1279 
1280 	ZONE_ID__FIRST_DYNAMIC,
1281 });
1282 
1283 /*!
1284  * @const ZONE_ID_ANY
1285  * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1286  * Zone ID.
1287  */
1288 #define ZONE_ID_ANY ((zone_id_t)-1)
1289 
1290 /*!
1291  * @const ZONE_ID_INVALID
1292  * An invalid zone_id_t that corresponds to nothing.
1293  */
1294 #define ZONE_ID_INVALID ((zone_id_t)-2)
1295 
1296 /*!
1297  * @function zone_name
1298  *
1299  * @param zone          the specified zone
1300  * @returns             the name of the specified zone.
1301  */
1302 const char *__unsafe_indexable zone_name(
1303 	zone_t                  zone);
1304 
1305 /*!
1306  * @function zone_heap_name
1307  *
1308  * @param zone          the specified zone
1309  * @returns             the name of the heap this zone is part of, or "".
1310  */
1311 const char *__unsafe_indexable zone_heap_name(
1312 	zone_t                  zone);
1313 
1314 /*!
1315  * @function zone_create_ext
1316  *
1317  * @abstract
1318  * Creates a zone with the specified parameters.
1319  *
1320  * @discussion
1321  * This is an extended version of @c zone_create().
1322  *
1323  * @param name          the name for the new zone.
1324  * @param size          the size of the elements returned by this zone.
1325  * @param flags         a set of @c zone_create_flags_t flags.
1326  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1327  *
1328  * @param extra_setup   a block that can perform non trivial initialization
1329  *                      on the zone before it is marked valid.
1330  *                      This block can call advanced setups like:
1331  *                      - zone_set_exhaustible()
1332  *                      - zone_set_noexpand()
1333  *
1334  * @returns             the created zone; this call never fails.
1335  */
1336 extern zone_t   zone_create_ext(
1337 	const char             *name __unsafe_indexable,
1338 	vm_size_t               size,
1339 	zone_create_flags_t     flags,
1340 	zone_id_t               desired_zid,
1341 	void                  (^extra_setup)(zone_t));
1342 
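/*
 * Usage sketch (illustrative only; the zone name, element type and the
 * 1024 element cap are hypothetical): the setup block runs before the zone
 * is marked valid and may call the advanced setters mentioned above.
 *
 *	static SECURITY_READ_ONLY_LATE(zone_t) gadget_zone;
 *
 *	gadget_zone = zone_create_ext("gadgets", sizeof(struct gadget),
 *	    ZC_ZFREE_CLEARMEM, ZONE_ID_ANY, ^(zone_t z) {
 *		zone_set_exhaustible(z, 1024);
 *	});
 */
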
1343 /*!
1344  * @macro ZONE_DECLARE
1345  *
1346  * @abstract
1347  * Declares a zone variable and its associated type.
1348  *
1349  * @param var           the name of the variable to declare.
1350  * @param type_t        the type of elements in the zone.
1351  */
1352 #define ZONE_DECLARE(var, type_t) \
1353 	extern zone_t var; \
1354 	__ZONE_DECLARE_TYPE(var, type_t)
1355 
1356 /*!
1357  * @macro ZONE_DECLARE_ID
1358  *
1359  * @abstract
1360  * Declares the type associated with a zone ID.
1361  *
1362  * @param id            the name of zone ID to associate a type with.
1363  * @param type_t        the type of elements in the zone.
1364  */
1365 #define ZONE_DECLARE_ID(id, type_t) \
1366 	__ZONE_DECLARE_TYPE(id, type_t)
1367 
1368 /*!
1369  * @macro ZONE_DEFINE
1370  *
1371  * @abstract
1372  * Declares a zone variable to automatically initialize with the specified
1373  * parameters.
1374  *
1375  * @discussion
1376  * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1377  *
1378  * @param var           the name of the variable to declare.
1379  * @param name          the name for the zone
1380  * @param size          the size of the elements returned by this zone.
1381  * @param flags         a set of @c zone_create_flags_t flags.
1382  */
1383 #define ZONE_DEFINE(var, name, size, flags) \
1384 	SECURITY_READ_ONLY_LATE(zone_t) var; \
1385 	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
1386 	static __startup_data struct zone_create_startup_spec \
1387 	__startup_zone_spec_ ## var = { &var, name, size, flags, \
1388 	    ZONE_ID_ANY, NULL }; \
1389 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1390 	    &__startup_zone_spec_ ## var)
1391 
1392 /*!
1393  * @macro ZONE_DEFINE_TYPE
1394  *
1395  * @abstract
1396  * Defines a zone variable to automatically initialize with the specified
1397  * parameters, associated with a particular type.
1398  *
1399  * @param var           the name of the variable to declare.
1400  * @param name          the name for the zone
1401  * @param type_t        the type of elements in the zone.
1402  * @param flags         a set of @c zone_create_flags_t flags.
1403  */
1404 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1405 	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1406 	__ZONE_DECLARE_TYPE(var, type_t)
1407 
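/*
 * Usage sketch (illustrative only; "thingy_zone" and "struct thingy" are
 * hypothetical): a zone defined statically and initialized at startup,
 * rather than through a runtime zone_create() call.
 *
 *	ZONE_DEFINE_TYPE(thingy_zone, "thingies", struct thingy, ZC_ZFREE_CLEARMEM);
 *
 *	struct thingy *t = zalloc_flags(thingy_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(thingy_zone, t);
 */
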
1408 /*!
1409  * @macro ZONE_DEFINE_ID
1410  *
1411  * @abstract
1412  * Initializes a given zone automatically during startup with the specified
1413  * parameters.
1414  *
1415  * @param zid           a @c zone_reserved_id_t value.
1416  * @param name          the name for the zone
1417  * @param type_t        the type of elements in the zone.
1418  * @param flags         a set of @c zone_create_flags_t flags.
1419  */
1420 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1421 	ZONE_DECLARE_ID(zid, type_t); \
1422 	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1423 
1424 /*!
1425  * @macro ZONE_INIT
1426  *
1427  * @abstract
1428  * Initializes a given zone automatically during startup with the specified
1429  * parameters.
1430  *
1431  * @param var           the name of the variable to initialize.
1432  * @param name          the name for the zone
1433  * @param size          the size of the elements returned by this zone.
1434  * @param flags         a set of @c zone_create_flags_t flags.
1435  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1436  * @param extra_setup   a block that can perform non trivial initialization
1437  *                      (@see @c zone_create_ext()).
1438  */
1439 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1440 	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1441 
1442 /*!
1443  * @function zone_id_require
1444  *
1445  * @abstract
1446  * Requires that a given pointer belong to the specified zone, by ID and size.
1447  *
1448  * @discussion
1449  * The function panics if the check fails as it indicates that the kernel
1450  * internals have been compromised.
1451  *
1452  * This is a variant of @c zone_require() which:
1453  * - isn't sensitive to @c zone_t::elem_size being compromised,
1454  * - is slightly faster as it saves one load and a multiplication.
1455  *
1456  * @param zone_id       the zone ID the address needs to belong to.
1457  * @param elem_size     the size of elements for this zone.
1458  * @param addr          the element address to check.
1459  */
1460 extern void     zone_id_require(
1461 	zone_id_t               zone_id,
1462 	vm_size_t               elem_size,
1463 	void                   *addr __unsafe_indexable);
1464 
1465 /* Make a zone non-expandable; to be called from the zone_create_ext() setup hook */
1466 extern void     zone_set_noexpand(
1467 	zone_t          zone,
1468 	vm_size_t       max_elements);
1469 
1470 /* Make a zone exhaustible; to be called from the zone_create_ext() setup hook */
1471 extern void     zone_set_exhaustible(
1472 	zone_t          zone,
1473 	vm_size_t       max_elements);
1474 
1475 /*!
1476  * @function zone_raise_reserve()
1477  *
1478  * @brief
1479  * Used to raise the reserve on a zone.
1480  *
1481  * @discussion
1482  * Can be called from any context (zone_create_ext() setup hook or after).
1483  */
1484 extern void     zone_raise_reserve(
1485 	zone_or_view_t  zone_or_view,
1486 	uint16_t        min_elements);
1487 
1488 /*!
1489  * @function zone_fill_initially
1490  *
1491  * @brief
1492  * Initially fill a non collectable zone to have the specified amount of
1493  * elements.
1494  *
1495  * @discussion
1496  * This function must be called on a non collectable permanent zone before it
1497  * has been used.
1498  *
1499  * @param zone          The zone to fill.
1500  * @param nelems        The number of elements to be able to hold.
1501  */
1502 extern void     zone_fill_initially(
1503 	zone_t          zone,
1504 	vm_size_t       nelems);
1505 
1506 #pragma mark XNU only: PGZ support
1507 
1508 /*!
1509  * @function pgz_owned()
1510  *
1511  * @brief
1512  * Returns whether an address is PGZ owned.
1513  *
1514  * @param addr          The address to translate.
1515  * @returns             Whether it is PGZ owned
1516  */
1517 #if CONFIG_PROB_GZALLOC
1518 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
1519 #else
1520 #define pgz_owned(addr) false
1521 #endif
1522 
1523 /*!
1524  * @function pgz_decode()
1525  *
1526  * @brief
1527  * Translates a PGZ protected virtual address to its unprotected
1528  * backing store.
1529  *
1530  * @discussion
1531  * This is exposed so that the VM can look up the vm_page_t for PGZ protected
1532  * elements, since the PGZ protected virtual addresses are maintained by PGZ
1533  * at the pmap level without VM involvement.
1534  *
1535  * "allow_invalid" schemes relying on sequestering also need this
1536  * to perform the locking attempts on the unprotected address.
1537  *
1538  * @param addr          The address to translate.
1539  * @param size          The object size.
1540  * @returns             The unprotected address or @c addr.
1541  */
1542 #if CONFIG_PROB_GZALLOC
1543 #define pgz_decode(addr, size) \
1544 	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
1545 #else
1546 #define pgz_decode(addr, size)  (addr)
1547 #endif
1548 
1549 /*!
1550  * @function pgz_decode_allow_invalid()
1551  *
1552  * @brief
1553  * Translates a PGZ protected virtual address to its unprotected
1554  * backing store, but doesn't assert it is still allocated/valid.
1555  *
1556  * @discussion
1557  * "allow_invalid" schemes relying on sequestering also need this
1558  * to perform the locking attempts on the unprotected address.
1559  *
1560  * @param addr          The address to translate.
1561  * @param want_zid      The expected zone ID for the element.
1562  * @returns             The unprotected address or @c addr.
1563  */
1564 #if CONFIG_PROB_GZALLOC
1565 #define pgz_decode_allow_invalid(addr, want_zid) \
1566 	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
1567 #else
1568 #define pgz_decode_allow_invalid(addr, zid)  (addr)
1569 #endif
1570 
1571 #pragma mark XNU only: misc & implementation details
1572 
1573 struct zone_create_startup_spec {
1574 	zone_t                 *z_var;
1575 	const char             *z_name __unsafe_indexable;
1576 	vm_size_t               z_size;
1577 	zone_create_flags_t     z_flags;
1578 	zone_id_t               z_zid;
1579 	void                  (^z_setup)(zone_t);
1580 };
1581 
1582 extern void     zone_create_startup(
1583 	struct zone_create_startup_spec *spec);
1584 
1585 #define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
1586 #define __ZONE_DECLARE_TYPE2(var, type_t) \
1587 	__attribute__((visibility("hidden"))) \
1588 	extern type_t *__single __zalloc__##var##__type_name
1589 
1590 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
1591 	static __startup_data struct zone_create_startup_spec \
1592 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
1593 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1594 	    &__startup_zone_spec_ ## ns)
1595 
1596 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
1597 	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
1598 
1599 #define __zalloc_cast(namespace, expr) \
1600 	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
1601 
1602 #define zalloc_id(zid, flags)   __zalloc_cast(zid, (__zalloc_id)(zid, flags))
1603 #define zalloc_ro(zid, flags)   __zalloc_cast(zid, (__zalloc_ro)(zid, flags))
1604 #if ZALLOC_TYPE_SAFE
1605 #define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
1606 #define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
1607 #define zalloc_flags(zov, fl)   __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
1608 #else /* ZALLOC_TYPE_SAFE */
1609 #define zalloc_flags(zov, fl)   __zalloc_flags(zov, fl)
1610 #endif /* !ZALLOC_TYPE_SAFE */
1611 
1612 struct zone_view_startup_spec {
1613 	zone_view_t         zv_view;
1614 	union {
1615 		zone_kheap_id_t zv_heapid;
1616 		zone_t         *zv_zone;
1617 	};
1618 	vm_size_t           zv_size;
1619 };
1620 
1621 extern void zone_view_startup_init(
1622 	struct zone_view_startup_spec *spec);
1623 
1624 extern void zone_userspace_reboot_checks(void);
1625 
1626 #if VM_TAG_SIZECLASSES
1627 extern void __zone_site_register(
1628 	vm_allocation_site_t   *site);
1629 
1630 #define VM_ALLOC_SITE_TAG() ({ \
1631 	__PLACE_IN_SECTION("__DATA, __data")                                   \
1632 	static vm_allocation_site_t site = { .refcount = 2, };                 \
1633 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __zone_site_register, &site);   \
1634 	site.tag;                                                              \
1635 })
1636 #else /* VM_TAG_SIZECLASSES */
1637 #define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
1638 #endif /* !VM_TAG_SIZECLASSES */
1639 
1640 static inline zalloc_flags_t
1641 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
1642 {
1643 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
1644 }
1645 
1646 #if DEBUG || DEVELOPMENT
1647 #  if __LP64__
1648 #    define ZPCPU_MANGLE_BIT    (1ul << 63)
1649 #  else /* !__LP64__ */
1650 #    define ZPCPU_MANGLE_BIT    (1ul << 31)
1651 #  endif /* !__LP64__ */
1652 #else /* !(DEBUG || DEVELOPMENT) */
1653 #  define ZPCPU_MANGLE_BIT      0ul
1654 #endif /* !(DEBUG || DEVELOPMENT) */
1655 
1656 #define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
1657 #define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
1658 #define __zpcpu_addr(e)         ((vm_address_t)(e))
1659 #define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(ptr), e)
1660 #define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
1661 
1662 /**
1663  * @macro __zpcpu_mangle_for_boot()
1664  *
1665  * @discussion
1666  * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
1667  * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
1668  * storage marked @c __startup_data and replace it with the proper allocation
1669  * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
1670  *
1671  * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
1672  * provides the proper mangling of the storage into a "fake" percpu pointer so
1673  * that accesses through @c zpercpu_get() work properly.
1674  *
1675  * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
1676  */
1677 #define __zpcpu_mangle_for_boot(ptr)  ({ \
1678 	assert(startup_phase < STARTUP_SUB_ZALLOC); \
1679 	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
1680 })
1681 
1682 extern unsigned zpercpu_count(void) __pure2;
1683 
1684 #if CONFIG_PROB_GZALLOC
1685 
1686 extern vm_offset_t __pgz_decode(
1687 	mach_vm_address_t       addr,
1688 	mach_vm_size_t          size);
1689 
1690 extern vm_offset_t __pgz_decode_allow_invalid(
1691 	vm_offset_t             offs,
1692 	zone_id_t               zid);
1693 
1694 #endif
1695 #if DEBUG || DEVELOPMENT
1696 extern size_t zone_pages_wired;
1697 extern size_t zone_guard_pages;
1698 #endif /* DEBUG || DEVELOPMENT */
1699 #if CONFIG_ZLEAKS
1700 extern uint32_t                 zleak_active;
1701 extern vm_size_t                zleak_max_zonemap_size;
1702 extern vm_size_t                zleak_global_tracking_threshold;
1703 extern vm_size_t                zleak_per_zone_tracking_threshold;
1704 
1705 extern kern_return_t zleak_update_threshold(
1706 	vm_size_t              *arg,
1707 	uint64_t                value);
1708 #endif /* CONFIG_ZLEAKS */
1709 
1710 extern uint32_t                 zone_map_jetsam_limit;
1711 
1712 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
1713 
1714 extern zone_t percpu_u64_zone;
1715 
1716 #pragma GCC visibility pop
1717 #endif /* XNU_KERNEL_PRIVATE */
1718 
1719 /*
1720  * This macro is currently used by AppleImage4 (rdar://83924635)
1721  */
1722 #define __zalloc_ptr_load_and_erase(elem) \
1723 	os_ptr_load_and_erase(elem)
1724 
1725 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1726 
1727 #endif  /* _KERN_ZALLOC_H_ */
1728 
1729 #endif  /* KERNEL_PRIVATE */
1730