/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	zalloc.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	 1985
 *
 */

#ifdef  KERNEL_PRIVATE

#ifndef _KERN_ZALLOC_H_
#define _KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>
#include <os/alloc_util.h>
#include <os/atomic.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif /* XNU_KERNEL_PRIVATE */

#if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
#define __zalloc_deprecated(msg)       __deprecated_msg(msg)
#else
#define __zalloc_deprecated(msg)
#endif

/*
 * Enable this macro to force type safe zalloc/zalloc_ro/...
 */
#ifndef ZALLOC_TYPE_SAFE
#if __has_ptrcheck
#define ZALLOC_TYPE_SAFE 1
#else
#define ZALLOC_TYPE_SAFE 0
#endif
#endif /* !ZALLOC_TYPE_SAFE */

__BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN

/*!
 * @macro __zpercpu
 *
 * @abstract
 * Annotation that helps denoting a per-cpu pointer that requires usage of
 * @c zpercpu_*() for access.
 */
#define __zpercpu __unsafe_indexable

/*!
 * @typedef zone_id_t
 *
 * @abstract
 * The type for a zone ID.
 */
typedef uint16_t zone_id_t;

/**
 * @enum zone_create_flags_t
 *
 * @abstract
 * Set of flags to pass to zone_create().
 *
 * @discussion
 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* flags win over such policies.
 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** (obsolete) */
	ZC_SEQUESTER            = 0x00000001,
	/** (obsolete) */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as read-only */
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** (obsolete) */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed; unlike with zinit(), this is not the default */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/** This zone is a built object cache */
	ZC_OBJ_CACHE            = 0x0080000000000000,

	/** Use guard pages in PGZ mode */
	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,

	/** Zone doesn't support TBI tagging */
	ZC_NOTBITAG             = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	/** Disable PGZ for this zone */
	ZC_NOPGZ                = 0x0800000000000000,

	/** This zone contains pure data */
	ZC_DATA                 = 0x1000000000000000,

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});

/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit either a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct kalloc_type_view    *zov_kt_heap;
	struct zone_view           *zov_view;
	struct zone                *zov_zone;
#ifdef __cplusplus
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif

/*!
 * @enum zone_create_ro_id_t
 *
 * @abstract
 * Zone creation IDs for external read-only zones.
 *
 * @discussion
 * Kexts that want to use the RO allocator should:
 * 1. Add a zone creation ID below.
 * 2. Add a corresponding ID to @c zone_reserved_id_t.
 * 3. Use @c zone_create_ro with the ID from #1 to create a RO zone.
 * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
 * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
 */
__enum_decl(zone_create_ro_id_t, zone_id_t, {
	ZC_RO_ID_SANDBOX,
	ZC_RO_ID_PROFILE,
	ZC_RO_ID_PROTOBOX,
	ZC_RO_ID_SB_FILTER,
	ZC_RO_ID_AMFI_OSENTITLEMENTS,
	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
});

/*!
 * @function zone_create
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * A zone is a slab allocator that returns objects of a given size very quickly.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 *
 * @returns             the created zone; this call never fails.
 */
extern zone_t   zone_create(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags);

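/*
 * For illustration, a typical creation pattern might look like the sketch
 * below (the zone name, element type, and variable are hypothetical, not
 * part of this interface):
 *
 * <code>
 *     static zone_t widget_zone;
 *
 *     void
 *     widget_init(void)
 *     {
 *         widget_zone = zone_create("widgets", sizeof(struct widget),
 *             ZC_ZFREE_CLEARMEM);
 *     }
 * </code>
 */
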
/*!
 * @function zone_create_ro
 *
 * @abstract
 * Creates a read-only zone with the specified parameters, for use by kexts.
 *
 * @discussion
 * See the notes under @c zone_create_ro_id_t regarding the creation and use
 * of RO zones in kexts. Do not use this API to create read-only zones in xnu.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param zc_ro_id      an ID declared in @c zone_create_ro_id_t
 *
 * @returns             the zone ID of the created zone; this call never fails.
 */
extern zone_id_t   zone_create_ro(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_create_ro_id_t     zc_ro_id);

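/*
 * The kext-side pattern described under @c zone_create_ro_id_t might look
 * like this sketch (the zone name and element type are hypothetical;
 * @c ZC_RO_ID_SANDBOX stands in for the ID added in step 1):
 *
 * <code>
 *     static SECURITY_READ_ONLY_LATE(zone_id_t) sandbox_zid;
 *
 *     void
 *     sandbox_zones_init(void)
 *     {
 *         sandbox_zid = zone_create_ro("sandbox.profiles",
 *             sizeof(struct sandbox_profile), ZC_NONE, ZC_RO_ID_SANDBOX);
 *     }
 * </code>
 */
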
/*!
 * @function zdestroy
 *
 * @abstract
 * Destroys a zone previously made with zone_create.
 *
 * @discussion
 * Zones must have been made destructible, by passing @c ZC_DESTRUCTIBLE
 * at @c zone_create() time, for @c zdestroy() to be allowed.
 *
 * @param zone          the zone to destroy.
 */
extern void     zdestroy(
	zone_t          zone);

/*!
 * @function zone_require
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone.
 *
 * @discussion
 * The function panics if the check fails, as it indicates that the kernel
 * internals have been compromised.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void     zone_require(
	zone_t          zone,
	void           *addr __unsafe_indexable);

/*!
 * @function zone_require_ro
 *
 * @abstract
 * Version of zone_require intended for zones created with ZC_READONLY.
 *
 * @discussion
 * This check is not sufficient to fully trust the element.
 *
 * Another check of its content must be performed to prove
 * that the element is "the right one". A typical technique,
 * for when the RO data structure is 1:1 with a mutable one,
 * is a simple circularity check with a very strict lifetime
 * (both the mutable and read-only data structures are made
 * and destroyed as close together as possible).
 *
 * @param zone_id       the zone id the address needs to belong to.
 * @param elem_size     the element size for this zone.
 * @param addr          the element address to check.
 */
extern void     zone_require_ro(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr __unsafe_indexable);

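/*
 * A sketch of the circularity check described above (the types, fields,
 * and zone id are hypothetical): the mutable structure points at its
 * read-only counterpart and the read-only one points back, so once
 * zone_require_ro() proves the element lives in the RO zone, the
 * back-pointer proves it is "the right one".
 *
 * <code>
 *     struct thing_ro *ro = thing->t_ro;
 *
 *     zone_require_ro(thing_ro_zid, sizeof(struct thing_ro), ro);
 *     if (ro->tro_owner != thing) {
 *         panic("%p is not the read-only peer of %p", ro, thing);
 *     }
 * </code>
 */
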
/*!
 * @enum zalloc_flags_t
 *
 * @brief
 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
 *
 * @discussion
 * It is encouraged that any callsite passing flags uses exactly one of:
 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
 * if nothing else was specified.
 *
 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
 * then @c Z_WAITOK is ignored.
 *
 * @const Z_WAITOK
 * Means that it's OK for zalloc() to block to wait for memory;
 * when Z_WAITOK is passed, zalloc will never return NULL.
 *
 * @const Z_NOWAIT
 * Passing this flag means that zalloc is not allowed to ever block.
 *
 * @const Z_NOPAGEWAIT
 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait for the VM to produce free pages when
 * under memory pressure.
 *
 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
 *
 * @const Z_NOFAIL
 * Passing this flag means that the caller expects the allocation to always
 * succeed. This will result in a panic if this assumption isn't correct.
 *
 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
 * be used on exhaustible zones.
 *
 * @const Z_REALLOCF
 * For the realloc family of functions,
 * free the incoming memory on failure cases.
 *
 #if XNU_KERNEL_PRIVATE
 * @const Z_SET_NOTSHARED
 * Using this flag from external allocation APIs (kalloc_type/zalloc)
 * allows the callsite to skip the shared zone for that sizeclass and
 * directly allocate from the requested zone.
 * Using this flag from internal APIs (zalloc_ext) will skip the shared
 * zone only when a given threshold is exceeded. It will also set a flag
 * to indicate that future allocations to the zone should go directly
 * to the zone instead of the shared zone.
 *
 * @const Z_SPRAYQTN
 * This flag tells the VM to allocate from the "spray quarantine" range when
 * it services the allocation. For more details on what allocations qualify
 * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
 *
 * @const Z_KALLOC_ARRAY
 * Instead of returning a standard "pointer", return a pointer that encodes
 * its size-class into the pointer itself (only for kalloc; this might limit
 * the range of allocations that can be done).
 *
 * @const Z_FULLSIZE
 * Used to indicate that the caller will use all available space in excess
 * of the requested allocation size.
 *
 * @const Z_SKIP_KASAN
 * Tell zalloc() not to do any kasan adjustments.
 *
 * @const Z_MAY_COPYINMAP
 * This data allocation might be used with vm_map_copyin().
 * This allows for those allocations to be associated with a proper VM object.
 *
 * @const Z_VM_TAG_BT_BIT
 * Used to blame allocation accounting on the first kext
 * found in the backtrace of the allocation.
 *
 * @const Z_NOZZC
 * Used internally to mark allocations that will skip zero validation.
 *
 * @const Z_PCPU
 * Used internally for the percpu paths.
 *
 * @const Z_VM_TAG_MASK
 * Represents bits in which a vm_tag_t for the allocation can be passed
 * (used by kalloc for the zone tagging debugging feature).
 #endif
 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,
	Z_REALLOCF      = 0x0008,

#if XNU_KERNEL_PRIVATE
	Z_SET_NOTSHARED = 0x0040,
	Z_SPRAYQTN      = 0x0080,
	Z_KALLOC_ARRAY  = 0x0100,
#if KASAN_CLASSIC
	Z_FULLSIZE      = 0x0000,
#else
	Z_FULLSIZE      = 0x0200,
#endif
#if KASAN
	Z_SKIP_KASAN    = 0x0400,
#else
	Z_SKIP_KASAN    = 0x0000,
#endif
	Z_MAY_COPYINMAP = 0x0800,
	Z_VM_TAG_BT_BIT = 0x1000,
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO          = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO          = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL   = Z_WAITOK | Z_ZERO | Z_NOFAIL,
#if XNU_KERNEL_PRIVATE
	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
#endif

	Z_KPI_MASK             = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
#if XNU_KERNEL_PRIVATE
	Z_ZERO_VM_TAG_BT_BIT   = Z_ZERO | Z_VM_TAG_BT_BIT,
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT        16
#define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
#define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
#endif
});

/*
 * This type is used so that kalloc_internal has good calling conventions
 * for callers who want to cheaply know both the allocated address
 * and the actual size of the allocation.
 */
struct kalloc_result {
	void         *addr __sized_by(size);
	vm_size_t     size;
};

/*!
 * @typedef zone_stats_t
 *
 * @abstract
 * The opaque type for per-cpu zone stats that are accumulated per zone
 * or per zone-view.
 */
typedef struct zone_stats *__zpercpu zone_stats_t;

/*!
 * @typedef zone_view_t
 *
 * @abstract
 * A view on a zone for accounting purposes.
 *
 * @discussion
 * A zone view uses the zone it references for the allocation backing store,
 * but does the allocation accounting at the view level.
 *
 * This accounting is surfaced by @b zprint(1) and similar tools,
 * which allows for a cheap but finer-grained understanding of allocations
 * without any fragmentation cost.
 *
 * Zone views are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;
	zone_stats_t    zv_stats;
	const char     *zv_name __unsafe_indexable;
	zone_view_t     zv_next;
};

/*!
 * @typedef kalloc_type_view_t
 *
 * @abstract
 * The opaque type created at kalloc_type callsites to redirect calls to
 * the right zone.
 */
typedef struct kalloc_type_view *kalloc_type_view_t;

#if XNU_KERNEL_PRIVATE
/*
 * kalloc_type/kfree_type implementation functions
 */
extern void *__unsafe_indexable kalloc_type_impl_internal(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

extern void kfree_type_impl_internal(
	kalloc_type_view_t kt_view,
	void               *ptr __unsafe_indexable);

static inline void *__unsafe_indexable
kalloc_type_impl(
	kalloc_type_view_t      kt_view,
	zalloc_flags_t          flags)
{
	void *__unsafe_indexable addr = kalloc_type_impl_internal(kt_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define kfree_type_impl(kt_view, ptr) \
	kfree_type_impl_internal(kt_view, (ptr))

#else /* XNU_KERNEL_PRIVATE */

extern void *__unsafe_indexable kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags);

static inline void *__unsafe_indexable
__kalloc_type_impl(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags)
{
	void *addr = (kalloc_type_impl)(kt_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)

extern void kfree_type_impl(
	kalloc_type_view_t  kt_view,
	void                *ptr __unsafe_indexable);

#endif /* XNU_KERNEL_PRIVATE */

/*!
 * @function zalloc
 *
 * @abstract
 * Allocates an element from a specified zone.
 *
 * @discussion
 * If the zone isn't exhaustible and is expandable, this call never fails.
 *
 * @param zone          the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc(
	zone_t          zone);

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc(zone_view_t view)
{
	return zalloc((zone_t)view);
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc(kalloc_type_view_t kt_view)
{
	return (kalloc_type_impl)(kt_view, Z_WAITOK);
}

/*!
 * @function zalloc_noblock
 *
 * @abstract
 * Allocates an element from a specified zone, but never blocks.
 *
 * @discussion
 * This call is suitable for preemptible code; however, allocation
 * isn't allowed from interrupt context.
 *
 * @param zone          the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_noblock(
	zone_t          zone);

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc_noblock(zone_view_t view)
{
	return zalloc_noblock((zone_t)view);
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
zalloc_noblock(kalloc_type_view_t kt_view)
{
	return (kalloc_type_impl)(kt_view, Z_NOWAIT);
}

/*!
 * @function zalloc_flags()
 *
 * @abstract
 * Allocates an element from a specified zone, with flags.
 *
 * @param zone          the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_flags(
	zone_t          zone,
	zalloc_flags_t  flags);

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
__zalloc_flags(
	zone_t          zone,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_flags)(zone, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
__zalloc_flags(
	zone_view_t     view,
	zalloc_flags_t  flags)
{
	return __zalloc_flags((zone_t)view, flags);
}

__attribute__((malloc))
__attribute__((overloadable))
static inline void *__unsafe_indexable
__zalloc_flags(
	kalloc_type_view_t  kt_view,
	zalloc_flags_t      flags)
{
	void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
#define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
#else
#define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
#endif

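/*
 * For illustration, assuming the hypothetical `widget_zone` from the
 * zone_create() example above:
 *
 * <code>
 *     struct widget *w;
 *
 *     // may block; returns zeroed memory
 *     w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *
 *     // never returns NULL (panics rather than failing)
 *     w = zalloc_flags(widget_zone, Z_WAITOK_ZERO_NOFAIL);
 * </code>
 */
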
/*!
 * @macro zalloc_id
 *
 * @abstract
 * Allocates an element from a specified zone ID, with flags.
 *
 * @param zid           The proper @c ZONE_ID_* constant.
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_id(
	zone_id_t       zid,
	zalloc_flags_t  flags);

__attribute__((malloc))
static inline void *__unsafe_indexable
__zalloc_id(
	zone_id_t       zid,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#if XNU_KERNEL_PRIVATE
#define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
#else
#define zalloc_id(zid, fl) __zalloc_id(zid, fl)
#endif

/*!
 * @function zalloc_ro
 *
 * @abstract
 * Allocates an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
__attribute__((malloc))
extern void *__unsafe_indexable zalloc_ro(
	zone_id_t       zone_id,
	zalloc_flags_t  flags);

__attribute__((malloc))
static inline void *__unsafe_indexable
__zalloc_ro(
	zone_id_t       zone_id,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#if XNU_KERNEL_PRIVATE
#define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
#else
#define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
#endif

/*!
 * @function zalloc_ro_mut
 *
 * @abstract
 * Modifies an element from a specified read-only zone.
 *
 * @discussion
 * Modifying compiler-assisted authenticated pointers using this function will
 * not result in a signed pointer being written.  The caller is expected to
 * sign the value appropriately beforehand if they wish to do this.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param offset        offset from element
 * @param new_data      pointer to new data
 * @param new_data_size size of modification
 *
 */
extern void zalloc_ro_mut(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable,
	vm_offset_t     offset,
	const void     *new_data __sized_by(new_data_size),
	vm_size_t       new_data_size);

/*!
 * @function zalloc_ro_update_elem
 *
 * @abstract
 * Update the value of an entire element allocated in the read only allocator.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param new_data      pointer to new data
 *
 */
#define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
	const typeof(*(elem)) *__new_data = (new_data);                        \
	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data));      \
})

/*!
 * @function zalloc_ro_update_field
 *
 * @abstract
 * Update a single field of an element allocated in the read only allocator.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param field         the element field to be modified
 * @param value         pointer to the new data
 *
 */
#define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
	const typeof((elem)->field) *__value = (value);                        \
	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field),         \
	    __value, sizeof((elem)->field));                                   \
})

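/*
 * For illustration (the zone id, type, and fields below are hypothetical):
 * after allocating from a read-only zone, the element can only be written
 * through these accessors.
 *
 * <code>
 *     struct thing_ro tmpl = { .tro_flags = TRO_VALID };
 *     struct thing_ro *elem;
 *     uint32_t flags = TRO_VALID | TRO_DIRTY;
 *
 *     elem = zalloc_ro(thing_ro_zid, Z_WAITOK | Z_ZERO);
 *     zalloc_ro_update_elem(thing_ro_zid, elem, &tmpl);
 *     zalloc_ro_update_field(thing_ro_zid, elem, tro_flags, &flags);
 * </code>
 */
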
#define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64

/*!
 * @enum zro_atomic_op_t
 *
 * @brief
 * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
 * atomic operations.
 *
 * @discussion
 * This enum provides all flavors of atomic operations supported in sizes 8,
 * 16, 32, 64 bits.
 *
 * @const ZRO_ATOMIC_OR_*
 * To perform an @c os_atomic_or
 *
 * @const ZRO_ATOMIC_XOR_*
 * To perform an @c os_atomic_xor
 *
 * @const ZRO_ATOMIC_AND_*
 * To perform an @c os_atomic_and
 *
 * @const ZRO_ATOMIC_ADD_*
 * To perform an @c os_atomic_add
 *
 * @const ZRO_ATOMIC_XCHG_*
 * To perform an @c os_atomic_xchg
 *
 */
__enum_decl(zro_atomic_op_t, uint32_t, {
	ZRO_ATOMIC_OR_8      = 0x00000010 | 1,
	ZRO_ATOMIC_OR_16     = 0x00000010 | 2,
	ZRO_ATOMIC_OR_32     = 0x00000010 | 4,
	ZRO_ATOMIC_OR_64     = 0x00000010 | 8,

	ZRO_ATOMIC_XOR_8     = 0x00000020 | 1,
	ZRO_ATOMIC_XOR_16    = 0x00000020 | 2,
	ZRO_ATOMIC_XOR_32    = 0x00000020 | 4,
	ZRO_ATOMIC_XOR_64    = 0x00000020 | 8,

	ZRO_ATOMIC_AND_8     = 0x00000030 | 1,
	ZRO_ATOMIC_AND_16    = 0x00000030 | 2,
	ZRO_ATOMIC_AND_32    = 0x00000030 | 4,
	ZRO_ATOMIC_AND_64    = 0x00000030 | 8,

	ZRO_ATOMIC_ADD_8     = 0x00000040 | 1,
	ZRO_ATOMIC_ADD_16    = 0x00000040 | 2,
	ZRO_ATOMIC_ADD_32    = 0x00000040 | 4,
	ZRO_ATOMIC_ADD_64    = 0x00000040 | 8,

	ZRO_ATOMIC_XCHG_8    = 0x00000050 | 1,
	ZRO_ATOMIC_XCHG_16   = 0x00000050 | 2,
	ZRO_ATOMIC_XCHG_32   = 0x00000050 | 4,
	ZRO_ATOMIC_XCHG_64   = 0x00000050 | 8,

	/* convenient spellings */
	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
});

/*!
 * @function zalloc_ro_mut_atomic
 *
 * @abstract
 * Atomically update an offset in an element allocated in the read only
 * allocator. Do not use directly; use via @c zalloc_ro_update_field_atomic.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param offset        offset in the element to be modified
 * @param op            atomic operation to perform (see @c zro_atomic_op_t)
 * @param value         value for the atomic operation
 *
 */
extern uint64_t zalloc_ro_mut_atomic(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable,
	vm_offset_t     offset,
	zro_atomic_op_t op,
	uint64_t        value);

/*!
 * @macro zalloc_ro_update_field_atomic
 *
 * @abstract
 * Atomically update a single field of an element allocated in the read only
 * allocator.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param field         the element field to be modified
 * @param op            atomic operation to perform (see @c zro_atomic_op_t)
 * @param value         value for the atomic operation
 *
 */
#define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
	const typeof((elem)->field) __value = (value);                         \
	static_assert(sizeof(__value) == (op & 0xf));                          \
	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id,    \
	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value);    \
})

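/*
 * For illustration (the zone id, type, and field are hypothetical),
 * atomically setting a bit in a 32-bit field of a read-only element;
 * `res` holds whatever zalloc_ro_mut_atomic() returns, cast to the
 * field's type:
 *
 * <code>
 *     uint32_t res;
 *
 *     res = zalloc_ro_update_field_atomic(thing_ro_zid, elem,
 *         tro_flags, ZRO_ATOMIC_OR_32, TRO_DIRTY);
 * </code>
 */
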
/*!
 * @function zalloc_ro_clear
 *
 * @abstract
 * Zeroes a range of an element from a specified read-only zone.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param offset        offset from element
 * @param size          size of the range to zero
 */
extern void    zalloc_ro_clear(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable,
	vm_offset_t     offset,
	vm_size_t       size);

/*!
 * @function zalloc_ro_clear_field
 *
 * @abstract
 * Zeroes the specified field of an element from a specified read-only zone.
 *
 * @param zone_id       the zone id the element belongs to
 * @param elem          element to be modified
 * @param field         the element field to be zeroed
 */
#define zalloc_ro_clear_field(zone_id, elem, field) \
	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    sizeof((elem)->field))

/*!
 * @function zfree_id()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_id(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_id(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree_ro()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_ro().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_ro(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree
 *
 * @abstract
 * Frees an element allocated with @c zalloc*.
 *
 * @discussion
 * If the element being freed doesn't belong to the specified zone,
 * then this call will panic.
 *
 * @param zone          the zone or zone view to free the element to.
 * @param elem          the element to free
 */
extern void     zfree(
	zone_t          zone,
	void           *elem __unsafe_indexable);

__attribute__((overloadable))
static inline void
zfree(
	zone_view_t     view,
	void           *elem __unsafe_indexable)
{
	zfree((zone_t)view, elem);
}

__attribute__((overloadable))
static inline void
zfree(
	kalloc_type_view_t   kt_view,
	void                *elem __unsafe_indexable)
{
	return kfree_type_impl(kt_view, elem);
}

#define zfree(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})


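/*
 * Note that zfree() is wrapped by a macro which loads and erases the
 * caller's pointer, so the variable passed in no longer holds the freed
 * address afterwards. An illustrative sketch (`widget_zone` is
 * hypothetical):
 *
 * <code>
 *     struct widget *w = zalloc_flags(widget_zone, Z_WAITOK_ZERO);
 *     ...
 *     zfree(widget_zone, w);
 *     // `w` has been erased by the macro and must not be reused
 * </code>
 */
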
/* deprecated KPIs */

__zalloc_deprecated("use zone_create()")
extern zone_t   zinit(
	vm_size_t       size,           /* the size of an element */
	vm_size_t       maxmem,         /* maximum memory to use */
	vm_size_t       alloc,          /* allocation size */
	const char      *name __unsafe_indexable);

#pragma mark: implementation details

#define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
#define __ZONE_DECLARE_TYPE2(var, type_t) \
	__attribute__((visibility("hidden"))) \
	extern type_t *__single __zalloc__##var##__type_name

#ifdef XNU_KERNEL_PRIVATE
#pragma mark - XNU only interfaces

#include <kern/cpu_number.h>

#pragma GCC visibility push(hidden)

#pragma mark XNU only: zalloc (extended)

#define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
#define ZALIGN_16               (sizeof(uint16_t) - 1)
#define ZALIGN_32               (sizeof(uint32_t) - 1)
#define ZALIGN_PTR              (sizeof(void *)   - 1)
#define ZALIGN_64               (sizeof(uint64_t) - 1)
#define ZALIGN(t)               (_Alignof(t)      - 1)


/*!
 * @function zalloc_permanent_tag()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation cannot be determined
 * by zone_element_size, so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 * @param tag           the tag to use for allocations larger than a page.
 *
 * @returns             the allocated element
 */
__attribute__((malloc))
extern void *__sized_by(size) zalloc_permanent_tag(
	vm_size_t       size,
	vm_offset_t     align_mask,
	vm_tag_t        tag);

/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation cannot be determined
 * by zone_element_size, so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align         the required alignment mask for this allocation
 *
 * @returns             the allocated element
 */
#define zalloc_permanent(size, align) \
	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	__unsafe_forge_single(type_t *, \
	    zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))

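/*
 * For illustration (the type below is hypothetical):
 *
 * <code>
 *     struct boot_config *cfg;
 *
 *     // zero-filled, naturally aligned, and never freeable
 *     cfg = zalloc_permanent_type(struct boot_config);
 * </code>
 */
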
/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void zalloc_first_proc_made(void);
/*!
 * @function zalloc_iokit_lockdown()
 *
 * @abstract
 * Declare that iokit matching has started.
 */
extern void zalloc_iokit_lockdown(void);

#pragma mark XNU only: per-cpu allocations

/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)(cpu)))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for the cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)

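/*
 * For illustration (the counter and functions are hypothetical;
 * @c zalloc_percpu_permanent_type() is declared later in this header),
 * bumping the current CPU's slot and summing all slots:
 *
 * <code>
 *     static uint64_t *__zpercpu packet_count;
 *
 *     void
 *     counters_init(void)
 *     {
 *         packet_count = zalloc_percpu_permanent_type(uint64_t);
 *     }
 *
 *     void
 *     count_one_packet(void)
 *     {
 *         (*zpercpu_get(packet_count))++;
 *     }
 *
 *     uint64_t
 *     total_packets(void)
 *     {
 *         uint64_t total = 0;
 *
 *         zpercpu_foreach(it, packet_count) {
 *             total += *it;
 *         }
 *         return total;
 *     }
 * </code>
 */
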
/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void *__zpercpu zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

static inline void *__zpercpu
__zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags)
{
	void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
	if (flags & Z_NOFAIL) {
		__builtin_assume(addr != NULL);
	}
	return addr;
}

#define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)

/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_percpu(
	zone_or_view_t  zone_or_view,
	void *__zpercpu addr);

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void *__zpercpu zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a given
 * type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))


#pragma mark XNU only: SMR support for zones

struct smr;

/*!
 * @typedef zone_smr_free_cb_t
 *
 * @brief
 * Type for the delayed free callback for SMR zones.
 *
 * @description
 * This function is called before an element is reused,
 * or when memory is returned to the system.
 *
 * This function MUST zero the element, and if no special
 * action is to be taken on free, then @c bzero() is a fine
 * callback to use.
 *
 * This function also must be preemption-disabled safe,
 * as it runs with preemption disabled.
 *
 *
 * Note that this function should only clean the fields
 * that must be preserved for stale SMR readers to see.
 * Any field that is accessed after element validation
 * such as a try-retain or acquiring a lock on it must
 * be cleaned up much earlier as they might hold onto
 * expensive resources.
 *
 * The suggested pattern for an SMR type using this facility
 * is to have 2 functions:
 *
 * - one "retire" stage that tries to clean up as much from
 *   the element as possible, with great care to leave no dangling
 *   pointers around, as elements in this stage might linger
 *   in the allocator for a long time, and this could possibly
 *   be abused during UaF exploitation.
 *
 * - one "smr_free" function which cleans up whatever was left,
 *   and zeroes the rest of the element.
 *
 * <code>
 *     void
 *     type_retire(type_t elem)
 *     {
 *         // invalidating the element makes most fields
 *         // inaccessible to readers.
 *         type_mark_invalid(elem);
 *
 *         // do cleanups for things requiring a validity check
 *         kfree_type(some_type_t, elem->expensive_thing);
 *         type_remove_from_global_list(&elem->linkage);
 *
 *         zfree_smr(type_zone, elem);
 *     }
 *
 *     void
 *     type_smr_free(void *_elem)
 *     {
 *         type_t elem = _elem;
 *
 *         // cleanup fields that are used to "find" this element
 *         // and that SMR readers may access hazardously.
 *         lck_ticket_destroy(&elem->lock);
 *         kfree_data(elem->key, elem->keylen);
 *
 *         // compulsory: element must be zeroed fully
 *         bzero(elem, sizeof(*elem));
 *     }
 * </code>
 */
typedef void (*zone_smr_free_cb_t)(void *, size_t);

/*!
 * @function zone_enable_smr()
 *
 * @abstract
 * Enable SMR for a zone.
 *
 * @discussion
 * This can only be done once, and must be done before
 * the first allocation is made with this zone.
 *
 * @param zone          the zone to enable SMR for
 * @param smr           the smr domain to use
 * @param free_cb       the free callback to use
 */
extern void     zone_enable_smr(
	zone_t                  zone,
	struct smr             *smr,
	zone_smr_free_cb_t      free_cb);

/*!
 * @function zone_id_enable_smr()
 *
 * @abstract
 * Enable SMR for a zone ID.
 *
 * @discussion
 * This can only be done once, and must be done before
 * the first allocation is made with this zone.
 *
 * @param zone_id       the zone to enable SMR for
 * @param smr           the smr domain to use
 * @param free_cb       the free callback to use
 */
#define zone_id_enable_smr(zone_id, smr, free_cb)  ({ \
	void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t);      \
                                                                                \
	__cb = (free_cb);                                                       \
	zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb);    \
})

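/*
 * Illustrative wiring (the zone, SMR domain, and callback names are
 * hypothetical; see the @c zone_smr_free_cb_t discussion above for what
 * the callback must do):
 *
 * <code>
 *     zone_enable_smr(thing_zone, &thing_smr, &thing_smr_free);
 * </code>
 */
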
/*!
 * @macro zalloc_smr()
 *
 * @abstract
 * Allocates an element from an SMR enabled zone.
 *
 * @discussion
 * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
 *
 * @param zone          the zone to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
#define zalloc_smr(zone, flags) \
	zalloc_flags(zone, flags)

/*!
 * @macro zalloc_id_smr()
 *
 * @abstract
 * Allocates an element from a specified zone ID with SMR enabled.
 *
 * @param zid           The proper @c ZONE_ID_* constant.
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
#define zalloc_id_smr(zid, flags) \
	zalloc_id(zid, flags)

/*!
 * @macro zfree_smr()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_smr().
 *
 * @discussion
 * When zfree_smr() is called, the element is not immediately zeroed,
 * and the "free" callback that has been registered with the zone will
 * run later (@see zone_smr_free_cb_t).
 *
 * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
 *
 *
 * It is guaranteed that the SMR timestamp associated with an element
 * will always be equal to or greater than the stamp associated with
 * elements freed before it on the same thread.
 *
 * This means that when freeing multiple elements in a sequence, they
 * must be freed in topological order (parents before children).
 *
 * It is worth noting that calling zfree_smr() on several elements
 * in a given order doesn't necessarily mean they will be effectively
 * reused or cleaned up in that same order, only that their SMR clocks
 * will expire in that order.
 *
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
extern void     zfree_smr(
	zone_t          zone,
	void           *elem __unsafe_indexable);
#define zfree_smr(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})


/*!
 * @function zfree_id_smr()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id_smr().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_id_smr(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
#define zfree_id_smr(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})

/*!
 * @macro zfree_smr_noclear()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_smr().
 *
 * @discussion
 * This variant doesn't clear the pointer passed as an argument,
 * as it is often required for SMR algorithms to function correctly
 * to leave pointers "dangling" to an extent.
 *
 * However, it expects the field in question to be an SMR_POINTER()
 * struct.
 *
 * @param zone          the zone to free the element to.
 * @param elem          the address to free
 */
#define zfree_smr_noclear(zone, elem) \
	(zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))

/*!
 * @macro zfree_id_smr_noclear()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_id_smr().
 *
 * @discussion
 * This variant doesn't clear the pointer passed as an argument,
 * as it is often required for SMR algorithms to function correctly
 * to leave pointers "dangling" to an extent.
 *
 * However, it expects the field in question to be an SMR_POINTER()
 * struct.
 *
 * @param zone          the zone id to free the element to.
 * @param elem          the address to free
 */
#define zfree_id_smr_noclear(zone, elem) \
	(zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))


#pragma mark XNU only: zone creation (extended)

/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require()
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
 * belonging to [ZONE_ID__FIRST_RO_EXT, ZONE_ID__LAST_RO_EXT] and the zone creation
 * IDs listed in @c zone_create_ro_id_t.
 *
 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
 * @c zone_create().
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,
	ZONE_ID_AMFI_OSENTITLEMENTS,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_PROC_TASK,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

	ZONE_ID__FIRST_DYNAMIC,
});

/*!
 * @const ZONE_ID_ANY
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * Zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)

/*!
 * @const ZONE_ID_INVALID
 * An invalid zone_id_t that corresponds to nothing.
 */
#define ZONE_ID_INVALID ((zone_id_t)-2)

/*!
 * @function zone_by_id
 *
 * @param zid           the specified zone ID.
 * @returns             the zone with that ID.
 */
zone_t zone_by_id(
	size_t                  zid) __pure2;

/*!
 * @function zone_name
 *
 * @param zone          the specified zone
 * @returns             the name of the specified zone.
 */
const char *__unsafe_indexable zone_name(
	zone_t                  zone);

/*!
 * @function zone_heap_name
 *
 * @param zone          the specified zone
 * @returns             the name of the heap this zone is part of, or "".
 */
const char *__unsafe_indexable zone_heap_name(
	zone_t                  zone);

/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup   a block that can perform non-trivial initialization
 *                      on the zone before it is marked valid.
 *                      This block can call advanced setups like:
 *                      - zone_set_exhaustible()
 *
 * @returns             the created zone; this call never fails.
 */
extern zone_t   zone_create_ext(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));

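/*
 * For illustration (the zone name, type, and element cap are
 * hypothetical), creating a zone whose setup block makes it exhaustible;
 * @c zone_set_exhaustible() is declared at the end of this header:
 *
 * <code>
 *     zone_t thing_zone = zone_create_ext("things", sizeof(struct thing),
 *         ZC_ZFREE_CLEARMEM, ZONE_ID_ANY, ^(zone_t z) {
 *             zone_set_exhaustible(z, 1024);
 *     });
 * </code>
 */
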
1677 /*!
1678  * @macro ZONE_DECLARE
1679  *
1680  * @abstract
1681  * Declares a zone variable and its associated type.
1682  *
1683  * @param var           the name of the variable to declare.
1684  * @param type_t        the type of elements in the zone.
1685  */
1686 #define ZONE_DECLARE(var, type_t) \
1687 	extern zone_t var; \
1688 	__ZONE_DECLARE_TYPE(var, type_t)
1689 
1690 /*!
1691  * @macro ZONE_DECLARE_ID
1692  *
1693  * @abstract
1694  * Declares the type associated with a zone ID.
1695  *
1696  * @param id            the name of zone ID to associate a type with.
1697  * @param type_t        the type of elements in the zone.
1698  */
1699 #define ZONE_DECLARE_ID(id, type_t) \
1700 	__ZONE_DECLARE_TYPE(id, type_t)
1701 
1702 /*!
1703  * @macro ZONE_DEFINE
1704  *
1705  * @abstract
1706  * Declares a zone variable to automatically initialize with the specified
1707  * parameters.
1708  *
1709  * @discussion
1710  * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1711  *
1712  * @param var           the name of the variable to declare.
1713  * @param name          the name for the zone
1714  * @param size          the size of the elements returned by this zone.
1715  * @param flags         a set of @c zone_create_flags_t flags.
1716  */
1717 #define ZONE_DEFINE(var, name, size, flags) \
1718 	SECURITY_READ_ONLY_LATE(zone_t) var; \
1719 	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
1720 	static __startup_data struct zone_create_startup_spec \
1721 	__startup_zone_spec_ ## var = { &var, name, size, flags, \
1722 	    ZONE_ID_ANY, NULL }; \
1723 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1724 	    &__startup_zone_spec_ ## var)
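
/*
 * Usage sketch (hypothetical zone): a startup-initialized zone of
 * fixed-size 128-byte buffers; the variable becomes usable once the
 * ZALLOC startup phase has run.
 *
 *	ZONE_DEFINE(blob_zone, "blobs", 128, ZC_NONE);
 */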
1725 
1726 /*!
1727  * @macro ZONE_DEFINE_TYPE
1728  *
1729  * @abstract
1730  * Defines a zone variable to automatically initialize with the specified
1731  * parameters, associated with a particular type.
1732  *
1733  * @param var           the name of the variable to declare.
1734  * @param name          the name for the zone
1735  * @param type_t        the type of elements in the zone.
1736  * @param flags         a set of @c zone_create_flags_t flags.
1737  */
1738 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1739 	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1740 	__ZONE_DECLARE_TYPE(var, type_t)
1741 
1742 /*!
1743  * @macro ZONE_DEFINE_ID
1744  *
1745  * @abstract
1746  * Initializes a given zone automatically during startup with the specified
1747  * parameters.
1748  *
1749  * @param zid           a @c zone_reserved_id_t value.
1750  * @param name          the name for the zone
1751  * @param type_t        the type of elements in the zone.
1752  * @param flags         a set of @c zone_create_flags_t flags.
1753  */
1754 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1755 	ZONE_DECLARE_ID(zid, type_t); \
1756 	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
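
/*
 * Usage sketch, assuming a reserved ID named ZONE_ID_EXAMPLE exists in
 * zone_reserved_id_t (both the ID and the type are hypothetical):
 *
 *	ZONE_DEFINE_ID(ZONE_ID_EXAMPLE, "example.objects",
 *	    struct example_obj, ZC_NONE);
 */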
1757 
1758 /*!
1759  * @macro ZONE_INIT
1760  *
1761  * @abstract
1762  * Initializes a given zone automatically during startup with the specified
1763  * parameters.
1764  *
1765  * @param var           the name of the variable to initialize.
1766  * @param name          the name for the zone
1767  * @param size          the size of the elements returned by this zone.
1768  * @param flags         a set of @c zone_create_flags_t flags.
1769  * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1770  * @param extra_setup   a block that can perform non-trivial initialization
1771  *                      (@see @c zone_create_ext()).
1772  */
1773 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1774 	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1775 
1776 /*!
1777  * @function zone_id_require
1778  *
1779  * @abstract
1780  * Requires that a given pointer belong to the specified zone, by ID and size.
1781  *
1782  * @discussion
1783  * The function panics if the check fails as it indicates that the kernel
1784  * internals have been compromised.
1785  *
1786  * This is a variant of @c zone_require() which:
1787  * - isn't sensitive to @c zone_t::elem_size being compromised,
1788  * - is slightly faster as it saves one load and a multiplication.
1789  *
1790  * @param zone_id       the zone ID the address needs to belong to.
1791  * @param elem_size     the size of elements for this zone.
1792  * @param addr          the element address to check.
1793  */
1794 extern void     zone_id_require(
1795 	zone_id_t               zone_id,
1796 	vm_size_t               elem_size,
1797 	void                   *addr __unsafe_indexable);
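
/*
 * Usage sketch (names hypothetical): validating that a pointer read from
 * an untrusted structure really is a ZONE_ID_THING element before using
 * it; on failure the kernel panics instead of proceeding.
 *
 *	struct thing *t = untrusted->t_ptr;
 *	zone_id_require(ZONE_ID_THING, sizeof(struct thing), t);
 */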
1798 
1799 /* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
1800 extern void     zone_set_exhaustible(
1801 	zone_t                  zone,
1802 	vm_size_t               max_elements);
1803 
1804 /*!
1805  * @function zone_raise_reserve()
1806  *
1807  * @brief
1808  * Used to raise the reserve on a zone.
1809  *
1810  * @discussion
1811  * Can be called from any context (zone_create_ext() setup hook or after).
1812  */
1813 extern void     zone_raise_reserve(
1814 	zone_or_view_t          zone_or_view,
1815 	uint16_t                min_elements);
1816 
1817 /*!
1818  * @function zone_fill_initially
1819  *
1820  * @brief
1821  * Initially fills a non-collectable zone so that it can hold the specified
1822  * number of elements.
1823  *
1824  * @discussion
1825  * This function must be called on a non-collectable permanent zone before
1826  * the zone has ever been used.
1827  *
1828  * @param zone          The zone to fill.
1829  * @param nelems        The number of elements to be able to hold.
1830  */
1831 extern void     zone_fill_initially(
1832 	zone_t                  zone,
1833 	vm_size_t               nelems);
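
/*
 * Usage sketch (hypothetical zone): reserving room for 512 elements right
 * after the zone is created, before its first allocation.
 *
 *	zone_fill_initially(descriptor_zone, 512);
 */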
1834 
1835 /*!
1836  * @function zone_drain()
1837  *
1838  * @abstract
1839  * Forces a zone to be drained (have all its data structures freed
1840  * back to its data store, and empty pages returned to the system).
1841  *
1842  * @param zone          the zone to drain.
1843  */
1844 extern void zone_drain(
1845 	zone_t                  zone);
1846 
1847 /*!
1848  * @struct zone_basic_stats
1849  *
1850  * @abstract
1851  * Used to report basic statistics about a zone.
1852  *
1853  * @field zbs_avail     the number of elements in a zone.
1854  * @field zbs_alloc     the number of allocated elements in a zone.
1855  * @field zbs_free      the number of free elements in a zone.
1856  * @field zbs_cached    the number of free elements in the per-CPU caches
1857  *                      (included in zbs_free).
1858  * @field zbs_alloc_fail
1859  *                      the number of allocation failures.
1860  */
1861 struct zone_basic_stats {
1862 	uint64_t        zbs_avail;
1863 	uint64_t        zbs_alloc;
1864 	uint64_t        zbs_free;
1865 	uint64_t        zbs_cached;
1866 	uint64_t        zbs_alloc_fail;
1867 };
1868 
1869 /*!
1870  * @function zone_get_stats
1871  *
1872  * @abstract
1873  * Retrieves statistics about a zone, including its per-CPU caches.
1874  *
1875  * @param zone          the zone to collect stats from.
1876  * @param stats         the statistics to fill.
1877  */
1878 extern void zone_get_stats(
1879 	zone_t                  zone,
1880 	struct zone_basic_stats *stats);
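
/*
 * Usage sketch (hypothetical zone): reading a zone's counters; note that
 * zbs_free already includes the per-CPU cached elements in zbs_cached.
 *
 *	struct zone_basic_stats stats;
 *	zone_get_stats(widget_zone, &stats);
 *	assert(stats.zbs_avail == stats.zbs_alloc + stats.zbs_free);
 */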
1881 
1882 
1883 /*!
1884  * @typedef zone_exhausted_cb_t
1885  *
1886  * @brief
1887  * The callback type for the ZONE_EXHAUSTED event.
1888  */
1889 typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone);
1890 
1891 /*!
1892  * @brief
1893  * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits its
1894  * wiring limit.
1895  *
1896  * @discussion
1897  * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
1898  * performing zone expansion and no significant amount of work can be performed
1899  * from this context.
1900  *
1901  * In particular, those callbacks cannot allocate any memory; they are
1902  * expected to check whether the zone is of interest, and to wake up another
1903  * thread to perform the actual work (for example via a thread call).
1904  */
1905 EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
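
/*
 * Handler sketch (illustrative; all names hypothetical, and registration
 * is assumed to go through the macro paired with EVENT_DECLARE in the
 * event machinery): filter for the zone of interest and defer all real
 * work to a preallocated thread call.
 *
 *	static void
 *	widget_zone_exhausted(zone_id_t zid, zone_t zone __unused)
 *	{
 *	        if (zid == ZONE_ID_WIDGET) {
 *	                thread_call_enter(widget_tcall); // no allocations here
 *	        }
 *	}
 */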
1906 
1907 
1908 #pragma mark XNU only: zone views
1909 
1910 /*!
1911  * @enum zone_kheap_id_t
1912  *
1913  * @brief
1914  * Enumerate a particular kalloc heap.
1915  *
1916  * @discussion
1917  * More documentation about heaps is available in @c <kern/kalloc.h>.
1918  *
1919  * @const KHEAP_ID_NONE
1920  * This value denotes regular zones, not used by kalloc.
1921  *
1922  * @const KHEAP_ID_SHARED
1923  * Indicates zones part of the KHEAP_SHARED heap.
1924  *
1925  * @const KHEAP_ID_DATA_BUFFERS
1926  * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
1927  *
1928  * @const KHEAP_ID_KT_VAR
1929  * Indicates zones part of the KHEAP_KT_VAR heap.
1930  */
1931 __enum_decl(zone_kheap_id_t, uint8_t, {
1932 	KHEAP_ID_NONE,
1933 	KHEAP_ID_SHARED,
1934 	KHEAP_ID_DATA_BUFFERS,
1935 	KHEAP_ID_KT_VAR,
1936 
1937 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
1938 });
1939 
1940 /*!
1941  * @macro ZONE_VIEW_DECLARE
1942  *
1943  * @abstract
1944  * Optionally declares a zone view (typically in a header).
1945  *
1946  * @param var           the name for the zone view.
1947  */
1948 #define ZONE_VIEW_DECLARE(var) \
1949 	extern struct zone_view var[1]
1950 
1951 /*!
1952  * @macro ZONE_VIEW_DEFINE
1953  *
1954  * @abstract
1955  * Defines a given zone view and what it points to.
1956  *
1957  * @discussion
1958  * Zone views can either share a pre-existing zone,
1959  * or perform a lookup into a kalloc heap for the zone
1960  * backing the bucket of the proper size.
1961  *
1962  * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
1963  * at the last rank. If a view is backed by a zone, that zone must have been
1964  * created before this stage.
1965  *
1966  * This macro should not be used to create zone views from the default
1967  * kalloc heap; use KALLOC_TYPE_DEFINE instead.
1968  *
1969  * @param var           the name for the zone view.
1970  * @param name          a string describing the zone view.
1971  * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
1972  * @param size          the element size to be allocated from this view.
1973  */
1974 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
1975 	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
1976 	    .zv_name = (name), \
1977 	} }; \
1978 	static __startup_data struct zone_view_startup_spec \
1979 	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
1980 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
1981 	    &__startup_zone_view_spec_ ## var)
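
/*
 * Usage sketches (hypothetical names): a view sharing a pre-existing
 * zone, and a view resolved against a kalloc heap by size.
 *
 *	ZONE_VIEW_DEFINE(small_msg_view, "small.messages",
 *	    &message_zone, sizeof(struct small_message));
 *
 *	ZONE_VIEW_DEFINE(net_buf_view, "net.buffers",
 *	    KHEAP_ID_DATA_BUFFERS, 512);
 */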
1982 
1983 
1984 #pragma mark XNU only: batched allocations
1985 
1986 /*!
1987  * @typedef zstack_t
1988  *
1989  * @brief
1990  * A stack of allocated elements chained with delta encoding.
1991  *
1992  * @discussion
1993  * Some batch allocation interfaces interact with the data heap
1994  * where leaking kernel pointers is not acceptable. This is why
1995  * element offsets are used instead.
1996  */
1997 typedef struct zstack {
1998 	vm_offset_t     z_head;
1999 	uint32_t        z_count;
2000 } zstack_t;
2001 
2002 /*!
2003  * @function zstack_push
2004  *
2005  * @brief
2006  * Push a given element onto a zstack.
2007  */
2008 extern void zstack_push(
2009 	zstack_t               *stack,
2010 	void                   *elem __unsafe_indexable);
2011 
2012 /*!
2013  * @function zstack_pop
2014  *
2015  * @brief
2016  * Pops an element from a zstack; the caller must check that it is not empty.
2017  */
2018 void *__unsafe_indexable zstack_pop(
2019 	zstack_t               *stack);
2020 
2021 /*!
2022  * @function zstack_empty
2023  *
2024  * @brief
2025  * Returns whether a stack is empty.
2026  */
2027 static inline uint32_t
2028 zstack_count(zstack_t stack)
2029 {
2030 	return stack.z_count;
2031 }
2032 
2033 /*!
2034  * @function zstack_empty
2035  *
2036  * @brief
2037  * Returns whether a stack is empty.
2038  */
2039 static inline bool
2040 zstack_empty(zstack_t stack)
2041 {
2042 	return zstack_count(stack) == 0;
2043 }
2044 
2045 static inline zstack_t
2046 zstack_load_and_erase(zstack_t *stackp)
2047 {
2048 	zstack_t stack = *stackp;
2049 
2050 	*stackp = (zstack_t){ };
2051 	return stack;
2052 }
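
/*
 * Usage sketch (given some @c zstack_t named @c stack): draining a stack
 * of elements; zstack_pop() must only be called on a non-empty stack
 * (consume() is hypothetical).
 *
 *	while (!zstack_empty(stack)) {
 *	        consume(zstack_pop(&stack));
 *	}
 */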
2053 
2054 /*!
2055  * @function zfree_nozero
2056  *
2057  * @abstract
2058  * Frees an element allocated with @c zalloc*, without zeroing it.
2059  *
2060  * @discussion
2061  * This is for the sake of networking only; no one else should use this.
2062  *
2063  * @param zone_id       the zone id to free the element to.
2064  * @param elem          the element to free
2065  */
2066 extern void zfree_nozero(
2067 	zone_id_t               zone_id,
2068 	void                   *elem __unsafe_indexable);
2069 #define zfree_nozero(zone_id, elem) ({ \
2070 	zone_id_t __zfree_zid = (zone_id); \
2071 	(zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
2072 })
2073 
2074 /*!
2075  * @function zalloc_n
2076  *
2077  * @abstract
2078  * Allocates a batch of elements from the specified zone.
2079  *
2080  * @discussion
2081  * This is for the sake of networking only; no one else should use this.
2082  *
2083  * @param zone_id       the zone id to allocate the element from.
2084  * @param count         how many elements to allocate (fewer might be returned)
2085  * @param flags         a set of @c zalloc_flags_t flags.
2086  */
2087 extern zstack_t zalloc_n(
2088 	zone_id_t               zone_id,
2089 	uint32_t                count,
2090 	zalloc_flags_t          flags);
2091 
2092 /*!
2093  * @function zfree_n
2094  *
2095  * @abstract
2096  * Batched variant of zfree(): frees a stack of elements.
2097  *
2098  * @param zone_id       the zone id to free the element to.
2099  * @param stack         a stack of elements to free.
2100  */
2101 extern void zfree_n(
2102 	zone_id_t               zone_id,
2103 	zstack_t                stack);
2104 #define zfree_n(zone_id, stack) ({ \
2105 	zone_id_t __zfree_zid = (zone_id); \
2106 	(zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2107 })
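
/*
 * Usage sketch (hypothetical zone ID): allocating a batch, using part of
 * it, and returning the remainder; zalloc_n() may return fewer elements
 * than requested.
 *
 *	zstack_t stack = zalloc_n(ZONE_ID_EXAMPLE, 16, Z_WAITOK);
 *	uint32_t got = zstack_count(stack);
 *	// ... pop and use up to `got` elements ...
 *	zfree_n(ZONE_ID_EXAMPLE, stack);
 */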
2108 
2109 /*!
2110  * @function zfree_nozero_n
2111  *
2112  * @abstract
2113  * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
2114  * them.
2115  *
2116  * @discussion
2117  * This is for the sake of networking only; no one else should use this.
2118  *
2119  * @param zone_id       the zone id to free the element to.
2120  * @param stack         a stack of elements to free.
2121  */
2122 extern void zfree_nozero_n(
2123 	zone_id_t               zone_id,
2124 	zstack_t                stack);
2125 #define zfree_nozero_n(zone_id, stack) ({ \
2126 	zone_id_t __zfree_zid = (zone_id); \
2127 	(zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2128 })
2129 
2130 #pragma mark XNU only: cached objects
2131 
2132 /*!
2133  * @typedef zone_cache_ops_t
2134  *
2135  * @brief
2136  * A set of callbacks used for a zcache (cache of composite objects).
2137  *
2138  * @field zc_op_alloc
2139  * The callback to "allocate" a cached object from scratch.
2140  *
2141  * @field zc_op_mark_valid
2142  * The callback that is called when a cached object is being reused;
2143  * it will typically call @c zcache_mark_valid() on the various
2144  * sub-pieces of the composite cached object.
2145  *
2146  * @field zc_op_mark_invalid
2147  * The callback that is called when a composite object is being freed
2148  * to the cache. This will typically call @c zcache_mark_invalid()
2149  * on the various sub-pieces of the composite object.
2150  *
2151  * @field zc_op_free
2152  * The callback to "free" a composite object completely.
2153  */
2154 typedef const struct zone_cache_ops {
2155 	void         *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);
2156 	void         *(*zc_op_mark_valid)(zone_id_t, void *);
2157 	void         *(*zc_op_mark_invalid)(zone_id_t, void *);
2158 	void          (*zc_op_free)(zone_id_t, void *);
2159 } *zone_cache_ops_t;
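
/*
 * Ops-vector sketch (all names hypothetical) for a composite object made
 * of a header plus an attached buffer; the mark hooks funnel each
 * sub-piece through zcache_mark_valid()/zcache_mark_invalid() (declared
 * below) so that KASAN/PGZ bookkeeping stays accurate.
 *
 *	static void *
 *	combo_mark_valid(zone_id_t zid, void *obj)
 *	{
 *	        struct combo *c = zcache_mark_valid(zone_by_id(zid), obj);
 *	        c->c_buf = zcache_mark_valid(combo_buf_zone, c->c_buf);
 *	        return c;
 *	}
 *
 *	static const struct zone_cache_ops combo_ops = {
 *	        .zc_op_alloc        = combo_alloc,
 *	        .zc_op_mark_valid   = combo_mark_valid,
 *	        .zc_op_mark_invalid = combo_mark_invalid,
 *	        .zc_op_free         = combo_free,
 *	};
 */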
2160 
2161 
2162 /*!
2163  * @function zcache_mark_valid()
2164  *
2165  * @brief
2166  * Mark an element as "valid".
2167  *
2168  * @discussion
2169  * This function lets a cache of composite objects integrate with KASAN
2170  * or PGZ. It is typically called from the cache's
2171  * @c zc_op_mark_valid() callback.
2172  *
2173  * If neither PGZ nor KASAN is in use, this function is a no-op.
2174  * Otherwise the @c elem address might be updated.
2175  *
2176  * @param zone          the zone the element belongs to.
2177  * @param elem          the address of the element
2178  * @returns             the new address to correctly access @c elem.
2179  */
2180 extern vm_offset_t zcache_mark_valid(
2181 	zone_t                  zone,
2182 	vm_offset_t             elem);
2183 #define zcache_mark_valid(zone, e) \
2184 	((typeof(e))((zcache_mark_valid)(zone, (vm_offset_t)(e))))
2185 
2186 /*!
2187  * @function zcache_mark_invalid()
2188  *
2189  * @brief
2190  * Mark an element as "invalid".
2191  *
2192  * @discussion
2193  * This function lets a cache of composite objects integrate with KASAN
2194  * or PGZ. It is typically called from the cache's
2195  * @c zc_op_mark_invalid() callback.
2196  *
2197  * This function performs validation that @c elem belongs
2198  * to the right zone and is properly "aligned", and should
2199  * never be elided under any configuration.
2200  *
2201  * @param zone          the zone the element belongs to.
2202  * @param elem          the address of the element
2203  * @returns             the new address to correctly access @c elem.
2204  */
2205 extern vm_offset_t zcache_mark_invalid(
2206 	zone_t                  zone,
2207 	vm_offset_t             elem);
2208 #define zcache_mark_invalid(zone, e) \
2209 	((typeof(e))(zcache_mark_invalid)(zone, (vm_offset_t)(e)))
2210 
2211 
2212 /*!
2213  * @macro zcache_alloc()
2214  *
2215  * @abstract
2216  * Allocates a composite object from a cache.
2217  *
2218  * @param zone_id       The proper @c ZONE_ID_* constant.
2219  * @param fl            a collection of @c zalloc_flags_t flags.
2220  *
2221  * @returns             NULL or the allocated element
2222  */
2223 #define zcache_alloc(zone_id, fl) \
2224 	__zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2225 
2226 /*!
2227  * @function zcache_alloc_n()
2228  *
2229  * @abstract
2230  * Allocates a stack of composite objects from a cache.
2231  *
2232  * @param zone_id       The proper @c ZONE_ID_* constant.
2233  * @param count         how many elements to allocate (fewer might be returned)
2234  * @param flags         a set of @c zalloc_flags_t flags.
2235  *
2236  * @returns             a stack of allocated composite objects (possibly empty)
2237  */
2238 extern zstack_t zcache_alloc_n(
2239 	zone_id_t               zone_id,
2240 	uint32_t                count,
2241 	zalloc_flags_t          flags,
2242 	zone_cache_ops_t        ops);
2243 #define zcache_alloc_n(zone_id, count, flags) \
2244 	(zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2245 
2246 
2247 
2248 /*!
2249  * @function zcache_free()
2250  *
2251  * @abstract
2252  * Frees a composite object previously allocated
2253  * with @c zcache_alloc() or @c zcache_alloc_n().
2254  *
2255  * @param zone_id       the zcache id to free the object to.
2256  * @param addr          the address to free
2257  * @param ops           the pointer to the zcache ops for this zcache.
2258  */
2259 extern void zcache_free(
2260 	zone_id_t               zone_id,
2261 	void                   *addr __unsafe_indexable,
2262 	zone_cache_ops_t        ops);
2263 #define zcache_free(zone_id, elem) \
2264 	(zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
2265 	    __zcache_##zone_id##_ops)
2266 
2267 /*!
2268  * @function zcache_free_n()
2269  *
2270  * @abstract
2271  * Frees a stack of composite objects previously allocated
2272  * with @c zcache_alloc() or @c zcache_alloc_n().
2273  *
2274  * @param zone_id       the zcache id to free the objects to.
2275  * @param stack         a stack of composite objects
2276  * @param ops           the pointer to the zcache ops for this zcache.
2277  */
2278 extern void zcache_free_n(
2279 	zone_id_t               zone_id,
2280 	zstack_t                stack,
2281 	zone_cache_ops_t        ops);
2282 #define zcache_free_n(zone_id, stack) \
2283 	(zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
2284 	    __zcache_##zone_id##_ops)
2285 
2286 
2287 /*!
2288  * @function zcache_drain()
2289  *
2290  * @abstract
2291  * Forces a zcache to be drained (have all its data structures freed
2292  * back to the original zones).
2293  *
2294  * @param zone_id       the zcache id to drain.
2295  */
2296 extern void zcache_drain(
2297 	zone_id_t               zone_id);
2298 
2299 
2300 /*!
2301  * @macro ZCACHE_DECLARE
2302  *
2303  * @abstract
2304  * Declares the type associated with a zone cache ID.
2305  *
2306  * @param id            the name of zone ID to associate a type with.
2307  * @param type_t        the type of elements in the zone.
2308  */
2309 #define ZCACHE_DECLARE(id, type_t) \
2310 	__ZONE_DECLARE_TYPE(id, type_t); \
2311 	__attribute__((visibility("hidden"))) \
2312 	extern const zone_cache_ops_t __zcache_##id##_ops
2313 
2314 
2315 /*!
2316  * @macro ZCACHE_DEFINE
2317  *
2318  * @abstract
2319  * Defines a zone cache for a given ID and type.
2320  *
2321  * @param zid           the name of the zone ID to associate a type with.
2322  * @param name          the name for the zone
2323  * @param type_t        the type of elements in the zone.
2324  * @param size          the size of elements in the cache
2325  * @param ops           the ops for this zcache.
2326  */
2327 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2328 	ZCACHE_DECLARE(zid, type_t);                                            \
2329 	ZONE_DECLARE_ID(zid, type_t);                                           \
2330 	const zone_cache_ops_t __zcache_##zid##_ops = (ops);                    \
2331 	ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) {   \
2332 	        zcache_ops[zid] = (ops);                                        \
2333 	})
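
/*
 * Usage sketch, continuing the hypothetical combo_ops example above and
 * assuming a reserved ID named ZONE_ID_COMBO:
 *
 *	ZCACHE_DEFINE(ZONE_ID_COMBO, "combo.cache", struct combo,
 *	    sizeof(struct combo), &combo_ops);
 */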
2334 
2335 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2336 
2337 #pragma mark XNU only: PGZ support
2338 
2339 /*!
2340  * @function pgz_owned()
2341  *
2342  * @brief
2343  * Returns whether an address is PGZ owned.
2344  *
2345  * @param addr          The address to translate.
2346  * @returns             Whether it is PGZ owned
2347  */
2348 #if CONFIG_PROB_GZALLOC
2349 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
2350 #else
2351 #define pgz_owned(addr) false
2352 #endif
2353 
2354 /*!
2355  * @function pgz_decode()
2356  *
2357  * @brief
2358  * Translates a PGZ protected virtual address to its unprotected
2359  * backing store.
2360  *
2361  * @discussion
2362  * This is exposed so that the VM can look up the vm_page_t for PGZ protected
2363  * elements, since the PGZ protected virtual addresses are maintained by PGZ
2364  * at the pmap level without the VM's involvement.
2365  *
2366  * "allow_invalid" schemes relying on sequestering also need this
2367  * to perform the locking attempts on the unprotected address.
2368  *
2369  * @param addr          The address to translate.
2370  * @param size          The object size.
2371  * @returns             The unprotected address or @c addr.
2372  */
2373 #if CONFIG_PROB_GZALLOC
2374 #define pgz_decode(addr, size) \
2375 	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
2376 #else
2377 #define pgz_decode(addr, size)  (addr)
2378 #endif
2379 
2380 /*!
2381  * @function pgz_decode_allow_invalid()
2382  *
2383  * @brief
2384  * Translates a PGZ protected virtual address to its unprotected
2385  * backing store, but doesn't assert it is still allocated/valid.
2386  *
2387  * @discussion
2388  * "allow_invalid" schemes relying on sequestering also need this
2389  * to perform the locking attempts on the unprotected address.
2390  *
2391  * @param addr          The address to translate.
2392  * @param want_zid      The expected zone ID for the element.
2393  * @returns             The unprotected address or @c addr.
2394  */
2395 #if CONFIG_PROB_GZALLOC
2396 #define pgz_decode_allow_invalid(addr, want_zid) \
2397 	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
2398 #else
2399 #define pgz_decode_allow_invalid(addr, zid)  (addr)
2400 #endif
2401 
2402 #pragma mark XNU only: misc & implementation details
2403 
2404 struct zone_create_startup_spec {
2405 	zone_t                 *z_var;
2406 	const char             *z_name __unsafe_indexable;
2407 	vm_size_t               z_size;
2408 	zone_create_flags_t     z_flags;
2409 	zone_id_t               z_zid;
2410 	void                  (^z_setup)(zone_t);
2411 };
2412 
2413 extern void     zone_create_startup(
2414 	struct zone_create_startup_spec *spec);
2415 
2416 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2417 	static __startup_data struct zone_create_startup_spec \
2418 	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2419 	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2420 	    &__startup_zone_spec_ ## ns)
2421 
2422 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2423 	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2424 
2425 #define __zalloc_cast(namespace, expr) \
2426 	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2427 
2428 #if ZALLOC_TYPE_SAFE
2429 #define zalloc(zov)             __zalloc_cast(zov, (zalloc)(zov))
2430 #define zalloc_noblock(zov)     __zalloc_cast(zov, (zalloc_noblock)(zov))
2431 #endif /* ZALLOC_TYPE_SAFE */
2432 
2433 struct zone_view_startup_spec {
2434 	zone_view_t         zv_view;
2435 	union {
2436 		zone_kheap_id_t zv_heapid;
2437 		zone_t         *zv_zone;
2438 	};
2439 	vm_size_t           zv_size;
2440 };
2441 
2442 extern void zone_view_startup_init(
2443 	struct zone_view_startup_spec *spec);
2444 
2445 extern void zone_userspace_reboot_checks(void);
2446 
2447 #if VM_TAG_SIZECLASSES
2448 extern void __zone_site_register(
2449 	vm_allocation_site_t   *site);
2450 
2451 #define VM_ALLOC_SITE_TAG() ({ \
2452 	__PLACE_IN_SECTION("__DATA, __data")                                   \
2453 	static vm_allocation_site_t site = { .refcount = 2, };                 \
2454 	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __zone_site_register, &site);   \
2455 	site.tag;                                                              \
2456 })
2457 #else /* VM_TAG_SIZECLASSES */
2458 #define VM_ALLOC_SITE_TAG()                     VM_KERN_MEMORY_NONE
2459 #endif /* !VM_TAG_SIZECLASSES */
2460 
2461 static inline zalloc_flags_t
2462 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2463 {
2464 	return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2465 }
2466 
2467 #if DEBUG || DEVELOPMENT
2468 #  define ZPCPU_MANGLE_MASK     0xc0c0000000000000ul
2469 #else /* !(DEBUG || DEVELOPMENT) */
2470 #  define ZPCPU_MANGLE_MASK     0ul
2471 #endif /* !(DEBUG || DEVELOPMENT) */
2472 
2473 #define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_MASK)
2474 #define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_MASK)
2475 #define __zpcpu_addr(e)         ((vm_address_t)(e))
2476 #define __zpcpu_cast(ptr, e)    __unsafe_forge_single(typeof(ptr), e)
2477 #define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2478 
2479 /**
2480  * @macro __zpcpu_mangle_for_boot()
2481  *
2482  * @discussion
2483  * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2484  * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2485  * storage marked @c __startup_data and replace it with the proper allocation
2486  * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2487  *
2488  * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2489  * provides the proper mangling of the storage into a "fake" percpu pointer so
2490  * that accesses through @c zpercpu_get() functions properly.
2491  *
2492  * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2493  */
2494 #define __zpcpu_mangle_for_boot(ptr)  ({ \
2495 	assert(startup_phase < STARTUP_SUB_ZALLOC); \
2496 	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
2497 })
2498 
2499 extern unsigned zpercpu_count(void) __pure2;
2500 
2501 #if CONFIG_PROB_GZALLOC
2502 
2503 extern vm_offset_t __pgz_decode(
2504 	mach_vm_address_t       addr,
2505 	mach_vm_size_t          size);
2506 
2507 extern vm_offset_t __pgz_decode_allow_invalid(
2508 	vm_offset_t             offs,
2509 	zone_id_t               zid);
2510 
2511 #endif
2512 #if DEBUG || DEVELOPMENT
2513 extern size_t zone_pages_wired;
2514 extern size_t zone_guard_pages;
2515 #endif /* DEBUG || DEVELOPMENT */
2516 #if CONFIG_ZLEAKS
2517 extern uint32_t                 zleak_active;
2518 extern vm_size_t                zleak_max_zonemap_size;
2519 extern vm_size_t                zleak_global_tracking_threshold;
2520 extern vm_size_t                zleak_per_zone_tracking_threshold;
2521 
2522 extern kern_return_t zleak_update_threshold(
2523 	vm_size_t              *arg,
2524 	uint64_t                value);
2525 #endif /* CONFIG_ZLEAKS */
2526 
2527 extern uint32_t                 zone_map_jetsam_limit;
2528 
2529 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2530 
2531 extern zone_t percpu_u64_zone;
2532 
2533 #pragma GCC visibility pop
2534 #endif /* XNU_KERNEL_PRIVATE */
2535 
2536 /*
2537  * This macro is currently used by AppleImage4 (rdar://83924635)
2538  */
2539 #define __zalloc_ptr_load_and_erase(elem) \
2540 	os_ptr_load_and_erase(elem)
2541 
2542 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2543 
2544 #endif  /* _KERN_ZALLOC_H_ */
2545 
2546 #endif  /* KERNEL_PRIVATE */
2547