1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: zalloc.h
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1985
62 *
63 */
64
65 #ifdef KERNEL_PRIVATE
66
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg) __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86
87 /*
88 * Enable this macro to force type safe zalloc/zalloc_ro/...
89 */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99
100 /*!
101 * @macro __zpercpu
102 *
103 * @abstract
104 * Annotation that helps denoting a per-cpu pointer that requires usage of
105 * @c zpercpu_*() for access.
106 */
107 #define __zpercpu __unsafe_indexable
108
109 /*!
110 * @typedef zone_id_t
111 *
112 * @abstract
113 * The type for a zone ID.
114 */
115 typedef uint16_t zone_id_t;
116
117 /**
118 * @enum zone_create_flags_t
119 *
120 * @abstract
121 * Set of flags to pass to zone_create().
122 *
123 * @discussion
124 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* flags win over such policies.
126 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** (obsolete) */
	ZC_SEQUESTER            = 0x00000001,
	/** (obsolete) */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as Read-only */
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** (obsolete) */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/* flags in the high 32 bits are reserved for XNU-internal use */

	/** This zone is a built object cache */
	ZC_OBJ_CACHE            = 0x0080000000000000,

	/** Use guard pages in PGZ mode */
	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,

	/** Zone doesn't support TBI tagging */
	ZC_NOTBITAG             = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	/** Disable PGZ for this zone */
	ZC_NOPGZ                = 0x0800000000000000,

	/** This zone contains pure data */
	ZC_DATA                 = 0x1000000000000000,

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});
197
198 /*!
199 * @union zone_or_view
200 *
201 * @abstract
202 * A type used for calls that admit both a zone or a zone view.
203 *
204 * @discussion
205 * @c zalloc() and @c zfree() and their variants can act on both
206 * zones and zone views.
207 */
union zone_or_view {
	struct kalloc_type_view *zov_kt_heap;
	struct zone_view        *zov_view;
	struct zone             *zov_zone;
#ifdef __cplusplus
	/* implicit converting constructors stand in for C's transparent_union */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv)
	{
	}
	inline zone_or_view(struct zone *z) : zov_zone(z)
	{
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth)
	{
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
/* transparent_union lets callers pass any member type directly */
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
226
227 /*!
228 * @enum zone_create_ro_id_t
229 *
230 * @abstract
231 * Zone creation IDs for external read only zones
232 *
233 * @discussion
234 * Kexts that desire to use the RO allocator should:
235 * 1. Add a zone creation id below
236 * 2. Add a corresponding ID to @c zone_reserved_id_t
237 * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
238 * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
239 * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
240 */
__enum_decl(zone_create_ro_id_t, zone_id_t, {
	ZC_RO_ID_SANDBOX,
	ZC_RO_ID_PROFILE,
	ZC_RO_ID_PROTOBOX,
	ZC_RO_ID_SB_FILTER,
	ZC_RO_ID_AMFI_OSENTITLEMENTS,
	/* add new IDs above and keep ZC_RO_ID__LAST aliased to the last one */
	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
});
249
250 /*!
251 * @function zone_create
252 *
253 * @abstract
254 * Creates a zone with the specified parameters.
255 *
256 * @discussion
257 * A Zone is a slab allocator that returns objects of a given size very quickly.
258 *
259 * @param name the name for the new zone.
260 * @param size the size of the elements returned by this zone.
261 * @param flags a set of @c zone_create_flags_t flags.
262 *
263 * @returns the created zone, this call never fails.
264 */
265 extern zone_t zone_create(
266 const char *name __unsafe_indexable,
267 vm_size_t size,
268 zone_create_flags_t flags);
269
270 /*!
271 * @function zone_create_ro
272 *
273 * @abstract
274 * Creates a read only zone with the specified parameters from kexts
275 *
276 * @discussion
277 * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
278 * kexts. Do not use this API to create read only zones in xnu.
279 *
280 * @param name the name for the new zone.
281 * @param size the size of the elements returned by this zone.
282 * @param flags a set of @c zone_create_flags_t flags.
283 * @param zc_ro_id an ID declared in @c zone_create_ro_id_t
284 *
285 * @returns the zone ID of the created zone, this call never fails.
286 */
287 extern zone_id_t zone_create_ro(
288 const char *name __unsafe_indexable,
289 vm_size_t size,
290 zone_create_flags_t flags,
291 zone_create_ro_id_t zc_ro_id);
292
293 /*!
294 * @function zdestroy
295 *
296 * @abstract
297 * Destroys a zone previously made with zone_create.
298 *
299 * @discussion
300 * Zones must have been made destructible for @c zdestroy() to be allowed,
301 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
302 *
303 * @param zone the zone to destroy.
304 */
305 extern void zdestroy(
306 zone_t zone);
307
308 /*!
309 * @function zone_require
310 *
311 * @abstract
312 * Requires for a given pointer to belong to the specified zone.
313 *
314 * @discussion
315 * The function panics if the check fails as it indicates that the kernel
316 * internals have been compromised.
317 *
318 * @param zone the zone the address needs to belong to.
319 * @param addr the element address to check.
320 */
321 extern void zone_require(
322 zone_t zone,
323 void *addr __unsafe_indexable);
324
325 /*!
326 * @function zone_require_ro
327 *
328 * @abstract
329 * Version of zone require intended for zones created with ZC_READONLY
330 *
331 * @discussion
332 * This check is not sufficient to fully trust the element.
333 *
334 * Another check of its content must be performed to prove
335 * that the element is "the right one", a typical technique
336 * for when the RO data structure is 1:1 with a mutable one,
337 * is a simple circularity check with a very strict lifetime
338 * (both the mutable and read-only data structures are made
339 * and destroyed as close as possible).
340 *
341 * @param zone_id the zone id the address needs to belong to.
342 * @param elem_size the element size for this zone.
343 * @param addr the element address to check.
344 */
345 extern void zone_require_ro(
346 zone_id_t zone_id,
347 vm_size_t elem_size,
348 void *addr __unsafe_indexable);
349
350 /*!
351 * @enum zalloc_flags_t
352 *
353 * @brief
354 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
355 *
356 * @discussion
357 * It is encouraged that any callsite passing flags uses exactly one of:
358 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
359 * if nothing else was specified.
360 *
361 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
362 * then @c Z_WAITOK is ignored.
363 *
364 * @const Z_WAITOK
365 * Passing this flag means that zalloc() will be allowed to sleep
366 * for memory to become available for this allocation. If the zone
367 * isn't exhaustible, zalloc(Z_WAITOK) never fails.
368 *
369 * If the zone is exhaustible, zalloc() might still fail if the zone
370 * is at its maximum allowed memory usage, unless Z_NOFAIL is passed,
371 * in which case zalloc() will block until an element is freed.
372 *
373 * @const Z_NOWAIT
374 * Passing this flag means that zalloc is not allowed to ever block.
375 *
376 * @const Z_NOPAGEWAIT
377 * Passing this flag means that zalloc is allowed to wait due to lock
378 * contention, but will not wait for the VM to wait for pages when
379 * under memory pressure.
380 *
381 * @const Z_ZERO
382 * Passing this flags means that the returned memory has been zeroed out.
383 *
384 * @const Z_NOFAIL
385 * Passing this flag means that the caller expects the allocation to always
386 * succeed. This will result in a panic if this assumption isn't correct.
387 *
388 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT.
 * For exhaustible zones, it forces the caller to wait until a zfree() happened
390 * if the zone has reached its maximum of allowed elements.
391 *
392 * @const Z_REALLOCF
393 * For the realloc family of functions,
394 * free the incoming memory on failure cases.
395 *
396 #if XNU_KERNEL_PRIVATE
397 * @const Z_SET_NOTSHARED
398 * Using this flag from external allocations APIs (kalloc_type/zalloc)
399 * allows the callsite to skip the shared zone for that sizeclass and
 * directly allocate from the requested zone.
401 * Using this flag from internal APIs (zalloc_ext) will skip the shared
402 * zone only when a given threshold is exceeded. It will also set a flag
403 * to indicate that future allocations to the zone should directly go to
404 * the zone instead of the shared zone.
405 *
406 * @const Z_SPRAYQTN
407 * This flag tells the VM to allocate from the "spray quarantine" range when
408 * it services the allocation. For more details on what allocations qualify
409 * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
410 *
411 * @const Z_KALLOC_ARRAY
412 * Instead of returning a standard "pointer" return a pointer that encodes
413 * its size-class into the pointer itself (Only for kalloc, might limit
414 * the range of allocations that can be done).
415 *
416 * @const Z_FULLSIZE
417 * Used to indicate that the caller will use all available space in excess
418 * from the requested allocation size.
419 *
420 * @const Z_SKIP_KASAN
421 * Tell zalloc() not to do any kasan adjustments.
422 *
423 * @const Z_MAY_COPYINMAP
424 * This data allocation might be used with vm_map_copyin().
425 * This allows for those allocations to be associated with a proper VM object.
426 *
427 * @const Z_VM_TAG_BT_BIT
428 * Used to blame allocation accounting on the first kext
429 * found in the backtrace of the allocation.
430 *
431 * @const Z_NOZZC
432 * Used internally to mark allocations that will skip zero validation.
433 *
434 * @const Z_PCPU
435 * Used internally for the percpu paths.
436 *
437 * @const Z_VM_TAG_MASK
438 * Represents bits in which a vm_tag_t for the allocation can be passed.
439 * (used by kalloc for the zone tagging debugging feature).
440 #endif
441 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,
	Z_REALLOCF      = 0x0008,

#if XNU_KERNEL_PRIVATE
	Z_SET_NOTSHARED = 0x0040,
	Z_SPRAYQTN      = 0x0080,
	Z_KALLOC_ARRAY  = 0x0100,
#if KASAN_CLASSIC
	/* defined to 0 under KASAN_CLASSIC, making the flag a no-op there */
	Z_FULLSIZE      = 0x0000,
#else
	Z_FULLSIZE      = 0x0200,
#endif
#if KASAN_CLASSIC
	Z_SKIP_KASAN    = 0x0400,
#else
	/* defined to 0 when KASAN_CLASSIC is off: there is nothing to skip */
	Z_SKIP_KASAN    = 0x0000,
#endif
	Z_MAY_COPYINMAP = 0x0800,
	Z_VM_TAG_BT_BIT = 0x1000,
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO   = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO   = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL = Z_WAITOK | Z_ZERO | Z_NOFAIL,
#if XNU_KERNEL_PRIVATE
	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
#endif

	Z_KPI_MASK      = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
#if XNU_KERNEL_PRIVATE
	Z_ZERO_VM_TAG_BT_BIT = Z_ZERO | Z_VM_TAG_BT_BIT,
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT        16
#define Z_VM_TAG(fl, tag)     ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
#define Z_VM_TAG_BT(fl, tag)  ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
#endif
});
490
/*
 * This type is used so that kalloc_internal has good calling conventions
 * for callers who want to cheaply both know the allocated address
 * and the actual size of the allocation.
 */
struct kalloc_result {
	void           *addr __sized_by(size); /* the allocated address */
	vm_size_t       size;                  /* the actual size of the allocation */
};
500
501 /*!
502 * @typedef zone_stats_t
503 *
504 * @abstract
505 * The opaque type for per-cpu zone stats that are accumulated per zone
506 * or per zone-view.
507 */
508 typedef struct zone_stats *__zpercpu zone_stats_t;
509
510 /*!
511 * @typedef zone_view_t
512 *
513 * @abstract
514 * A view on a zone for accounting purposes.
515 *
516 * @discussion
517 * A zone view uses the zone it references for the allocations backing store,
518 * but does the allocation accounting at the view level.
519 *
520 * These accounting are surfaced by @b zprint(1) and similar tools,
521 * which allow for cheap but finer grained understanding of allocations
522 * without any fragmentation cost.
523 *
524 * Zone views are protected by the kernel lockdown and can't be initialized
525 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
526 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;   /* the zone backing this view's allocations */
	zone_stats_t    zv_stats;  /* per-cpu accounting kept at the view level */
	const char     *zv_name __unsafe_indexable; /* name surfaced by tools such as zprint(1) */
	zone_view_t     zv_next;   /* next view (NOTE(review): presumably a registration chain -- confirm) */
};
534
535 /*!
536 * @typedef kalloc_type_view_t
537 *
538 * @abstract
539 * The opaque type created at kalloc_type callsites to redirect calls to
540 * the right zone.
541 */
542 typedef struct kalloc_type_view *kalloc_type_view_t;
543
544 #if XNU_KERNEL_PRIVATE
545 /*
546 * kalloc_type/kfree_type implementation functions
547 */
548 extern void *__unsafe_indexable kalloc_type_impl_internal(
549 kalloc_type_view_t kt_view,
550 zalloc_flags_t flags);
551
552 extern void kfree_type_impl_internal(
553 kalloc_type_view_t kt_view,
554 void *ptr __unsafe_indexable);
555
556 static inline void *__unsafe_indexable
kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)557 kalloc_type_impl(
558 kalloc_type_view_t kt_view,
559 zalloc_flags_t flags)
560 {
561 void *__unsafe_indexable addr = kalloc_type_impl_internal(kt_view, flags);
562 if (flags & Z_NOFAIL) {
563 __builtin_assume(addr != NULL);
564 }
565 return addr;
566 }
567
568 #define kfree_type_impl(kt_view, ptr) \
569 kfree_type_impl_internal(kt_view, (ptr))
570
571 #else /* XNU_KERNEL_PRIVATE */
572
573 extern void *__unsafe_indexable kalloc_type_impl(
574 kalloc_type_view_t kt_view,
575 zalloc_flags_t flags);
576
577 static inline void *__unsafe_indexable
__kalloc_type_impl(kalloc_type_view_t kt_view,zalloc_flags_t flags)578 __kalloc_type_impl(
579 kalloc_type_view_t kt_view,
580 zalloc_flags_t flags)
581 {
582 void *addr = (kalloc_type_impl)(kt_view, flags);
583 if (flags & Z_NOFAIL) {
584 __builtin_assume(addr != NULL);
585 }
586 return addr;
587 }
588
589 #define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)
590
591 extern void kfree_type_impl(
592 kalloc_type_view_t kt_view,
593 void *ptr __unsafe_indexable);
594
595 #endif /* XNU_KERNEL_PRIVATE */
596
597 /*!
598 * @function zalloc
599 *
600 * @abstract
601 * Allocates an element from a specified zone.
602 *
603 * @discussion
604 * If the zone isn't exhaustible and is expandable, this call never fails.
605 *
606 * @param zone the zone or zone view to allocate from
607 *
608 * @returns NULL or the allocated element
609 */
610 __attribute__((malloc))
611 extern void *__unsafe_indexable zalloc(
612 zone_t zone);
613
614 __attribute__((malloc))
615 __attribute__((overloadable))
616 static inline void *__unsafe_indexable
zalloc(zone_view_t view)617 zalloc(zone_view_t view)
618 {
619 return zalloc((zone_t)view);
620 }
621
622 __attribute__((malloc))
623 __attribute__((overloadable))
624 static inline void *__unsafe_indexable
zalloc(kalloc_type_view_t kt_view)625 zalloc(kalloc_type_view_t kt_view)
626 {
627 return (kalloc_type_impl)(kt_view, Z_WAITOK);
628 }
629
630 /*!
631 * @function zalloc_noblock
632 *
633 * @abstract
634 * Allocates an element from a specified zone, but never blocks.
635 *
636 * @discussion
637 * This call is suitable for preemptible code, however allocation
638 * isn't allowed from interrupt context.
639 *
640 * @param zone the zone or zone view to allocate from
641 *
642 * @returns NULL or the allocated element
643 */
644 __attribute__((malloc))
645 extern void *__unsafe_indexable zalloc_noblock(
646 zone_t zone);
647
648 __attribute__((malloc))
649 __attribute__((overloadable))
650 static inline void *__unsafe_indexable
zalloc_noblock(zone_view_t view)651 zalloc_noblock(zone_view_t view)
652 {
653 return zalloc_noblock((zone_t)view);
654 }
655
656 __attribute__((malloc))
657 __attribute__((overloadable))
658 static inline void *__unsafe_indexable
zalloc_noblock(kalloc_type_view_t kt_view)659 zalloc_noblock(kalloc_type_view_t kt_view)
660 {
661 return (kalloc_type_impl)(kt_view, Z_NOWAIT);
662 }
663
664 /*!
665 * @function zalloc_flags()
666 *
667 * @abstract
668 * Allocates an element from a specified zone, with flags.
669 *
670 * @param zone the zone or zone view to allocate from
671 * @param flags a collection of @c zalloc_flags_t.
672 *
673 * @returns NULL or the allocated element
674 */
675 __attribute__((malloc))
676 extern void *__unsafe_indexable zalloc_flags(
677 zone_t zone,
678 zalloc_flags_t flags);
679
680 __attribute__((malloc))
681 __attribute__((overloadable))
682 static inline void *__unsafe_indexable
__zalloc_flags(zone_t zone,zalloc_flags_t flags)683 __zalloc_flags(
684 zone_t zone,
685 zalloc_flags_t flags)
686 {
687 void *__unsafe_indexable addr = (zalloc_flags)(zone, flags);
688 if (flags & Z_NOFAIL) {
689 __builtin_assume(addr != NULL);
690 }
691 return addr;
692 }
693
694 __attribute__((malloc))
695 __attribute__((overloadable))
696 static inline void *__unsafe_indexable
__zalloc_flags(zone_view_t view,zalloc_flags_t flags)697 __zalloc_flags(
698 zone_view_t view,
699 zalloc_flags_t flags)
700 {
701 return __zalloc_flags((zone_t)view, flags);
702 }
703
704 __attribute__((malloc))
705 __attribute__((overloadable))
706 static inline void *__unsafe_indexable
__zalloc_flags(kalloc_type_view_t kt_view,zalloc_flags_t flags)707 __zalloc_flags(
708 kalloc_type_view_t kt_view,
709 zalloc_flags_t flags)
710 {
711 void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
712 if (flags & Z_NOFAIL) {
713 __builtin_assume(addr != NULL);
714 }
715 return addr;
716 }
717
718 #if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
719 #define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
720 #else
721 #define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
722 #endif
723
724 /*!
725 * @macro zalloc_id
726 *
727 * @abstract
728 * Allocates an element from a specified zone ID, with flags.
729 *
730 * @param zid The proper @c ZONE_ID_* constant.
731 * @param flags a collection of @c zalloc_flags_t.
732 *
733 * @returns NULL or the allocated element
734 */
735 __attribute__((malloc))
736 extern void *__unsafe_indexable zalloc_id(
737 zone_id_t zid,
738 zalloc_flags_t flags);
739
740 __attribute__((malloc))
741 static inline void *__unsafe_indexable
__zalloc_id(zone_id_t zid,zalloc_flags_t flags)742 __zalloc_id(
743 zone_id_t zid,
744 zalloc_flags_t flags)
745 {
746 void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
747 if (flags & Z_NOFAIL) {
748 __builtin_assume(addr != NULL);
749 }
750 return addr;
751 }
752
753 #if XNU_KERNEL_PRIVATE
754 #define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
755 #else
756 #define zalloc_id(zid, fl) __zalloc_id(zid, fl)
757 #endif
758
759 /*!
760 * @function zalloc_ro
761 *
762 * @abstract
763 * Allocates an element from a specified read-only zone.
764 *
765 * @param zone_id the zone id to allocate from
766 * @param flags a collection of @c zalloc_flags_t.
767 *
768 * @returns NULL or the allocated element
769 */
770 __attribute__((malloc))
771 extern void *__unsafe_indexable zalloc_ro(
772 zone_id_t zone_id,
773 zalloc_flags_t flags);
774
775 __attribute__((malloc))
776 static inline void *__unsafe_indexable
__zalloc_ro(zone_id_t zone_id,zalloc_flags_t flags)777 __zalloc_ro(
778 zone_id_t zone_id,
779 zalloc_flags_t flags)
780 {
781 void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
782 if (flags & Z_NOFAIL) {
783 __builtin_assume(addr != NULL);
784 }
785 return addr;
786 }
787
788 #if XNU_KERNEL_PRIVATE
789 #define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
790 #else
791 #define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
792 #endif
793
794 /*!
795 * @function zalloc_ro_mut
796 *
797 * @abstract
798 * Modifies an element from a specified read-only zone.
799 *
800 * @discussion
801 * Modifying compiler-assisted authenticated pointers using this function will
802 * not result in a signed pointer being written. The caller is expected to
803 * sign the value appropriately beforehand if they wish to do this.
804 *
805 * @param zone_id the zone id to allocate from
806 * @param elem element to be modified
807 * @param offset offset from element
808 * @param new_data pointer to new data
809 * @param new_data_size size of modification
810 *
811 */
812 extern void zalloc_ro_mut(
813 zone_id_t zone_id,
814 void *elem __unsafe_indexable,
815 vm_offset_t offset,
816 const void *new_data __sized_by(new_data_size),
817 vm_size_t new_data_size);
818
819 /*!
820 * @function zalloc_ro_update_elem
821 *
822 * @abstract
823 * Update the value of an entire element allocated in the read only allocator.
824 *
825 * @param zone_id the zone id to allocate from
826 * @param elem element to be modified
827 * @param new_data pointer to new data
828 *
829 */
830 #define zalloc_ro_update_elem(zone_id, elem, new_data) ({ \
831 const typeof(*(elem)) *__new_data = (new_data); \
832 zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data)); \
833 })
834
835 /*!
836 * @function zalloc_ro_update_field
837 *
838 * @abstract
839 * Update a single field of an element allocated in the read only allocator.
840 *
841 * @param zone_id the zone id to allocate from
842 * @param elem element to be modified
843 * @param field the element field to be modified
 * @param value pointer to the new data
845 *
846 */
847 #define zalloc_ro_update_field(zone_id, elem, field, value) ({ \
848 const typeof((elem)->field) *__value = (value); \
849 zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field), \
850 __value, sizeof((elem)->field)); \
851 })
852
853 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
854
855 /*!
856 * @enum zro_atomic_op_t
857 *
858 * @brief
859 * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
860 * atomic operations.
861 *
862 * @discussion
863 * This enum provides all flavors of atomic operations supported in sizes 8,
864 * 16, 32, 64 bits.
865 *
 * @const ZRO_ATOMIC_OR_*
 * To perform an @c os_atomic_or
 *
 * @const ZRO_ATOMIC_XOR_*
 * To perform an @c os_atomic_xor
 *
 * @const ZRO_ATOMIC_AND_*
 * To perform an @c os_atomic_and
 *
 * @const ZRO_ATOMIC_ADD_*
 * To perform an @c os_atomic_add
 *
 * @const ZRO_ATOMIC_XCHG_*
 * To perform an @c os_atomic_xchg
 *
881 */
__enum_decl(zro_atomic_op_t, uint32_t, {
	/*
	 * Encoding: the low nibble holds the operand size in bytes
	 * (1, 2, 4 or 8 -- checked via `op & 0xf` by the callers below),
	 * the upper bits select the operation
	 * (0x10 OR, 0x20 XOR, 0x30 AND, 0x40 ADD, 0x50 XCHG).
	 */
	ZRO_ATOMIC_OR_8     = 0x00000010 | 1,
	ZRO_ATOMIC_OR_16    = 0x00000010 | 2,
	ZRO_ATOMIC_OR_32    = 0x00000010 | 4,
	ZRO_ATOMIC_OR_64    = 0x00000010 | 8,

	ZRO_ATOMIC_XOR_8    = 0x00000020 | 1,
	ZRO_ATOMIC_XOR_16   = 0x00000020 | 2,
	ZRO_ATOMIC_XOR_32   = 0x00000020 | 4,
	ZRO_ATOMIC_XOR_64   = 0x00000020 | 8,

	ZRO_ATOMIC_AND_8    = 0x00000030 | 1,
	ZRO_ATOMIC_AND_16   = 0x00000030 | 2,
	ZRO_ATOMIC_AND_32   = 0x00000030 | 4,
	ZRO_ATOMIC_AND_64   = 0x00000030 | 8,

	ZRO_ATOMIC_ADD_8    = 0x00000040 | 1,
	ZRO_ATOMIC_ADD_16   = 0x00000040 | 2,
	ZRO_ATOMIC_ADD_32   = 0x00000040 | 4,
	ZRO_ATOMIC_ADD_64   = 0x00000040 | 8,

	ZRO_ATOMIC_XCHG_8   = 0x00000050 | 1,
	ZRO_ATOMIC_XCHG_16  = 0x00000050 | 2,
	ZRO_ATOMIC_XCHG_32  = 0x00000050 | 4,
	ZRO_ATOMIC_XCHG_64  = 0x00000050 | 8,

	/* convenient spellings */
	ZRO_ATOMIC_OR_LONG  = ZRO_ATOMIC_LONG(OR),
	ZRO_ATOMIC_XOR_LONG = ZRO_ATOMIC_LONG(XOR),
	ZRO_ATOMIC_AND_LONG = ZRO_ATOMIC_LONG(AND),
	ZRO_ATOMIC_ADD_LONG = ZRO_ATOMIC_LONG(ADD),
	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
});
915
916 /*!
917 * @function zalloc_ro_mut_atomic
918 *
919 * @abstract
920 * Atomically update an offset in an element allocated in the read only
921 * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
922 *
923 * @param zone_id the zone id to allocate from
924 * @param elem element to be modified
925 * @param offset offset in the element to be modified
926 * @param op atomic operation to perform (see @c zro_atomic_op_t)
927 * @param value value for the atomic operation
928 *
929 */
930 extern uint64_t zalloc_ro_mut_atomic(
931 zone_id_t zone_id,
932 void *elem __unsafe_indexable,
933 vm_offset_t offset,
934 zro_atomic_op_t op,
935 uint64_t value);
936
937 /*!
938 * @macro zalloc_ro_update_field_atomic
939 *
940 * @abstract
941 * Atomically update a single field of an element allocated in the read only
942 * allocator.
943 *
944 * @param zone_id the zone id to allocate from
945 * @param elem element to be modified
946 * @param field the element field to be modified
947 * @param op atomic operation to perform (see @c zro_atomic_op_t)
948 * @param value value for the atomic operation
949 *
950 */
951 #define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value) ({ \
952 const typeof((elem)->field) __value = (value); \
953 static_assert(sizeof(__value) == (op & 0xf)); \
954 (os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id, \
955 elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value); \
956 })
957
958 /*!
959 * @function zalloc_ro_clear
960 *
961 * @abstract
962 * Zeroes an element from a specified read-only zone.
963 *
964 * @param zone_id the zone id to allocate from
965 * @param elem element to be modified
966 * @param offset offset from element
967 * @param size size of modification
968 */
969 extern void zalloc_ro_clear(
970 zone_id_t zone_id,
971 void *elem __unsafe_indexable,
972 vm_offset_t offset,
973 vm_size_t size);
974
975 /*!
976 * @function zalloc_ro_clear_field
977 *
978 * @abstract
979 * Zeroes the specified field of an element from a specified read-only zone.
980 *
981 * @param zone_id the zone id to allocate from
982 * @param elem element to be modified
 * @param field the field of the element to be zeroed
984 */
985 #define zalloc_ro_clear_field(zone_id, elem, field) \
986 zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
987 sizeof((elem)->field))
988
989 /*!
990 * @function zfree_id()
991 *
992 * @abstract
993 * Frees an element previously allocated with @c zalloc_id().
994 *
995 * @param zone_id the zone id to free the element to.
996 * @param addr the address to free
997 */
extern void zfree_id(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
/*
 * The macro form loads and erases (NULLs) the caller's pointer variable
 * as it frees, so `elem` must be an lvalue.
 * NOTE(review): the erase semantics come from os_ptr_load_and_erase()
 * in <os/alloc_util.h> -- confirm there.
 */
#define zfree_id(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1005
1006
1007 /*!
1008 * @function zfree_ro()
1009 *
1010 * @abstract
1011 * Frees an element previously allocated with @c zalloc_ro().
1012 *
1013 * @param zone_id the zone id to free the element to.
1014 * @param addr the address to free
1015 */
extern void zfree_ro(
	zone_id_t       zone_id,
	void           *addr __unsafe_indexable);
/*
 * The macro form loads and erases (NULLs) the caller's pointer variable
 * as it frees, so `elem` must be an lvalue.
 */
#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
1023
1024
1025 /*!
1026 * @function zfree
1027 *
1028 * @abstract
1029 * Frees an element allocated with @c zalloc*.
1030 *
1031 * @discussion
1032 * If the element being freed doesn't belong to the specified zone,
1033 * then this call will panic.
1034 *
1035 * @param zone the zone or zone view to free the element to.
1036 * @param elem the element to free
1037 */
1038 extern void zfree(
1039 zone_t zone,
1040 void *elem __unsafe_indexable);
1041
1042 __attribute__((overloadable))
1043 static inline void
zfree(zone_view_t view,void * elem __unsafe_indexable)1044 zfree(
1045 zone_view_t view,
1046 void *elem __unsafe_indexable)
1047 {
1048 zfree((zone_t)view, elem);
1049 }
1050
1051 __attribute__((overloadable))
1052 static inline void
zfree(kalloc_type_view_t kt_view,void * elem __unsafe_indexable)1053 zfree(
1054 kalloc_type_view_t kt_view,
1055 void *elem __unsafe_indexable)
1056 {
1057 return kfree_type_impl(kt_view, elem);
1058 }
1059
1060 #define zfree(zone, elem) ({ \
1061 __auto_type __zfree_zone = (zone); \
1062 (zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
1063 })
1064
1065
1066 /* deprecated KPIS */
1067
1068 __zalloc_deprecated("use zone_create()")
1069 extern zone_t zinit(
1070 vm_size_t size, /* the size of an element */
1071 vm_size_t maxmem, /* maximum memory to use */
1072 vm_size_t alloc, /* allocation size */
1073 const char *name __unsafe_indexable);
1074
1075 #pragma mark: implementation details
1076
1077 #define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
1078 #define __ZONE_DECLARE_TYPE2(var, type_t) \
1079 __attribute__((visibility("hidden"))) \
1080 extern type_t *__single __zalloc__##var##__type_name
1081
1082 #ifdef XNU_KERNEL_PRIVATE
1083 #pragma mark - XNU only interfaces
1084
1085 #include <kern/cpu_number.h>
1086
1087 #pragma GCC visibility push(hidden)
1088
1089 #pragma mark XNU only: zalloc (extended)
1090
1091 #define ZALIGN_NONE (sizeof(uint8_t) - 1)
1092 #define ZALIGN_16 (sizeof(uint16_t) - 1)
1093 #define ZALIGN_32 (sizeof(uint32_t) - 1)
1094 #define ZALIGN_PTR (sizeof(void *) - 1)
1095 #define ZALIGN_64 (sizeof(uint64_t) - 1)
1096 #define ZALIGN(t) (_Alignof(t) - 1)
1097
1098
1099 /*!
1100 * @function zalloc_permanent_tag()
1101 *
1102 * @abstract
1103 * Allocates a permanent element from the permanent zone
1104 *
1105 * @discussion
1106 * Memory returned by this function is always 0-initialized.
1107 * Note that the size of this allocation can not be determined
1108 * by zone_element_size so it should not be used for copyio.
1109 *
1110 * @param size the element size (must be smaller than PAGE_SIZE)
1111 * @param align_mask the required alignment for this allocation
1112 * @param tag the tag to use for allocations larger than a page.
1113 *
1114 * @returns the allocated element
1115 */
1116 __attribute__((malloc))
1117 extern void *__sized_by(size) zalloc_permanent_tag(
1118 vm_size_t size,
1119 vm_offset_t align_mask,
1120 vm_tag_t tag)
1121 __attribute__((__diagnose_if__((align_mask & (align_mask + 1)),
1122 "align mask looks invalid", "error")));
1123
1124 /*!
1125 * @function zalloc_permanent()
1126 *
1127 * @abstract
1128 * Allocates a permanent element from the permanent zone
1129 *
1130 * @discussion
1131 * Memory returned by this function is always 0-initialized.
1132 * Note that the size of this allocation can not be determined
1133 * by zone_element_size so it should not be used for copyio.
1134 *
1135 * @param size the element size (must be smaller than PAGE_SIZE)
1136 * @param align_mask the required alignment for this allocation
1137 *
1138 * @returns the allocated element
1139 */
1140 #define zalloc_permanent(size, align) \
1141 zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)
1142
1143 /*!
1144 * @function zalloc_permanent_type()
1145 *
1146 * @abstract
1147 * Allocates a permanent element of a given type with its natural alignment.
1148 *
1149 * @discussion
1150 * Memory returned by this function is always 0-initialized.
1151 *
1152 * @param type_t the element type
1153 *
1154 * @returns the allocated element
1155 */
1156 #define zalloc_permanent_type(type_t) \
1157 __unsafe_forge_single(type_t *, \
1158 zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
1159
1160 /*!
1161 * @function zalloc_first_proc_made()
1162 *
1163 * @abstract
1164 * Declare that the "early" allocation phase is done.
1165 */
1166 extern void zalloc_first_proc_made(void);
1167 /*!
1168 * @function zalloc_iokit_lockdown()
1169 *
1170 * @abstract
1171 * Declare that iokit matching has started.
1172 */
1173 extern void zalloc_iokit_lockdown(void);
1174
1175 #pragma mark XNU only: per-cpu allocations
1176
1177 /*!
1178 * @macro zpercpu_get_cpu()
1179 *
1180 * @abstract
1181 * Get a pointer to a specific CPU slot of a given per-cpu variable.
1182 *
1183 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1184 * @param cpu the specified CPU number as returned by @c cpu_number()
1185 *
1186 * @returns the per-CPU slot for @c ptr for the specified CPU.
1187 */
1188 #define zpercpu_get_cpu(ptr, cpu) \
1189 __zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)(cpu)))
1190
1191 /*!
1192 * @macro zpercpu_get()
1193 *
1194 * @abstract
1195 * Get a pointer to the current CPU slot of a given per-cpu variable.
1196 *
1197 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1198 *
1199 * @returns the per-CPU slot for @c ptr for the current CPU.
1200 */
1201 #define zpercpu_get(ptr) \
1202 zpercpu_get_cpu(ptr, cpu_number())
1203
1204 /*!
1205 * @macro zpercpu_foreach()
1206 *
1207 * @abstract
1208 * Enumerate all per-CPU slots by address.
1209 *
1210 * @param it the name for the iterator
1211 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1212 */
1213 #define zpercpu_foreach(it, ptr) \
1214 for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
1215 __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
1216 it < __end_##it; it = __zpcpu_next(it))
1217
1218 /*!
1219 * @macro zpercpu_foreach_cpu()
1220 *
1221 * @abstract
1222 * Enumerate all per-CPU slots by CPU slot number.
1223 *
1224 * @param cpu the name for cpu number iterator.
1225 */
1226 #define zpercpu_foreach_cpu(cpu) \
1227 for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1228
1229 /*!
1230 * @function zalloc_percpu()
1231 *
1232 * @abstract
1233 * Allocates an element from a per-cpu zone.
1234 *
1235 * @discussion
1236 * The returned pointer cannot be used directly and must be manipulated
1237 * through the @c zpercpu_get*() interfaces.
1238 *
1239 * @param zone_or_view the zone or zone view to allocate from
1240 * @param flags a collection of @c zalloc_flags_t.
1241 *
1242 * @returns NULL or the allocated element
1243 */
1244 extern void *__zpercpu zalloc_percpu(
1245 zone_or_view_t zone_or_view,
1246 zalloc_flags_t flags);
1247
1248 static inline void *__zpercpu
__zalloc_percpu(zone_or_view_t zone_or_view,zalloc_flags_t flags)1249 __zalloc_percpu(
1250 zone_or_view_t zone_or_view,
1251 zalloc_flags_t flags)
1252 {
1253 void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
1254 if (flags & Z_NOFAIL) {
1255 __builtin_assume(addr != NULL);
1256 }
1257 return addr;
1258 }
1259
1260 #define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1261
1262 /*!
1263 * @function zfree_percpu()
1264 *
1265 * @abstract
1266 * Frees an element previously allocated with @c zalloc_percpu().
1267 *
1268 * @param zone_or_view the zone or zone view to free the element to.
1269 * @param addr the address to free
1270 */
1271 extern void zfree_percpu(
1272 zone_or_view_t zone_or_view,
1273 void *__zpercpu addr);
1274
1275 /*!
1276 * @function zalloc_percpu_permanent()
1277 *
1278 * @abstract
1279 * Allocates a permanent percpu-element from the permanent percpu zone.
1280 *
1281 * @discussion
1282 * Memory returned by this function is always 0-initialized.
1283 *
1284 * @param size the element size (must be smaller than PAGE_SIZE)
1285 * @param align_mask the required alignment for this allocation
1286 *
1287 * @returns the allocated element
1288 */
1289 extern void *__zpercpu zalloc_percpu_permanent(
1290 vm_size_t size,
1291 vm_offset_t align_mask);
1292
1293 /*!
1294 * @function zalloc_percpu_permanent_type()
1295 *
1296 * @abstract
1297 * Allocates a permanent percpu-element from the permanent percpu zone of a given
1298 * type with its natural alignment.
1299 *
1300 * @discussion
1301 * Memory returned by this function is always 0-initialized.
1302 *
1303 * @param type_t the element type
1304 *
1305 * @returns the allocated element
1306 */
1307 #define zalloc_percpu_permanent_type(type_t) \
1308 ((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1309
1310
1311 #pragma mark XNU only: SMR support for zones
1312
1313 struct smr;
1314
1315 /*!
1316 * @typedef zone_smr_free_cb_t
1317 *
1318 * @brief
1319 * Type for the delayed free callback for SMR zones.
1320 *
1321 * @description
1322 * This function is called before an element is reused,
1323 * or when memory is returned to the system.
1324 *
1325 * This function MUST zero the element, and if no special
1326 * action is to be taken on free, then @c bzero() is a fine
1327 * callback to use.
1328 *
1329 * This function also must be preemption-disabled safe,
1330 * as it runs with preemption disabled.
1331 *
1332 *
1333 * Note that this function should only clean the fields
1334 * that must be preserved for stale SMR readers to see.
1335 * Any field that is accessed after element validation
1336 * such as a try-retain or acquiring a lock on it must
1337 * be cleaned up much earlier as they might hold onto
1338 * expensive resources.
1339 *
1340 * The suggested pattern for an SMR type using this facility,
1341 * is to have 2 functions:
1342 *
1343 * - one "retire" stage that tries to clean up as much from
1344 * the element as possible, with great care to leave no dangling
1345 * pointers around, as elements in this stage might linger
1346 * in the allocator for a long time, and this could possibly
1347 * be abused during UaF exploitation.
1348 *
1349 * - one "smr_free" function which cleans up whatever was left,
1350 * and zeroes the rest of the element.
1351 *
1352 * <code>
1353 * void
1354 * type_retire(type_t elem)
1355 * {
1356 * // invalidating the element makes most fields
1357 * // inaccessible to readers.
1358 * type_mark_invalid(elem);
1359 *
1360 * // do cleanups for things requiring a validity check
1361 * kfree_type(some_type_t, elem->expensive_thing);
1362 * type_remove_from_global_list(&elem->linkage);
1363 *
1364 * zfree_smr(type_zone, elem);
1365 * }
1366 *
1367 * void
1368 * type_smr_free(void *_elem)
1369 * {
1370 *     type_t elem = _elem;
1371 *
1372 * // cleanup fields that are used to "find" this element
1373 * // and that SMR readers may access hazardously.
1374 * lck_ticket_destroy(&elem->lock);
1375 * kfree_data(elem->key, elem->keylen);
1376 *
1377 * // compulsory: element must be zeroed fully
1378 * bzero(elem, sizeof(*elem));
1379 * }
1380 * </code>
1381 */
1382 typedef void (*zone_smr_free_cb_t)(void *, size_t);
1383
1384 /*!
1385 * @function zone_enable_smr()
1386 *
1387 * @abstract
1388 * Enable SMR for a zone.
1389 *
1390 * @discussion
1391 * This can only be done once, and must be done before
1392 * the first allocation is made with this zone.
1393 *
1394 * @param zone the zone to enable SMR for
1395 * @param smr the smr domain to use
1396 * @param free_cb the free callback to use
1397 */
1398 extern void zone_enable_smr(
1399 zone_t zone,
1400 struct smr *smr,
1401 zone_smr_free_cb_t free_cb);
1402
1403 /*!
1404 * @function zone_id_enable_smr()
1405 *
1406 * @abstract
1407 * Enable SMR for a zone ID.
1408 *
1409 * @discussion
1410 * This can only be done once, and must be done before
1411 * the first allocation is made with this zone.
1412 *
1413 * @param zone_id the zone to enable SMR for
1414 * @param smr the smr domain to use
1415 * @param free_cb the free callback to use
1416 */
1417 #define zone_id_enable_smr(zone_id, smr, free_cb) ({ \
1418 void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t); \
1419 \
1420 __cb = (free_cb); \
1421 zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb); \
1422 })
1423
1424 /*!
1425 * @macro zalloc_smr()
1426 *
1427 * @abstract
1428 * Allocates an element from an SMR enabled zone
1429 *
1430 * @discussion
1431 * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
1432 *
1433 * @param zone the zone to allocate from
1434 * @param flags a collection of @c zalloc_flags_t.
1435 *
1436 * @returns NULL or the allocated element
1437 */
1438 #define zalloc_smr(zone, flags) \
1439 zalloc_flags(zone, flags)
1440
1441 /*!
1442 * @macro zalloc_id_smr()
1443 *
1444 * @abstract
1445 * Allocates an element from a specified zone ID with SMR enabled.
1446 *
1447 * @param zid The proper @c ZONE_ID_* constant.
1448 * @param flags a collection of @c zalloc_flags_t.
1449 *
1450 * @returns NULL or the allocated element
1451 */
1452 #define zalloc_id_smr(zid, flags) \
1453 zalloc_id(zid, flags)
1454
1455 /*!
1456 * @macro zfree_smr()
1457 *
1458 * @abstract
1459 * Frees an element previously allocated with @c zalloc_smr().
1460 *
1461 * @discussion
1462 * When zfree_smr() is called, then the element is not immediately zeroed,
1463 * and the "free" callback that has been registered with the zone will
1464 * run later (@see zone_smr_free_cb_t).
1465 *
1466 * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
1467 *
1468 *
1469 * It is guaranteed that the SMR timestamp associated with an element
1470 * will always be equal or greater than the stamp associated with
1471 * elements freed before it on the same thread.
1472 *
1473 * It means that when freeing multiple elements in a sequence, these
1474 * must be freed in topological order (parents before children).
1475 *
1476 * It is worth noting that calling zfree_smr() on several elements
1477 * in a given order doesn't necessarily mean they will be effectively
1478 * reused or cleaned up in that same order, only that their SMR clocks
1479 * will expire in that order.
1480 *
1481 *
1482 * @param zone the zone to free the element to.
1483 * @param elem the address to free
1484 */
1485 extern void zfree_smr(
1486 zone_t zone,
1487 void *elem __unsafe_indexable);
1488 #define zfree_smr(zone, elem) ({ \
1489 __auto_type __zfree_zone = (zone); \
1490 (zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
1491 })
1492
1493
1494 /*!
1495 * @function zfree_id_smr()
1496 *
1497 * @abstract
1498 * Frees an element previously allocated with @c zalloc_id_smr().
1499 *
1500 * @param zone_id the zone id to free the element to.
1501 * @param addr the address to free
1502 */
1503 extern void zfree_id_smr(
1504 zone_id_t zone_id,
1505 void *addr __unsafe_indexable);
1506 #define zfree_id_smr(zid, elem) ({ \
1507 zone_id_t __zfree_zid = (zid); \
1508 (zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
1509 })
1510
1511 /*!
1512 * @macro zfree_smr_noclear()
1513 *
1514 * @abstract
1515 * Frees an element previously allocated with @c zalloc_smr().
1516 *
1517 * @discussion
1518 * This variant doesn't clear the pointer passed as an argument,
1519 * as it is often required for SMR algorithms to function correctly
1520 * to leave pointers "dangling" to an extent.
1521 *
1522 * However it expects the field in question to be an SMR_POINTER()
1523 * struct.
1524 *
1525 * @param zone the zone to free the element to.
1526 * @param elem the address to free
1527 */
1528 #define zfree_smr_noclear(zone, elem) \
1529 (zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1530
1531 /*!
1532 * @macro zfree_id_smr_noclear()
1533 *
1534 * @abstract
1535 * Frees an element previously allocated with @c zalloc_id_smr().
1536 *
1537 * @discussion
1538 * This variant doesn't clear the pointer passed as an argument,
1539 * as it is often required for SMR algorithms to function correctly
1540 * to leave pointers "dangling" to an extent.
1541 *
1542 * However it expects the field in question to be an SMR_POINTER()
1543 * struct.
1544 *
1545 * @param zone the zone to free the element to.
1546 * @param elem the address to free
1547 */
1548 #define zfree_id_smr_noclear(zone, elem) \
1549 (zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1550
1551
1552 #pragma mark XNU only: zone creation (extended)
1553
1554 /*!
1555 * @enum zone_reserved_id_t
1556 *
1557 * @abstract
1558 * Well known pre-registered zones, allowing use of zone_id_require()
1559 *
1560 * @discussion
1561 * @c ZONE_ID__* aren't real zone IDs.
1562 *
1563 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1564 * easy a value to produce (by malice or accident).
1565 *
1566 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1567 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
1568 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creation IDs
1569 * listed in @c zone_create_ro_id_t.
1570 *
1571 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1572 * @c zone_create().
1573 */
1574 __enum_decl(zone_reserved_id_t, zone_id_t, {
1575 ZONE_ID__ZERO,
1576
1577 ZONE_ID_PERMANENT,
1578 ZONE_ID_PERCPU_PERMANENT,
1579
1580 ZONE_ID_THREAD_RO,
1581 ZONE_ID_MAC_LABEL,
1582 ZONE_ID_PROC_RO,
1583 ZONE_ID_PROC_SIGACTS_RO,
1584 ZONE_ID_KAUTH_CRED,
1585 ZONE_ID_CS_BLOB,
1586
1587 ZONE_ID_SANDBOX_RO,
1588 ZONE_ID_PROFILE_RO,
1589 ZONE_ID_PROTOBOX,
1590 ZONE_ID_SB_FILTER,
1591 ZONE_ID_AMFI_OSENTITLEMENTS,
1592
1593 ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
1594 ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
1595 ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
1596 ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,
1597
1598 ZONE_ID_PMAP,
1599 ZONE_ID_VM_MAP,
1600 ZONE_ID_VM_MAP_ENTRY,
1601 ZONE_ID_VM_MAP_HOLES,
1602 ZONE_ID_VM_MAP_COPY,
1603 ZONE_ID_VM_PAGES,
1604 ZONE_ID_IPC_PORT,
1605 ZONE_ID_IPC_PORT_SET,
1606 ZONE_ID_IPC_VOUCHERS,
1607 ZONE_ID_PROC_TASK,
1608 ZONE_ID_THREAD,
1609 ZONE_ID_TURNSTILE,
1610 ZONE_ID_SEMAPHORE,
1611 ZONE_ID_SELECT_SET,
1612 ZONE_ID_FILEPROC,
1613
1614 #if !CONFIG_MBUF_MCACHE
1615 ZONE_ID_MBUF_REF,
1616 ZONE_ID_MBUF,
1617 ZONE_ID_CLUSTER_2K,
1618 ZONE_ID_CLUSTER_4K,
1619 ZONE_ID_CLUSTER_16K,
1620 ZONE_ID_MBUF_CLUSTER_2K,
1621 ZONE_ID_MBUF_CLUSTER_4K,
1622 ZONE_ID_MBUF_CLUSTER_16K,
1623 #endif /* !CONFIG_MBUF_MCACHE */
1624
1625 ZONE_ID__FIRST_DYNAMIC,
1626 });
1627
1628 /*!
1629 * @const ZONE_ID_ANY
1630 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1631 * Zone ID.
1632 */
1633 #define ZONE_ID_ANY ((zone_id_t)-1)
1634
1635 /*!
1636 * @const ZONE_ID_INVALID
1637 * An invalid zone_id_t that corresponds to nothing.
1638 */
1639 #define ZONE_ID_INVALID ((zone_id_t)-2)
1640
1641 /**!
1642 * @function zone_by_id
1643 *
1644 * @param zid the specified zone ID.
1645 * @returns the zone with that ID.
1646 */
1647 zone_t zone_by_id(
1648 size_t zid) __pure2;
1649
1650 /**!
1651 * @function zone_name
1652 *
1653 * @param zone the specified zone
1654 * @returns the name of the specified zone.
1655 */
1656 const char *__unsafe_indexable zone_name(
1657 zone_t zone);
1658
1659 /**!
1660 * @function zone_heap_name
1661 *
1662 * @param zone the specified zone
1663 * @returns the name of the heap this zone is part of, or "".
1664 */
1665 const char *__unsafe_indexable zone_heap_name(
1666 zone_t zone);
1667
1668 /*!
1669 * @function zone_create_ext
1670 *
1671 * @abstract
1672 * Creates a zone with the specified parameters.
1673 *
1674 * @discussion
1675 * This is an extended version of @c zone_create().
1676 *
1677 * @param name the name for the new zone.
1678 * @param size the size of the elements returned by this zone.
1679 * @param flags a set of @c zone_create_flags_t flags.
1680 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1681 *
1682 * @param extra_setup a block that can perform non trivial initialization
1683 * on the zone before it is marked valid.
1684 * This block can call advanced setups like:
1685 * - zone_set_exhaustible()
1686 *
1687 * @returns the created zone, this call never fails.
1688 */
1689 extern zone_t zone_create_ext(
1690 const char *name __unsafe_indexable,
1691 vm_size_t size,
1692 zone_create_flags_t flags,
1693 zone_id_t desired_zid,
1694 void (^extra_setup)(zone_t));
1695
1696 /*!
1697 * @macro ZONE_DECLARE
1698 *
1699 * @abstract
1700 * Declares a zone variable and its associated type.
1701 *
1702 * @param var the name of the variable to declare.
1703 * @param type_t the type of elements in the zone.
1704 */
1705 #define ZONE_DECLARE(var, type_t) \
1706 extern zone_t var; \
1707 __ZONE_DECLARE_TYPE(var, type_t)
1708
1709 /*!
1710 * @macro ZONE_DECLARE_ID
1711 *
1712 * @abstract
1713 * Declares the type associated with a zone ID.
1714 *
1715 * @param id the name of zone ID to associate a type with.
1716 * @param type_t the type of elements in the zone.
1717 */
1718 #define ZONE_DECLARE_ID(id, type_t) \
1719 __ZONE_DECLARE_TYPE(id, type_t)
1720
1721 /*!
1722 * @macro ZONE_DEFINE
1723 *
1724 * @abstract
1725 * Declares a zone variable to automatically initialize with the specified
1726 * parameters.
1727 *
1728 * @discussion
1729 * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1730 *
1731 * @param var the name of the variable to declare.
1732 * @param name the name for the zone
1733 * @param size the size of the elements returned by this zone.
1734 * @param flags a set of @c zone_create_flags_t flags.
1735 */
1736 #define ZONE_DEFINE(var, name, size, flags) \
1737 SECURITY_READ_ONLY_LATE(zone_t) var; \
1738 static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
1739 static __startup_data struct zone_create_startup_spec \
1740 __startup_zone_spec_ ## var = { &var, name, size, flags, \
1741 ZONE_ID_ANY, NULL }; \
1742 STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1743 &__startup_zone_spec_ ## var)
1744
1745 /*!
1746 * @macro ZONE_DEFINE_TYPE
1747 *
1748 * @abstract
1749 * Defines a zone variable to automatically initialize with the specified
1750 * parameters, associated with a particular type.
1751 *
1752 * @param var the name of the variable to declare.
1753 * @param name the name for the zone
1754 * @param type_t the type of elements in the zone.
1755 * @param flags a set of @c zone_create_flags_t flags.
1756 */
1757 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1758 ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1759 __ZONE_DECLARE_TYPE(var, type_t)
1760
1761 /*!
1762 * @macro ZONE_DEFINE_ID
1763 *
1764 * @abstract
1765 * Initializes a given zone automatically during startup with the specified
1766 * parameters.
1767 *
1768 * @param zid a @c zone_reserved_id_t value.
1769 * @param name the name for the zone
1770 * @param type_t the type of elements in the zone.
1771 * @param flags a set of @c zone_create_flags_t flags.
1772 */
1773 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1774 ZONE_DECLARE_ID(zid, type_t); \
1775 ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1776
1777 /*!
1778 * @macro ZONE_INIT
1779 *
1780 * @abstract
1781 * Initializes a given zone automatically during startup with the specified
1782 * parameters.
1783 *
1784 * @param var the name of the variable to initialize.
1785 * @param name the name for the zone
1786 * @param size the size of the elements returned by this zone.
1787 * @param flags a set of @c zone_create_flags_t flags.
1788 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1789 * @param extra_setup a block that can perform non trivial initialization
1790 * (@see @c zone_create_ext()).
1791 */
1792 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1793 __ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1794
1795 /*!
1796 * @function zone_id_require
1797 *
1798 * @abstract
1799 * Requires for a given pointer to belong to the specified zone, by ID and size.
1800 *
1801 * @discussion
1802 * The function panics if the check fails as it indicates that the kernel
1803 * internals have been compromised.
1804 *
1805 * This is a variant of @c zone_require() which:
1806 * - isn't sensitive to @c zone_t::elem_size being compromised,
1807 * - is slightly faster as it saves one load and a multiplication.
1808 *
1809 * @param zone_id the zone ID the address needs to belong to.
1810 * @param elem_size the size of elements for this zone.
1811 * @param addr the element address to check.
1812 */
1813 extern void zone_id_require(
1814 zone_id_t zone_id,
1815 vm_size_t elem_size,
1816 void *addr __unsafe_indexable);
1817
1818 /* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
1819 extern void zone_set_exhaustible(
1820 zone_t zone,
1821 vm_size_t max_elements,
1822 bool exhausts_by_design);
1823
1824 /*!
1825 * @function zone_raise_reserve()
1826 *
1827 * @brief
1828 * Used to raise the reserve on a zone.
1829 *
1830 * @discussion
1831 * Can be called from any context (zone_create_ext() setup hook or after).
1832 */
1833 extern void zone_raise_reserve(
1834 zone_or_view_t zone_or_view,
1835 uint16_t min_elements);
1836
1837 /*!
1838 * @function zone_fill_initially
1839 *
1840 * @brief
1841 * Initially fill a non collectable zone to have the specified amount of
1842 * elements.
1843 *
1844 * @discussion
1845 * This function must be called on a non collectable permanent zone before it
1846 * has been used yet.
1847 *
1848 * @param zone The zone to fill.
1849 * @param nelems The number of elements to be able to hold.
1850 */
1851 extern void zone_fill_initially(
1852 zone_t zone,
1853 vm_size_t nelems);
1854
1855 /*!
1856 * @function zone_drain()
1857 *
1858 * @abstract
1859 * Forces a zone to be drained (have all its data structures freed
1860 * back to its data store, and empty pages returned to the system).
1861 *
1862 * @param zone the zone id to free the objects to.
1863 */
1864 extern void zone_drain(
1865 zone_t zone);
1866
1867 /*!
1868 * @struct zone_basic_stats
1869 *
1870 * @abstract
1871 * Used to report basic statistics about a zone.
1872 *
1873 * @field zbs_avail the number of elements in a zone.
1874 * @field zbs_alloc the number of allocated elements in a zone.
1875 * @field zbs_free the number of free elements in a zone.
1876 * @field zbs_cached the number of free elements in the per-CPU caches.
1877 * (included in zbs_free).
1878 * @field zbs_alloc_fail
1879 * the number of allocation failures.
1880 */
1881 struct zone_basic_stats {
1882 uint64_t zbs_avail;
1883 uint64_t zbs_alloc;
1884 uint64_t zbs_free;
1885 uint64_t zbs_cached;
1886 uint64_t zbs_alloc_fail;
1887 };
1888
1889 /*!
1890 * @function zone_get_stats
1891 *
1892 * @abstract
1893 * Retrieves statistics about a zone, including its per-CPU caches.
1894 *
1895 * @param zone the zone to collect stats from.
1896 * @param stats the statistics to fill.
1897 */
1898 extern void zone_get_stats(
1899 zone_t zone,
1900 struct zone_basic_stats *stats);
1901
1902
1903 /*!
1904 * @typedef zone_exhausted_cb_t
1905 *
1906 * @brief
1907 * The callback type for the ZONE_EXHAUSTED event.
1908 */
1909 typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone, bool exhausted);
1910
1911 /*!
1912 * @brief
1913 * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits its
1914 * wiring limit.
1915 *
1916 * @discussion
1917 * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
1918 * performing zone expansion and no significant amount of work can be performed
1919 * from this context.
1920 *
1921 * In particular, those callbacks cannot allocate any memory, it is expected
1922 * that they will filter if the zone is of interest, and wake up another thread
1923 * to perform the actual work (for example via thread call).
1924 */
1925 EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
1926
1927
1928 #pragma mark XNU only: zone views
1929
1930 /*!
1931 * @enum zone_kheap_id_t
1932 *
1933 * @brief
1934 * Enumerate a particular kalloc heap.
1935 *
1936 * @discussion
1937 * More documentation about heaps is available in @c <kern/kalloc.h>.
1938 *
1939 * @const KHEAP_ID_NONE
1940 * This value denotes regular zones, not used by kalloc.
1941 *
1942 * @const KHEAP_ID_SHARED
1943 * Indicates zones part of the KHEAP_SHARED heap.
1944 *
1945 * @const KHEAP_ID_DATA_BUFFERS
1946 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
1947 *
1948 * @const KHEAP_ID_KT_VAR
1949 * Indicates zones part of the KHEAP_KT_VAR heap.
1950 */
1951 __enum_decl(zone_kheap_id_t, uint8_t, {
1952 KHEAP_ID_NONE,
1953 KHEAP_ID_SHARED,
1954 KHEAP_ID_DATA_BUFFERS,
1955 KHEAP_ID_KT_VAR,
1956
1957 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
1958 });
1959
1960 /*!
1961 * @macro ZONE_VIEW_DECLARE
1962 *
1963 * @abstract
1964 * (optionally) declares a zone view (in a header).
1965 *
1966 * @param var the name for the zone view.
1967 */
1968 #define ZONE_VIEW_DECLARE(var) \
1969 extern struct zone_view var[1]
1970
1971 /*!
1972 * @macro ZONE_VIEW_DEFINE
1973 *
1974 * @abstract
1975 * Defines a given zone view and what it points to.
1976 *
1977 * @discussion
1978 * Zone views can either share a pre-existing zone,
1979 * or perform a lookup into a kalloc heap for the zone
1980 * backing the bucket of the proper size.
1981 *
1982 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
1983 * as the last rank. If views on zones are created, these must have been
1984 * created before this stage.
1985 *
1986 * This macro should not be used to create zone views from default
1987 * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
1988 *
1989 * @param var the name for the zone view.
1990 * @param name a string describing the zone view.
1991 * @param heap_or_zone a @c KHEAP_ID_* constant or a pointer to a zone.
1992 * @param size the element size to be allocated from this view.
1993 */
1994 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
1995 SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
1996 .zv_name = (name), \
1997 } }; \
1998 static __startup_data struct zone_view_startup_spec \
1999 __startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
2000 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_view_startup_init, \
2001 &__startup_zone_view_spec_ ## var)
2002
2003
2004 #pragma mark XNU only: batched allocations
2005
2006 /*!
2007 * @typedef zstack_t
2008 *
2009 * @brief
2010 * A stack of allocated elements chained with delta encoding.
2011 *
2012 * @discussion
2013 * Some batch allocation interfaces interact with the data heap
2014 * where leaking kernel pointers is not acceptable. This is why
2015 * element offsets are used instead.
2016 */
2017 typedef struct zstack {
2018 vm_offset_t z_head;
2019 uint32_t z_count;
2020 } zstack_t;
2021
2022 /*!
2023 * @function zstack_push
2024 *
2025 * @brief
2026 * Push a given element onto a zstack.
2027 */
2028 extern void zstack_push(
2029 zstack_t *stack,
2030 void *elem);
2031
2032 /*!
2033 * @function zstack_pop
2034 *
2035 * @brief
2036 * Pops an element from a zstack, the caller must check it's not empty.
2037 */
2038 void *zstack_pop(
2039 zstack_t *stack);
2040
2041 /*!
2042 * @function zstack_empty
2043 *
2044 * @brief
2045 * Returns whether a stack is empty.
2046 */
2047 static inline uint32_t
zstack_count(zstack_t stack)2048 zstack_count(zstack_t stack)
2049 {
2050 return stack.z_count;
2051 }
2052
2053 /*!
2054 * @function zstack_empty
2055 *
2056 * @brief
2057 * Returns whether a stack is empty.
2058 */
2059 static inline bool
zstack_empty(zstack_t stack)2060 zstack_empty(zstack_t stack)
2061 {
2062 return zstack_count(stack) == 0;
2063 }
2064
2065 static inline zstack_t
zstack_load_and_erase(zstack_t * stackp)2066 zstack_load_and_erase(zstack_t *stackp)
2067 {
2068 zstack_t stack = *stackp;
2069
2070 *stackp = (zstack_t){ };
2071 return stack;
2072 }
2073
2074 /*!
2075 * @function zfree_nozero
2076 *
2077 * @abstract
2078 * Frees an element allocated with @c zalloc*, without zeroing it.
2079 *
2080 * @discussion
2081 * This is for the sake of networking only, no one else should use this.
2082 *
2083 * @param zone_id the zone id to free the element to.
2084 * @param elem the element to free
2085 */
extern void zfree_nozero(
	zone_id_t       zone_id,
	void           *elem __unsafe_indexable);
/*
 * Wrapper macro shadowing the function above: it evaluates @c zone_id
 * exactly once and uses os_ptr_load_and_erase() so the caller's pointer
 * variable is cleared before the call, defending against accidental
 * use-after-free through that variable.
 */
#define zfree_nozero(zone_id, elem) ({ \
	zone_id_t __zfree_zid = (zone_id); \
	(zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
2093
2094 /*!
2095 * @function zalloc_n
2096 *
2097 * @abstract
2098 * Allocates a batch of elements from the specified zone.
2099 *
2100 * @discussion
2101 * This is for the sake of networking only, no one else should use this.
2102 *
2103 * @param zone_id the zone id to allocate the element from.
 * @param count how many elements to allocate (fewer might be returned)
 * @param flags a set of @c zalloc_flags_t flags.
2106 */
2107 extern zstack_t zalloc_n(
2108 zone_id_t zone_id,
2109 uint32_t count,
2110 zalloc_flags_t flags);
2111
2112 /*!
2113 * @function zfree_n
2114 *
2115 * @abstract
2116 * Batched variant of zfree(): frees a stack of elements.
2117 *
2118 * @param zone_id the zone id to free the element to.
2119 * @param stack a stack of elements to free.
2120 */
extern void zfree_n(
	zone_id_t       zone_id,
	zstack_t        stack);
/*
 * Wrapper macro shadowing the function above: it evaluates @c zone_id
 * exactly once and uses zstack_load_and_erase() so the caller's stack
 * variable is reset to empty before the call, preventing accidental
 * reuse of the freed elements.
 */
#define zfree_n(zone_id, stack) ({ \
	zone_id_t __zfree_zid = (zone_id); \
	(zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
})
2128
2129 /*!
2130 * @function zfree_nozero_n
2131 *
2132 * @abstract
2133 * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
2134 * them.
2135 *
2136 * @discussion
2137 * This is for the sake of networking only, no one else should use this.
2138 *
2139 * @param zone_id the zone id to free the element to.
2140 * @param stack a stack of elements to free.
2141 */
extern void zfree_nozero_n(
	zone_id_t       zone_id,
	zstack_t        stack);
/*
 * Wrapper macro shadowing the function above: like zfree_n(), it erases
 * the caller's stack variable via zstack_load_and_erase() before passing
 * it down, so freed elements cannot be reused through that variable.
 */
#define zfree_nozero_n(zone_id, stack) ({ \
	zone_id_t __zfree_zid = (zone_id); \
	(zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
})
2149
2150 #pragma mark XNU only: cached objects
2151
2152 /*!
2153 * @typedef zone_cache_ops_t
2154 *
2155 * @brief
2156 * A set of callbacks used for a zcache (cache of composite objects).
2157 *
2158 * @field zc_op_alloc
2159 * The callback to "allocate" a cached object from scratch.
2160 *
2161 * @field zc_op_mark_valid
2162 * The callback that is called when a cached object is being reused,
2163 * will typically call @c zcache_mark_valid() on the various
2164 * sub-pieces of the composite cached object.
2165 *
2166 * @field zc_op_mark_invalid
2167 * The callback that is called when a composite object is being freed
2168 * to the cache. This will typically call @c zcache_mark_invalid()
2169 * on the various sub-pieces of the composite object.
2170 *
2171 * @field zc_op_free
2172 * The callback to "free" a composite object completely.
2173 */
2174 typedef const struct zone_cache_ops {
2175 void *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);
2176 void *(*zc_op_mark_valid)(zone_id_t, void *);
2177 void *(*zc_op_mark_invalid)(zone_id_t, void *);
2178 void (*zc_op_free)(zone_id_t, void *);
2179 } *zone_cache_ops_t;
2180
#if __has_ptrcheck
/*
 * Rebuilds -fbounds-safety pointer bounds on @c unsafe_pointer by copying
 * the bounds layout of @c pointer_with_bounds: the result points at
 * @c unsafe_pointer's address but carries lower/upper bounds at the same
 * relative offsets that @c pointer_with_bounds had within its own bounds.
 */
static inline char *__bidi_indexable
zcache_transpose_bounds(
	char *__bidi_indexable pointer_with_bounds,
	char *__unsafe_indexable unsafe_pointer)
{
	/* distance from the lower bound to the pointer itself */
	vm_offset_t offset_from_start = pointer_with_bounds - __ptr_lower_bound(pointer_with_bounds);
	/* distance from the pointer to the upper bound */
	vm_offset_t offset_to_end = __ptr_upper_bound(pointer_with_bounds) - pointer_with_bounds;
	/* total width spanned by the original bounds */
	vm_offset_t size = offset_from_start + offset_to_end;
	/* forge bounds of the same width at the new address, then re-apply the offset */
	return __unsafe_forge_bidi_indexable(char *, unsafe_pointer - offset_from_start, size)
	       + offset_from_start;
}
#else
/*
 * Without pointer bounds checking there are no bounds to transpose:
 * the unsafe pointer is returned unchanged.
 */
static inline char *__header_indexable
zcache_transpose_bounds(
	char *__header_indexable pointer_with_bounds __unused,
	char *__unsafe_indexable unsafe_pointer)
{
	return unsafe_pointer;
}
#endif // __has_ptrcheck
2202
2203 /*!
2204 * @function zcache_mark_valid()
2205 *
2206 * @brief
2207 * Mark an element as "valid".
2208 *
 * @discussion
2210 * This function is used to be able to integrate with KASAN or PGZ
2211 * for a cache of composite objects. It typically is a function
2212 * called in their @c zc_op_mark_valid() callback.
2213 *
2214 * If PGZ or KASAN isn't in use, then this callback is a no-op.
2215 * Otherwise the @c elem address might be updated.
2216 *
2217 * @param zone the zone the element belongs to.
2218 * @param elem the address of the element
2219 * @returns the new address to correctly access @c elem.
2220 */
2221 extern void *__unsafe_indexable zcache_mark_valid(
2222 zone_t zone,
2223 void *elem __unsafe_indexable);
2224
2225 static inline void *
zcache_mark_valid_single(zone_t zone,void * elem)2226 zcache_mark_valid_single(
2227 zone_t zone,
2228 void *elem)
2229 {
2230 return __unsafe_forge_single(void *, zcache_mark_valid(zone, elem));
2231 }
2232
2233 static inline void *__header_bidi_indexable
zcache_mark_valid_indexable(zone_t zone,void * elem __header_bidi_indexable)2234 zcache_mark_valid_indexable(
2235 zone_t zone,
2236 void *elem __header_bidi_indexable)
2237 {
2238 return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_valid(zone, elem));
2239 }
2240
2241 /*!
2242 * @function zcache_mark_invalid()
2243 *
2244 * @brief
2245 * Mark an element as "invalid".
2246 *
 * @discussion
2248 * This function is used to be able to integrate with KASAN or PGZ
2249 * for a cache of composite objects. It typically is a function
2250 * called in their @c zc_op_mark_invalid() callback.
2251 *
2252 * This function performs validation that @c elem belongs
2253 * to the right zone and is properly "aligned", and should
2254 * never be elided under any configuration.
2255 *
2256 * @param zone the zone the element belongs to.
2257 * @param elem the address of the element
2258 * @returns the new address to correctly access @c elem.
2259 */
2260 extern void *__unsafe_indexable zcache_mark_invalid(
2261 zone_t zone,
2262 void *elem __unsafe_indexable);
2263
2264 static inline void *
zcache_mark_invalid_single(zone_t zone,void * elem)2265 zcache_mark_invalid_single(
2266 zone_t zone,
2267 void *elem)
2268 {
2269 return __unsafe_forge_single(void *, zcache_mark_invalid(zone, elem));
2270 }
2271
2272 static inline void *__header_bidi_indexable
zcache_mark_invalid_indexable(zone_t zone,void * elem __header_bidi_indexable)2273 zcache_mark_invalid_indexable(
2274 zone_t zone,
2275 void *elem __header_bidi_indexable)
2276 {
2277 return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_invalid(zone, elem));
2278 }
2279
2280 /*!
2281 * @macro zcache_alloc()
2282 *
2283 * @abstract
2284 * Allocates a composite object from a cache.
2285 *
 * @param zone_id The proper @c ZONE_ID_* constant.
 * @param fl a collection of @c zalloc_flags_t flags.
2288 *
2289 * @returns NULL or the allocated element
2290 */
2291 #define zcache_alloc(zone_id, fl) \
2292 __zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2293
2294 /*!
2295 * @function zcache_alloc_n()
2296 *
2297 * @abstract
2298 * Allocates a stack of composite objects from a cache.
2299 *
2300 * @param zone_id The proper @c ZONE_ID_* constant.
 * @param count how many elements to allocate (fewer might be returned)
 * @param flags a set of @c zalloc_flags_t flags.
 *
 * @returns a zstack of the allocated composite objects (possibly empty)
2305 */
extern zstack_t zcache_alloc_n(
	zone_id_t               zone_id,
	uint32_t                count,
	zalloc_flags_t          flags,
	zone_cache_ops_t        ops);
/*
 * Wrapper macro shadowing the function above: pastes the zone ID into
 * @c __zcache_<zone_id>_ops (declared by ZCACHE_DECLARE) so callers do
 * not have to pass the ops pointer explicitly. Requires @c zone_id to be
 * a literal @c ZONE_ID_* identifier, not a runtime value.
 */
#define zcache_alloc_n(zone_id, count, flags) \
	(zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2313
2314
2315
2316 /*!
2317 * @function zcache_free()
2318 *
2319 * @abstract
2320 * Frees a composite object previously allocated
2321 * with @c zcache_alloc() or @c zcache_alloc_n().
2322 *
2323 * @param zone_id the zcache id to free the object to.
2324 * @param addr the address to free
2325 * @param ops the pointer to the zcache ops for this zcache.
2326 */
extern void zcache_free(
	zone_id_t               zone_id,
	void                   *addr __unsafe_indexable,
	zone_cache_ops_t        ops);
/*
 * Wrapper macro shadowing the function above: erases the caller's pointer
 * variable via os_ptr_load_and_erase() and supplies the per-zcache ops
 * (@c __zcache_<zone_id>_ops) automatically from the literal zone ID.
 */
#define zcache_free(zone_id, elem) \
	(zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
	    __zcache_##zone_id##_ops)
2334
2335 /*!
2336 * @function zcache_free_n()
2337 *
2338 * @abstract
2339 * Frees a stack of composite objects previously allocated
2340 * with @c zcache_alloc() or @c zcache_alloc_n().
2341 *
2342 * @param zone_id the zcache id to free the objects to.
2343 * @param stack a stack of composite objects
2344 * @param ops the pointer to the zcache ops for this zcache.
2345 */
extern void zcache_free_n(
	zone_id_t               zone_id,
	zstack_t                stack,
	zone_cache_ops_t        ops);
/*
 * Wrapper macro shadowing the function above: empties the caller's stack
 * variable via zstack_load_and_erase() and supplies the per-zcache ops
 * (@c __zcache_<zone_id>_ops) automatically from the literal zone ID.
 */
#define zcache_free_n(zone_id, stack) \
	(zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
	    __zcache_##zone_id##_ops)
2353
2354
2355 /*!
2356 * @function zcache_drain()
2357 *
2358 * @abstract
2359 * Forces a zcache to be drained (have all its data structures freed
2360 * back to the original zones).
2361 *
2362 * @param zone_id the zcache id to free the objects to.
2363 */
2364 extern void zcache_drain(
2365 zone_id_t zone_id);
2366
2367
2368 /*!
2369 * @macro ZCACHE_DECLARE
2370 *
2371 * @abstract
2372 * Declares the type associated with a zone cache ID.
2373 *
2374 * @param id the name of zone ID to associate a type with.
2375 * @param type_t the type of elements in the zone.
2376 */
2377 #define ZCACHE_DECLARE(id, type_t) \
2378 __ZONE_DECLARE_TYPE(id, type_t); \
2379 __attribute__((visibility("hidden"))) \
2380 extern const zone_cache_ops_t __zcache_##id##_ops
2381
2382
2383 /*!
2384 * @macro ZCACHE_DEFINE
2385 *
2386 * @abstract
2387 * Defines a zone cache for a given ID and type.
2388 *
 * @param zid the name of the zone ID to associate a type with.
2390 * @param name the name for the zone
2391 * @param type_t the type of elements in the zone.
2392 * @param size the size of elements in the cache
2393 * @param ops the ops for this zcache.
2394 */
2395 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2396 ZCACHE_DECLARE(zid, type_t); \
2397 ZONE_DECLARE_ID(zid, type_t); \
2398 const zone_cache_ops_t __zcache_##zid##_ops = (ops); \
2399 ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) { \
2400 zcache_ops[zid] = (ops); \
2401 })
2402
2403 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2404
2405 #pragma mark XNU only: PGZ support
2406
2407 /*!
2408 * @function pgz_owned()
2409 *
2410 * @brief
2411 * Returns whether an address is PGZ owned.
2412 *
2413 * @param addr The address to translate.
2414 * @returns Whether it is PGZ owned
2415 */
2416 #if CONFIG_PROB_GZALLOC
2417 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
2418 #else
2419 #define pgz_owned(addr) false
2420 #endif
2421
2422 /*!
2423 * @function pgz_decode()
2424 *
2425 * @brief
2426 * Translates a PGZ protected virtual address to its unprotected
2427 * backing store.
2428 *
2429 * @discussion
2430 * This is exposed so that the VM can lookup the vm_page_t for PGZ protected
2431 * elements since the PGZ protected virtual addresses are maintained by PGZ
 * at the pmap level without VM involvement.
2433 *
2434 * "allow_invalid" schemes relying on sequestering also need this
2435 * to perform the locking attempts on the unprotected address.
2436 *
2437 * @param addr The address to translate.
2438 * @param size The object size.
2439 * @returns The unprotected address or @c addr.
2440 */
2441 #if CONFIG_PROB_GZALLOC
2442 #define pgz_decode(addr, size) \
2443 ((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
2444 #else
2445 #define pgz_decode(addr, size) (addr)
2446 #endif
2447
2448 /*!
2449 * @function pgz_decode_allow_invalid()
2450 *
2451 * @brief
2452 * Translates a PGZ protected virtual address to its unprotected
2453 * backing store, but doesn't assert it is still allocated/valid.
2454 *
2455 * @discussion
2456 * "allow_invalid" schemes relying on sequestering also need this
2457 * to perform the locking attempts on the unprotected address.
2458 *
2459 * @param addr The address to translate.
2460 * @param want_zid The expected zone ID for the element.
2461 * @returns The unprotected address or @c addr.
2462 */
2463 #if CONFIG_PROB_GZALLOC
2464 #define pgz_decode_allow_invalid(addr, want_zid) \
2465 ((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
2466 #else
2467 #define pgz_decode_allow_invalid(addr, zid) (addr)
2468 #endif
2469
2470 #pragma mark XNU only: misc & implementation details
2471
2472 struct zone_create_startup_spec {
2473 zone_t *z_var;
2474 const char *z_name __unsafe_indexable;
2475 vm_size_t z_size;
2476 zone_create_flags_t z_flags;
2477 zone_id_t z_zid;
2478 void (^z_setup)(zone_t);
2479 };
2480
2481 extern void zone_create_startup(
2482 struct zone_create_startup_spec *spec);
2483
2484 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2485 static __startup_data struct zone_create_startup_spec \
2486 __startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2487 STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2488 &__startup_zone_spec_ ## ns)
2489
2490 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2491 __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2492
2493 #define __zalloc_cast(namespace, expr) \
2494 ((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2495
2496 #if ZALLOC_TYPE_SAFE
2497 #define zalloc(zov) __zalloc_cast(zov, (zalloc)(zov))
2498 #define zalloc_noblock(zov) __zalloc_cast(zov, (zalloc_noblock)(zov))
2499 #endif /* !ZALLOC_TYPE_SAFE */
2500
2501 struct zone_view_startup_spec {
2502 zone_view_t zv_view;
2503 union {
2504 zone_kheap_id_t zv_heapid;
2505 zone_t *zv_zone;
2506 };
2507 vm_size_t zv_size;
2508 };
2509
2510 extern void zone_view_startup_init(
2511 struct zone_view_startup_spec *spec);
2512
2513 extern void zone_userspace_reboot_checks(void);
2514
2515 #if VM_TAG_SIZECLASSES
2516 extern void __zone_site_register(
2517 vm_allocation_site_t *site);
2518
2519 #define VM_ALLOC_SITE_TAG() ({ \
2520 __PLACE_IN_SECTION("__DATA, __data") \
2521 static vm_allocation_site_t site = { .refcount = 2, }; \
2522 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, __zone_site_register, &site); \
2523 site.tag; \
2524 })
2525 #else /* VM_TAG_SIZECLASSES */
2526 #define VM_ALLOC_SITE_TAG() VM_KERN_MEMORY_NONE
2527 #endif /* !VM_TAG_SIZECLASSES */
2528
2529 static inline zalloc_flags_t
__zone_flags_mix_tag(zalloc_flags_t flags,vm_tag_t tag)2530 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2531 {
2532 return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2533 }
2534
2535 #if DEBUG || DEVELOPMENT
2536 # define ZPCPU_MANGLE_MASK 0xc0c0000000000000ul
2537 #else /* !(DEBUG || DEVELOPMENT) */
2538 # define ZPCPU_MANGLE_MASK 0ul
2539 #endif /* !(DEBUG || DEVELOPMENT) */
2540
2541 #define __zpcpu_mangle(ptr) (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_MASK)
2542 #define __zpcpu_demangle(ptr) (__zpcpu_addr(ptr) | ZPCPU_MANGLE_MASK)
2543 #define __zpcpu_addr(e) ((vm_address_t)(e))
2544 #define __zpcpu_cast(ptr, e) __unsafe_forge_single(typeof(ptr), e)
2545 #define __zpcpu_next(ptr) __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2546
2547 /**
2548 * @macro __zpcpu_mangle_for_boot()
2549 *
2550 * @discussion
2551 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2552 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2553 * storage marked @c __startup_data and replace it with the proper allocation
2554 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2555 *
2556 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2557 * provides the proper mangling of the storage into a "fake" percpu pointer so
2558 * that accesses through @c zpercpu_get() functions properly.
2559 *
2560 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2561 */
2562 #define __zpcpu_mangle_for_boot(ptr) ({ \
2563 assert(startup_phase < STARTUP_SUB_ZALLOC); \
2564 __zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
2565 })
2566
2567 extern unsigned zpercpu_count(void) __pure2;
2568
2569 #if CONFIG_PROB_GZALLOC
2570
2571 extern vm_offset_t __pgz_decode(
2572 mach_vm_address_t addr,
2573 mach_vm_size_t size);
2574
2575 extern vm_offset_t __pgz_decode_allow_invalid(
2576 vm_offset_t offs,
2577 zone_id_t zid);
2578
2579 #endif
2580 #if DEBUG || DEVELOPMENT
2581 extern size_t zone_pages_wired;
2582 extern size_t zone_guard_pages;
2583 #endif /* DEBUG || DEVELOPMENT */
2584 #if CONFIG_ZLEAKS
2585 extern uint32_t zleak_active;
2586 extern vm_size_t zleak_max_zonemap_size;
2587 extern vm_size_t zleak_per_zone_tracking_threshold;
2588
2589 extern kern_return_t zleak_update_threshold(
2590 vm_size_t *arg,
2591 uint64_t value);
2592 #endif /* CONFIG_ZLEAKS */
2593
2594 extern uint32_t zone_map_jetsam_limit;
2595
2596 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2597
2598 extern zone_t percpu_u64_zone;
2599
2600 #pragma GCC visibility pop
2601 #endif /* XNU_KERNEL_PRIVATE */
2602
2603 /*
2604 * This macro is currently used by AppleImage4 (rdar://83924635)
2605 */
2606 #define __zalloc_ptr_load_and_erase(elem) \
2607 os_ptr_load_and_erase(elem)
2608
2609 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2610
2611 #endif /* _KERN_ZALLOC_H_ */
2612
2613 #endif /* KERNEL_PRIVATE */
2614