1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: zalloc.h
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1985
62 *
63 */
64
65 #ifdef KERNEL_PRIVATE
66
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg) __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86
87 /*
88 * Enable this macro to force type safe zalloc/zalloc_ro/...
89 */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99
100 /*!
101 * @macro __zpercpu
102 *
103 * @abstract
104 * Annotation that helps denoting a per-cpu pointer that requires usage of
105 * @c zpercpu_*() for access.
106 */
107 #define __zpercpu __unsafe_indexable
108
109 /*!
110 * @typedef zone_id_t
111 *
112 * @abstract
113 * The type for a zone ID.
114 */
115 typedef uint16_t zone_id_t;
116
117 /**
118 * @enum zone_create_flags_t
119 *
120 * @abstract
121 * Set of flags to pass to zone_create().
122 *
123 * @discussion
124 * Some kernel-wide policies affect all possible created zones.
125 * Explicit @c ZC_* win over such policies.
126 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** Force the created zone to use VA sequestering */
	ZC_SEQUESTER            = 0x00000001,
	/** Force the created zone @b NOT to use VA sequestering */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as Read-only */
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Obsolete */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/* Flags in the top byte are reserved for XNU-internal callers. */

	/** Use guard pages in PGZ mode */
	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,

	/** Zone doesn't support TBI tagging */
	ZC_NOTBITAG             = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	/** This zone will back a kalloc heap */
	ZC_KALLOC_HEAP          = 0x0800000000000000,

	/* unused                 0x1000000000000000, */

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,
	/* ZC_VM_LP64 requests the VM submap only on 64-bit kernels. */
#if __LP64__
#define ZC_VM_LP64      ZC_VM
#else
#define ZC_VM_LP64      ZC_NONE
#endif

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});
198
199 /*!
200 * @union zone_or_view
201 *
202 * @abstract
203 * A type used for calls that admit both a zone or a zone view.
204 *
205 * @discussion
206 * @c zalloc() and @c zfree() and their variants can act on both
207 * zones and zone views.
208 */
union zone_or_view {
	struct zone_view *zov_view;
	struct zone *zov_zone;
	struct kalloc_type_view *zov_kt_heap;
#ifdef __cplusplus
	/*
	 * Implicit converting constructors so that C++ callers can pass
	 * a zone, a zone view, or a kalloc type view directly to the
	 * zalloc()/zfree() family.
	 */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
/* C++ uses the converting constructors above. */
typedef union zone_or_view zone_or_view_t;
#else
/* In C, transparent_union lets any member type be passed by value. */
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
227
228 /*!
229 * @enum zone_create_ro_id_t
230 *
231 * @abstract
232 * Zone creation IDs for external read only zones
233 *
234 * @discussion
235 * Kexts that desire to use the RO allocator should:
236 * 1. Add a zone creation id below
237 * 2. Add a corresponding ID to @c zone_reserved_id_t
238 * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
239 * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
240 * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
241 */
__enum_decl(zone_create_ro_id_t, zone_id_t, {
	ZC_RO_ID_SANDBOX,
	ZC_RO_ID_PROFILE,
	ZC_RO_ID_PROTOBOX,
	ZC_RO_ID_SB_FILTER,
	ZC_RO_ID_AMFI_OSENTITLEMENTS,
	/* must always alias the last external ID above; update when adding IDs */
	ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
});
250
251 /*!
252 * @function zone_create
253 *
254 * @abstract
255 * Creates a zone with the specified parameters.
256 *
257 * @discussion
258 * A Zone is a slab allocator that returns objects of a given size very quickly.
259 *
260 * @param name the name for the new zone.
261 * @param size the size of the elements returned by this zone.
262 * @param flags a set of @c zone_create_flags_t flags.
263 *
264 * @returns the created zone, this call never fails.
265 */
266 extern zone_t zone_create(
267 const char *name __unsafe_indexable,
268 vm_size_t size,
269 zone_create_flags_t flags);
270
271 /*!
272 * @function zone_create_ro
273 *
274 * @abstract
275 * Creates a read only zone with the specified parameters from kexts
276 *
277 * @discussion
278 * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
279 * kexts. Do not use this API to create read only zones in xnu.
280 *
281 * @param name the name for the new zone.
282 * @param size the size of the elements returned by this zone.
283 * @param flags a set of @c zone_create_flags_t flags.
284 * @param zc_ro_id an ID declared in @c zone_create_ro_id_t
285 *
286 * @returns the zone ID of the created zone, this call never fails.
287 */
288 extern zone_id_t zone_create_ro(
289 const char *name __unsafe_indexable,
290 vm_size_t size,
291 zone_create_flags_t flags,
292 zone_create_ro_id_t zc_ro_id);
293
294 /*!
295 * @function zdestroy
296 *
297 * @abstract
298 * Destroys a zone previously made with zone_create.
299 *
300 * @discussion
301 * Zones must have been made destructible for @c zdestroy() to be allowed,
302 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
303 *
304 * @param zone the zone to destroy.
305 */
306 extern void zdestroy(
307 zone_t zone);
308
309 /*!
310 * @function zone_require
311 *
312 * @abstract
313 * Requires for a given pointer to belong to the specified zone.
314 *
315 * @discussion
316 * The function panics if the check fails as it indicates that the kernel
317 * internals have been compromised.
318 *
319 * @param zone the zone the address needs to belong to.
320 * @param addr the element address to check.
321 */
322 extern void zone_require(
323 zone_t zone,
324 void *addr __unsafe_indexable);
325
326 /*!
327 * @function zone_require_ro
328 *
329 * @abstract
330 * Version of zone require intended for zones created with ZC_READONLY
331 *
332 * @discussion
333 * This check is not sufficient to fully trust the element.
334 *
335 * Another check of its content must be performed to prove
336 * that the element is "the right one", a typical technique
337 * for when the RO data structure is 1:1 with a mutable one,
338 * is a simple circularity check with a very strict lifetime
339 * (both the mutable and read-only data structures are made
340 * and destroyed as close as possible).
341 *
342 * @param zone_id the zone id the address needs to belong to.
343 * @param elem_size the element size for this zone.
344 * @param addr the element address to check.
345 */
346 extern void zone_require_ro(
347 zone_id_t zone_id,
348 vm_size_t elem_size,
349 void *addr __unsafe_indexable);
350
351 /*!
352 * @enum zalloc_flags_t
353 *
354 * @brief
355 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
356 *
357 * @discussion
358 * It is encouraged that any callsite passing flags uses exactly one of:
359 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
360 * if nothing else was specified.
361 *
362 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
363 * then @c Z_WAITOK is ignored.
364 *
365 * @const Z_WAITOK
366 * Means that it's OK for zalloc() to block to wait for memory,
367 * when Z_WAITOK is passed, zalloc will never return NULL.
368 *
369 * @const Z_NOWAIT
370 * Passing this flag means that zalloc is not allowed to ever block.
371 *
372 * @const Z_NOPAGEWAIT
373 * Passing this flag means that zalloc is allowed to wait due to lock
374 * contention, but will not wait for the VM to wait for pages when
375 * under memory pressure.
376 *
377 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
379 *
380 * @const Z_NOFAIL
381 * Passing this flag means that the caller expects the allocation to always
382 * succeed. This will result in a panic if this assumption isn't correct.
383 *
384 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
385 * be used on exhaustible zones.
386 *
387 * @const Z_REALLOCF
388 * For the realloc family of functions,
389 * free the incoming memory on failure cases.
390 *
391 #if XNU_KERNEL_PRIVATE
392 * @const Z_SPRAYQTN
393 * This flag tells the VM to allocate from the "spray quarantine" range when
394 * it services the allocation. For more details on what allocations qualify
395 * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
396 *
397 * @const Z_KALLOC_ARRAY
398 * Instead of returning a standard "pointer" return a pointer that encodes
399 * its size-class into the pointer itself (Only for kalloc, might limit
400 * the range of allocations that can be done).
401 *
402 * @const Z_MAY_COPYINMAP
403 * This data allocation might be used with vm_map_copyin().
404 * This allows for those allocations to be associated with a proper VM object.
405 *
406 * @const Z_FULLSIZE
407 * Used to indicate that the caller will use all available space in excess
408 * from the requested allocation size.
409 *
410 * @const Z_SKIP_KASAN
411 * Tell zalloc() not to do any kasan adjustments.
412 *
413 * @const Z_PGZ
414 * Used by zalloc internally to denote an allocation that we will try
415 * to guard with PGZ.
416 *
417 * @const Z_VM_TAG_BT_BIT
418 * Used to blame allocation accounting on the first kext
419 * found in the backtrace of the allocation.
420 *
421 * @const Z_NOZZC
422 * Used internally to mark allocations that will skip zero validation.
423 *
424 * @const Z_PCPU
425 * Used internally for the percpu paths.
426 *
427 * @const Z_VM_TAG_MASK
428 * Represents bits in which a vm_tag_t for the allocation can be passed.
429 * (used by kalloc for the zone tagging debugging feature).
430 #endif
431 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,
	Z_REALLOCF      = 0x0008,

#if XNU_KERNEL_PRIVATE
	/* XNU-internal flags, see the #if XNU_KERNEL_PRIVATE docs above */
	Z_SPRAYQTN      = 0x0040,
	Z_KALLOC_ARRAY  = 0x0080,
	Z_MAY_COPYINMAP = 0x0100,
	Z_FULLSIZE      = 0x0200,
#if KASAN
	Z_SKIP_KASAN    = 0x0400,
#else
	/* without KASAN the flag is a no-op so it can be passed unconditionally */
	Z_SKIP_KASAN    = 0x0000,
#endif
	Z_PGZ           = 0x0800,
	Z_VM_TAG_BT_BIT = 0x1000,
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO   = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO   = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL = Z_WAITOK | Z_ZERO | Z_NOFAIL,
#if XNU_KERNEL_PRIVATE
	Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
#endif

	Z_KPI_MASK      = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
#if XNU_KERNEL_PRIVATE
	Z_ZERO_VM_TAG_BT_BIT = Z_ZERO | Z_VM_TAG_BT_BIT,
	/** used by kalloc to propagate vm tags for -zt */
	/* the top 16 bits carry a vm_tag_t, see Z_VM_TAG() below */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT  16
#define Z_VM_TAG(fl, tag)       ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
#define Z_VM_TAG_BT(fl, tag)    ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
#endif
});
476
/*
 * This type is used so that kalloc_internal has good calling conventions
 * for callers who want to cheaply both know the allocated address
 * and the actual size of the allocation.
 */
struct kalloc_result {
	void *addr __sized_by(size);    /* the allocated address */
	vm_size_t size;                 /* the actual size of the allocation */
};
486
487 /*!
488 * @function zalloc
489 *
490 * @abstract
491 * Allocates an element from a specified zone.
492 *
493 * @discussion
494 * If the zone isn't exhaustible and is expandable, this call never fails.
495 *
496 * @param zone_or_view the zone or zone view to allocate from
497 *
498 * @returns NULL or the allocated element
499 */
500 __attribute__((malloc))
501 extern void *__unsafe_indexable zalloc(
502 zone_or_view_t zone_or_view);
503
504 /*!
505 * @function zalloc_noblock
506 *
507 * @abstract
508 * Allocates an element from a specified zone, but never blocks.
509 *
510 * @discussion
511 * This call is suitable for preemptible code, however allocation
512 * isn't allowed from interrupt context.
513 *
514 * @param zone_or_view the zone or zone view to allocate from
515 *
516 * @returns NULL or the allocated element
517 */
518 __attribute__((malloc))
519 extern void *__unsafe_indexable zalloc_noblock(
520 zone_or_view_t zone_or_view);
521
522 /*!
523 * @function zalloc_flags()
524 *
525 * @abstract
526 * Allocates an element from a specified zone, with flags.
527 *
528 * @param zone_or_view the zone or zone view to allocate from
529 * @param flags a collection of @c zalloc_flags_t.
530 *
531 * @returns NULL or the allocated element
532 */
533 __attribute__((malloc))
534 extern void *__unsafe_indexable zalloc_flags(
535 zone_or_view_t zone_or_view,
536 zalloc_flags_t flags);
537
538 __attribute__((malloc))
539 static inline void *__unsafe_indexable
__zalloc_flags(zone_or_view_t zone_or_view,zalloc_flags_t flags)540 __zalloc_flags(
541 zone_or_view_t zone_or_view,
542 zalloc_flags_t flags)
543 {
544 void *__unsafe_indexable addr = (zalloc_flags)(zone_or_view, flags);
545 if (flags & Z_NOFAIL) {
546 __builtin_assume(addr != NULL);
547 }
548 return addr;
549 }
550
551 #ifndef XNU_KERNEL_PRIVATE
552 #define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
553 #endif
554
555 /*!
556 * @macro zalloc_id
557 *
558 * @abstract
559 * Allocates an element from a specified zone ID, with flags.
560 *
561 * @param zid The proper @c ZONE_ID_* constant.
562 * @param flags a collection of @c zalloc_flags_t.
563 *
564 * @returns NULL or the allocated element
565 */
566 __attribute__((malloc))
567 extern void *__unsafe_indexable zalloc_id(
568 zone_id_t zid,
569 zalloc_flags_t flags);
570
571 __attribute__((malloc))
572 static inline void *__unsafe_indexable
__zalloc_id(zone_id_t zid,zalloc_flags_t flags)573 __zalloc_id(
574 zone_id_t zid,
575 zalloc_flags_t flags)
576 {
577 void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
578 if (flags & Z_NOFAIL) {
579 __builtin_assume(addr != NULL);
580 }
581 return addr;
582 }
583
584 #ifndef XNU_KERNEL_PRIVATE
585 #define zalloc_id(zid, fl) __zalloc_id(zid, fl)
586 #endif
587
588 /*!
589 * @function zalloc_ro
590 *
591 * @abstract
592 * Allocates an element from a specified read-only zone.
593 *
594 * @param zone_id the zone id to allocate from
595 * @param flags a collection of @c zalloc_flags_t.
596 *
597 * @returns NULL or the allocated element
598 */
599 __attribute__((malloc))
600 extern void *__unsafe_indexable zalloc_ro(
601 zone_id_t zone_id,
602 zalloc_flags_t flags);
603
604 __attribute__((malloc))
605 static inline void *__unsafe_indexable
__zalloc_ro(zone_id_t zone_id,zalloc_flags_t flags)606 __zalloc_ro(
607 zone_id_t zone_id,
608 zalloc_flags_t flags)
609 {
610 void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
611 if (flags & Z_NOFAIL) {
612 __builtin_assume(addr != NULL);
613 }
614 return addr;
615 }
616
617 #ifndef XNU_KERNEL_PRIVATE
618 #define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
619 #endif
620
621 /*!
622 * @function zalloc_ro_mut
623 *
624 * @abstract
625 * Modifies an element from a specified read-only zone.
626 *
627 * @discussion
628 * Modifying compiler-assisted authenticated pointers using this function will
629 * not result in a signed pointer being written. The caller is expected to
630 * sign the value appropriately beforehand if they wish to do this.
631 *
632 * @param zone_id the zone id to allocate from
633 * @param elem element to be modified
634 * @param offset offset from element
635 * @param new_data pointer to new data
636 * @param new_data_size size of modification
637 *
638 */
639 extern void zalloc_ro_mut(
640 zone_id_t zone_id,
641 void *elem __unsafe_indexable,
642 vm_offset_t offset,
643 const void *new_data __sized_by(new_data_size),
644 vm_size_t new_data_size);
645
646 /*!
647 * @function zalloc_ro_update_elem
648 *
649 * @abstract
650 * Update the value of an entire element allocated in the read only allocator.
651 *
652 * @param zone_id the zone id to allocate from
653 * @param elem element to be modified
654 * @param new_data pointer to new data
655 *
656 */
657 #define zalloc_ro_update_elem(zone_id, elem, new_data) ({ \
658 const typeof(*(elem)) *__new_data = (new_data); \
659 zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data)); \
660 })
661
/*!
 * @function zalloc_ro_update_field
 *
 * @abstract
 * Update a single field of an element allocated in the read only allocator.
 *
 * @param zone_id the zone id the element belongs to
 * @param elem element to be modified
 * @param field the element field to be modified
 * @param value pointer to the new value for @c field
 *
 */
#define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
	const typeof((elem)->field) *__value = (value); \
	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    __value, sizeof((elem)->field)); \
})
679
/* Picks the pointer-sized flavor of a ZRO_ATOMIC_* operation. */
#if __LP64__
#define ZRO_ATOMIC_LONG(op)     ZRO_ATOMIC_##op##_64
#else
#define ZRO_ATOMIC_LONG(op)     ZRO_ATOMIC_##op##_32
#endif

/*!
 * @enum zro_atomic_op_t
 *
 * @brief
 * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
 * atomic operations.
 *
 * @discussion
 * This enum provides all flavors of atomic operations supported in sizes 8,
 * 16, 32, 64 bits.
 *
 * @const ZRO_ATOMIC_OR_*
 * To perform an @c os_atomic_or
 *
 * @const ZRO_ATOMIC_XOR_*
 * To perform an @c os_atomic_xor
 *
 * @const ZRO_ATOMIC_AND_*
 * To perform an @c os_atomic_and
 *
 * @const ZRO_ATOMIC_ADD_*
 * To perform an @c os_atomic_add
 *
 * @const ZRO_ATOMIC_XCHG_*
 * To perform an @c os_atomic_xchg
 *
 */
__enum_decl(zro_atomic_op_t, uint32_t, {
	/* the low nibble encodes the operand size in bytes (op & 0xf) */
	ZRO_ATOMIC_OR_8         = 0x00000010 | 1,
	ZRO_ATOMIC_OR_16        = 0x00000010 | 2,
	ZRO_ATOMIC_OR_32        = 0x00000010 | 4,
	ZRO_ATOMIC_OR_64        = 0x00000010 | 8,

	ZRO_ATOMIC_XOR_8        = 0x00000020 | 1,
	ZRO_ATOMIC_XOR_16       = 0x00000020 | 2,
	ZRO_ATOMIC_XOR_32       = 0x00000020 | 4,
	ZRO_ATOMIC_XOR_64       = 0x00000020 | 8,

	ZRO_ATOMIC_AND_8        = 0x00000030 | 1,
	ZRO_ATOMIC_AND_16       = 0x00000030 | 2,
	ZRO_ATOMIC_AND_32       = 0x00000030 | 4,
	ZRO_ATOMIC_AND_64       = 0x00000030 | 8,

	ZRO_ATOMIC_ADD_8        = 0x00000040 | 1,
	ZRO_ATOMIC_ADD_16       = 0x00000040 | 2,
	ZRO_ATOMIC_ADD_32       = 0x00000040 | 4,
	ZRO_ATOMIC_ADD_64       = 0x00000040 | 8,

	ZRO_ATOMIC_XCHG_8       = 0x00000050 | 1,
	ZRO_ATOMIC_XCHG_16      = 0x00000050 | 2,
	ZRO_ATOMIC_XCHG_32      = 0x00000050 | 4,
	ZRO_ATOMIC_XCHG_64      = 0x00000050 | 8,

	/* convenient spellings */
	ZRO_ATOMIC_OR_LONG      = ZRO_ATOMIC_LONG(OR),
	ZRO_ATOMIC_XOR_LONG     = ZRO_ATOMIC_LONG(XOR),
	ZRO_ATOMIC_AND_LONG     = ZRO_ATOMIC_LONG(AND),
	ZRO_ATOMIC_ADD_LONG     = ZRO_ATOMIC_LONG(ADD),
	ZRO_ATOMIC_XCHG_LONG    = ZRO_ATOMIC_LONG(XCHG),
});
746
747 /*!
748 * @function zalloc_ro_mut_atomic
749 *
750 * @abstract
751 * Atomically update an offset in an element allocated in the read only
752 * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
753 *
754 * @param zone_id the zone id to allocate from
755 * @param elem element to be modified
756 * @param offset offset in the element to be modified
757 * @param op atomic operation to perform (see @c zro_atomic_op_t)
758 * @param value value for the atomic operation
759 *
760 */
761 extern uint64_t zalloc_ro_mut_atomic(
762 zone_id_t zone_id,
763 void *elem __unsafe_indexable,
764 vm_offset_t offset,
765 zro_atomic_op_t op,
766 uint64_t value);
767
768 /*!
769 * @macro zalloc_ro_update_field_atomic
770 *
771 * @abstract
772 * Atomically update a single field of an element allocated in the read only
773 * allocator.
774 *
775 * @param zone_id the zone id to allocate from
776 * @param elem element to be modified
777 * @param field the element field to be modified
778 * @param op atomic operation to perform (see @c zro_atomic_op_t)
779 * @param value value for the atomic operation
780 *
781 */
782 #define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value) ({ \
783 const typeof((elem)->field) __value = (value); \
784 static_assert(sizeof(__value) == (op & 0xf)); \
785 (os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id, \
786 elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value); \
787 })
788
789 /*!
790 * @function zalloc_ro_clear
791 *
792 * @abstract
793 * Zeroes an element from a specified read-only zone.
794 *
795 * @param zone_id the zone id to allocate from
796 * @param elem element to be modified
797 * @param offset offset from element
798 * @param size size of modification
799 */
800 extern void zalloc_ro_clear(
801 zone_id_t zone_id,
802 void *elem __unsafe_indexable,
803 vm_offset_t offset,
804 vm_size_t size);
805
806 /*!
807 * @function zalloc_ro_clear_field
808 *
809 * @abstract
810 * Zeroes the specified field of an element from a specified read-only zone.
811 *
812 * @param zone_id the zone id to allocate from
813 * @param elem element to be modified
814 * @param field offset from element
815 */
816 #define zalloc_ro_clear_field(zone_id, elem, field) \
817 zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
818 sizeof((elem)->field))
819
820 /*!
821 * @function zfree_id()
822 *
823 * @abstract
824 * Frees an element previously allocated with @c zalloc_id().
825 *
826 * @param zone_id the zone id to free the element to.
827 * @param addr the address to free
828 */
829 extern void zfree_id(
830 zone_id_t zone_id,
831 void *addr __unsafe_indexable);
832
833 /*!
834 * @function zfree_ro()
835 *
836 * @abstract
837 * Frees an element previously allocated with @c zalloc_ro().
838 *
839 * @param zone_id the zone id to free the element to.
840 * @param addr the address to free
841 */
842 extern void zfree_ro(
843 zone_id_t zone_id,
844 void *addr __unsafe_indexable);
845
846 /*!
847 * @function zfree
848 *
849 * @abstract
850 * Frees an element allocated with @c zalloc*.
851 *
852 * @discussion
853 * If the element being freed doesn't belong to the specified zone,
854 * then this call will panic.
855 *
856 * @param zone_or_view the zone or zone view to free the element to.
857 * @param elem the element to free
858 */
859 extern void zfree(
860 zone_or_view_t zone_or_view,
861 void *elem __unsafe_indexable);
862
/*
 * These macros shadow the zfree*() functions so that the caller's
 * pointer is loaded into a temporary and erased (set to NULL) BEFORE
 * the underlying free runs, leaving no stale reference behind.
 *
 * Note: all values passed to zfree*() might be in the element to be freed,
 * temporaries must be taken, and the resetting to be done prior to free.
 */
#define zfree(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
})

#define zfree_id(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})

#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfree_zid = (zid); \
	(zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})
883
884 /* deprecated KPIS */
885
886 __zalloc_deprecated("use zone_create()")
887 extern zone_t zinit(
888 vm_size_t size, /* the size of an element */
889 vm_size_t maxmem, /* maximum memory to use */
890 vm_size_t alloc, /* allocation size */
891 const char *name __unsafe_indexable);
892
893
894 #pragma mark: zone views
895 /*!
896 * @typedef zone_stats_t
897 *
898 * @abstract
899 * The opaque type for per-cpu zone stats that are accumulated per zone
900 * or per zone-view.
901 */
902 typedef struct zone_stats *__zpercpu zone_stats_t;
903
904 /*!
905 * @typedef zone_view_t
906 *
907 * @abstract
908 * A view on a zone for accounting purposes.
909 *
910 * @discussion
911 * A zone view uses the zone it references for the allocations backing store,
912 * but does the allocation accounting at the view level.
913 *
914 * These accounting are surfaced by @b zprint(1) and similar tools,
915 * which allow for cheap but finer grained understanding of allocations
916 * without any fragmentation cost.
917 *
918 * Zone views are protected by the kernel lockdown and can't be initialized
919 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
920 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;    /* zone providing the backing store */
	zone_stats_t    zv_stats;   /* per-cpu accounting for this view */
	const char     *zv_name __unsafe_indexable;
	zone_view_t     zv_next;    /* presumably links views on the same zone — verify */
};
928
929 #ifdef XNU_KERNEL_PRIVATE
930 /*!
931 * @enum zone_kheap_id_t
932 *
933 * @brief
934 * Enumerate a particular kalloc heap.
935 *
936 * @discussion
937 * More documentation about heaps is available in @c <kern/kalloc.h>.
938 *
939 * @const KHEAP_ID_NONE
940 * This value denotes regular zones, not used by kalloc.
941 *
942 * @const KHEAP_ID_DEFAULT
943 * Indicates zones part of the KHEAP_DEFAULT heap.
944 *
945 * @const KHEAP_ID_DATA_BUFFERS
946 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
947 *
948 * @const KHEAP_ID_KT_VAR
949 * Indicates zones part of the KHEAP_KT_VAR heap.
950 */
951 __enum_decl(zone_kheap_id_t, uint32_t, {
952 KHEAP_ID_NONE,
953 KHEAP_ID_DEFAULT,
954 KHEAP_ID_DATA_BUFFERS,
955 KHEAP_ID_KT_VAR,
956
957 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
958 });
959
960 /*!
961 * @macro ZONE_VIEW_DECLARE
962 *
963 * @abstract
964 * (optionally) declares a zone view (in a header).
965 *
966 * @param var the name for the zone view.
967 */
968 #define ZONE_VIEW_DECLARE(var) \
969 extern struct zone_view var[1]
970
971 /*!
972 * @macro ZONE_VIEW_DEFINE
973 *
974 * @abstract
975 * Defines a given zone view and what it points to.
976 *
977 * @discussion
978 * Zone views can either share a pre-existing zone,
979 * or perform a lookup into a kalloc heap for the zone
980 * backing the bucket of the proper size.
981 *
982 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
983 * as the last rank. If views on zones are created, these must have been
984 * created before this stage.
985 *
986 * This macro should not be used to create zone views from default
987 * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
988 *
989 * @param var the name for the zone view.
990 * @param name a string describing the zone view.
991 * @param heap_or_zone a @c KHEAP_ID_* constant or a pointer to a zone.
992 * @param size the element size to be allocated from this view.
993 */
994 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
995 SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
996 .zv_name = name, \
997 } }; \
998 static __startup_data struct zone_view_startup_spec \
999 __startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
1000 STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
1001 &__startup_zone_view_spec_ ## var)
1002
1003 #endif /* XNU_KERNEL_PRIVATE */
1004
1005
1006 #ifdef XNU_KERNEL_PRIVATE
1007 #pragma mark - XNU only interfaces
1008
1009 #include <kern/cpu_number.h>
1010
1011 #pragma GCC visibility push(hidden)
1012
1013 #pragma mark XNU only: zalloc (extended)
1014
/*
 * Alignment masks (alignment - 1) for use as the @c align_mask
 * argument of zalloc_permanent*().
 */
#define ZALIGN_NONE     (sizeof(uint8_t)  - 1)
#define ZALIGN_16       (sizeof(uint16_t) - 1)
#define ZALIGN_32       (sizeof(uint32_t) - 1)
#define ZALIGN_PTR      (sizeof(void *)   - 1)
#define ZALIGN_64       (sizeof(uint64_t) - 1)
#define ZALIGN(t)       (_Alignof(t) - 1)
1021
1022
/*!
 * @function zalloc_permanent_tag()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * There is no matching free routine in this header: permanent
 * allocations are presumably never released — confirm in zalloc.c.
 *
 * @param size the element size (must be smaller than PAGE_SIZE)
 * @param align_mask the required alignment for this allocation,
 *                   expressed as a mask (alignment - 1), e.g. @c ZALIGN(t)
 * @param tag the tag to use for allocations larger than a page.
 *
 * @returns the allocated element
 */
__attribute__((malloc))
extern void *__sized_by(size) zalloc_permanent_tag(
	vm_size_t               size,
	vm_offset_t             align_mask,
	vm_tag_t                tag);

/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * This is zalloc_permanent_tag() with a fixed VM_KERN_MEMORY_KALLOC tag.
 *
 * @param size the element size (must be smaller than PAGE_SIZE)
 * @param align the required alignment for this allocation,
 *              expressed as a mask (alignment - 1), e.g. @c ZALIGN(t)
 *
 * @returns the allocated element
 */
#define zalloc_permanent(size, align) \
	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t the element type
 *
 * @returns the allocated element, typed as @c type_t*
 */
#define zalloc_permanent_type(type_t) \
	__unsafe_forge_single(type_t *, \
	zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))

/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void
zalloc_first_proc_made(void);
1090
#pragma mark XNU only: per-cpu allocations

/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @discussion
 * Per-CPU slots are spaced one page apart: the CPU index is converted to
 * a byte offset with @c ptoa().
 *
 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu the specified CPU number as returned by @c cpu_number()
 *
 * @returns the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @discussion
 * NOTE(review): uses @c cpu_number(); the caller presumably needs
 * preemption disabled (or some other binding to the current CPU) for
 * the returned slot to stay "current" — confirm against cpu_number()'s
 * requirements.
 *
 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @discussion
 * Iterates slots [0, zpercpu_count()), advancing one page per slot
 * (see @c __zpcpu_next()).
 *
 * @param it the name for the iterator
 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu the name for cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)

/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view the zone or zone view to allocate from
 * @param flags a collection of @c zalloc_flags_t.
 *
 * @returns NULL or the allocated element
 */
extern void *__zpercpu zalloc_percpu(
	zone_or_view_t          zone_or_view,
	zalloc_flags_t          flags);
1163
1164 static inline void *__zpercpu
__zalloc_percpu(zone_or_view_t zone_or_view,zalloc_flags_t flags)1165 __zalloc_percpu(
1166 zone_or_view_t zone_or_view,
1167 zalloc_flags_t flags)
1168 {
1169 void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
1170 if (flags & Z_NOFAIL) {
1171 __builtin_assume(addr != NULL);
1172 }
1173 return addr;
1174 }
1175
1176 #define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1177
/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view the zone or zone view to free the element to.
 * @param addr the address to free (the mangled per-cpu pointer,
 *             not a zpercpu_get*() slot)
 */
extern void zfree_percpu(
	zone_or_view_t          zone_or_view,
	void *__zpercpu         addr);

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size the element size (must be smaller than PAGE_SIZE)
 * @param align_mask the required alignment for this allocation,
 *                   expressed as a mask (alignment - 1), e.g. @c ZALIGN(t)
 *
 * @returns the allocated element
 */
extern void *__zpercpu zalloc_percpu_permanent(
	vm_size_t               size,
	vm_offset_t             align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a given
 * type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t the element type
 *
 * @returns the allocated element, typed as a per-cpu @c type_t*
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1225
1226
1227 #pragma mark XNU only: zone creation (extended)
1228
1229 /*!
1230 * @enum zone_reserved_id_t
1231 *
1232 * @abstract
1233 * Well known pre-registered zones, allowing use of zone_id_require()
1234 *
1235 * @discussion
1236 * @c ZONE_ID__* aren't real zone IDs.
1237 *
1238 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1239 * easy a value to produce (by malice or accident).
1240 *
1241 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1242 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
1243 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creations IDs
1244 * listed in @c zone_create_ro_id_t.
1245 *
1246 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1247 * @c zone_create().
1248 */
/*
 * Order matters here: the ZONE_ID__FIRST_RO .. ZONE_ID__LAST_RO aliases
 * below delimit the read-only zone range, and ZONE_ID__FIRST_DYNAMIC must
 * come last.
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,                  /* reserved, never a usable zone ID */

	ZONE_ID_PERMANENT,              /* backs zalloc_permanent*() */
	ZONE_ID_PERCPU_PERMANENT,       /* backs zalloc_percpu_permanent*() */

	/* read-only zones, internal to XNU */
	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	/* external read-only zones, 1:1 with zone_create_ro_id_t */
	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,
	ZONE_ID_AMFI_OSENTITLEMENTS,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	/* other well known zones, registered so zone_id_require() can be used */
	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_PROC_TASK,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

	ZONE_ID__FIRST_DYNAMIC,         /* first ID available to zone_create() */
});
1291
/*!
 * @const ZONE_ID_ANY
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * Zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)

/*!
 * @const ZONE_ID_INVALID
 * An invalid zone_id_t that corresponds to nothing.
 */
#define ZONE_ID_INVALID ((zone_id_t)-2)

/*!
 * @function zone_name
 *
 * @param zone the specified zone
 * @returns the name of the specified zone.
 */
const char *__unsafe_indexable zone_name(
	zone_t                  zone);

/*!
 * @function zone_heap_name
 *
 * @param zone the specified zone
 * @returns the name of the heap this zone is part of, or "".
 */
const char *__unsafe_indexable zone_heap_name(
	zone_t                  zone);
1322
/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name the name for the new zone.
 * @param size the size of the elements returned by this zone.
 * @param flags a set of @c zone_create_flags_t flags.
 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup a block that can perform non trivial initialization
 * on the zone before it is marked valid.
 * This block can call advanced setups like:
 * - zone_set_exhaustible()
 * - zone_set_noexpand()
 * (May be NULL: the ZONE_DEFINE*() macros pass NULL here.)
 *
 * @returns the created zone, this call never fails.
 */
extern zone_t zone_create_ext(
	const char             *name __unsafe_indexable,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));
1351
/*!
 * @macro ZONE_DECLARE
 *
 * @abstract
 * Declares a zone variable and its associated type.
 *
 * @param var the name of the variable to declare.
 * @param type_t the type of elements in the zone.
 */
#define ZONE_DECLARE(var, type_t) \
	extern zone_t var; \
	__ZONE_DECLARE_TYPE(var, type_t)

/*!
 * @macro ZONE_DECLARE_ID
 *
 * @abstract
 * Declares the type associated with a zone ID.
 *
 * @param id the name of zone ID to associate a type with.
 * @param type_t the type of elements in the zone.
 */
#define ZONE_DECLARE_ID(id, type_t) \
	__ZONE_DECLARE_TYPE(id, type_t)

/*!
 * @macro ZONE_DEFINE
 *
 * @abstract
 * Declares a zone variable to automatically initialize with the specified
 * parameters.
 *
 * @discussion
 * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
 *
 * Note: @c ZC_DESTRUCTIBLE zones cannot be created this way
 * (enforced by the static_assert below).
 *
 * @param var the name of the variable to declare.
 * @param name the name for the zone
 * @param size the size of the elements returned by this zone.
 * @param flags a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
	    &__startup_zone_spec_ ## var)

/*!
 * @macro ZONE_DEFINE_TYPE
 *
 * @abstract
 * Defines a zone variable to automatically initialize with the specified
 * parameters, associated with a particular type.
 *
 * @param var the name of the variable to declare.
 * @param name the name for the zone
 * @param type_t the type of elements in the zone.
 * @param flags a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
	ZONE_DEFINE(var, name, sizeof(type_t), flags); \
	__ZONE_DECLARE_TYPE(var, type_t)

/*!
 * @macro ZONE_DEFINE_ID
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @param zid a @c zone_reserved_id_t value.
 * @param name the name for the zone
 * @param type_t the type of elements in the zone.
 * @param flags a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE_ID(zid, name, type_t, flags) \
	ZONE_DECLARE_ID(zid, type_t); \
	ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)

/*!
 * @macro ZONE_INIT
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @discussion
 * @c __LINE__ is used to generate a unique symbol for the startup spec,
 * so at most one ZONE_INIT() may appear per source line.
 *
 * @param var the name of the variable to initialize.
 * @param name the name for the zone
 * @param size the size of the elements returned by this zone.
 * @param flags a set of @c zone_create_flags_t flags.
 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 * @param extra_setup a block that can perform non trivial initialization
 * (@see @c zone_create_ext()).
 */
#define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1450
/*!
 * @function zone_id_require
 *
 * @abstract
 * Requires for a given pointer to belong to the specified zone, by ID and size.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * This is a variant of @c zone_require() which:
 * - isn't sensitive to @c zone_t::elem_size being compromised,
 * - is slightly faster as it saves one load and a multiplication.
 *
 * @param zone_id the zone ID the address needs to belong to.
 * @param elem_size the size of elements for this zone.
 * @param addr the element address to check.
 */
extern void zone_id_require(
	zone_id_t               zone_id,
	vm_size_t               elem_size,
	void                   *addr __unsafe_indexable);

/*
 * Makes the zone non-expandable; to be called from the zone_create_ext()
 * setup hook. (max_elements: presumably a cap on the number of elements
 * the zone may hold — confirm in zalloc.c.)
 */
extern void zone_set_noexpand(
	zone_t                  zone,
	vm_size_t               max_elements);

/*
 * Makes the zone exhaustible; to be called from the zone_create_ext()
 * setup hook.
 */
extern void zone_set_exhaustible(
	zone_t                  zone,
	vm_size_t               max_elements);
1483
/*!
 * @function zone_raise_reserve()
 *
 * @brief
 * Used to raise the reserve on a zone.
 *
 * @discussion
 * Can be called from any context (zone_create_ext() setup hook or after).
 *
 * @param zone_or_view the zone or zone view whose reserve to raise.
 * @param min_elements the minimum number of elements to keep in reserve.
 */
extern void zone_raise_reserve(
	zone_or_view_t          zone_or_view,
	uint16_t                min_elements);

/*!
 * @function zone_fill_initially
 *
 * @brief
 * Initially fill a non collectable zone to have the specified amount of
 * elements.
 *
 * @discussion
 * This function must be called on a non collectable permanent zone before it
 * has been used yet.
 *
 * @param zone The zone to fill.
 * @param nelems The number of elements to be able to hold.
 */
extern void zone_fill_initially(
	zone_t                  zone,
	vm_size_t               nelems);
1514
#pragma mark XNU only: PGZ support

/*!
 * @function pgz_owned()
 *
 * @brief
 * Returns whether an address is PGZ owned.
 *
 * @discussion
 * Always false when CONFIG_PROB_GZALLOC is disabled.
 *
 * @param addr The address to translate.
 * @returns Whether it is PGZ owned
 */
#if CONFIG_PROB_GZALLOC
extern bool pgz_owned(mach_vm_address_t addr) __pure2;
#else
#define pgz_owned(addr) false
#endif

/*!
 * @function pgz_decode()
 *
 * @brief
 * Translates a PGZ protected virtual address to its unprotected
 * backing store.
 *
 * @discussion
 * This is exposed so that the VM can lookup the vm_page_t for PGZ protected
 * elements since the PGZ protected virtual addresses are maintained by PGZ
 * at the pmap level without the VM involvement.
 *
 * "allow_invalid" schemes relying on sequestering also need this
 * to perform the locking attempts on the unprotected address.
 *
 * When CONFIG_PROB_GZALLOC is disabled this is the identity.
 *
 * @param addr The address to translate.
 * @param size The object size.
 * @returns The unprotected address or @c addr.
 */
#if CONFIG_PROB_GZALLOC
#define pgz_decode(addr, size) \
	((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
#else
#define pgz_decode(addr, size) (addr)
#endif

/*!
 * @function pgz_decode_allow_invalid()
 *
 * @brief
 * Translates a PGZ protected virtual address to its unprotected
 * backing store, but doesn't assert it is still allocated/valid.
 *
 * @discussion
 * "allow_invalid" schemes relying on sequestering also need this
 * to perform the locking attempts on the unprotected address.
 *
 * When CONFIG_PROB_GZALLOC is disabled this is the identity.
 *
 * @param addr The address to translate.
 * @param want_zid The expected zone ID for the element.
 * @returns The unprotected address or @c addr.
 */
#if CONFIG_PROB_GZALLOC
#define pgz_decode_allow_invalid(addr, want_zid) \
	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
#else
#define pgz_decode_allow_invalid(addr, zid) (addr)
#endif
1579
1580 #pragma mark XNU only: misc & implementation details
1581
/*
 * Startup descriptor consumed by zone_create_startup(); one instance is
 * emitted in __startup_data by each ZONE_DEFINE*() / ZONE_INIT() use.
 */
struct zone_create_startup_spec {
	zone_t                 *z_var;      /* out: receives the created zone
	                                     * (NULL when used via ZONE_DEFINE_ID) */
	const char             *z_name __unsafe_indexable; /* zone name */
	vm_size_t               z_size;     /* element size */
	zone_create_flags_t     z_flags;    /* ZC_* creation flags */
	zone_id_t               z_zid;      /* reserved zone ID or ZONE_ID_ANY */
	void                  (^z_setup)(zone_t); /* optional extra setup block */
};

extern void zone_create_startup(
	struct zone_create_startup_spec *spec);

/*
 * Associates a type with a zone variable/ID for the __zalloc_cast()
 * machinery below. The extra TYPE -> TYPE2 indirection lets macro
 * arguments expand before token pasting.
 */
#define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
#define __ZONE_DECLARE_TYPE2(var, type_t) \
	__attribute__((visibility("hidden"))) \
	extern type_t *__single __zalloc__##var##__type_name

/*
 * __ZONE_INIT expands @c ns (ZONE_INIT passes __LINE__) so that each use
 * gets a uniquely named startup spec, registered at STARTUP_RANK_FOURTH
 * of the ZALLOC startup phase.
 */
#define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
	    &__startup_zone_spec_ ## ns)

#define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \

/* Casts @c expr to the type registered for @c namespace via __ZONE_DECLARE_TYPE(). */
#define __zalloc_cast(namespace, expr) \
	((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
1610
/*
 * Typed allocator wrappers: route the raw allocators through
 * __zalloc_cast() so results carry the type registered with
 * __ZONE_DECLARE_TYPE(). Parenthesizing the callee, e.g. @c (zalloc),
 * calls the real function and bypasses this very macro.
 */
#define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
#define zalloc_ro(zid, flags) __zalloc_cast(zid, (__zalloc_ro)(zid, flags))
#if ZALLOC_TYPE_SAFE
#define zalloc(zov) __zalloc_cast(zov, (zalloc)(zov))
#define zalloc_noblock(zov) __zalloc_cast(zov, (zalloc_noblock)(zov))
#define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
#else /* ZALLOC_TYPE_SAFE */
#define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
#endif /* !ZALLOC_TYPE_SAFE */
1620
/*
 * Startup descriptor consumed by zone_view_startup_init(); emitted by
 * ZONE_VIEW_DEFINE().
 */
struct zone_view_startup_spec {
	zone_view_t             zv_view;    /* the view being initialized */
	union {
		zone_kheap_id_t zv_heapid;  /* when backed by a kalloc heap */
		zone_t         *zv_zone;    /* when backed by a specific zone */
	};
	vm_size_t               zv_size;    /* element size served by the view */
};

extern void zone_view_startup_init(
	struct zone_view_startup_spec *spec);

extern void zone_userspace_reboot_checks(void);

#if VM_TAG_SIZECLASSES
extern void __zone_site_register(
	vm_allocation_site_t   *site);

/*
 * Evaluates to the VM tag for this allocation site: a static
 * vm_allocation_site_t is registered during the ZALLOC startup phase
 * (STARTUP_RANK_LAST), and the statement expression yields its tag.
 */
#define VM_ALLOC_SITE_TAG() ({ \
	__PLACE_IN_SECTION("__DATA, __data") \
	static vm_allocation_site_t site = { .refcount = 2, }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __zone_site_register, &site); \
	site.tag; \
})
#else /* VM_TAG_SIZECLASSES */
#define VM_ALLOC_SITE_TAG() VM_KERN_MEMORY_NONE
#endif /* !VM_TAG_SIZECLASSES */
1648
1649 static inline zalloc_flags_t
__zone_flags_mix_tag(zalloc_flags_t flags,vm_tag_t tag)1650 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
1651 {
1652 return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
1653 }
1654
#if DEBUG || DEVELOPMENT
# if __LP64__
# define ZPCPU_MANGLE_BIT (1ul << 63)
# else /* !__LP64__ */
# define ZPCPU_MANGLE_BIT (1ul << 31)
# endif /* !__LP64__ */
#else /* !(DEBUG || DEVELOPMENT) */
# define ZPCPU_MANGLE_BIT 0ul
#endif /* !(DEBUG || DEVELOPMENT) */

/*
 * Per-cpu pointer (de)mangling: __zpcpu_mangle() clears the top address
 * bit and __zpcpu_demangle() restores it. On DEBUG/DEVELOPMENT kernels
 * this forces accesses to go through the zpercpu_get*() accessors
 * (presumably a raw dereference of the mangled pointer would fault —
 * TODO confirm). On release kernels ZPCPU_MANGLE_BIT is 0 and both are
 * identity operations.
 */
#define __zpcpu_mangle(ptr) (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
#define __zpcpu_demangle(ptr) (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
#define __zpcpu_addr(e) ((vm_address_t)(e))
#define __zpcpu_cast(ptr, e) __unsafe_forge_single(typeof(ptr), e)
/* Per-cpu slots are one page apart. */
#define __zpcpu_next(ptr) __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)

/**
 * @macro __zpcpu_mangle_for_boot()
 *
 * @discussion
 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
 * storage marked @c __startup_data and replace it with the proper allocation
 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
 *
 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
 * provides the proper mangling of the storage into a "fake" percpu pointer so
 * that accesses through @c zpercpu_get() functions properly.
 *
 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed
 * (enforced by the assert below).
 */
#define __zpcpu_mangle_for_boot(ptr) ({ \
	assert(startup_phase < STARTUP_SUB_ZALLOC); \
	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
})

/* Number of per-CPU slots; declared __pure2 (no side effects). */
extern unsigned zpercpu_count(void) __pure2;
1692
#if CONFIG_PROB_GZALLOC

/* Implementation details backing the pgz_decode*() macros above. */
extern vm_offset_t __pgz_decode(
	mach_vm_address_t       addr,
	mach_vm_size_t          size);

extern vm_offset_t __pgz_decode_allow_invalid(
	vm_offset_t             offs,
	zone_id_t               zid);

#endif
#if DEBUG || DEVELOPMENT
/* Zone-wide page accounting, exposed on debug/development kernels only. */
extern size_t zone_pages_wired;
extern size_t zone_guard_pages;
#endif /* DEBUG || DEVELOPMENT */
#if CONFIG_ZLEAKS
/* Zone leak detection state and thresholds. */
extern uint32_t zleak_active;
extern vm_size_t zleak_max_zonemap_size;
extern vm_size_t zleak_global_tracking_threshold;
extern vm_size_t zleak_per_zone_tracking_threshold;

extern kern_return_t zleak_update_threshold(
	vm_size_t              *arg,
	uint64_t                value);
#endif /* CONFIG_ZLEAKS */

/*
 * NOTE(review): jetsam limit for zone map usage — units (likely a
 * percentage of the zone map) to be confirmed in zalloc.c.
 */
extern uint32_t zone_map_jetsam_limit;

extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);

extern zone_t percpu_u64_zone;
1724
1725 #pragma GCC visibility pop
1726 #endif /* XNU_KERNEL_PRIVATE */
1727
/*
 * This macro is currently used by AppleImage4 (rdar://83924635).
 * It simply forwards to os_ptr_load_and_erase().
 */
#define __zalloc_ptr_load_and_erase(elem) \
	os_ptr_load_and_erase(elem)
1733
1734 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1735
1736 #endif /* _KERN_ZALLOC_H_ */
1737
1738 #endif /* KERNEL_PRIVATE */
1739