1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: zalloc.h
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1985
62 *
63 */
64
65 #ifdef KERNEL_PRIVATE
66
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg) __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86
87 /*
88 * Enable this macro to force type safe zalloc/zalloc_ro/...
89 */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99
100 /*!
101 * @macro __zpercpu
102 *
103 * @abstract
104 * Annotation that helps denoting a per-cpu pointer that requires usage of
105 * @c zpercpu_*() for access.
106 */
107 #define __zpercpu __unsafe_indexable
108
109 /*!
110 * @typedef zone_id_t
111 *
112 * @abstract
113 * The type for a zone ID.
114 */
115 typedef uint16_t zone_id_t;
116
117 /**
118 * @enum zone_create_flags_t
119 *
120 * @abstract
121 * Set of flags to pass to zone_create().
122 *
123 * @discussion
124 * Some kernel-wide policies affect all possible created zones.
125 * Explicit @c ZC_* win over such policies.
126 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** Force the created zone to use VA sequestering */
	ZC_SEQUESTER            = 0x00000001,
	/** Force the created zone @b NOT to use VA sequestering */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as read-only */
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Do not track this zone when gzalloc is engaged */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/** Use guard pages in PGZ mode */
	ZC_PGZ_USE_GUARDS       = 0x0100000000000000,

	/** Zone doesn't support TBI tagging */
	ZC_NOTBITAG             = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	/** This zone will back a kalloc heap */
	ZC_KALLOC_HEAP          = 0x0800000000000000,

	/** This zone can be crammed with foreign pages */
	ZC_ALLOW_FOREIGN        = 0x1000000000000000,

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,
#if __LP64__
	/* convenience spelling: applies ZC_VM only on LP64 kernels */
#define ZC_VM_LP64      ZC_VM
#else
#define ZC_VM_LP64      ZC_NONE
#endif

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});
199
200 /*!
201 * @union zone_or_view
202 *
203 * @abstract
204 * A type used for calls that admit both a zone or a zone view.
205 *
206 * @discussion
207 * @c zalloc() and @c zfree() and their variants can act on both
208 * zones and zone views.
209 */
/*
 * NOTE: the C++ constructors below let a zone_t, zone_view_t or
 * kalloc_type_view pointer convert implicitly to a zone_or_view in C++
 * translation units; C callers get the same effect via transparent_union.
 */
union zone_or_view {
	struct zone_view        *zov_view;
	struct zone             *zov_zone;
	struct kalloc_type_view *zov_kt_heap;
#ifdef __cplusplus
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
228
229 /*!
230 * @enum zone_create_ro_id_t
231 *
232 * @abstract
233 * Zone creation IDs for external read only zones
234 *
235 * @discussion
236 * Kexts that desire to use the RO allocator should:
237 * 1. Add a zone creation id below
238 * 2. Add a corresponding ID to @c zone_reserved_id_t
239 * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
240 * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
241 * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
242 */
__enum_decl(zone_create_ro_id_t, zone_id_t, {
	ZC_RO_ID_SANDBOX,
	ZC_RO_ID_PROFILE,
	ZC_RO_ID_PROTOBOX,
	ZC_RO_ID_SB_FILTER,
	/* keep equal to the last RO zone creation ID declared above */
	ZC_RO_ID__LAST = ZC_RO_ID_SB_FILTER,
});
250
251 /*!
252 * @function zone_create
253 *
254 * @abstract
255 * Creates a zone with the specified parameters.
256 *
257 * @discussion
258 * A Zone is a slab allocator that returns objects of a given size very quickly.
259 *
260 * @param name the name for the new zone.
261 * @param size the size of the elements returned by this zone.
262 * @param flags a set of @c zone_create_flags_t flags.
263 *
264 * @returns the created zone, this call never fails.
265 */
266 extern zone_t zone_create(
267 const char *name __unsafe_indexable,
268 vm_size_t size,
269 zone_create_flags_t flags);
270
271 /*!
272 * @function zone_create_ro
273 *
274 * @abstract
275 * Creates a read only zone with the specified parameters from kexts
276 *
277 * @discussion
278 * See notes under @c zone_create_ro_id_t wrt creation and use of RO zones in
279 * kexts. Do not use this API to create read only zones in xnu.
280 *
281 * @param name the name for the new zone.
282 * @param size the size of the elements returned by this zone.
283 * @param flags a set of @c zone_create_flags_t flags.
284 * @param zc_ro_id an ID declared in @c zone_create_ro_id_t
285 *
286 * @returns the zone ID of the created zone, this call never fails.
287 */
288 extern zone_id_t zone_create_ro(
289 const char *name __unsafe_indexable,
290 vm_size_t size,
291 zone_create_flags_t flags,
292 zone_create_ro_id_t zc_ro_id);
293
294 /*!
295 * @function zdestroy
296 *
297 * @abstract
298 * Destroys a zone previously made with zone_create.
299 *
300 * @discussion
301 * Zones must have been made destructible for @c zdestroy() to be allowed,
302 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
303 *
304 * @param zone the zone to destroy.
305 */
306 extern void zdestroy(
307 zone_t zone);
308
309 /*!
310 * @function zone_require
311 *
312 * @abstract
 * Requires that a given pointer belong to the specified zone.
314 *
315 * @discussion
316 * The function panics if the check fails as it indicates that the kernel
317 * internals have been compromised.
318 *
319 * Note that zone_require() can only work with:
320 * - zones not allowing foreign memory
321 * - zones in the general submap.
322 *
323 * @param zone the zone the address needs to belong to.
324 * @param addr the element address to check.
325 */
326 extern void zone_require(
327 zone_t zone,
328 void *addr __unsafe_indexable);
329
330 /*!
331 * @function zone_require_ro
332 *
333 * @abstract
334 * Version of zone require intended for zones created with ZC_READONLY
335 *
336 * @discussion
337 * This check is not sufficient to fully trust the element.
338 *
339 * Another check of its content must be performed to prove
340 * that the element is "the right one", a typical technique
341 * for when the RO data structure is 1:1 with a mutable one,
342 * is a simple circularity check with a very strict lifetime
343 * (both the mutable and read-only data structures are made
344 * and destroyed as close as possible).
345 *
346 * @param zone_id the zone id the address needs to belong to.
347 * @param elem_size the element size for this zone.
348 * @param addr the element address to check.
349 */
350 extern void zone_require_ro(
351 zone_id_t zone_id,
352 vm_size_t elem_size,
353 void *addr __unsafe_indexable);
354
355 /*!
356 * @function zone_require_ro_range_contains
357 *
358 * @abstract
359 * Version of zone require intended for zones created with ZC_READONLY
360 * that only checks that the zone is RO and that the address is in
361 * the zone's submap
362 *
363 * @param zone_id the zone id the address needs to belong to.
364 * @param addr the element address to check.
365 */
366 extern void zone_require_ro_range_contains(
367 zone_id_t zone_id,
368 void *addr __unsafe_indexable);
369
370 /*!
371 * @enum zalloc_flags_t
372 *
373 * @brief
374 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
375 *
376 * @discussion
377 * It is encouraged that any callsite passing flags uses exactly one of:
378 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
379 * if nothing else was specified.
380 *
381 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
382 * then @c Z_WAITOK is ignored.
383 *
384 * @const Z_WAITOK
385 * Means that it's OK for zalloc() to block to wait for memory,
386 * when Z_WAITOK is passed, zalloc will never return NULL.
387 *
388 * @const Z_NOWAIT
389 * Passing this flag means that zalloc is not allowed to ever block.
390 *
391 * @const Z_NOPAGEWAIT
392 * Passing this flag means that zalloc is allowed to wait due to lock
393 * contention, but will not wait for the VM to wait for pages when
394 * under memory pressure.
395 *
396 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
398 *
399 * @const Z_NOFAIL
400 * Passing this flag means that the caller expects the allocation to always
401 * succeed. This will result in a panic if this assumption isn't correct.
402 *
403 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
404 * be used on exhaustible zones.
405 *
406 * @const Z_REALLOCF
407 * For the realloc family of functions,
408 * free the incoming memory on failure cases.
409 *
410 #if XNU_KERNEL_PRIVATE
411 * @const Z_PGZ
412 * Used by zalloc internally to denote an allocation that we will try
413 * to guard with PGZ.
414 *
415 * @const Z_VM_TAG_BT_BIT
416 * Used to blame allocation accounting on the first kext
417 * found in the backtrace of the allocation.
418 *
419 * @const Z_NOZZC
420 * Used internally to mark allocations that will skip zero validation.
421 *
422 * @const Z_PCPU
423 * Used internally for the percpu paths.
424 *
425 * @const Z_VM_TAG_MASK
426 * Represents bits in which a vm_tag_t for the allocation can be passed.
427 * (used by kalloc for the zone tagging debugging feature).
428 #endif
429 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,
	Z_REALLOCF      = 0x0008,

#if XNU_KERNEL_PRIVATE
	/* internal-only flags, see the block comment above for their meaning */
	Z_PGZ           = 0x0800,
	Z_VM_TAG_BT_BIT = 0x1000,
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO   = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO   = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL = Z_WAITOK | Z_ZERO | Z_NOFAIL,

	/* mask of the flags exposed through the KPI (by name -- TODO confirm usage) */
	Z_KPI_MASK      = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
#if XNU_KERNEL_PRIVATE
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

	/* the vm_tag_t is carried in the top 16 bits, see Z_VM_TAG_MASK */
#define Z_VM_TAG_SHIFT  16
#define Z_VM_TAG(fl, tag)       ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
#define Z_VM_TAG_BT(fl, tag)    ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
#endif
});
461
462 /*!
463 * @function zalloc
464 *
465 * @abstract
466 * Allocates an element from a specified zone.
467 *
468 * @discussion
469 * If the zone isn't exhaustible and is expandable, this call never fails.
470 *
471 * @param zone_or_view the zone or zone view to allocate from
472 *
473 * @returns NULL or the allocated element
474 */
475 __attribute__((malloc))
476 extern void *__unsafe_indexable zalloc(
477 zone_or_view_t zone_or_view);
478
479 /*!
480 * @function zalloc_noblock
481 *
482 * @abstract
483 * Allocates an element from a specified zone, but never blocks.
484 *
485 * @discussion
486 * This call is suitable for preemptible code, however allocation
487 * isn't allowed from interrupt context.
488 *
489 * @param zone_or_view the zone or zone view to allocate from
490 *
491 * @returns NULL or the allocated element
492 */
493 __attribute__((malloc))
494 extern void *__unsafe_indexable zalloc_noblock(
495 zone_or_view_t zone_or_view);
496
497 /*!
498 * @function zalloc_flags()
499 *
500 * @abstract
501 * Allocates an element from a specified zone, with flags.
502 *
503 * @param zone_or_view the zone or zone view to allocate from
504 * @param flags a collection of @c zalloc_flags_t.
505 *
506 * @returns NULL or the allocated element
507 */
508 __attribute__((malloc))
509 extern void *__unsafe_indexable zalloc_flags(
510 zone_or_view_t zone_or_view,
511 zalloc_flags_t flags);
512
513 /*!
514 * @macro zalloc_id
515 *
516 * @abstract
517 * Allocates an element from a specified zone ID, with flags.
518 *
519 * @param zid The proper @c ZONE_ID_* constant.
520 * @param flags a collection of @c zalloc_flags_t.
521 *
522 * @returns NULL or the allocated element
523 */
524 __attribute__((malloc))
525 extern void *__unsafe_indexable zalloc_id(
526 zone_id_t zid,
527 zalloc_flags_t flags);
528
529 /*!
530 * @function zalloc_ro
531 *
532 * @abstract
533 * Allocates an element from a specified read-only zone.
534 *
535 * @param zone_id the zone id to allocate from
536 * @param flags a collection of @c zalloc_flags_t.
537 *
538 * @returns NULL or the allocated element
539 */
540 __attribute__((malloc))
541 extern void *__unsafe_indexable zalloc_ro(
542 zone_id_t zone_id,
543 zalloc_flags_t flags);
544
545 /*!
546 * @function zalloc_ro_mut
547 *
548 * @abstract
549 * Modifies an element from a specified read-only zone.
550 *
551 * @discussion
552 * Modifying compiler-assisted authenticated pointers using this function will
553 * not result in a signed pointer being written. The caller is expected to
554 * sign the value appropriately beforehand if they wish to do this.
555 *
556 * @param zone_id the zone id to allocate from
557 * @param elem element to be modified
558 * @param offset offset from element
559 * @param new_data pointer to new data
560 * @param new_data_size size of modification
561 *
562 */
563 extern void zalloc_ro_mut(
564 zone_id_t zone_id,
565 void *elem __unsafe_indexable,
566 vm_offset_t offset,
567 const void *new_data __sized_by(new_data_size),
568 vm_size_t new_data_size);
569
570 /*!
571 * @function zalloc_ro_update_elem
572 *
573 * @abstract
574 * Update the value of an entire element allocated in the read only allocator.
575 *
576 * @param zone_id the zone id to allocate from
577 * @param elem element to be modified
578 * @param new_data pointer to new data
579 *
580 */
/* Overwrites the whole element with *new_data via zalloc_ro_mut(). */
#define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
	const typeof(*(elem)) *__zrue_src = (new_data); \
	zalloc_ro_mut(zone_id, elem, 0, __zrue_src, sizeof(*__zrue_src)); \
})
585
/*!
 * @function zalloc_ro_update_field
 *
 * @abstract
 * Update a single field of an element allocated in the read only allocator.
 *
 * @param zone_id the zone id the element belongs to
 * @param elem element to be modified
 * @param field the element field to be modified
 * @param value pointer to the new value for the field
 *
 */
#define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
	const typeof((elem)->field) *__value = (value); \
	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    __value, sizeof((elem)->field)); \
})
603
#if __LP64__
/* "LONG" atomic op spellings map to the 64-bit variants on LP64 kernels... */
#define ZRO_ATOMIC_LONG(op)     ZRO_ATOMIC_##op##_64
#else
/* ...and to the 32-bit variants otherwise */
#define ZRO_ATOMIC_LONG(op)     ZRO_ATOMIC_##op##_32
#endif
609
610 /*!
611 * @enum zro_atomic_op_t
612 *
613 * @brief
614 * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
615 * atomic operations.
616 *
617 * @discussion
618 * This enum provides all flavors of atomic operations supported in sizes 8,
619 * 16, 32, 64 bits.
620 *
 * @const ZRO_ATOMIC_OR_*
 * To perform an @c os_atomic_or
 *
 * @const ZRO_ATOMIC_XOR_*
 * To perform an @c os_atomic_xor
 *
 * @const ZRO_ATOMIC_AND_*
 * To perform an @c os_atomic_and
 *
 * @const ZRO_ATOMIC_ADD_*
 * To perform an @c os_atomic_add
 *
 * @const ZRO_ATOMIC_XCHG_*
 * To perform an @c os_atomic_xchg
635 *
636 */
__enum_decl(zro_atomic_op_t, uint32_t, {
	/*
	 * The low nibble encodes the operand size in bytes (1, 2, 4 or 8);
	 * see the static_assert in zalloc_ro_update_field_atomic().
	 * The higher bits select the operation.
	 */
	ZRO_ATOMIC_OR_8     = 0x00000010 | 1,
	ZRO_ATOMIC_OR_16    = 0x00000010 | 2,
	ZRO_ATOMIC_OR_32    = 0x00000010 | 4,
	ZRO_ATOMIC_OR_64    = 0x00000010 | 8,

	ZRO_ATOMIC_XOR_8    = 0x00000020 | 1,
	ZRO_ATOMIC_XOR_16   = 0x00000020 | 2,
	ZRO_ATOMIC_XOR_32   = 0x00000020 | 4,
	ZRO_ATOMIC_XOR_64   = 0x00000020 | 8,

	ZRO_ATOMIC_AND_8    = 0x00000030 | 1,
	ZRO_ATOMIC_AND_16   = 0x00000030 | 2,
	ZRO_ATOMIC_AND_32   = 0x00000030 | 4,
	ZRO_ATOMIC_AND_64   = 0x00000030 | 8,

	ZRO_ATOMIC_ADD_8    = 0x00000040 | 1,
	ZRO_ATOMIC_ADD_16   = 0x00000040 | 2,
	ZRO_ATOMIC_ADD_32   = 0x00000040 | 4,
	ZRO_ATOMIC_ADD_64   = 0x00000040 | 8,

	ZRO_ATOMIC_XCHG_8   = 0x00000050 | 1,
	ZRO_ATOMIC_XCHG_16  = 0x00000050 | 2,
	ZRO_ATOMIC_XCHG_32  = 0x00000050 | 4,
	ZRO_ATOMIC_XCHG_64  = 0x00000050 | 8,

	/* convenient spellings */
	ZRO_ATOMIC_OR_LONG   = ZRO_ATOMIC_LONG(OR),
	ZRO_ATOMIC_XOR_LONG  = ZRO_ATOMIC_LONG(XOR),
	ZRO_ATOMIC_AND_LONG  = ZRO_ATOMIC_LONG(AND),
	ZRO_ATOMIC_ADD_LONG  = ZRO_ATOMIC_LONG(ADD),
	ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
});
670
671 /*!
672 * @function zalloc_ro_mut_atomic
673 *
674 * @abstract
675 * Atomically update an offset in an element allocated in the read only
676 * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
677 *
678 * @param zone_id the zone id to allocate from
679 * @param elem element to be modified
680 * @param offset offset in the element to be modified
681 * @param op atomic operation to perform (see @c zro_atomic_op_t)
682 * @param value value for the atomic operation
683 *
684 */
685 extern uint64_t zalloc_ro_mut_atomic(
686 zone_id_t zone_id,
687 void *elem __unsafe_indexable,
688 vm_offset_t offset,
689 zro_atomic_op_t op,
690 uint64_t value);
691
/*!
 * @macro zalloc_ro_update_field_atomic
 *
 * @abstract
 * Atomically update a single field of an element allocated in the read only
 * allocator.
 *
 * @param zone_id the zone id the element belongs to
 * @param elem element to be modified
 * @param field the element field to be modified
 * @param op atomic operation to perform (see @c zro_atomic_op_t)
 * @param value value for the atomic operation
 *
 */
#define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value)  ({ \
	const typeof((elem)->field) __value = (value); \
	/* the low nibble of the op encodes the operand size in bytes */ \
	static_assert(sizeof(__value) == (op & 0xf)); \
	(os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id, \
	    elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value); \
})
712
713 /*!
714 * @function zalloc_ro_clear
715 *
716 * @abstract
717 * Zeroes an element from a specified read-only zone.
718 *
719 * @param zone_id the zone id to allocate from
720 * @param elem element to be modified
721 * @param offset offset from element
722 * @param size size of modification
723 */
724 extern void zalloc_ro_clear(
725 zone_id_t zone_id,
726 void *elem __unsafe_indexable,
727 vm_offset_t offset,
728 vm_size_t size);
729
/*!
 * @function zalloc_ro_clear_field
 *
 * @abstract
 * Zeroes the specified field of an element from a specified read-only zone.
 *
 * @param zone_id the zone id the element belongs to
 * @param elem element to be modified
 * @param field the field of the element to be zeroed
 */
#define zalloc_ro_clear_field(zone_id, elem, field) \
	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    sizeof((elem)->field))
743
744 /*!
745 * @function zfree_id()
746 *
747 * @abstract
748 * Frees an element previously allocated with @c zalloc_id().
749 *
750 * @param zone_id the zone id to free the element to.
751 * @param addr the address to free
752 */
753 extern void zfree_id(
754 zone_id_t zone_id,
755 void *addr __unsafe_indexable);
756
757 /*!
758 * @function zfree_ro()
759 *
760 * @abstract
761 * Frees an element previously allocated with @c zalloc_ro().
762 *
763 * @param zone_id the zone id to free the element to.
764 * @param addr the address to free
765 */
766 extern void zfree_ro(
767 zone_id_t zone_id,
768 void *addr __unsafe_indexable);
769
770 /*!
771 * @function zfree
772 *
773 * @abstract
774 * Frees an element allocated with @c zalloc*.
775 *
776 * @discussion
777 * If the element being freed doesn't belong to the specified zone,
778 * then this call will panic.
779 *
780 * @param zone_or_view the zone or zone view to free the element to.
781 * @param elem the element to free
782 */
783 extern void zfree(
784 zone_or_view_t zone_or_view,
785 void *elem __unsafe_indexable);
786
/*
 * zfree() wrapper that also nils out the caller's pointer.
 *
 * The pointer is read out and reset before the actual free: any value
 * passed to zfree*() might live inside the element being freed, so
 * temporaries must be taken, and the resetting done, prior to the free.
 */
#define zfree(zone, elem) ({ \
	__auto_type __zfree_zov = (zone); \
	void *__zfree_ptr = (void *)os_ptr_load_and_erase(elem); \
	(zfree)(__zfree_zov, __zfree_ptr); \
})
797
/* zfree_id() wrapper that also nils out the caller's pointer. */
#define zfree_id(zid, elem) ({ \
	zone_id_t __zfid = (zid); \
	void *__zfid_ptr = (void *)os_ptr_load_and_erase(elem); \
	(zfree_id)(__zfid, __zfid_ptr); \
})
802
/* zfree_ro() wrapper that also nils out the caller's pointer. */
#define zfree_ro(zid, elem) ({ \
	zone_id_t __zfro = (zid); \
	void *__zfro_ptr = (void *)os_ptr_load_and_erase(elem); \
	(zfree_ro)(__zfro, __zfro_ptr); \
})
807
808 /* deprecated KPIS */
809
810 __zalloc_deprecated("use zone_create()")
811 extern zone_t zinit(
812 vm_size_t size, /* the size of an element */
813 vm_size_t maxmem, /* maximum memory to use */
814 vm_size_t alloc, /* allocation size */
815 const char *name __unsafe_indexable);
816
817
818 #pragma mark: zone views
819 /*!
820 * @typedef zone_stats_t
821 *
822 * @abstract
823 * The opaque type for per-cpu zone stats that are accumulated per zone
824 * or per zone-view.
825 */
826 typedef struct zone_stats *__zpercpu zone_stats_t;
827
828 /*!
829 * @typedef zone_view_t
830 *
831 * @abstract
832 * A view on a zone for accounting purposes.
833 *
834 * @discussion
835 * A zone view uses the zone it references for the allocations backing store,
836 * but does the allocation accounting at the view level.
837 *
838 * These accounting are surfaced by @b zprint(1) and similar tools,
839 * which allow for cheap but finer grained understanding of allocations
840 * without any fragmentation cost.
841 *
842 * Zone views are protected by the kernel lockdown and can't be initialized
843 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
844 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	/* zone providing the backing store for this view's allocations */
	zone_t          zv_zone;
	/* per-cpu accounting stats, accumulated at the view level */
	zone_stats_t    zv_stats;
	/* name surfaced by tools such as zprint(1) */
	const char     *zv_name __unsafe_indexable;
	/* link to the next view (presumably on the same zone -- TODO confirm) */
	zone_view_t     zv_next;
};
852
853 #ifdef XNU_KERNEL_PRIVATE
854 /*!
855 * @enum zone_kheap_id_t
856 *
857 * @brief
858 * Enumerate a particular kalloc heap.
859 *
860 * @discussion
861 * More documentation about heaps is available in @c <kern/kalloc.h>.
862 *
863 * @const KHEAP_ID_NONE
864 * This value denotes regular zones, not used by kalloc.
865 *
866 * @const KHEAP_ID_DEFAULT
867 * Indicates zones part of the KHEAP_DEFAULT heap.
868 *
869 * @const KHEAP_ID_DATA_BUFFERS
870 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
871 *
872 * @const KHEAP_ID_KT_VAR
873 * Indicates zones part of the KHEAP_KT_VAR heap.
874 *
875 * @const KHEAP_ID_KEXT
876 * Indicates zones part of the KHEAP_KEXT heap.
877 */
__enum_decl(zone_kheap_id_t, uint32_t, {
	KHEAP_ID_NONE,          /* regular zones, not used by kalloc */
	KHEAP_ID_DEFAULT,       /* zones part of the KHEAP_DEFAULT heap */
	KHEAP_ID_DATA_BUFFERS,  /* zones part of the KHEAP_DATA_BUFFERS heap */
	KHEAP_ID_KT_VAR,        /* zones part of the KHEAP_KT_VAR heap */
	KHEAP_ID_KEXT,          /* zones part of the KHEAP_KEXT heap */

	/* keep in sync with the last enumerator above */
#define KHEAP_ID_COUNT (KHEAP_ID_KEXT + 1)
});
887
/*!
 * @macro ZONE_VIEW_DECLARE
 *
 * @abstract
 * (optionally) declares a zone view (in a header).
 *
 * @discussion
 * The view is declared as a one-element array so that @c var decays to a
 * pointer and can be passed where a view pointer is expected, while the
 * storage itself is defined by @c ZONE_VIEW_DEFINE().
 *
 * @param var the name for the zone view.
 */
#define ZONE_VIEW_DECLARE(var) \
	extern struct zone_view var[1]
898
899 /*!
900 * @macro ZONE_VIEW_DEFINE
901 *
902 * @abstract
903 * Defines a given zone view and what it points to.
904 *
905 * @discussion
906 * Zone views can either share a pre-existing zone,
907 * or perform a lookup into a kalloc heap for the zone
908 * backing the bucket of the proper size.
909 *
910 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
911 * as the last rank. If views on zones are created, these must have been
912 * created before this stage.
913 *
914 * This macro should not be used to create zone views from default
915 * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
916 *
917 * @param var the name for the zone view.
918 * @param name a string describing the zone view.
919 * @param heap_or_zone a @c KHEAP_ID_* constant or a pointer to a zone.
920 * @param size the element size to be allocated from this view.
921 */
#define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
	/* storage for the view; remaining fields are filled in at startup */ \
	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
	    .zv_name = name, \
	} }; \
	/* spec consumed by zone_view_startup_init() during STARTUP_SUB_ZALLOC */ \
	static __startup_data struct zone_view_startup_spec \
	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
	    &__startup_zone_view_spec_ ## var)
930
931 #endif /* XNU_KERNEL_PRIVATE */
932
933
934 #ifdef XNU_KERNEL_PRIVATE
935 #pragma mark - XNU only interfaces
936
937 #include <kern/cpu_number.h>
938
939 #pragma GCC visibility push(hidden)
940
941 #pragma mark XNU only: zalloc (extended)
942
/* Alignment masks (alignment - 1), suitable for align_mask parameters below. */
#define ZALIGN_NONE     (sizeof(uint8_t)  - 1)
#define ZALIGN_16       (sizeof(uint16_t) - 1)
#define ZALIGN_32       (sizeof(uint32_t) - 1)
#define ZALIGN_PTR      (sizeof(void *)   - 1)
#define ZALIGN_64       (sizeof(uint64_t) - 1)
/* Natural alignment mask for an arbitrary type @c t. */
#define ZALIGN(t)       (_Alignof(t) - 1)
949
950
951 /*!
952 * @function zalloc_permanent_tag()
953 *
954 * @abstract
955 * Allocates a permanent element from the permanent zone
956 *
957 * @discussion
958 * Memory returned by this function is always 0-initialized.
959 * Note that the size of this allocation can not be determined
960 * by zone_element_size so it should not be used for copyio.
961 *
962 * @param size the element size (must be smaller than PAGE_SIZE)
963 * @param align_mask the required alignment for this allocation
964 * @param tag the tag to use for allocations larger than a page.
965 *
966 * @returns the allocated element
967 */
968 __attribute__((malloc))
969 extern void *__unsafe_indexable zalloc_permanent_tag(
970 vm_size_t size,
971 vm_offset_t align_mask,
972 vm_tag_t tag);
973
/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * @param size the element size (must be smaller than PAGE_SIZE)
 * @param align the required alignment mask for this allocation
 *
 * @returns the allocated element
 */
#define zalloc_permanent(size, align) \
	zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)
992
993 /*!
994 * @function zalloc_permanent_type()
995 *
996 * @abstract
997 * Allocates a permanent element of a given type with its natural alignment.
998 *
999 * @discussion
1000 * Memory returned by this function is always 0-initialized.
1001 *
1002 * @param type_t the element type
1003 *
1004 * @returns the allocated element
1005 */
1006 #define zalloc_permanent_type(type_t) \
1007 __unsafe_forge_single(type_t *, \
1008 zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
1009
1010 /*!
1011 * @function zalloc_first_proc_made()
1012 *
1013 * @abstract
1014 * Declare that the "early" allocation phase is done.
1015 */
1016 extern void
1017 zalloc_first_proc_made(void);
1018
1019 #pragma mark XNU only: per-cpu allocations
1020
1021 /*!
1022 * @macro zpercpu_get_cpu()
1023 *
1024 * @abstract
1025 * Get a pointer to a specific CPU slot of a given per-cpu variable.
1026 *
1027 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1028 * @param cpu the specified CPU number as returned by @c cpu_number()
1029 *
1030 * @returns the per-CPU slot for @c ptr for the specified CPU.
1031 */
/*
 * Per-CPU slots are laid out one page apart (hence the ptoa()),
 * starting at the demangled base address of the per-cpu pointer.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))
1034
1035 /*!
1036 * @macro zpercpu_get()
1037 *
1038 * @abstract
1039 * Get a pointer to the current CPU slot of a given per-cpu variable.
1040 *
1041 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1042 *
1043 * @returns the per-CPU slot for @c ptr for the current CPU.
1044 */
1045 #define zpercpu_get(ptr) \
1046 zpercpu_get_cpu(ptr, cpu_number())
1047
1048 /*!
1049 * @macro zpercpu_foreach()
1050 *
1051 * @abstract
1052 * Enumerate all per-CPU slots by address.
1053 *
1054 * @param it the name for the iterator
1055 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1056 */
1057 #define zpercpu_foreach(it, ptr) \
1058 for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
1059 __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
1060 it < __end_##it; it = __zpcpu_next(it))
1061
1062 /*!
1063 * @macro zpercpu_foreach_cpu()
1064 *
1065 * @abstract
1066 * Enumerate all per-CPU slots by CPU slot number.
1067 *
1068 * @param cpu the name for cpu number iterator.
1069 */
1070 #define zpercpu_foreach_cpu(cpu) \
1071 for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1072
1073 /*!
1074 * @function zalloc_percpu()
1075 *
1076 * @abstract
1077 * Allocates an element from a per-cpu zone.
1078 *
1079 * @discussion
1080 * The returned pointer cannot be used directly and must be manipulated
1081 * through the @c zpercpu_get*() interfaces.
1082 *
1083 * @param zone_or_view the zone or zone view to allocate from
1084 * @param flags a collection of @c zalloc_flags_t.
1085 *
1086 * @returns NULL or the allocated element
1087 */
1088 extern void *__zpercpu zalloc_percpu(
1089 zone_or_view_t zone_or_view,
1090 zalloc_flags_t flags);
1091
1092 /*!
1093 * @function zfree_percpu()
1094 *
1095 * @abstract
1096 * Frees an element previously allocated with @c zalloc_percpu().
1097 *
1098 * @param zone_or_view the zone or zone view to free the element to.
1099 * @param addr the address to free
1100 */
1101 extern void zfree_percpu(
1102 zone_or_view_t zone_or_view,
1103 void *__zpercpu addr);
1104
1105 /*!
1106 * @function zalloc_percpu_permanent()
1107 *
1108 * @abstract
1109 * Allocates a permanent percpu-element from the permanent percpu zone.
1110 *
1111 * @discussion
1112 * Memory returned by this function is always 0-initialized.
1113 *
1114 * @param size the element size (must be smaller than PAGE_SIZE)
1115 * @param align_mask the required alignment for this allocation
1116 *
1117 * @returns the allocated element
1118 */
1119 extern void *__zpercpu zalloc_percpu_permanent(
1120 vm_size_t size,
1121 vm_offset_t align_mask);
1122
1123 /*!
1124 * @function zalloc_percpu_permanent_type()
1125 *
1126 * @abstract
1127 * Allocates a permanent percpu-element from the permanent percpu zone of a given
1128 * type with its natural alignment.
1129 *
1130 * @discussion
1131 * Memory returned by this function is always 0-initialized.
1132 *
1133 * @param type_t the element type
1134 *
1135 * @returns the allocated element
1136 */
1137 #define zalloc_percpu_permanent_type(type_t) \
1138 ((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1139
1140
1141 #pragma mark XNU only: zone creation (extended)
1142
1143 /*!
1144 * @enum zone_reserved_id_t
1145 *
1146 * @abstract
1147 * Well known pre-registered zones, allowing use of zone_id_require()
1148 *
1149 * @discussion
1150 * @c ZONE_ID__* aren't real zone IDs.
1151 *
1152 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1153 * easy a value to produce (by malice or accident).
1154 *
1155 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1156 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
1157 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creations IDs
1158 * listed in @c zone_create_ro_id_t.
1159 *
1160 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1161 * @c zone_create().
1162 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,                  /* reserved so that zone ID 0 is never a valid zone */

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	/* read-only zones: [ZONE_ID__FIRST_RO, ZONE_ID__LAST_RO] (see aliases below) */
	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	/* external read-only zones, mapped 1:1 onto zone_create_ro_id_t */
	ZONE_ID_SANDBOX_RO,
	ZONE_ID_PROFILE_RO,
	ZONE_ID_PROTOBOX,
	ZONE_ID_SB_FILTER,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
	ZONE_ID__LAST_RO_EXT = ZONE_ID_SB_FILTER,
	ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,

	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_TASK,
	ZONE_ID_PROC,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_SELECT_SET,
	ZONE_ID_FILEPROC,

	/* first zone ID available to dynamic zone_create() registration */
	ZONE_ID__FIRST_DYNAMIC,
});
1205
1206 /*!
1207 * @const ZONE_ID_ANY
1208 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1209 * Zone ID.
1210 */
1211 #define ZONE_ID_ANY ((zone_id_t)-1)
1212
1213 /*!
1214 * @const ZONE_ID_INVALID
1215 * An invalid zone_id_t that corresponds to nothing.
1216 */
1217 #define ZONE_ID_INVALID ((zone_id_t)-2)
1218
/*!
 * @function zone_name
 *
 * @param zone the specified zone
 * @returns the name of the specified zone.
 */
const char *__unsafe_indexable zone_name(
	zone_t zone);
1227
/*!
 * @function zone_heap_name
 *
 * @param zone the specified zone
 * @returns the name of the heap this zone is part of, or "".
 */
const char *__unsafe_indexable zone_heap_name(
	zone_t zone);
1236
1237 /*!
1238 * @function zone_create_ext
1239 *
1240 * @abstract
1241 * Creates a zone with the specified parameters.
1242 *
1243 * @discussion
1244 * This is an extended version of @c zone_create().
1245 *
1246 * @param name the name for the new zone.
1247 * @param size the size of the elements returned by this zone.
1248 * @param flags a set of @c zone_create_flags_t flags.
1249 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1250 *
1251 * @param extra_setup a block that can perform non trivial initialization
1252 * on the zone before it is marked valid.
1253 * This block can call advanced setups like:
1254 * - zone_set_exhaustible()
1255 * - zone_set_noexpand()
1256 *
1257 * @returns the created zone, this call never fails.
1258 */
1259 extern zone_t zone_create_ext(
1260 const char *name __unsafe_indexable,
1261 vm_size_t size,
1262 zone_create_flags_t flags,
1263 zone_id_t desired_zid,
1264 void (^extra_setup)(zone_t));
1265
1266 /*!
1267 * @macro ZONE_DECLARE
1268 *
1269 * @abstract
1270 * Declares a zone variable and its associated type.
1271 *
1272 * @param var the name of the variable to declare.
1273 * @param type_t the type of elements in the zone.
1274 */
1275 #define ZONE_DECLARE(var, type_t) \
1276 extern zone_t var; \
1277 __ZONE_DECLARE_TYPE(var, type_t)
1278
1279 /*!
1280 * @macro ZONE_DECLARE_ID
1281 *
1282 * @abstract
1283 * Declares the type associated with a zone ID.
1284 *
1285 * @param id the name of zone ID to associate a type with.
1286 * @param type_t the type of elements in the zone.
1287 */
1288 #define ZONE_DECLARE_ID(id, type_t) \
1289 __ZONE_DECLARE_TYPE(id, type_t)
1290
/*!
 * @macro ZONE_DEFINE
 *
 * @abstract
 * Declares a zone variable to automatically initialize with the specified
 * parameters.
 *
 * @discussion
 * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
 *
 * The zone may not be created with @c ZC_DESTRUCTIBLE (enforced at compile
 * time by the static_assert in the expansion below).
 *
 * @param var the name of the variable to declare.
 * @param name the name for the zone
 * @param size the size of the elements returned by this zone.
 * @param flags a set of @c zone_create_flags_t flags.
 */
#define ZONE_DEFINE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## var)
1314
1315 /*!
1316 * @macro ZONE_DEFINE_TYPE
1317 *
1318 * @abstract
1319 * Defines a zone variable to automatically initialize with the specified
1320 * parameters, associated with a particular type.
1321 *
1322 * @param var the name of the variable to declare.
1323 * @param name the name for the zone
1324 * @param type_t the type of elements in the zone.
1325 * @param flags a set of @c zone_create_flags_t flags.
1326 */
1327 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1328 ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1329 __ZONE_DECLARE_TYPE(var, type_t)
1330
1331 /*!
1332 * @macro ZONE_DEFINE_ID
1333 *
1334 * @abstract
1335 * Initializes a given zone automatically during startup with the specified
1336 * parameters.
1337 *
1338 * @param zid a @c zone_reserved_id_t value.
1339 * @param name the name for the zone
1340 * @param type_t the type of elements in the zone.
1341 * @param flags a set of @c zone_create_flags_t flags.
1342 */
1343 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1344 ZONE_DECLARE_ID(zid, type_t); \
1345 ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
1346
1347 /*!
1348 * @macro ZONE_INIT
1349 *
1350 * @abstract
1351 * Initializes a given zone automatically during startup with the specified
1352 * parameters.
1353 *
1354 * @param var the name of the variable to initialize.
1355 * @param name the name for the zone
1356 * @param size the size of the elements returned by this zone.
1357 * @param flags a set of @c zone_create_flags_t flags.
1358 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1359 * @param extra_setup a block that can perform non trivial initialization
1360 * (@see @c zone_create_ext()).
1361 */
1362 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1363 __ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1364
1365 /*!
1366 * @function zone_id_require
1367 *
1368 * @abstract
 * Requires that a given pointer belong to the specified zone, by ID and size.
1370 *
1371 * @discussion
1372 * The function panics if the check fails as it indicates that the kernel
1373 * internals have been compromised.
1374 *
1375 * This is a variant of @c zone_require() which:
1376 * - isn't sensitive to @c zone_t::elem_size being compromised,
1377 * - is slightly faster as it saves one load and a multiplication.
1378 *
1379 * @warning: zones using foreign memory can't use this interface.
1380 *
1381 * @param zone_id the zone ID the address needs to belong to.
1382 * @param elem_size the size of elements for this zone.
1383 * @param addr the element address to check.
1384 */
1385 extern void zone_id_require(
1386 zone_id_t zone_id,
1387 vm_size_t elem_size,
1388 void *addr __unsafe_indexable);
1389
1390 /*!
1391 * @function zone_id_require_allow_foreign
1392 *
1393 * @abstract
 * Requires that a given pointer belong to the specified zone, by ID and size.
1395 *
1396 * @discussion
1397 * This is a version of @c zone_id_require() that works with zones allowing
1398 * foreign memory.
1399 */
1400 extern void zone_id_require_allow_foreign(
1401 zone_id_t zone_id,
1402 vm_size_t elem_size,
1403 void *addr __unsafe_indexable);
1404
/* Mark the zone as non-expandable; to be called from the zone_create_ext() setup hook */
1406 extern void zone_set_noexpand(
1407 zone_t zone,
1408 vm_size_t max_elements);
1409
/* Mark the zone as exhaustible; to be called from the zone_create_ext() setup hook */
1411 extern void zone_set_exhaustible(
1412 zone_t zone,
1413 vm_size_t max_elements);
1414
1415 /*!
1416 * @function zone_fill_initially
1417 *
1418 * @brief
1419 * Initially fill a non collectable zone to have the specified amount of
1420 * elements.
1421 *
1422 * @discussion
1423 * This function must be called on a non collectable permanent zone before it
1424 * has been used yet.
1425 *
1426 * @param zone The zone to fill.
1427 * @param nelems The number of elements to be able to hold.
1428 */
1429 extern void zone_fill_initially(
1430 zone_t zone,
1431 vm_size_t nelems);
1432
1433 #pragma mark XNU only: PGZ support
1434
1435 /*!
1436 * @function pgz_owned()
1437 *
1438 * @brief
1439 * Returns whether an address is PGZ owned.
1440 *
1441 * @param addr The address to translate.
1442 * @returns Whether it is PGZ owned
1443 */
1444 #if CONFIG_PROB_GZALLOC
1445 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
1446 #else
1447 #define pgz_owned(addr) false
1448 #endif
1449
1450 /*!
1451 * @function pgz_decode()
1452 *
1453 * @brief
1454 * Translates a PGZ protected virtual address to its unprotected
1455 * backing store.
1456 *
1457 * @discussion
1458 * This is exposed so that the VM can lookup the vm_page_t for PGZ protected
1459 * elements since the PGZ protected virtual addresses are maintained by PGZ
 * at the pmap level without the VM's involvement.
1461 *
1462 * "allow_invalid" schemes relying on sequestering also need this
1463 * to perform the locking attempts on the unprotected address.
1464 *
1465 * @param addr The address to translate.
1466 * @param size The object size.
1467 * @returns The unprotected address or @c addr.
1468 */
1469 #if CONFIG_PROB_GZALLOC
1470 #define pgz_decode(addr, size) \
1471 ((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
1472 #else
1473 #define pgz_decode(addr, size) (addr)
1474 #endif
1475
/*!
 * @function pgz_decode_allow_invalid()
 *
 * @brief
 * Translates a PGZ protected virtual address to its unprotected
 * backing store, but doesn't assert it is still allocated/valid.
 *
 * @discussion
 * "allow_invalid" schemes relying on sequestering also need this
 * to perform the locking attempts on the unprotected address.
 *
 * @param addr The address to translate.
 * @param want_zid The expected zone ID for the element.
 * @returns The unprotected address or @c addr.
 */
#if CONFIG_PROB_GZALLOC
#define pgz_decode_allow_invalid(addr, want_zid) \
	((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
#else
/* PGZ disabled: no translation is ever required, pass the address through. */
#define pgz_decode_allow_invalid(addr, want_zid) (addr)
#endif
1497
1498 #pragma mark XNU only: misc & implementation details
1499
/*
 * Startup-time description of a zone to create, consumed by
 * zone_create_startup() via a STARTUP_ARG registration
 * (see ZONE_DEFINE and __ZONE_INIT1).
 */
struct zone_create_startup_spec {
	zone_t *z_var;          /* receives the created zone; may be NULL (see ZONE_INIT) */
	const char *z_name __unsafe_indexable;  /* name for the zone */
	vm_size_t z_size;       /* element size */
	zone_create_flags_t z_flags;    /* zone_create_flags_t creation flags */
	zone_id_t z_zid;        /* desired zone_reserved_id_t, or ZONE_ID_ANY */
	void (^z_setup)(zone_t);        /* optional extra setup block (see zone_create_ext) */
};
1508
1509 extern void zone_create_startup(
1510 struct zone_create_startup_spec *spec);
1511
1512 #define __ZONE_DECLARE_TYPE(var, type_t) \
1513 __attribute__((visibility("hidden"))) \
1514 extern type_t *__zalloc__##var##__type_name
1515
1516 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
1517 static __startup_data struct zone_create_startup_spec \
1518 __startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
1519 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
1520 &__startup_zone_spec_ ## ns)
1521
1522 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
1523 __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
1524
1525 #define __zalloc_cast(namespace, expr) \
1526 __unsafe_forge_single(typeof(__zalloc__##namespace##__type_name), expr)
1527
1528 #define zalloc_id(zid, flags) __zalloc_cast(zid, (zalloc_id)(zid, flags))
1529 #define zalloc_ro(zid, flags) __zalloc_cast(zid, (zalloc_ro)(zid, flags))
1530 #if ZALLOC_TYPE_SAFE
1531 #define zalloc(zov) __zalloc_cast(zov, (zalloc)(zov))
1532 #define zalloc_noblock(zov) __zalloc_cast(zov, (zalloc_noblock)(zov))
1533 #define zalloc_flags(zov, fl) __zalloc_cast(zov, (zalloc_flags)(zov, fl))
1534 #endif
1535
/*
 * Startup-time description of a zone view, consumed by
 * zone_view_startup_init().
 */
struct zone_view_startup_spec {
	zone_view_t zv_view;    /* the view being initialized */
	union {
		/* backing store: either a kalloc heap ID or a zone;
		 * NOTE(review): the active member appears to be chosen by the
		 * registration site — confirm in zone_view_startup_init(). */
		zone_kheap_id_t zv_heapid;
		zone_t *zv_zone;
	};
	vm_size_t zv_size;      /* element size of the view */
};
1544
1545 extern void zone_view_startup_init(
1546 struct zone_view_startup_spec *spec);
1547
1548 extern void zone_userspace_reboot_checks(void);
1549
1550 #if VM_TAG_SIZECLASSES
1551 extern zalloc_flags_t __zone_flags_mix_tag(
1552 zone_t zone,
1553 zalloc_flags_t flags,
1554 vm_allocation_site_t *site) __pure2;
1555 #else
1556 static inline zalloc_flags_t
__zone_flags_mix_tag(zone_t zone,zalloc_flags_t flags,vm_allocation_site_t * site)1557 __zone_flags_mix_tag(
1558 zone_t zone,
1559 zalloc_flags_t flags,
1560 vm_allocation_site_t *site)
1561 {
1562 #pragma unused(zone, site)
1563 return flags;
1564 }
1565 #endif
1566
1567 #if DEBUG || DEVELOPMENT
1568 # if __LP64__
1569 # define ZPCPU_MANGLE_BIT (1ul << 63)
1570 # else /* !__LP64__ */
1571 # define ZPCPU_MANGLE_BIT (1ul << 31)
1572 # endif /* !__LP64__ */
1573 #else /* !(DEBUG || DEVELOPMENT) */
1574 # define ZPCPU_MANGLE_BIT 0ul
1575 #endif /* !(DEBUG || DEVELOPMENT) */
1576
1577 #define __zpcpu_mangle(ptr) (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
1578 #define __zpcpu_demangle(ptr) (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
1579 #define __zpcpu_addr(e) ((vm_address_t)(e))
1580 #define __zpcpu_cast(ptr, e) __unsafe_forge_single(typeof(ptr), e)
1581 #define __zpcpu_next(ptr) __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
1582
1583 /**
1584 * @macro __zpcpu_mangle_for_boot()
1585 *
1586 * @discussion
1587 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
1588 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
1589 * storage marked @c __startup_data and replace it with the proper allocation
1590 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
1591 *
1592 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
1593 * provides the proper mangling of the storage into a "fake" percpu pointer so
1594 * that accesses through @c zpercpu_get() functions properly.
1595 *
1596 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
1597 */
1598 #define __zpcpu_mangle_for_boot(ptr) ({ \
1599 assert(startup_phase < STARTUP_SUB_ZALLOC); \
1600 __zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
1601 })
1602
1603 extern unsigned zpercpu_count(void) __pure2;
1604
1605 #if CONFIG_PROB_GZALLOC
1606
1607 extern vm_offset_t __pgz_decode(
1608 mach_vm_address_t addr,
1609 mach_vm_size_t size);
1610
1611 extern vm_offset_t __pgz_decode_allow_invalid(
1612 vm_offset_t offs,
1613 zone_id_t zid);
1614
1615 #endif
1616 #if CONFIG_ZLEAKS
1617 extern uint32_t zleak_active;
1618 extern vm_size_t zleak_max_zonemap_size;
1619 extern vm_size_t zleak_global_tracking_threshold;
1620 extern vm_size_t zleak_per_zone_tracking_threshold;
1621
1622 extern kern_return_t zleak_update_threshold(
1623 vm_size_t *arg,
1624 uint64_t value);
1625 #endif /* CONFIG_ZLEAKS */
1626
1627 extern uint32_t zone_map_jetsam_limit;
1628
1629 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
1630
1631 extern zone_t percpu_u64_zone;
1632
1633 #pragma GCC visibility pop
1634 #endif /* XNU_KERNEL_PRIVATE */
1635
1636 /*
1637 * This macro is currently used by AppleImage4 (rdar://83924635)
1638 */
1639 #define __zalloc_ptr_load_and_erase(elem) \
1640 os_ptr_load_and_erase(elem)
1641
1642 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1643
1644 #endif /* _KERN_ZALLOC_H_ */
1645
1646 #endif /* KERNEL_PRIVATE */
1647