/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#ifndef _KERN_ZALLOC_INTERNAL_H_
#define _KERN_ZALLOC_INTERNAL_H_

#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>

#include <os/atomic_private.h>
#include <sys/queue.h>
#include <vm/vm_map_internal.h>

#if KASAN
#include <san/kasan.h>
#include <kern/spl.h>
#endif /* KASAN */

/*
 * Disable zalloc zero validation under KASAN as it duplicates
 * the checking KASAN already performs.
 */
#if KASAN
#define ZALLOC_ENABLE_ZERO_CHECK    0
#else
#define ZALLOC_ENABLE_ZERO_CHECK    1
#endif

#if KASAN
#define ZALLOC_ENABLE_LOGGING       0
#elif DEBUG || DEVELOPMENT
#define ZALLOC_ENABLE_LOGGING       1
#else
#define ZALLOC_ENABLE_LOGGING       0
#endif

/*!
 * @file <kern/zalloc_internal.h>
 *
 * @abstract
 * Exposes some guts of zalloc to interact with the VM, debugging, copyio and
 * kalloc subsystems.
 */

__BEGIN_DECLS

#pragma GCC visibility push(hidden)

/*
 * A zone is a collection of fixed size blocks for which there
 * is fast allocation/deallocation access. Kernel routines can
 * use zones to manage data structures dynamically, creating a zone
 * for each type of data structure to be managed.
 */

/*!
 * @typedef zone_pva_t
 *
 * @brief
 * Type used to point to a page virtual address in the zone allocator.
 *
 * @description
 * - Valid pages have the top bit set.
 * - 0 represents the "NULL" page.
 * - Non-zero values with the top bit cleared represent queue heads,
 *   indexed from the beginning of the __DATA section of the kernel
 *   (see zone_pageq_base).
 */
typedef struct zone_packed_virtual_address {
    uint32_t packed_address;
} zone_pva_t;
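
/*
 * For illustration, predicates following the encoding above might look
 * like this (hypothetical helpers; the real accessors are private to
 * zalloc.c and may differ):
 *
 *     static inline bool
 *     zone_pva_is_null(zone_pva_t page)
 *     {
 *         return page.packed_address == 0;
 *     }
 *
 *     static inline bool
 *     zone_pva_is_queue(zone_pva_t page)
 *     {
 *         // non-zero with the top bit clear: a queue head in __DATA
 *         return (int32_t)page.packed_address > 0;
 *     }
 */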

/*!
 * @struct zone_stats
 *
 * @abstract
 * Per-cpu structure used for basic zone stats.
 *
 * @discussion
 * The values aren't scaled for per-cpu zones.
 */
struct zone_stats {
    uint64_t            zs_mem_allocated;
    uint64_t            zs_mem_freed;
    uint64_t            zs_alloc_fail;
    uint32_t            zs_alloc_rr;    /* allocation rr bias */
    uint32_t _Atomic    zs_alloc_not_shared;
};

typedef struct zone_magazine *zone_magazine_t;

/*!
 * @struct zone_depot
 *
 * @abstract
 * Holds a list of full and empty magazines.
 *
 * @discussion
 * The data structure is a "STAILQ" and an "SLIST" combined with counters
 * to know their lengths in O(1). Here is a graphical example:
 *
 *  zd_full  = 3
 *  zd_empty = 1
 *  ╭─── zd_head
 *  │    ╭─ zd_tail
 *  │    ╰───────────────────────────────╮
 *  │  ╭───────╮   ╭───────╮   ╭───────╮ v ╭───────╮
 *  ╰─>│███████┼──>│███████┼──>│███████┼──>│       ┼─> X
 *     ╰───────╯   ╰───────╯   ╰───────╯   ╰───────╯
 */
struct zone_depot {
    uint32_t            zd_full;
    uint32_t            zd_empty;
    zone_magazine_t     zd_head;
    zone_magazine_t    *zd_tail;
};
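
/*
 * A minimal sketch of the O(1) depot append, assuming a hypothetical
 * zm_next linkage field in struct zone_magazine (the real layout is
 * private to zalloc.c):
 *
 *     // append a full magazine at the tail of the full sub-list,
 *     // in front of the empty magazines
 *     mag->zm_next = *depot->zd_tail;
 *     *depot->zd_tail = mag;
 *     depot->zd_tail = &mag->zm_next;
 *     depot->zd_full++;
 *
 * Empty magazines are pushed SLIST-style past that point, which is why
 * only the full sub-list needs a tail pointer.
 */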

/* see https://lemire.me/blog/2019/02/20/more-fun-with-fast-remainders-when-the-divisor-is-a-constant/ */
#define Z_MAGIC_QUO(s)      (((1ull << 32) - 1) / (uint64_t)(s) + 1)
#define Z_MAGIC_ALIGNED(s)  (~0u / (uint32_t)(s) + 1)

/*
 * Returns (offs / size) if offs is small enough
 * and magic = Z_MAGIC_QUO(size)
 */
static inline uint32_t
Z_FAST_QUO(uint64_t offs, uint64_t magic)
{
    return (offs * magic) >> 32;
}

/*
 * Returns (offs % size) if offs is small enough
 * and magic = Z_MAGIC_QUO(size)
 */
static inline uint32_t
Z_FAST_MOD(uint64_t offs, uint64_t magic, uint64_t size)
{
    uint32_t lowbits = (uint32_t)(offs * magic);

    return (lowbits * size) >> 32;
}

/*
 * Returns whether (offs % size) == 0 if offs is small enough
 * and magic = Z_MAGIC_ALIGNED(size)
 */
static inline bool
Z_FAST_ALIGNED(uint64_t offs, uint32_t magic)
{
    return (uint32_t)(offs * magic) < magic;
}
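
/*
 * Worked example of the fast-divide trick for an element size of 48:
 *
 *     magic = Z_MAGIC_QUO(48);                   // 89478486
 *     Z_FAST_QUO(96, magic);                     // (96 * magic) >> 32 == 2
 *     Z_FAST_MOD(100, magic, 48);                // 4, since 100 % 48 == 4
 *     Z_FAST_ALIGNED(96, Z_MAGIC_ALIGNED(48));   // true
 *     Z_FAST_ALIGNED(100, Z_MAGIC_ALIGNED(48));  // false
 *
 * "Small enough" means offs < (1ull << 32) / size, which comfortably
 * covers any chunk-relative offset the allocator computes.
 */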

struct zone_size_params {
    uint32_t            z_align_magic;  /* magic to use with Z_FAST_ALIGNED() */
    uint32_t            z_elem_size;    /* size of an element */
};

struct zone_expand {
    struct zone_expand *ze_next;
    thread_t            ze_thread;
    bool                ze_pg_wait;
    bool                ze_vm_priv;
    bool                ze_clear_priv;
};

#define Z_WMA_UNIT          (1u << 8)
#define Z_WMA_MIX(base, e)  ((3 * (base) + (e) * Z_WMA_UNIT) / 4)
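
/*
 * Z_WMA_MIX computes a 75%/25% exponential moving average in 8-bit fixed
 * point (Z_WMA_UNIT represents 1.0). For example, folding a new sample of
 * 8 into a current average of 4.0:
 *
 *     base = 4 * Z_WMA_UNIT;   // 1024, i.e. 4.0
 *     Z_WMA_MIX(base, 8);      // (3 * 1024 + 8 * 256) / 4 == 1280, i.e. 5.0
 */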

struct zone {
    /*
     * Readonly / rarely written fields
     */

    /*
     * The first 4 fields match a zone_view.
     *
     * z_self points back to the zone when the zone is initialized,
     * and is NULL otherwise.
     */
    struct zone        *z_self;
    zone_stats_t        z_stats;
    const char         *z_name;
    struct zone_view   *z_views;
    struct zone_expand *z_expander;

    uint64_t            z_quo_magic;
    uint32_t            z_align_magic;
    uint16_t            z_elem_size;
    uint16_t            z_elem_offs;
    uint16_t            z_chunk_pages;
    uint16_t            z_chunk_elems;

    uint32_t /* 32 bits */
    /*
     * Lifecycle state (mutable after creation)
     */
        z_destroyed        :1, /* zone is (being) destroyed */
        z_async_refilling  :1, /* asynchronous allocation pending? */
        z_depot_cleanup    :1, /* per-cpu depots need cleaning */
        z_expanding_wait   :1, /* is a thread waiting for expansion? */
        z_exhausted_wait   :1, /* are threads waiting for exhaustion end? */
        z_exhausts         :1, /* whether the zone exhausts by design */

    /*
     * Behavior configuration bits
     */
        z_percpu           :1, /* the zone is percpu */
        z_smr              :1, /* the zone uses SMR */
        z_permanent        :1, /* the zone allocations are permanent */
        z_nocaching        :1, /* disallow zone caching for this zone */
        collectable        :1, /* garbage collect empty pages */
        no_callout         :1,
        z_destructible     :1, /* zone can be zdestroy()ed */

        _reserved          :6,

    /*
     * Debugging features
     */
        z_pgz_tracked      :1, /* this zone is tracked by pgzalloc */
        z_pgz_use_guards   :1, /* this zone uses guards with PGZ */
        z_kasan_fakestacks :1,
        z_kasan_quarantine :1, /* whether to use the kasan quarantine */
        z_tags_sizeclass   :6, /* idx into zone_tags_sizeclasses to associate
                                * a sizeclass for a particular kalloc tag */
        z_uses_tags        :1,
        z_log_on           :1, /* zone logging was enabled by boot-arg */
        z_tbi_tag          :1; /* zone supports TBI tagging */

    uint8_t z_cacheline1[0] __attribute__((aligned(64)));

    /*
     * Zone caching / recirculation cacheline
     *
     * z_recirc* fields are protected by the recirculation lock.
     *
     * z_recirc_cont_wma:
     *   weighted moving average of the number of contentions per second,
     *   in Z_WMA_UNIT units (fixed point decimal).
     *
     * z_recirc_cont_cur:
     *   count of recorded contentions that will be folded into
     *   z_recirc_cont_wma at the next period.
     *
     *   Note: if caching is disabled,
     *         this field is used under the zone lock.
     *
     * z_elems_free_{min,wma} (overloaded on z_recirc_empty*):
     *   tracks the history of the minimum values of z_elems_free over time,
     *   with "min" being the minimum it hit for the current period,
     *   and "wma" the weighted moving average of those values.
     *
     *   This field is used if z_pcpu_cache is NULL,
     *   otherwise it aliases with z_recirc_empty_{min,wma}.
     *
     * z_recirc_{full,empty}_{min,wma}:
     *   tracks the history of the minimum number of full/empty
     *   magazines in the depot over time, with "min" being the minimum
     *   it hit for the current period, and "wma" the weighted moving
     *   average of those values.
     */
    struct zone_cache *__zpercpu z_pcpu_cache;
    struct zone_depot   z_recirc;

    hw_lck_ticket_t     z_recirc_lock;
    uint32_t            z_recirc_full_min;
    uint32_t            z_recirc_full_wma;
    union {
        uint32_t        z_recirc_empty_min;
        uint32_t        z_elems_free_min;
    };
    union {
        uint32_t        z_recirc_empty_wma;
        uint32_t        z_elems_free_wma;
    };
    uint32_t            z_recirc_cont_cur;
    uint32_t            z_recirc_cont_wma;

    uint16_t            z_depot_size;
    uint16_t            z_depot_limit;

    uint8_t z_cacheline2[0] __attribute__((aligned(64)));

    /*
     * often mutated fields
     */

    hw_lck_ticket_t     z_lock;

    /*
     * Page accounting (wired / VA)
     *
     * Those numbers are unscaled for z_percpu zones
     * (zone_scale_for_percpu() needs to be used to find the true value).
     */
    uint32_t            z_wired_max;    /* how large can this zone grow */
    uint32_t            z_wired_hwm;    /* z_wired_cur high watermark */
    uint32_t            z_wired_cur;    /* number of pages used by this zone */
    uint32_t            z_wired_empty;  /* pages collectable by GC */
    uint32_t            z_va_cur;       /* amount of VA used by this zone */

    /*
     * list of metadata structs, which maintain per-page free element lists
     */
    zone_pva_t          z_pageq_empty;   /* populated, completely empty pages */
    zone_pva_t          z_pageq_partial; /* populated, partially filled pages */
    zone_pva_t          z_pageq_full;    /* populated, completely full pages */
    zone_pva_t          z_pageq_va;      /* non-populated VA pages */

    /*
     * Zone statistics
     *
     * z_elems_avail:
     *   total number of elements in the zone.
     */
    uint32_t            z_elems_free;   /* number of free elements */
    uint32_t            z_elems_avail;  /* total number of elements */
    uint32_t            z_elems_rsv;
    uint32_t            z_array_size_class;

    struct zone        *z_kt_next;

    uint8_t z_cacheline3[0] __attribute__((aligned(64)));

#if KASAN_CLASSIC
    uint16_t            z_kasan_redzone;
    spl_t               z_kasan_spl;
#endif
#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
    /*
     * The allocation logs are used when:
     *
     * - zlog<n>= boot-args are used (and then z_log_on is set)
     *
     * - the leak detection was triggered for the zone.
     *   In that case, the log can't ever be freed,
     *   but it can be enabled/disabled dynamically.
     */
    struct btlog       *z_btlog;
    struct btlog       *z_btlog_disabled;
#endif
} __attribute__((aligned(64)));

/*!
 * @typedef zone_security_flags_t
 *
 * @brief
 * Type used to store the immutable security properties of a zone.
 *
 * @description
 * These properties influence the security nature of a zone and can't be
 * modified after lockdown.
 */
typedef struct zone_security_flags {
    uint16_t
    /*
     * Security sensitive configuration bits
     */
        z_submap_idx      :8, /* a Z_SUBMAP_IDX_* value */
        z_kheap_id        :2, /* zone_kheap_id_t when part of a kalloc heap */
        z_kalloc_type     :1, /* zone does type-based segregation */
        z_lifo            :1, /* depot and recirculation layer are LIFO */
        z_pgz_use_guards  :1, /* this zone uses guards with PGZ */
        z_submap_from_end :1, /* allocate from the left or the right? */
        z_noencrypt       :1, /* do not encrypt pages when hibernating */
        z_unused          :1;
    /*
     * Signature equivalence zone
     */
    zone_id_t z_sig_eq;
} zone_security_flags_t;

/*
 * Zsecurity config to enable strict free of iokit objects to the zone
 * or heap they were allocated from.
 *
 * Turn ZSECURITY_OPTIONS_STRICT_IOKIT_FREE off on x86 so as not to
 * break third-party kexts that haven't yet been recompiled
 * to use the new iokit macros.
 */
#if XNU_PLATFORM_MacOSX && __x86_64__
# define ZSECURITY_CONFIG_STRICT_IOKIT_FREE     OFF
#else
# define ZSECURITY_CONFIG_STRICT_IOKIT_FREE     ON
#endif

/*
 * Zsecurity config to enable the read-only allocator
 */
#if KASAN_CLASSIC
# define ZSECURITY_CONFIG_READ_ONLY             OFF
#else
# define ZSECURITY_CONFIG_READ_ONLY             ON
#endif

/*
 * Zsecurity config to enable making heap feng-shui
 * less reliable.
 */
#if KASAN_CLASSIC
# define ZSECURITY_CONFIG_SAD_FENG_SHUI         OFF
# define ZSECURITY_CONFIG_GENERAL_SUBMAPS       1
#else
# define ZSECURITY_CONFIG_SAD_FENG_SHUI         ON
# define ZSECURITY_CONFIG_GENERAL_SUBMAPS       4
#endif

/*
 * Zsecurity config to enable adjusting of elements
 * with PGZ-OOB to right-align them in their space.
 */
#if KASAN || defined(__x86_64__) || CONFIG_KERNEL_TAGGING
# define ZSECURITY_CONFIG_PGZ_OOB_ADJUST        OFF
#else
# define ZSECURITY_CONFIG_PGZ_OOB_ADJUST        ON
#endif

/*
 * Zsecurity config to enable kalloc type segregation
 */
#if XNU_TARGET_OS_WATCH || KASAN_CLASSIC
# define ZSECURITY_CONFIG_KT_BUDGET             120
# define ZSECURITY_CONFIG_KT_VAR_BUDGET         6
#else
# define ZSECURITY_CONFIG_KT_BUDGET             260
# define ZSECURITY_CONFIG_KT_VAR_BUDGET         6
#endif

__options_decl(kalloc_type_options_t, uint64_t, {
    /*
     * kalloc type option to switch default accounting to private.
     */
    KT_OPTIONS_ACCT       = 0x00000001,
    /*
     * kalloc type option to print additional stats regarding zone
     * budget distribution and signatures.
     */
    KT_OPTIONS_DEBUG      = 0x00000002,
    /*
     * kalloc type option to allow loose freeing between heaps
     */
    KT_OPTIONS_LOOSE_FREE = 0x00000004,
});

__enum_decl(kt_var_heap_id_t, uint32_t, {
    /*
     * Fake "data" heap used to link views of data-only allocations that
     * have been redirected to KHEAP_DATA_BUFFERS
     */
    KT_VAR_DATA_HEAP,
    /*
     * Heaps for pointer arrays
     */
    KT_VAR_PTR_HEAP0,
    KT_VAR_PTR_HEAP1,
    /*
     * Indicates the first additional heap added
     */
    KT_VAR__FIRST_FLEXIBLE_HEAP,
});

/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VM
 *   this map has the special property that its allocations
 *   can be done without ever locking the submap, and doesn't use
 *   VM entries in the map (which limits certain VM map operations on it).
 *
 *   On ILP32 a single zone lives here (the vm_map_entry_reserved_zone).
 *
 *   On LP64 it is also used to restrict VM allocations to the lower part
 *   of the kernel VA space, for pointer packing purposes.
 *
 * Z_SUBMAP_IDX_GENERAL_{0,1,2,3}
 *   used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_DATA
 *   used to sequester bags of bytes from all other allocations and allow
 *   VA reuse within the map
 *
 * Z_SUBMAP_IDX_READ_ONLY
 *   used for the read-only allocator
 */
__enum_decl(zone_submap_idx_t, uint32_t, {
    Z_SUBMAP_IDX_VM,
    Z_SUBMAP_IDX_READ_ONLY,
    Z_SUBMAP_IDX_GENERAL_0,
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
    Z_SUBMAP_IDX_GENERAL_1,
    Z_SUBMAP_IDX_GENERAL_2,
    Z_SUBMAP_IDX_GENERAL_3,
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
    Z_SUBMAP_IDX_DATA,

    Z_SUBMAP_IDX_COUNT,
});

#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN)

/*
 * Variable kalloc_type heap config
 */
struct kheap_info {
    zone_id_t               kh_zstart;
    kalloc_heap_t           kh_views;
    kalloc_type_var_view_t  kt_views;
};
typedef union kalloc_type_views {
    struct kalloc_type_view     *ktv_fixed;
    struct kalloc_type_var_view *ktv_var;
} kalloc_type_views_t;

#define KT_VAR_MAX_HEAPS    8
#define MAX_ZONES           690
extern struct kheap_info        kalloc_type_heap_array[KT_VAR_MAX_HEAPS];
extern zone_id_t _Atomic        num_zones;
extern uint32_t                 zone_view_count;
extern struct zone              zone_array[MAX_ZONES];
extern struct zone_size_params  zone_ro_size_params[ZONE_ID__LAST_RO + 1];
extern zone_security_flags_t    zone_security_array[];
extern const char * const       kalloc_heap_names[KHEAP_ID_COUNT];
extern mach_memory_info_t      *panic_kext_memory_info;
extern vm_size_t                panic_kext_memory_size;
extern vm_offset_t              panic_fault_address;
extern uint16_t                 _zc_mag_size;

#define zone_index_foreach(i) \
    for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
        i < num_zones_##i; i++)

#define zone_foreach(z) \
    for (zone_t z = &zone_array[1], \
        last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
        z < last_zone_##z; z++)
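
/*
 * Both macros iterate zones [1, num_zones), skipping index 0 which is
 * never a regular zone. A hypothetical debugging dump might look like:
 *
 *     zone_index_foreach(zid) {
 *         zone_t z = &zone_array[zid];
 *
 *         printf("%s: %u free\n", z->z_name, z->z_elems_free);
 *     }
 */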

__abortlike
extern void zone_invalid_panic(zone_t zone);

__pure2
static inline zone_id_t
zone_index(zone_t z)
{
    unsigned long delta;
    uint64_t quo;

    delta = (unsigned long)z - (unsigned long)zone_array;
    if (delta >= MAX_ZONES * sizeof(*z)) {
        zone_invalid_panic(z);
    }
    quo = Z_FAST_QUO(delta, Z_MAGIC_QUO(sizeof(*z)));
    __builtin_assume(quo < MAX_ZONES);
    return (zone_id_t)quo;
}

__pure2
static inline bool
zone_is_ro(zone_t zone)
{
    return zone >= &zone_array[ZONE_ID__FIRST_RO] &&
           zone <= &zone_array[ZONE_ID__LAST_RO];
}

static inline bool
zone_addr_size_crosses_page(mach_vm_address_t addr, mach_vm_size_t size)
{
    return atop(addr ^ (addr + size - 1)) != 0;
}

__pure2
static inline uint16_t
zone_elem_redzone(zone_t zone)
{
#if KASAN_CLASSIC
    return zone->z_kasan_redzone;
#else
    (void)zone;
    return 0;
#endif
}

__pure2
static inline uint16_t
zone_elem_inner_offs(zone_t zone)
{
    return zone->z_elem_offs;
}

__pure2
static inline uint16_t
zone_elem_outer_offs(zone_t zone)
{
    return zone_elem_inner_offs(zone) - zone_elem_redzone(zone);
}

__pure2
static inline vm_offset_t
zone_elem_inner_size(zone_t zone)
{
    return zone->z_elem_size;
}

__pure2
static inline vm_offset_t
zone_elem_outer_size(zone_t zone)
{
    return zone_elem_inner_size(zone) + zone_elem_redzone(zone);
}

__pure2
static inline zone_security_flags_t
zone_security_config(zone_t z)
{
    zone_id_t zid = zone_index(z);
    return zone_security_array[zid];
}

static inline uint32_t
zone_count_free(zone_t zone)
{
    return zone->z_elems_free + zone->z_recirc.zd_full * _zc_mag_size;
}

static inline uint32_t
zone_count_allocated(zone_t zone)
{
    return zone->z_elems_avail - zone_count_free(zone);
}

static inline vm_size_t
zone_scale_for_percpu(zone_t zone, vm_size_t size)
{
    if (zone->z_percpu) {
        size *= zpercpu_count();
    }
    return size;
}

static inline vm_size_t
zone_size_wired(zone_t zone)
{
    /*
     * This either requires the zone lock,
     * or must only be used for statistics purposes.
     */
    vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed));
    return zone_scale_for_percpu(zone, size);
}

static inline vm_size_t
zone_size_free(zone_t zone)
{
    return zone_scale_for_percpu(zone,
               zone_elem_inner_size(zone) * zone_count_free(zone));
}

/* Under KASAN builds, this also accounts for quarantined elements. */
static inline vm_size_t
zone_size_allocated(zone_t zone)
{
    return zone_scale_for_percpu(zone,
               zone_elem_inner_size(zone) * zone_count_allocated(zone));
}

static inline vm_size_t
zone_size_wasted(zone_t zone)
{
    return zone_size_wired(zone) - zone_scale_for_percpu(zone,
               zone_elem_outer_size(zone) * zone->z_elems_avail);
}

__pure2
static inline bool
zone_exhaustible(zone_t zone)
{
    return zone->z_wired_max != ~0u;
}

__pure2
static inline bool
zone_exhausted(zone_t zone)
{
    return zone->z_wired_cur >= zone->z_wired_max;
}

/*
 * Set and get the signature equivalence for the given zone
 */
extern void zone_set_sig_eq(zone_t zone, zone_id_t sig_eq);
extern zone_id_t zone_get_sig_eq(zone_t zone);

/*
 * Return the accumulated allocated memory on the given zone stats
 */
static inline vm_size_t
zone_stats_get_mem_allocated(zone_stats_t stats)
{
    return stats->zs_mem_allocated;
}

/*
 * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check
 * if a userspace reboot is needed. The only other way to query for this
 * information is via mach_memory_info(), which is unavailable on release
 * kernels.
 */
extern uint64_t get_zones_collectable_bytes(void);

/*!
 * @enum zone_gc_level_t
 *
 * @const ZONE_GC_TRIM
 * Request a trimming GC: it will trim allocations in excess
 * of the working set size estimate only.
 *
 * @const ZONE_GC_DRAIN
 * Request a draining GC: this is an aggressive mode that will
 * cause all caches to be drained and all free pages returned to the system.
 *
 * @const ZONE_GC_JETSAM
 * Request to consider a jetsam, and then fall back to @c ZONE_GC_TRIM or
 * @c ZONE_GC_DRAIN depending on the state of the zone map.
 * To avoid deadlocks, only @c vm_pageout_garbage_collect() should ever
 * request a @c ZONE_GC_JETSAM level.
 */
__enum_closed_decl(zone_gc_level_t, uint32_t, {
    ZONE_GC_TRIM,
    ZONE_GC_DRAIN,
    ZONE_GC_JETSAM,
});

/*!
 * @function zone_gc
 *
 * @brief
 * Reduces memory used by zones by trimming caches and freelists.
 *
 * @discussion
 * @c zone_gc() is called:
 * - by the pageout daemon when the system needs more free pages.
 * - by the VM when contiguous page allocation requests get stuck
 *   (see vm_page_find_contiguous()).
 *
 * @param level     The zone GC level requested.
 */
extern void zone_gc(zone_gc_level_t level);

extern void zone_gc_trim(void);
extern void zone_gc_drain(void);

#define ZONE_WSS_UPDATE_PERIOD 15

/*!
 * @function compute_zone_working_set_size
 *
 * @brief
 * Recomputes the working set size for every zone.
 *
 * @discussion
 * This runs about every @c ZONE_WSS_UPDATE_PERIOD seconds (15),
 * computing an exponential moving average with a weight of 75%,
 * so that the history of the last minute is the dominating factor.
 */
extern void compute_zone_working_set_size(void *);

/* Debug logging for zone-map-exhaustion jetsams. */
extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);

/* Bootstrap zone module (create zone zone) */
extern void zone_bootstrap(void);

/* Force-enable caching on a zone, generally unsafe to call directly */
extern void zone_enable_caching(zone_t zone);

/*!
 * @function zone_early_mem_init
 *
 * @brief
 * Steal memory from pmap (prior to initialization of zalloc)
 * for the special vm zones that allow bootstrap memory, and store
 * the range so as to facilitate range checking in zfree.
 *
 * @param size      the size to steal (must be a page multiple)
 */
__startup_func
extern vm_offset_t zone_early_mem_init(
    vm_size_t       size);

/*!
 * @function zone_get_early_alloc_size
 *
 * @brief
 * Compute the correct size (sufficient for at least @c min_elems elements)
 * that is a multiple of the allocation granule for the zone with the given
 * creation flags and element size.
 */
__startup_func
extern vm_size_t zone_get_early_alloc_size(
    const char         *name __unused,
    vm_size_t           elem_size,
    zone_create_flags_t flags,
    vm_size_t           min_elems);

/*!
 * @function zone_cram_early
 *
 * @brief
 * Cram memory allocated with @c zone_early_mem_init() into a zone.
 *
 * @param zone      The zone to cram memory into.
 * @param newmem    The base address for the memory to cram.
 * @param size      The size of the memory to cram into the zone.
 */
__startup_func
extern void zone_cram_early(
    zone_t          zone,
    vm_offset_t     newmem,
    vm_size_t       size);

extern bool zone_maps_owned(
    vm_address_t    addr,
    vm_size_t       size);

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(
    vm_address_t    addr,
    vm_size_t       size);
#endif /* KASAN_LIGHT */

extern void zone_map_sizes(
    vm_map_size_t  *psize,
    vm_map_size_t  *pfree,
    vm_map_size_t  *plargest_free);

extern bool zone_map_nearing_exhaustion(void);

static inline vm_tag_t
zalloc_flags_get_tag(zalloc_flags_t flags)
{
    return (vm_tag_t)((flags & Z_VM_TAG_MASK) >> Z_VM_TAG_SHIFT);
}

extern struct kalloc_result zalloc_ext(
    zone_t          zone,
    zone_stats_t    zstats,
    zalloc_flags_t  flags);

#if KASAN
#define ZFREE_PACK_SIZE(esize, usize)   (((uint64_t)(usize) << 32) | (esize))
#define ZFREE_ELEM_SIZE(combined)       ((uint32_t)(combined))
#define ZFREE_USER_SIZE(combined)       ((combined) >> 32)
#else
#define ZFREE_PACK_SIZE(esize, usize)   (esize)
#define ZFREE_ELEM_SIZE(combined)       (combined)
#endif
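
/*
 * For example, a free path under KASAN can carry both the zone element
 * size (64) and the user-requested size (48) in a single argument:
 *
 *     uint64_t combined = ZFREE_PACK_SIZE(64, 48);
 *     ZFREE_ELEM_SIZE(combined);   // 64
 *     ZFREE_USER_SIZE(combined);   // 48 (KASAN builds only)
 *
 * On non-KASAN builds the element size is passed through unchanged.
 */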

extern void zfree_ext(
    zone_t          zone,
    zone_stats_t    zstats,
    void           *addr,
    uint64_t        combined_size);

extern zone_id_t zone_id_for_element(
    void           *addr,
    vm_size_t       esize);

#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
extern void *zone_element_pgz_oob_adjust(
    void           *addr,
    vm_size_t       req_size,
    vm_size_t       elem_size);
#endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */

extern void zone_element_bounds_check(
    vm_address_t    addr,
    vm_size_t       len);

extern vm_size_t zone_element_size(
    void           *addr,
    zone_t         *z,
    bool            clear_oob,
    vm_offset_t    *oob_offs);

/*!
 * @function zone_spans_ro_va
 *
 * @abstract
 * This function is used to check whether the specified address range
 * spans through the read-only zone range.
 *
 * @discussion
 * This only checks for the range specified within ZONE_ADDR_READONLY.
 * The parameters addr_start and addr_end are stripped of PAC bits
 * before the check is made.
 */
extern bool zone_spans_ro_va(
    vm_offset_t     addr_start,
    vm_offset_t     addr_end);

/*!
 * @function __zalloc_ro_mut_atomic
 *
 * @abstract
 * This function is called from the pmap to perform the specified atomic
 * operation on memory from the read-only allocator.
 *
 * @discussion
 * This function is for internal use only and should not be called directly.
 */
static inline uint64_t
__zalloc_ro_mut_atomic(vm_offset_t dst, zro_atomic_op_t op, uint64_t value)
{
#define __ZALLOC_RO_MUT_OP(op, op2) \
    case ZRO_ATOMIC_##op##_8: \
        return os_atomic_##op2((uint8_t *)dst, (uint8_t)value, seq_cst); \
    case ZRO_ATOMIC_##op##_16: \
        return os_atomic_##op2((uint16_t *)dst, (uint16_t)value, seq_cst); \
    case ZRO_ATOMIC_##op##_32: \
        return os_atomic_##op2((uint32_t *)dst, (uint32_t)value, seq_cst); \
    case ZRO_ATOMIC_##op##_64: \
        return os_atomic_##op2((uint64_t *)dst, (uint64_t)value, seq_cst)

    switch (op) {
        __ZALLOC_RO_MUT_OP(OR, or_orig);
        __ZALLOC_RO_MUT_OP(XOR, xor_orig);
        __ZALLOC_RO_MUT_OP(AND, and_orig);
        __ZALLOC_RO_MUT_OP(ADD, add_orig);
        __ZALLOC_RO_MUT_OP(XCHG, xchg);
    default:
        panic("%s: Invalid atomic operation: %d", __func__, op);
    }

#undef __ZALLOC_RO_MUT_OP
}
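
/*
 * A hypothetical pmap-side use, bumping a 32-bit refcount inside a
 * read-only allocated element (names are illustrative only):
 *
 *     uint64_t prev = __zalloc_ro_mut_atomic(
 *         (vm_offset_t)&ro_elem->re_refcnt, ZRO_ATOMIC_ADD_32, 1);
 *
 * The *_orig operations return the previous value, matching the
 * os_atomic_*_orig primitives they expand to.
 */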

/*!
 * @function zone_owns
 *
 * @abstract
 * This function is a soft version of zone_require: it checks whether a
 * given pointer belongs to the specified zone. It should not be used
 * outside allocator code.
 *
 * @discussion
 * Note that zone_owns() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone      the zone the address needs to belong to.
 * @param addr      the element address to check.
 */
extern bool zone_owns(
    zone_t          zone,
    void           *addr);

/*!
 * @function zone_submap
 *
 * @param zsflags   the security flags of a specified zone.
 * @returns         the zone (sub)map this zone allocates from.
 */
__pure2
extern vm_map_t zone_submap(
    zone_security_flags_t   zsflags);

#ifndef VM_TAG_SIZECLASSES
#error "VM_TAG_SIZECLASSES should be defined"
#endif
#if VM_TAG_SIZECLASSES

extern uint16_t zone_index_from_tag_index(
    uint32_t        tag_zone_index);

#endif /* VM_TAG_SIZECLASSES */

extern lck_grp_t zone_locks_grp;

static inline void
zone_lock(zone_t zone)
{
#if KASAN_FAKESTACK
    spl_t s = 0;
    if (zone->z_kasan_fakestacks) {
        s = splsched();
    }
#endif /* KASAN_FAKESTACK */
    hw_lck_ticket_lock(&zone->z_lock, &zone_locks_grp);
#if KASAN_FAKESTACK
    zone->z_kasan_spl = s;
#endif /* KASAN_FAKESTACK */
}

static inline void
zone_unlock(zone_t zone)
{
#if KASAN_FAKESTACK
    spl_t s = zone->z_kasan_spl;
    zone->z_kasan_spl = 0;
#endif /* KASAN_FAKESTACK */
    hw_lck_ticket_unlock(&zone->z_lock);
#if KASAN_FAKESTACK
    if (zone->z_kasan_fakestacks) {
        splx(s);
    }
#endif /* KASAN_FAKESTACK */
}

#define MAX_ZONE_NAME   32  /* max length of a zone name we can take from the boot-args */

extern int track_this_zone(const char *zonename, const char *logname);
extern bool panic_include_kalloc_types;
extern zone_t kalloc_type_src_zone;
extern zone_t kalloc_type_dst_zone;

#if DEBUG || DEVELOPMENT
extern vm_size_t zone_element_info(void *addr, vm_tag_t *ptag);
#endif /* DEBUG || DEVELOPMENT */

#pragma GCC visibility pop

__END_DECLS

#endif /* _KERN_ZALLOC_INTERNAL_H_ */