/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#ifndef _KERN_ZALLOC_INTERNAL_H_
#define _KERN_ZALLOC_INTERNAL_H_

#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>

#include <os/atomic_private.h>
#include <sys/queue.h>
#include <vm/vm_map_internal.h>

#if KASAN
#include <san/kasan.h>
#include <kern/spl.h>
#endif /* KASAN */

/*
 * Disable zalloc zero validation under kasan as it is
 * double-duty with what kasan already does.
 */
#if KASAN
#define ZALLOC_ENABLE_ZERO_CHECK    0
#else
#define ZALLOC_ENABLE_ZERO_CHECK    1
#endif

#if KASAN
#define ZALLOC_ENABLE_LOGGING       0
#elif DEBUG || DEVELOPMENT
#define ZALLOC_ENABLE_LOGGING       1
#else
#define ZALLOC_ENABLE_LOGGING       0
#endif

/*!
 * @file <kern/zalloc_internal.h>
 *
 * @abstract
 * Exposes some guts of zalloc to interact with the VM, debugging, copyio and
 * kalloc subsystems.
 */

__BEGIN_DECLS

#pragma GCC visibility push(hidden)

/*
 * A zone is a collection of fixed size blocks for which there
 * is fast allocation/deallocation access. Kernel routines can
 * use zones to manage data structures dynamically, creating a zone
 * for each type of data structure to be managed.
 */

/*!
 * @typedef zone_pva_t
 *
 * @brief
 * Type used to point to a page virtual address in the zone allocator.
 *
 * @description
 * - Valid pages have the top bit set.
 * - 0 represents the "NULL" page.
 * - Non-zero values with the top bit cleared represent queue heads,
 *   indexed from the beginning of the __DATA section of the kernel
 *   (see zone_pageq_base).
 */
typedef struct zone_packed_virtual_address {
	uint32_t packed_address;
} zone_pva_t;
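
/*
 * Illustrative sketch (hypothetical helpers, not part of this header):
 * how the zone_pva_t encoding above can be decoded. The real predicates
 * live with the rest of the zalloc implementation.
 */
#if 0
static inline bool
zone_pva_is_null_example(zone_pva_t page)
{
	return page.packed_address == 0;                 /* the "NULL" page */
}

static inline bool
zone_pva_is_page_example(zone_pva_t page)
{
	return (page.packed_address & 0x80000000u) != 0; /* top bit set: a page */
}

static inline bool
zone_pva_is_queue_example(zone_pva_t page)
{
	/* non-zero with the top bit clear: a queue head in __DATA */
	return page.packed_address != 0 &&
	       (page.packed_address & 0x80000000u) == 0;
}
#endif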

/*!
 * @struct zone_stats
 *
 * @abstract
 * Per-cpu structure used for basic zone stats.
 *
 * @discussion
 * The values aren't scaled for per-cpu zones.
 */
struct zone_stats {
	uint64_t            zs_mem_allocated;
	uint64_t            zs_mem_freed;
	uint64_t            zs_alloc_fail;
	uint32_t            zs_alloc_rr;        /* allocation rr bias */
	uint32_t _Atomic    zs_alloc_not_shared;
};

typedef struct zone_magazine *zone_magazine_t;

/*!
 * @struct zone_depot
 *
 * @abstract
 * Holds a list of full and empty magazines.
 *
 * @discussion
 * The data structure is a "STAILQ" and an "SLIST" combined with counters
 * to know their lengths in O(1). Here is a graphical example:
 *
 *      zd_full = 3
 *      zd_empty = 1
 *      ╭─── zd_head
 *      │ ╭─ zd_tail
 *      │ ╰────────────────────────────────────╮
 *      │    ╭───────╮   ╭───────╮   ╭───────╮ v ╭───────╮
 *      ╰───>│███████┼──>│███████┼──>│███████┼──>│       ┼─> X
 *           ╰───────╯   ╰───────╯   ╰───────╯   ╰───────╯
 */
struct zone_depot {
	uint32_t            zd_full;
	uint32_t            zd_empty;
	zone_magazine_t     zd_head;
	zone_magazine_t    *zd_tail;
};
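
/*
 * Illustrative sketch (hypothetical helper; assumes magazines are chained
 * through a zm_next field, which this header does not declare): how the
 * indirect zd_tail pointer yields O(1) append of a full magazine while the
 * empty magazines keep hanging off the tail, as in the diagram above.
 */
#if 0
static inline void
zone_depot_append_full_example(struct zone_depot *zd, zone_magazine_t mag)
{
	mag->zm_next = *zd->zd_tail;    /* splice in before the empty SLIST */
	*zd->zd_tail = mag;             /* link after the last full magazine */
	zd->zd_tail = &mag->zm_next;    /* the tail pointer now follows mag */
	zd->zd_full += 1;               /* counters keep lengths O(1) */
}
#endif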

/* see https://lemire.me/blog/2019/02/20/more-fun-with-fast-remainders-when-the-divisor-is-a-constant/ */
#define Z_MAGIC_QUO(s)      (((1ull << 32) - 1) / (uint64_t)(s) + 1)
#define Z_MAGIC_ALIGNED(s)  (~0u / (uint32_t)(s) + 1)

/*
 * Returns (offs / size) if offs is small enough
 * and magic = Z_MAGIC_QUO(size)
 */
static inline uint32_t
Z_FAST_QUO(uint64_t offs, uint64_t magic)
{
	return (offs * magic) >> 32;
}

/*
 * Returns (offs % size) if offs is small enough
 * and magic = Z_MAGIC_QUO(size)
 */
static inline uint32_t
Z_FAST_MOD(uint64_t offs, uint64_t magic, uint64_t size)
{
	uint32_t lowbits = (uint32_t)(offs * magic);

	return (lowbits * size) >> 32;
}

/*
 * Returns whether (offs % size) == 0 if offs is small enough
 * and magic = Z_MAGIC_ALIGNED(size)
 */
static inline bool
Z_FAST_ALIGNED(uint64_t offs, uint32_t magic)
{
	return (uint32_t)(offs * magic) < magic;
}
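
/*
 * Illustrative sketch: how the magic constants and fast helpers combine.
 * For 48-byte elements, one 64-bit multiply and a shift replace the
 * division, as long as offsets stay small (page-relative offsets do).
 */
#if 0
static inline void
z_fast_math_example(void)
{
	uint64_t quo_magic   = Z_MAGIC_QUO(48);
	uint32_t align_magic = Z_MAGIC_ALIGNED(48);

	assert(Z_FAST_QUO(100, quo_magic) == 100 / 48);      /* 2 */
	assert(Z_FAST_MOD(100, quo_magic, 48) == 100 % 48);  /* 4 */
	assert(Z_FAST_ALIGNED(96, align_magic));             /* 96 % 48 == 0 */
	assert(!Z_FAST_ALIGNED(100, align_magic));
}
#endif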

struct zone_size_params {
	uint32_t            z_align_magic;  /* magic to use with Z_FAST_ALIGNED() */
	uint32_t            z_elem_size;    /* size of an element                 */
};

struct zone_expand {
	struct zone_expand *ze_next;
	thread_t            ze_thread;
	bool                ze_pg_wait;
	bool                ze_vm_priv;
	bool                ze_clear_priv;
};

#define Z_WMA_UNIT          (1u << 8)
#define Z_WMA_MIX(base, e)  ((3 * (base) + (e) * Z_WMA_UNIT) / 4)
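
/*
 * Illustrative sketch: Z_WMA_MIX() maintains a fixed-point exponential
 * moving average (Z_WMA_UNIT = 256) where history weighs 75% and the new
 * sample 25%. Folding a sample of 8 into an average of 4.0 yields 5.0:
 */
#if 0
static inline void
z_wma_example(void)
{
	uint32_t wma = 4 * Z_WMA_UNIT;  /* 4.0 in fixed point: 1024 */

	wma = Z_WMA_MIX(wma, 8);        /* (3 * 1024 + 8 * 256) / 4 = 1280 */
	assert(wma == 5 * Z_WMA_UNIT);  /* 0.75 * 4 + 0.25 * 8 == 5.0 */
}
#endif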

struct zone {
	/*
	 * Readonly / rarely written fields
	 */

	/*
	 * The first 4 fields match a zone_view.
	 *
	 * z_self points back to the zone when the zone is initialized,
	 * and is NULL otherwise.
	 */
	struct zone        *z_self;
	zone_stats_t        z_stats;
	const char         *z_name;
	struct zone_view   *z_views;
	struct zone_expand *z_expander;

	uint64_t            z_quo_magic;
	uint32_t            z_align_magic;
	uint16_t            z_elem_size;
	uint16_t            z_elem_offs;
	uint16_t            z_chunk_pages;
	uint16_t            z_chunk_elems;

	uint32_t /* 32 bits */
	/*
	 * Lifecycle state (mutable after creation)
	 */
	    z_destroyed        :1, /* zone is (being) destroyed */
	    z_async_refilling  :1, /* asynchronous allocation pending? */
	    z_depot_cleanup    :1, /* per cpu depots need cleaning */
	    z_expanding_wait   :1, /* is thread waiting for expansion? */

	/*
	 * Behavior configuration bits
	 */
	    z_percpu           :1, /* the zone is percpu */
	    z_smr              :1, /* the zone uses SMR */
	    z_permanent        :1, /* the zone allocations are permanent */
	    z_nocaching        :1, /* disallow zone caching for this zone */
	    collectable        :1, /* garbage collect empty pages */
	    exhaustible        :1, /* merely return if empty? */
	    no_callout         :1,
	    z_destructible     :1, /* zone can be zdestroy()ed */

	    _reserved          :7,

	/*
	 * Debugging features
	 */
	    z_pgz_tracked      :1, /* this zone is tracked by pgzalloc */
	    z_pgz_use_guards   :1, /* this zone uses guards with PGZ */
	    z_kasan_fakestacks :1,
	    z_kasan_quarantine :1, /* whether to use the kasan quarantine */
	    z_tags_sizeclass   :6, /* idx into zone_tags_sizeclasses to associate
	                            * sizeclass for a particular kalloc tag */
	    z_uses_tags        :1,
	    z_log_on           :1, /* zone logging was enabled by boot-arg */
	    z_tbi_tag          :1; /* zone supports TBI tagging */

	uint8_t             z_cacheline1[0] __attribute__((aligned(64)));

	/*
	 * Zone caching / recirculation cacheline
	 *
	 * z_recirc* fields are protected by the recirculation lock.
	 *
	 * z_recirc_cont_wma:
	 *   weighted moving average of the number of contentions per second,
	 *   in Z_WMA_UNIT units (fixed point decimal).
	 *
	 * z_recirc_cont_cur:
	 *   count of recorded contentions that will be fused
	 *   in z_recirc_cont_wma at the next period.
	 *
	 *   Note: if caching is disabled,
	 *         this field is used under the zone lock.
	 *
	 * z_elems_free_{min,wma} (overloaded on z_recirc_empty*):
	 *   tracks the history of the minimum values of z_elems_free over time,
	 *   with "min" being the minimum it hit for the current period,
	 *   and "wma" the weighted moving average of those values.
	 *
	 *   This field is used if z_pcpu_cache is NULL,
	 *   otherwise it aliases with z_recirc_empty_{min,wma}.
	 *
	 * z_recirc_{full,empty}_{min,wma}:
	 *   tracks the history of the minimum number of full/empty
	 *   magazines in the depot over time, with "min" being the minimum
	 *   it hit for the current period, and "wma" the weighted moving
	 *   average of those values.
	 */
	struct zone_cache *__zpercpu z_pcpu_cache;
	struct zone_depot   z_recirc;

	hw_lck_ticket_t     z_recirc_lock;
	uint32_t            z_recirc_full_min;
	uint32_t            z_recirc_full_wma;
	union {
		uint32_t    z_recirc_empty_min;
		uint32_t    z_elems_free_min;
	};
	union {
		uint32_t    z_recirc_empty_wma;
		uint32_t    z_elems_free_wma;
	};
	uint32_t            z_recirc_cont_cur;
	uint32_t            z_recirc_cont_wma;

	uint16_t            z_depot_size;
	uint16_t            z_depot_limit;

	uint8_t             z_cacheline2[0] __attribute__((aligned(64)));

	/*
	 * often mutated fields
	 */

	hw_lck_ticket_t     z_lock;

	/*
	 * Page accounting (wired / VA)
	 *
	 * Those numbers are unscaled for z_percpu zones
	 * (zone_scale_for_percpu() needs to be used to find the true value).
	 */
	uint32_t            z_wired_max;    /* how large can this zone grow      */
	uint32_t            z_wired_hwm;    /* z_wired_cur high watermark        */
	uint32_t            z_wired_cur;    /* number of pages used by this zone */
	uint32_t            z_wired_empty;  /* pages collectable by GC           */
	uint32_t            z_va_cur;       /* amount of VA used by this zone    */

	/*
	 * list of metadata structs, which maintain per-page free element lists
	 */
	zone_pva_t          z_pageq_empty;  /* populated, completely empty pages */
	zone_pva_t          z_pageq_partial;/* populated, partially filled pages */
	zone_pva_t          z_pageq_full;   /* populated, completely full pages  */
	zone_pva_t          z_pageq_va;     /* non-populated VA pages            */

	/*
	 * Zone statistics
	 *
	 * z_elems_avail:
	 *   number of elements in the zone (at all).
	 */
	uint32_t            z_elems_free;   /* Number of free elements           */
	uint32_t            z_elems_avail;  /* Number of elements available      */
	uint32_t            z_elems_rsv;
	uint32_t            z_array_size_class;

	struct zone        *z_kt_next;

	uint8_t             z_cacheline3[0] __attribute__((aligned(64)));

#if KASAN_CLASSIC
	uint16_t            z_kasan_redzone;
	spl_t               z_kasan_spl;
#endif

#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
	/*
	 * the allocation logs are used when:
	 *
	 * - zlog<n>= boot-args are used (and then z_log_on is set)
	 *
	 * - the leak detection was triggered for the zone.
	 *   In that case, the log can't ever be freed,
	 *   but it can be enabled/disabled dynamically.
	 */
	struct btlog       *z_btlog;
	struct btlog       *z_btlog_disabled;
#endif
} __attribute__((aligned((64))));

/*!
 * @typedef zone_security_flags_t
 *
 * @brief
 * Type used to store the immutable security properties of a zone.
 *
 * @description
 * These properties influence the security nature of a zone and can't be
 * modified after lockdown.
 */
typedef struct zone_security_flags {
	uint16_t
	/*
	 * Security sensitive configuration bits
	 */
	    z_submap_idx      :8, /* a Z_SUBMAP_IDX_* value */
	    z_kheap_id        :2, /* zone_kheap_id_t when part of a kalloc heap */
	    z_kalloc_type     :1, /* zones that do type-based segregation */
	    z_lifo            :1, /* depot and recirculation layer are LIFO */
	    z_pgz_use_guards  :1, /* this zone uses guards with PGZ */
	    z_submap_from_end :1, /* allocate from the left or the right? */
	    z_noencrypt       :1, /* do not encrypt pages when hibernating */
	    z_unused          :1;
	/*
	 * Signature equivalence zone
	 */
	zone_id_t           z_sig_eq;
} zone_security_flags_t;


/*
 * Zsecurity config to enable strict free of iokit objects to the zone
 * or heap they were allocated from.
 *
 * Turn ZSECURITY_OPTIONS_STRICT_IOKIT_FREE off on x86 so as to not
 * break third party kexts that haven't yet been recompiled
 * to use the new iokit macros.
 */
#if XNU_PLATFORM_MacOSX && __x86_64__
#   define ZSECURITY_CONFIG_STRICT_IOKIT_FREE       OFF
#else
#   define ZSECURITY_CONFIG_STRICT_IOKIT_FREE       ON
#endif

/*
 * Zsecurity config to enable the read-only allocator
 */
#if KASAN_CLASSIC
#   define ZSECURITY_CONFIG_READ_ONLY               OFF
#else
#   define ZSECURITY_CONFIG_READ_ONLY               ON
#endif

/*
 * Zsecurity config to enable making heap feng-shui
 * less reliable.
 */
#if KASAN_CLASSIC
#   define ZSECURITY_CONFIG_SAD_FENG_SHUI           OFF
#   define ZSECURITY_CONFIG_GENERAL_SUBMAPS         1
#else
#   define ZSECURITY_CONFIG_SAD_FENG_SHUI           ON
#   define ZSECURITY_CONFIG_GENERAL_SUBMAPS         4
#endif

/*
 * Zsecurity config to enable adjusting of elements
 * with PGZ-OOB to right-align them in their space.
 */
#if KASAN || defined(__x86_64__)
#   define ZSECURITY_CONFIG_PGZ_OOB_ADJUST          OFF
#else
#   define ZSECURITY_CONFIG_PGZ_OOB_ADJUST          ON
#endif

/*
 * Zsecurity config to enable kalloc type segregation
 */
#if XNU_TARGET_OS_WATCH || KASAN_CLASSIC
#   define ZSECURITY_CONFIG_KT_BUDGET               120
#   define ZSECURITY_CONFIG_KT_VAR_BUDGET           6
#else
#   define ZSECURITY_CONFIG_KT_BUDGET               260
#   define ZSECURITY_CONFIG_KT_VAR_BUDGET           6
#endif

__options_decl(kalloc_type_options_t, uint64_t, {
	/*
	 * kalloc type option to switch default accounting to private.
	 */
	KT_OPTIONS_ACCT       = 0x00000001,
	/*
	 * kalloc type option to print additional stats regarding zone
	 * budget distribution and signatures.
	 */
	KT_OPTIONS_DEBUG      = 0x00000002,
	/*
	 * kalloc type option to allow loose freeing between heaps
	 */
	KT_OPTIONS_LOOSE_FREE = 0x00000004,
});

__enum_decl(kt_var_heap_id_t, uint32_t, {
	/*
	 * Fake "data" heap used to link views of data-only allocations that
	 * have been redirected to KHEAP_DATA_BUFFERS
	 */
	KT_VAR_DATA_HEAP,
	/*
	 * Heaps for pointer arrays
	 */
	KT_VAR_PTR_HEAP0,
	KT_VAR_PTR_HEAP1,
	/*
	 * Indicating first additional heap added
	 */
	KT_VAR__FIRST_FLEXIBLE_HEAP,
});

/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VM
 * this map has the special property that its allocations
 * can be done without ever locking the submap, and doesn't use
 * VM entries in the map (which limits certain VM map operations on it).
 *
 * On ILP32 a single zone lives here (the vm_map_entry_reserved_zone).
 *
 * On LP64 it is also used to restrict VM allocations on LP64 lower
 * in the kernel VA space, for pointer packing purposes.
 *
 * Z_SUBMAP_IDX_GENERAL_{0,1,2,3}
 * used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_DATA
 * used to sequester bags of bytes from all other allocations and allow VA
 * reuse within the map
 *
 * Z_SUBMAP_IDX_READ_ONLY
 * used for the read-only allocator
 */
__enum_decl(zone_submap_idx_t, uint32_t, {
	Z_SUBMAP_IDX_VM,
	Z_SUBMAP_IDX_READ_ONLY,
	Z_SUBMAP_IDX_GENERAL_0,
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
	Z_SUBMAP_IDX_GENERAL_1,
	Z_SUBMAP_IDX_GENERAL_2,
	Z_SUBMAP_IDX_GENERAL_3,
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
	Z_SUBMAP_IDX_DATA,

	Z_SUBMAP_IDX_COUNT,
});
#define KALLOC_MINALIGN     (1 << KALLOC_LOG2_MINALIGN)

/*
 * Variable kalloc_type heap config
 */
struct kheap_info {
	zone_id_t               kh_zstart;
	kalloc_heap_t           kh_views;
	kalloc_type_var_view_t  kt_views;
};
typedef union kalloc_type_views {
	struct kalloc_type_view     *ktv_fixed;
	struct kalloc_type_var_view *ktv_var;
} kalloc_type_views_t;

#define KT_VAR_MAX_HEAPS    8
#define MAX_ZONES           690
extern struct kheap_info    kalloc_type_heap_array[KT_VAR_MAX_HEAPS];
extern zone_id_t _Atomic    num_zones;
extern uint32_t             zone_view_count;
extern struct zone          zone_array[MAX_ZONES];
extern struct zone_size_params zone_ro_size_params[ZONE_ID__LAST_RO + 1];
extern zone_security_flags_t zone_security_array[];
extern const char * const   kalloc_heap_names[KHEAP_ID_COUNT];
extern mach_memory_info_t  *panic_kext_memory_info;
extern vm_size_t            panic_kext_memory_size;
extern vm_offset_t          panic_fault_address;
extern uint16_t             _zc_mag_size;

#define zone_index_foreach(i) \
	for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
	    i < num_zones_##i; i++)

#define zone_foreach(z) \
	for (zone_t z = &zone_array[1], \
	    last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
	    z < last_zone_##z; z++)
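
/*
 * Illustrative sketch: both iterators start at index 1 (slot 0 is never a
 * valid zone) and snapshot num_zones once, so zones registered while the
 * loop runs are not visited. Hypothetical usage:
 */
#if 0
static void
zone_dump_names_example(void)
{
	zone_foreach(z) {
		if (z->z_self == z) {   /* skip uninitialized/destroyed slots */
			printf("%s\n", z->z_name);
		}
	}
}
#endif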

__abortlike
extern void zone_invalid_panic(zone_t zone);

__pure2
static inline zone_id_t
zone_index(zone_t z)
{
	unsigned long delta;
	uint64_t quo;

	delta = (unsigned long)z - (unsigned long)zone_array;
	if (delta >= MAX_ZONES * sizeof(*z)) {
		zone_invalid_panic(z);
	}
	quo = Z_FAST_QUO(delta, Z_MAGIC_QUO(sizeof(*z)));
	__builtin_assume(quo < MAX_ZONES);
	return (zone_id_t)quo;
}

__pure2
static inline bool
zone_is_ro(zone_t zone)
{
	return zone >= &zone_array[ZONE_ID__FIRST_RO] &&
	       zone <= &zone_array[ZONE_ID__LAST_RO];
}

static inline bool
zone_addr_size_crosses_page(mach_vm_address_t addr, mach_vm_size_t size)
{
	return atop(addr ^ (addr + size - 1)) != 0;
}

__pure2
static inline uint16_t
zone_elem_redzone(zone_t zone)
{
#if KASAN_CLASSIC
	return zone->z_kasan_redzone;
#else
	(void)zone;
	return 0;
#endif
}

__pure2
static inline uint16_t
zone_elem_inner_offs(zone_t zone)
{
	return zone->z_elem_offs;
}

__pure2
static inline uint16_t
zone_elem_outer_offs(zone_t zone)
{
	return zone_elem_inner_offs(zone) - zone_elem_redzone(zone);
}

__pure2
static inline vm_offset_t
zone_elem_inner_size(zone_t zone)
{
	return zone->z_elem_size;
}

__pure2
static inline vm_offset_t
zone_elem_outer_size(zone_t zone)
{
	return zone_elem_inner_size(zone) + zone_elem_redzone(zone);
}

__pure2
static inline zone_security_flags_t
zone_security_config(zone_t z)
{
	zone_id_t zid = zone_index(z);
	return zone_security_array[zid];
}

static inline uint32_t
zone_count_free(zone_t zone)
{
	return zone->z_elems_free + zone->z_recirc.zd_full * _zc_mag_size;
}

static inline uint32_t
zone_count_allocated(zone_t zone)
{
	return zone->z_elems_avail - zone_count_free(zone);
}

static inline vm_size_t
zone_scale_for_percpu(zone_t zone, vm_size_t size)
{
	if (zone->z_percpu) {
		size *= zpercpu_count();
	}
	return size;
}

static inline vm_size_t
zone_size_wired(zone_t zone)
{
	/*
	 * this either requires the zone lock,
	 * or is to be used for statistics purposes only.
	 */
	vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed));
	return zone_scale_for_percpu(zone, size);
}

static inline vm_size_t
zone_size_free(zone_t zone)
{
	return zone_scale_for_percpu(zone,
	           zone_elem_inner_size(zone) * zone_count_free(zone));
}

/* Under KASAN builds, this also accounts for quarantined elements. */
static inline vm_size_t
zone_size_allocated(zone_t zone)
{
	return zone_scale_for_percpu(zone,
	           zone_elem_inner_size(zone) * zone_count_allocated(zone));
}

static inline vm_size_t
zone_size_wasted(zone_t zone)
{
	return zone_size_wired(zone) - zone_scale_for_percpu(zone,
	           zone_elem_outer_size(zone) * zone->z_elems_avail);
}

/*
 * Set and get the signature equivalence for the given zone
 */
extern void zone_set_sig_eq(zone_t zone, zone_id_t sig_eq);
extern zone_id_t zone_get_sig_eq(zone_t zone);

/*
 * Return the accumulated allocated memory on the given zone stats
 */
static inline vm_size_t
zone_stats_get_mem_allocated(zone_stats_t stats)
{
	return stats->zs_mem_allocated;
}

/*
 * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check
 * if a userspace reboot is needed. The only other way to query for this
 * information is via mach_memory_info(), which is unavailable on release
 * kernels.
 */
extern uint64_t get_zones_collectable_bytes(void);

/*!
 * @enum zone_gc_level_t
 *
 * @const ZONE_GC_TRIM
 * Request a trimming GC: it will trim allocations in excess
 * of the working set size estimate only.
 *
 * @const ZONE_GC_DRAIN
 * Request a draining GC: this is an aggressive mode that will
 * cause all caches to be drained and all free pages returned to the system.
 *
 * @const ZONE_GC_JETSAM
 * Request to consider a jetsam, and then fallback to @c ZONE_GC_TRIM or
 * @c ZONE_GC_DRAIN depending on the state of the zone map.
 * To avoid deadlocks, only @c vm_pageout_garbage_collect() should ever
 * request a @c ZONE_GC_JETSAM level.
 */
__enum_closed_decl(zone_gc_level_t, uint32_t, {
	ZONE_GC_TRIM,
	ZONE_GC_DRAIN,
	ZONE_GC_JETSAM,
});

/*!
 * @function zone_gc
 *
 * @brief
 * Reduces memory used by zones by trimming caches and freelists.
 *
 * @discussion
 * @c zone_gc() is called:
 * - by the pageout daemon when the system needs more free pages.
 * - by the VM when contiguous page allocation requests get stuck
 *   (see vm_page_find_contiguous()).
 *
 * @param level         The zone GC level requested.
 */
extern void zone_gc(zone_gc_level_t level);

extern void zone_gc_trim(void);
extern void zone_gc_drain(void);

#define ZONE_WSS_UPDATE_PERIOD  15

/*!
 * @function compute_zone_working_set_size
 *
 * @brief
 * Recomputes the working set size for every zone
 *
 * @discussion
 * This runs about every @c ZONE_WSS_UPDATE_PERIOD seconds (15),
 * computing an exponential moving average with a weight of 75%,
 * so that the history of the last minute is the dominating factor.
 */
extern void compute_zone_working_set_size(void *);

/* Debug logging for zone-map-exhaustion jetsams. */
extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);

/* Bootstrap zone module (create zone zone) */
extern void zone_bootstrap(void);

/* Force-enable caching on a zone, generally unsafe to call directly */
extern void zone_enable_caching(zone_t zone);

/*!
 * @function zone_early_mem_init
 *
 * @brief
 * Steal memory from pmap (prior to initialization of zalloc)
 * for the special vm zones that allow bootstrap memory and store
 * the range so as to facilitate range checking in zfree.
 *
 * @param size          the size to steal (must be a page multiple)
 */
__startup_func
extern vm_offset_t zone_early_mem_init(
	vm_size_t       size);

/*!
 * @function zone_get_early_alloc_size
 *
 * @brief
 * Compute the correct size (large enough to hold at least @c min_elems
 * elements) that is a multiple of the allocation granule for the zone with
 * the given creation flags and element size.
 */
__startup_func
extern vm_size_t zone_get_early_alloc_size(
	const char         *name __unused,
	vm_size_t           elem_size,
	zone_create_flags_t flags,
	vm_size_t           min_elems);

/*!
 * @function zone_cram_early
 *
 * @brief
 * Cram memory allocated with @c zone_early_mem_init() into a zone.
 *
 * @param zone          The zone to cram memory into.
 * @param newmem        The base address for the memory to cram.
 * @param size          The size of the memory to cram into the zone.
 */
__startup_func
extern void zone_cram_early(
	zone_t          zone,
	vm_offset_t     newmem,
	vm_size_t       size);

extern bool zone_maps_owned(
	vm_address_t    addr,
	vm_size_t       size);

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(
	vm_address_t    addr,
	vm_size_t       size);
#endif /* KASAN_LIGHT */

extern void zone_map_sizes(
	vm_map_size_t  *psize,
	vm_map_size_t  *pfree,
	vm_map_size_t  *plargest_free);

extern bool zone_map_nearing_exhaustion(void);

static inline vm_tag_t
zalloc_flags_get_tag(zalloc_flags_t flags)
{
	return (vm_tag_t)((flags & Z_VM_TAG_MASK) >> Z_VM_TAG_SHIFT);
}
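
/*
 * Illustrative sketch: VM tags ride in the top bits of zalloc_flags_t.
 * Packing is spelled out manually here from the mask/shift for clarity:
 */
#if 0
static inline void
zalloc_tag_example(void)
{
	zalloc_flags_t flags = Z_WAITOK |
	    ((zalloc_flags_t)VM_KERN_MEMORY_KALLOC << Z_VM_TAG_SHIFT);

	assert(zalloc_flags_get_tag(flags) == VM_KERN_MEMORY_KALLOC);
}
#endif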

extern struct kalloc_result zalloc_ext(
	zone_t          zone,
	zone_stats_t    zstats,
	zalloc_flags_t  flags);

#if KASAN
#define ZFREE_PACK_SIZE(esize, usize)   (((uint64_t)(usize) << 32) | (esize))
#define ZFREE_ELEM_SIZE(combined)       ((uint32_t)(combined))
#define ZFREE_USER_SIZE(combined)       ((combined) >> 32)
#else
#define ZFREE_PACK_SIZE(esize, usize)   (esize)
#define ZFREE_ELEM_SIZE(combined)       (combined)
#endif
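
/*
 * Illustrative sketch: under KASAN, zfree_ext() receives both the element
 * size and the user-requested size packed into one 64-bit value; elsewhere
 * only the element size travels (and ZFREE_USER_SIZE() does not exist).
 */
#if 0
static inline void
zfree_pack_example(void)
{
	uint64_t combined = ZFREE_PACK_SIZE(128, 100); /* esize 128, usize 100 */

	assert(ZFREE_ELEM_SIZE(combined) == 128);
	assert(ZFREE_USER_SIZE(combined) == 100);      /* KASAN builds only */
}
#endif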

extern void zfree_ext(
	zone_t          zone,
	zone_stats_t    zstats,
	void           *addr,
	uint64_t        combined_size);

extern zone_id_t zone_id_for_element(
	void           *addr,
	vm_size_t       esize);

#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
extern void *zone_element_pgz_oob_adjust(
	void           *addr,
	vm_size_t       req_size,
	vm_size_t       elem_size);
#endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */

extern void zone_element_bounds_check(
	vm_address_t    addr,
	vm_size_t       len);

extern vm_size_t zone_element_size(
	void           *addr,
	zone_t         *z,
	bool            clear_oob,
	vm_offset_t    *oob_offs);

/*!
 * @function zone_spans_ro_va
 *
 * @abstract
 * This function is used to check whether the specified address range
 * spans through the read-only zone range.
 *
 * @discussion
 * This only checks for the range specified within ZONE_ADDR_READONLY.
 * The parameters addr_start and addr_end are stripped of PAC bits
 * before the check is made.
 */
extern bool zone_spans_ro_va(
	vm_offset_t     addr_start,
	vm_offset_t     addr_end);

/*!
 * @function __zalloc_ro_mut_atomic
 *
 * @abstract
 * This function is called from the pmap to perform the specified atomic
 * operation on memory from the read-only allocator.
 *
 * @discussion
 * This function is for internal use only and should not be called directly.
 */
static inline uint64_t
__zalloc_ro_mut_atomic(vm_offset_t dst, zro_atomic_op_t op, uint64_t value)
{
#define __ZALLOC_RO_MUT_OP(op, op2) \
	case ZRO_ATOMIC_##op##_8: \
	        return os_atomic_##op2((uint8_t *)dst, (uint8_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_16: \
	        return os_atomic_##op2((uint16_t *)dst, (uint16_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_32: \
	        return os_atomic_##op2((uint32_t *)dst, (uint32_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_64: \
	        return os_atomic_##op2((uint64_t *)dst, (uint64_t)value, seq_cst)

	switch (op) {
		__ZALLOC_RO_MUT_OP(OR, or_orig);
		__ZALLOC_RO_MUT_OP(XOR, xor_orig);
		__ZALLOC_RO_MUT_OP(AND, and_orig);
		__ZALLOC_RO_MUT_OP(ADD, add_orig);
		__ZALLOC_RO_MUT_OP(XCHG, xchg);
	default:
		panic("%s: Invalid atomic operation: %d", __func__, op);
	}

#undef __ZALLOC_RO_MUT_OP
}

/*!
 * @function zone_owns
 *
 * @abstract
 * This function is a soft version of zone_require that checks if a given
 * pointer belongs to the specified zone and should not be used outside
 * allocator code.
 *
 * @discussion
 * Note that zone_owns() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern bool zone_owns(
	zone_t          zone,
	void           *addr);

/*!
 * @function zone_submap
 *
 * @param zsflags       the security flags of a specified zone.
 * @returns             the zone (sub)map this zone allocates from.
 */
__pure2
extern vm_map_t zone_submap(
	zone_security_flags_t   zsflags);

#ifndef VM_TAG_SIZECLASSES
#error MAX_TAG_ZONES
#endif
#if VM_TAG_SIZECLASSES

extern uint16_t zone_index_from_tag_index(
	uint32_t        tag_zone_index);

#endif /* VM_TAG_SIZECLASSES */

extern lck_grp_t zone_locks_grp;

static inline void
zone_lock(zone_t zone)
{
#if KASAN_FAKESTACK
	spl_t s = 0;

	if (zone->z_kasan_fakestacks) {
		s = splsched();
	}
#endif /* KASAN_FAKESTACK */
	hw_lck_ticket_lock(&zone->z_lock, &zone_locks_grp);
#if KASAN_FAKESTACK
	zone->z_kasan_spl = s;
#endif /* KASAN_FAKESTACK */
}

static inline void
zone_unlock(zone_t zone)
{
#if KASAN_FAKESTACK
	spl_t s = zone->z_kasan_spl;

	zone->z_kasan_spl = 0;
#endif /* KASAN_FAKESTACK */
	hw_lck_ticket_unlock(&zone->z_lock);
#if KASAN_FAKESTACK
	if (zone->z_kasan_fakestacks) {
		splx(s);
	}
#endif /* KASAN_FAKESTACK */
}
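
/*
 * Illustrative sketch: the pair above must bracket any access to the
 * "often mutated" fields of struct zone, for example:
 */
#if 0
static inline uint32_t
zone_read_free_count_example(zone_t z)
{
	uint32_t n;

	zone_lock(z);
	n = z->z_elems_free;
	zone_unlock(z);
	return n;
}
#endif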

#define MAX_ZONE_NAME   32  /* max length of a zone name we can take from the boot-args */

int track_this_zone(const char *zonename, const char *logname);
extern bool panic_include_kalloc_types;
extern zone_t kalloc_type_src_zone;
extern zone_t kalloc_type_dst_zone;

#if DEBUG || DEVELOPMENT
extern vm_size_t zone_element_info(void *addr, vm_tag_t *ptag);
#endif /* DEBUG || DEVELOPMENT */

#pragma GCC visibility pop

__END_DECLS

#endif /* _KERN_ZALLOC_INTERNAL_H_ */