/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#ifndef _KERN_ZALLOC_INTERNAL_H_
#define _KERN_ZALLOC_INTERNAL_H_

#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>

#include <os/atomic_private.h>
#include <sys/queue.h>
#include <vm/vm_map_internal.h>

#if KASAN
#include <san/kasan.h>
#include <kern/spl.h>
#endif /* KASAN */

#if KASAN_ZALLOC
/*
 * Disable zalloc zero validation under KASan, as it duplicates
 * checking that KASan already performs.
 */
#define ZALLOC_ENABLE_ZERO_CHECK 0
#define ZONE_ENABLE_LOGGING 0
#elif DEBUG || DEVELOPMENT
#define ZALLOC_ENABLE_ZERO_CHECK 1
#define ZONE_ENABLE_LOGGING 1
#else
#define ZALLOC_ENABLE_ZERO_CHECK 1
#define ZONE_ENABLE_LOGGING 0
#endif

/*!
 * @file <kern/zalloc_internal.h>
 *
 * @abstract
 * Exposes some guts of zalloc to interact with the VM, debugging, copyio and
 * kalloc subsystems.
 */

__BEGIN_DECLS

#pragma GCC visibility push(hidden)

#if CONFIG_GZALLOC
typedef struct gzalloc_data {
    uint32_t     gzfc_index;
    vm_offset_t *gzfc;
} gzalloc_data_t;
#endif

/*
 * A zone is a collection of fixed-size blocks for which there
 * is fast allocation/deallocation access. Kernel routines can
 * use zones to manage data structures dynamically, creating a zone
 * for each type of data structure to be managed.
 */

/*!
 * @typedef zone_pva_t
 *
 * @brief
 * Type used to point to a page virtual address in the zone allocator.
 *
 * @description
 * - Valid pages have the top bit set.
 * - 0 represents the "NULL" page.
 * - Non-zero values with the top bit cleared represent queue heads,
 *   indexed from the beginning of the __DATA section of the kernel
 *   (see zone_pageq_base).
 */
typedef struct zone_packed_virtual_address {
    uint32_t packed_address;
} zone_pva_t;
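
/*
 * Illustrative sketch (not part of the interface): classifying a
 * zone_pva_t per the encoding described above. The helper names below
 * are hypothetical, shown only to make the encoding concrete.
 *
 *	static inline bool
 *	zone_pva_is_null(zone_pva_t pva)
 *	{
 *		return pva.packed_address == 0;
 *	}
 *
 *	static inline bool
 *	zone_pva_is_queue(zone_pva_t pva)
 *	{
 *		// non-zero with the top bit clear: a queue head in __DATA
 *		return pva.packed_address != 0 &&
 *		    !(pva.packed_address & 0x80000000u);
 *	}
 */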

/*!
 * @struct zone_stats
 *
 * @abstract
 * Per-cpu structure used for basic zone stats.
 *
 * @discussion
 * The values aren't scaled for per-cpu zones.
 */
struct zone_stats {
    uint64_t zs_mem_allocated;
    uint64_t zs_mem_freed;
    uint32_t zs_alloc_rr;   /* allocation rr bias */
};

STAILQ_HEAD(zone_depot, zone_magazine);


struct zone {
    /*
     * Readonly / rarely written fields
     */

    /*
     * The first 4 fields match a zone_view.
     *
     * z_self points back to the zone when the zone is initialized,
     * and is NULL otherwise.
     */
    struct zone *z_self;
    zone_stats_t z_stats;
    const char *z_name;
    struct zone_view *z_views;

    struct thread *z_expander;
    struct zone_cache *__zpercpu z_pcpu_cache;

    uint16_t z_chunk_pages;  /* pages added at a time when the zone grows */
    uint16_t z_chunk_elems;  /* count of allocations per chunk */
    uint16_t z_elems_rsv;    /* maintain a free reserve of elements */
    uint16_t z_elem_size;    /* size of an element */
    uint16_t z_pgz_oob_offs; /* element initial offset */

    uint64_t /* 48 bits */
    /*
     * Lifecycle state (Mutable after creation)
     */
        z_destroyed :1,        /* zone is (being) destroyed */
        z_async_refilling :1,  /* asynchronous allocation pending? */
        z_expanding_wait :1,   /* is thread waiting for expansion? */
        z_expander_vm_priv :1, /* a vm privileged thread is expanding */

    /*
     * Behavior configuration bits
     */
        z_percpu :1,           /* the zone is percpu */
        z_permanent :1,        /* the zone allocations are permanent */
        z_nocaching :1,        /* disallow zone caching for this zone */
        collectable :1,        /* garbage collect empty pages */
        exhaustible :1,        /* merely return if empty? */
        expandable :1,         /* expand zone (with message)? */
        no_callout :1,
        z_destructible :1,     /* zone can be zdestroy()ed */

        _reserved :19,

    /*
     * Debugging features
     */
        alignment_required :1, /* element alignment needs to be preserved */
        z_pgz_tracked :1,      /* this zone is tracked by pgzalloc */
        z_pgz_use_guards :1,   /* this zone uses guards with PGZ */
        z_gzalloc_tracked :1,  /* this zone is tracked by gzalloc */
        z_nogzalloc :1,        /* this zone doesn't participate with (p)gzalloc */
        kasan_fakestacks :1,
        kasan_noquarantine :1, /* whether to use the kasan quarantine */
        z_tags_sizeclass :6,   /* idx into zone_tags_sizeclasses to associate
                                * sizeclass for a particular kalloc tag */
        z_uses_tags :1,
        z_tags_inline :1,
        z_log_on :1,           /* zone logging was enabled by boot-arg */
        z_tbi_tag :1;          /* Zone supports tbi tagging */

    /*
     * Often mutated fields
     */

    lck_spin_t z_lock;
    struct zone_depot z_recirc;

    /*
     * Page accounting (wired / VA)
     *
     * These numbers are unscaled for z_percpu zones
     * (zone_scale_for_percpu() needs to be used to find the true value).
     */
    uint32_t z_wired_max;   /* how large can this zone grow */
    uint32_t z_wired_hwm;   /* z_wired_cur high watermark */
    uint32_t z_wired_cur;   /* number of pages used by this zone */
    uint32_t z_wired_empty; /* pages collectable by GC */
    uint32_t z_va_cur;      /* amount of VA used by this zone */

    /*
     * List of metadata structs, which maintain per-page free element lists
     */
    zone_pva_t z_pageq_empty;   /* populated, completely empty pages */
    zone_pva_t z_pageq_partial; /* populated, partially filled pages */
    zone_pva_t z_pageq_full;    /* populated, completely full pages */
    zone_pva_t z_pageq_va;      /* non-populated VA pages */

    /*
     * Zone statistics
     *
     * z_contention_wma:
     *     weighted moving average of the number of contentions per second,
     *     in Z_CONTENTION_WMA_UNIT units (fixed point decimal).
     *
     * z_contention_cur:
     *     count of recorded contentions that will be folded into
     *     z_contention_wma at the next period.
     *
     * z_recirc_cur:
     *     number of magazines in the recirculation depot.
     *
     * z_elems_free:
     *     number of free elements in the zone.
     *
     * z_elems_{min,max}:
     *     tracks the low/high watermark of z_elems_free for the current
     *     weighted moving average period.
     *
     * z_elems_free_wss:
     *     weighted moving average of the (z_elems_free_max - z_elems_free_min)
     *     amplitude, which is used by the GC for trim operations.
     *
     * z_elems_avail:
     *     total number of elements in the zone.
     */
#define Z_CONTENTION_WMA_UNIT (1u << 8)
    uint32_t z_contention_wma;
    uint32_t z_contention_cur;
    uint32_t z_recirc_cur;
    uint32_t z_elems_free_max;
    uint32_t z_elems_free_wss;
    uint32_t z_elems_free_min;
    uint32_t z_elems_free;  /* Number of free elements */
    uint32_t z_elems_avail; /* Number of elements available */

#if CONFIG_GZALLOC
    gzalloc_data_t gz;
#endif
#if KASAN_ZALLOC
    uint32_t z_kasan_redzone;
    spl_t z_kasan_spl;
#endif
#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
    /*
     * The allocation logs are used when:
     *
     * - zlog<n>= boot-args are used (and then z_log_on is set)
     *
     * - the leak detection was triggered for the zone.
     *   In that case, the log can't ever be freed,
     *   but it can be enabled/disabled dynamically.
     */
    struct btlog *z_btlog;
    struct btlog *z_btlog_disabled;
#endif
#if DEBUG || DEVELOPMENT
    struct zone *z_kt_next;
#endif
};
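
/*
 * Illustrative sketch (an assumption for illustration, not the exact
 * kernel policy): a fixed-point update consistent with the statistics
 * described above, folding z_contention_cur into z_contention_wma at
 * the end of a period with a 75% weight on history.
 *
 *	z->z_contention_wma = (3 * z->z_contention_wma +
 *	    z->z_contention_cur * Z_CONTENTION_WMA_UNIT) / 4;
 *	z->z_contention_cur = 0;
 */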

/*!
 * @typedef zone_security_flags_t
 *
 * @brief
 * Type used to store the immutable security properties of a zone.
 *
 * @description
 * These properties influence the security nature of a zone and can't be
 * modified after lockdown.
 */
typedef struct zone_security_flags {
    uint16_t
    /*
     * Security sensitive configuration bits
     */
        z_submap_idx :8,      /* a Z_SUBMAP_IDX_* value */
        z_pgz_use_guards :1,  /* this zone uses guards with PGZ */
        z_submap_from_end :1, /* allocate from the left or the right? */
        z_kheap_id :3,        /* zone_kheap_id_t when part of a kalloc heap */
        z_noencrypt :1,       /* do not encrypt pages when hibernating */
        z_va_sequester :1,    /* page sequester: no VA reuse with other zones */
        z_kalloc_type :1;     /* the zone does type-based segregation */
} zone_security_flags_t;


/*
 * Zsecurity config to enable sequestering VA of zones
 */
#if KASAN_ZALLOC || !defined(__LP64__)
# define ZSECURITY_CONFIG_SEQUESTER OFF
#else
# define ZSECURITY_CONFIG_SEQUESTER ON
#endif

/*
 * Zsecurity config to enable creating separate kalloc zones for
 * bags of bytes
 */
#if KASAN_ZALLOC || !defined(__LP64__)
# define ZSECURITY_CONFIG_SUBMAP_USER_DATA OFF
#else
# define ZSECURITY_CONFIG_SUBMAP_USER_DATA ON
#endif

/*
 * Zsecurity config to enable strict free of iokit objects to the zone
 * or heap they were allocated from.
 *
 * Turn ZSECURITY_CONFIG_STRICT_IOKIT_FREE off on x86 so as not to
 * break third-party kexts that haven't yet been recompiled
 * to use the new iokit macros.
 */
#if PLATFORM_MacOSX && __x86_64__
# define ZSECURITY_CONFIG_STRICT_IOKIT_FREE OFF
#else
# define ZSECURITY_CONFIG_STRICT_IOKIT_FREE ON
#endif

/*
 * Zsecurity config to enable the read-only allocator
 */
#if KASAN_ZALLOC || !defined(__LP64__)
# define ZSECURITY_CONFIG_READ_ONLY OFF
#else
# define ZSECURITY_CONFIG_READ_ONLY ON
#endif

/*
 * Zsecurity config to enable making heap feng-shui
 * less reliable.
 */
#if KASAN_ZALLOC || !defined(__LP64__)
# define ZSECURITY_CONFIG_SAD_FENG_SHUI OFF
# define ZSECURITY_CONFIG_GENERAL_SUBMAPS 1
#else
# define ZSECURITY_CONFIG_SAD_FENG_SHUI ON
# define ZSECURITY_CONFIG_GENERAL_SUBMAPS 4
#endif

/*
 * Zsecurity config to enable adjusting PGZ-OOB elements
 * to right-align them in their space.
 */
#if KASAN || defined(__x86_64__) || !defined(__LP64__)
# define ZSECURITY_CONFIG_PGZ_OOB_ADJUST OFF
#else
# define ZSECURITY_CONFIG_PGZ_OOB_ADJUST ON
#endif

/*
 * Zsecurity config to enable kalloc type segregation
 */
#if KASAN_ZALLOC || !defined(__LP64__)
# define ZSECURITY_CONFIG_KALLOC_TYPE OFF
# define ZSECURITY_CONFIG_KT_BUDGET 0
# define ZSECURITY_CONFIG_KT_VAR_BUDGET 0
#else
# define ZSECURITY_CONFIG_KALLOC_TYPE ON
#if XNU_TARGET_OS_WATCH
# define ZSECURITY_CONFIG_KT_BUDGET 85
#else
# define ZSECURITY_CONFIG_KT_BUDGET 200
#endif
# define ZSECURITY_CONFIG_KT_VAR_BUDGET 3
#endif


__options_decl(kalloc_type_options_t, uint64_t, {
    /*
     * kalloc type option to switch default accounting to private.
     */
    KT_OPTIONS_ACCT = 0x00000001,
    /*
     * kalloc type option to print additional stats regarding zone
     * budget distribution and signatures.
     */
    KT_OPTIONS_DEBUG = 0x00000002,
    /*
     * kalloc type option to allow loose freeing between heaps
     */
    KT_OPTIONS_LOOSE_FREE = 0x00000004,
});

__enum_decl(kt_var_heap_id_t, uint32_t, {
    /*
     * Fake "data" heap used to link views of data-only allocations that
     * have been redirected to KHEAP_DATA_BUFFERS
     */
    KT_VAR_DATA_HEAP,
    /*
     * Heap for pointer arrays
     */
    KT_VAR_PTR_HEAP,
    /*
     * Indicates the first additional heap added
     */
    KT_VAR__FIRST_FLEXIBLE_HEAP,
});

/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VM
 *     this map has the special property that its allocations
 *     can be done without ever locking the submap, and doesn't use
 *     VM entries in the map (which limits certain VM map operations on it).
 *
 *     On ILP32 a single zone lives here (the vm_map_entry_reserved_zone).
 *
 *     On LP64 it is also used to keep VM allocations low
 *     in the kernel VA space, for pointer packing purposes.
 *
 * Z_SUBMAP_IDX_GENERAL_{0,1,2,3}
 *     used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_DATA
 *     used to sequester bags of bytes from all other allocations
 *     and allow VA reuse within the map
 *
 * Z_SUBMAP_IDX_READ_ONLY
 *     used for the read-only allocator
 */
__enum_decl(zone_submap_idx_t, uint32_t, {
    Z_SUBMAP_IDX_VM,
    Z_SUBMAP_IDX_READ_ONLY,
    Z_SUBMAP_IDX_GENERAL_0,
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
    Z_SUBMAP_IDX_GENERAL_1,
    Z_SUBMAP_IDX_GENERAL_2,
    Z_SUBMAP_IDX_GENERAL_3,
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
    Z_SUBMAP_IDX_DATA,

    Z_SUBMAP_IDX_COUNT,
});

#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN)
#define KALLOC_DLUT_SIZE (2048 / KALLOC_MINALIGN)

struct kheap_zones {
    struct kalloc_zone_cfg *cfg;
    struct kalloc_heap *views;
    zone_kheap_id_t heap_id;
    uint16_t max_k_zone;
    uint8_t dlut[KALLOC_DLUT_SIZE]; /* table of indices into k_zone[] */
    uint8_t k_zindex_start;
    /* If there's no hit in the DLUT, then start searching from k_zindex_start. */
    zone_t *k_zone;
};
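
/*
 * Illustrative sketch (an assumption, for illustration only) of how a
 * direct lookup table like @c dlut above is meant to be consulted:
 * sizes up to (KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN bytes map
 * straight to a zone index; anything larger falls back to a search
 * starting at k_zindex_start. The helper name is hypothetical.
 *
 *	static inline uint32_t
 *	kheap_zindex_for_size(struct kheap_zones *kz, vm_size_t size)
 *	{
 *		if (size <= (KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN) {
 *			// round up to the next KALLOC_MINALIGN granule
 *			return kz->dlut[(size + KALLOC_MINALIGN - 1) /
 *			    KALLOC_MINALIGN];
 *		}
 *		return kz->k_zindex_start; // begin linear search here
 *	}
 */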

/*
 * Variable kalloc_type heap config
 */
struct kt_heap_zones {
    zone_id_t kh_zstart;
    zone_kheap_id_t heap_id;
    struct kalloc_type_var_view *views;
};

#define KT_VAR_MAX_HEAPS 8
#define MAX_ZONES 650
extern struct kt_heap_zones kalloc_type_heap_array[KT_VAR_MAX_HEAPS];
extern zone_id_t _Atomic num_zones;
extern uint32_t zone_view_count;
extern struct zone zone_array[];
extern zone_security_flags_t zone_security_array[];
extern uint16_t zone_ro_elem_size[];
extern const char * const kalloc_heap_names[KHEAP_ID_COUNT];
extern mach_memory_info_t *panic_kext_memory_info;
extern vm_size_t panic_kext_memory_size;
extern vm_offset_t panic_fault_address;
extern vm_map_size_t zone_map_size;

#define zone_index_foreach(i) \
	for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
	    i < num_zones_##i; i++)

#define zone_foreach(z) \
	for (zone_t z = &zone_array[1], \
	    last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
	    z < last_zone_##z; z++)
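
/*
 * Example usage (illustrative): walk every registered zone, skipping
 * slots that aren't initialized (per struct zone above, z_self points
 * back to the zone only once it is initialized).
 *
 *	zone_foreach(z) {
 *		if (z->z_self != z) {
 *			continue;
 *		}
 *		// inspect *z here
 *	}
 */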

__abortlike
extern void zone_invalid_panic(zone_t zone);

__pure2
static inline zone_id_t
zone_index(zone_t z)
{
    zone_id_t zid = (zone_id_t)(z - zone_array);
    if (__improbable(zid >= MAX_ZONES)) {
        zone_invalid_panic(z);
    }
    return zid;
}

__pure2
static inline bool
zone_is_ro(zone_t zone)
{
    return zone >= &zone_array[ZONE_ID__FIRST_RO] &&
           zone <= &zone_array[ZONE_ID__LAST_RO];
}

__pure2
static inline vm_offset_t
zone_elem_size_ro(zone_id_t zid)
{
    return zone_ro_elem_size[zid];
}

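/*
 * If [addr, addr + size) fits within a single page, addr and
 * addr + size - 1 share all of their page-number bits, so XOR-ing them
 * leaves nothing once atop() discards the page-offset bits.
 */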
static inline bool
zone_addr_size_crosses_page(mach_vm_address_t addr, mach_vm_size_t size)
{
    return atop(addr ^ (addr + size - 1)) != 0;
}

__pure2
static inline uint16_t
zone_oob_offs(zone_t zone)
{
    uint16_t offs = 0;
#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
    offs = zone->z_pgz_oob_offs;
#else
    (void)zone;
#endif
    return offs;
}

__pure2
static inline vm_offset_t
zone_elem_size(zone_t zone)
{
    return zone->z_elem_size;
}

__pure2
static inline vm_offset_t
zone_elem_size_safe(zone_t zone)
{
    if (zone_is_ro(zone)) {
        zone_id_t zid = zone_index(zone);
        return zone_elem_size_ro(zid);
    }
    return zone_elem_size(zone);
}

__pure2
static inline zone_security_flags_t
zone_security_config(zone_t z)
{
    zone_id_t zid = zone_index(z);
    return zone_security_array[zid];
}

static inline uint32_t
zone_count_allocated(zone_t zone)
{
    return zone->z_elems_avail - zone->z_elems_free;
}

static inline vm_size_t
zone_scale_for_percpu(zone_t zone, vm_size_t size)
{
    if (zone->z_percpu) {
        size *= zpercpu_count();
    }
    return size;
}

static inline vm_size_t
zone_size_wired(zone_t zone)
{
    /*
     * This either requires the zone lock,
     * or is to be used for statistics purposes only.
     */
    vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed));
    return zone_scale_for_percpu(zone, size);
}

static inline vm_size_t
zone_size_free(zone_t zone)
{
    return zone_scale_for_percpu(zone,
               (vm_size_t)zone->z_elem_size * zone->z_elems_free);
}

/* Under KASAN builds, this also accounts for quarantined elements. */
static inline vm_size_t
zone_size_allocated(zone_t zone)
{
    return zone_scale_for_percpu(zone,
               (vm_size_t)zone->z_elem_size * zone_count_allocated(zone));
}

static inline vm_size_t
zone_size_wasted(zone_t zone)
{
    return zone_size_wired(zone) - zone_scale_for_percpu(zone,
               (vm_size_t)zone->z_elem_size * zone->z_elems_avail);
}

/*
 * For sysctl kern.zones_collectable_bytes, used by memory_maintenance to check
 * whether a userspace reboot is needed. The only other way to query for this
 * information is via mach_memory_info(), which is unavailable on release kernels.
 */
extern uint64_t get_zones_collectable_bytes(void);

/*!
 * @enum zone_gc_level_t
 *
 * @const ZONE_GC_TRIM
 * Request a trimming GC: it will trim allocations in excess
 * of the working set size estimate only.
 *
 * @const ZONE_GC_DRAIN
 * Request a draining GC: this is an aggressive mode that will
 * cause all caches to be drained and all free pages returned to the system.
 *
 * @const ZONE_GC_JETSAM
 * Request to consider a jetsam, and then fall back to @c ZONE_GC_TRIM or
 * @c ZONE_GC_DRAIN depending on the state of the zone map.
 * To avoid deadlocks, only @c vm_pageout_garbage_collect() should ever
 * request a @c ZONE_GC_JETSAM level.
 */
__enum_closed_decl(zone_gc_level_t, uint32_t, {
    ZONE_GC_TRIM,
    ZONE_GC_DRAIN,
    ZONE_GC_JETSAM,
});

/*!
 * @function zone_gc
 *
 * @brief
 * Reduces memory used by zones by trimming caches and freelists.
 *
 * @discussion
 * @c zone_gc() is called:
 * - by the pageout daemon when the system needs more free pages.
 * - by the VM when contiguous page allocation requests get stuck
 *   (see vm_page_find_contiguous()).
 *
 * @param level         The zone GC level requested.
 */
extern void zone_gc(zone_gc_level_t level);
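
/*
 * Example (illustrative): a memory-pressure path asking for the
 * conservative trim level.
 *
 *	zone_gc(ZONE_GC_TRIM);
 */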

extern void zone_gc_trim(void);
extern void zone_gc_drain(void);

#define ZONE_WSS_UPDATE_PERIOD 10
/*!
 * @function compute_zone_working_set_size
 *
 * @brief
 * Recomputes the working set size for every zone.
 *
 * @discussion
 * This runs about every @c ZONE_WSS_UPDATE_PERIOD seconds (10),
 * computing an exponential moving average with a weight of 75%,
 * so that the history of the last minute is the dominating factor.
 */
extern void compute_zone_working_set_size(void *);
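
/*
 * Illustrative sketch (an assumption, not the exact kernel code): with
 * a 75% weight on history, each period would fold the observed
 * free-element amplitude into the per-zone estimate like so:
 *
 *	uint32_t ampl = z->z_elems_free_max - z->z_elems_free_min;
 *	z->z_elems_free_wss = (3 * z->z_elems_free_wss + ampl) / 4;
 *	z->z_elems_free_min = z->z_elems_free_max = z->z_elems_free;
 */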

/* Debug logging for zone-map-exhaustion jetsams. */
extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);

/* Bootstrap zone module (create zone zone) */
extern void zone_bootstrap(void);

/*!
 * @function zone_early_mem_init
 *
 * @brief
 * Steal memory from pmap (prior to initialization of zalloc)
 * for the special vm zones that allow bootstrap memory and store
 * the range so as to facilitate range checking in zfree.
 *
 * @param size          the size to steal (must be a page multiple)
 */
__startup_func
extern vm_offset_t zone_early_mem_init(
    vm_size_t size);

/*!
 * @function zone_get_early_alloc_size
 *
 * @brief
 * Compute the correct size (large enough for @c min_elems elements) that is a
 * multiple of the allocation granule for the zone with the given creation
 * flags and element size.
 */
__startup_func
extern vm_size_t zone_get_early_alloc_size(
    const char *name __unused,
    vm_size_t elem_size,
    zone_create_flags_t flags,
    vm_size_t min_elems);

/*!
 * @function zone_cram_early
 *
 * @brief
 * Cram memory allocated with @c zone_early_mem_init() into a zone.
 *
 * @param zone          The zone to cram memory into.
 * @param newmem        The base address for the memory to cram.
 * @param size          The size of the memory to cram into the zone.
 */
__startup_func
extern void zone_cram_early(
    zone_t zone,
    vm_offset_t newmem,
    vm_size_t size);

extern bool zone_maps_owned(
    vm_address_t addr,
    vm_size_t size);

extern void zone_map_sizes(
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

extern bool zone_map_nearing_exhaustion(void);

#if defined(__LP64__)
#define ZONE_POISON 0xdeadbeefdeadbeef
#else
#define ZONE_POISON 0xdeadbeef
#endif

static inline vm_tag_t
zalloc_flags_get_tag(zalloc_flags_t flags)
{
    return (vm_tag_t)((flags & Z_VM_TAG_MASK) >> Z_VM_TAG_SHIFT);
}
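
/*
 * Example (illustrative): recovering a VM tag that a caller packed into
 * the allocation flags. The Z_VM_TAG() packing macro shown here is an
 * assumption (the shift/mask counterpart from <kern/zalloc.h>).
 *
 *	zalloc_flags_t flags = Z_WAITOK | Z_VM_TAG(VM_KERN_MEMORY_KALLOC);
 *	vm_tag_t tag = zalloc_flags_get_tag(flags);
 */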

extern void *zalloc_ext(
    zone_t zone,
    zone_stats_t zstats,
    zalloc_flags_t flags,
    vm_size_t elem_size);

extern void zfree_ext(
    zone_t zone,
    zone_stats_t zstats,
    void *addr,
    vm_size_t elem_size);

extern zone_id_t zone_id_for_element(
    void *addr,
    vm_size_t esize);

#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
extern void *zone_element_pgz_oob_adjust(
    struct kalloc_result kr,
    vm_size_t elem_size);
#endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */

extern vm_size_t zone_element_size(
    void *addr,
    zone_t *z,
    bool clear_oob,
    vm_offset_t *oob_offs);

/*!
 * @function zone_spans_ro_va
 *
 * @abstract
 * This function is used to check whether the specified address range
 * spans through the read-only zone range.
 *
 * @discussion
 * This only checks for the range specified within ZONE_ADDR_READONLY.
 * The parameters addr_start and addr_end are stripped of PAC bits
 * before the check is made.
 */
extern bool zone_spans_ro_va(
    vm_offset_t addr_start,
    vm_offset_t addr_end);

/*!
 * @function __zalloc_ro_mut_atomic
 *
 * @abstract
 * This function is called from the pmap to perform the specified atomic
 * operation on memory from the read-only allocator.
 *
 * @discussion
 * This function is for internal use only and should not be called directly.
 */
static inline uint64_t
__zalloc_ro_mut_atomic(vm_offset_t dst, zro_atomic_op_t op, uint64_t value)
{
#define __ZALLOC_RO_MUT_OP(op, op2) \
	case ZRO_ATOMIC_##op##_8: \
		return os_atomic_##op2((uint8_t *)dst, (uint8_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_16: \
		return os_atomic_##op2((uint16_t *)dst, (uint16_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_32: \
		return os_atomic_##op2((uint32_t *)dst, (uint32_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_64: \
		return os_atomic_##op2((uint64_t *)dst, (uint64_t)value, seq_cst)

    switch (op) {
        __ZALLOC_RO_MUT_OP(OR, or_orig);
        __ZALLOC_RO_MUT_OP(XOR, xor_orig);
        __ZALLOC_RO_MUT_OP(AND, and_orig);
        __ZALLOC_RO_MUT_OP(ADD, add_orig);
        __ZALLOC_RO_MUT_OP(XCHG, xchg);
    default:
        panic("%s: Invalid atomic operation: %d", __func__, op);
    }

#undef __ZALLOC_RO_MUT_OP
}
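
/*
 * Example (illustrative): atomically OR a flag bit into a 32-bit field
 * of a read-only-zone element, getting the previous value back. The
 * destination and flag value are arbitrary placeholders.
 *
 *	uint64_t prev = __zalloc_ro_mut_atomic(dst, ZRO_ATOMIC_OR_32, 0x1);
 */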

/*!
 * @function zone_owns
 *
 * @abstract
 * This function is a soft version of zone_require: it checks whether a given
 * pointer belongs to the specified zone. It should not be used outside
 * allocator code.
 *
 * @discussion
 * Note that zone_owns() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern bool zone_owns(
    zone_t zone,
    void *addr);

/*!
 * @function zone_submap
 *
 * @param zsflags       the security flags of a specified zone.
 * @returns             the zone (sub)map this zone allocates from.
 */
__pure2
extern vm_map_t zone_submap(
    zone_security_flags_t zsflags);

/*
 * Structure for keeping track of a backtrace, used for leak detection.
 * This is in the .h file because it is used during panic, see kern/debug.c
 * A non-zero size indicates that the trace is in use.
 */
struct ztrace {
    vm_size_t zt_size;                /* How much memory are all the allocations referring to this trace taking up? */
    uint32_t zt_depth;                /* depth of stack (0 to MAX_ZTRACE_DEPTH) */
    void *zt_stack[MAX_ZTRACE_DEPTH]; /* series of return addresses from OSBacktrace */
    uint32_t zt_collisions;           /* How many times did a different stack land here while it was occupied? */
    uint32_t zt_hit_count;            /* for determining effectiveness of hash function */
};

#ifndef VM_TAG_SIZECLASSES
#error "VM_TAG_SIZECLASSES should always be defined"
#endif
#if VM_TAG_SIZECLASSES

extern uint16_t zone_index_from_tag_index(
    uint32_t tag_zone_index);

#endif /* VM_TAG_SIZECLASSES */

static inline void
zone_lock(zone_t zone)
{
#if KASAN_ZALLOC
    spl_t s = 0;
    if (zone->kasan_fakestacks) {
        s = splsched();
    }
#endif /* KASAN_ZALLOC */
    lck_spin_lock(&zone->z_lock);
#if KASAN_ZALLOC
    zone->z_kasan_spl = s;
#endif /* KASAN_ZALLOC */
}

static inline void
zone_unlock(zone_t zone)
{
#if KASAN_ZALLOC
    spl_t s = zone->z_kasan_spl;
    zone->z_kasan_spl = 0;
#endif /* KASAN_ZALLOC */
    lck_spin_unlock(&zone->z_lock);
#if KASAN_ZALLOC
    if (zone->kasan_fakestacks) {
        splx(s);
    }
#endif /* KASAN_ZALLOC */
}

#if CONFIG_GZALLOC
void gzalloc_init(void);
void gzalloc_zone_init(zone_t);
void gzalloc_empty_free_cache(zone_t);
boolean_t gzalloc_enabled(void);

vm_offset_t gzalloc_alloc(zone_t, zone_stats_t zstats, zalloc_flags_t flags);
void gzalloc_free(zone_t, zone_stats_t zstats, void *);
boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *);
#endif /* CONFIG_GZALLOC */

#define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */

int track_this_zone(const char *zonename, const char *logname);
extern bool panic_include_kalloc_types;
extern zone_t kalloc_type_src_zone;
extern zone_t kalloc_type_dst_zone;

#if DEBUG || DEVELOPMENT
extern vm_size_t zone_element_info(void *addr, vm_tag_t *ptag);
extern bool zalloc_disable_copyio_check;
#else
#define zalloc_disable_copyio_check false
#endif /* DEBUG || DEVELOPMENT */

#pragma GCC visibility pop

__END_DECLS

#endif /* _KERN_ZALLOC_INTERNAL_H_ */