/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#ifndef _KERN_ZALLOC_INTERNAL_H_
#define _KERN_ZALLOC_INTERNAL_H_

#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/btlog.h>
#include <kern/simple_lock.h>

#include <os/atomic_private.h>
#include <sys/queue.h>

#if KASAN
#include <san/kasan.h>
#include <kern/spl.h>
#endif /* KASAN */

/*!
 * @file <kern/zalloc_internal.h>
 *
 * @abstract
 * Exposes some guts of zalloc to interact with the VM, debugging, copyio and
 * kalloc subsystems.
 */

__BEGIN_DECLS

#pragma GCC visibility push(hidden)

#if CONFIG_GZALLOC
typedef struct gzalloc_data {
    uint32_t gzfc_index;
    vm_offset_t *gzfc;
} gzalloc_data_t;
#endif

/*
 * A zone is a collection of fixed size blocks for which there
 * is fast allocation/deallocation access. Kernel routines can
 * use zones to manage data structures dynamically, creating a zone
 * for each type of data structure to be managed.
 */

/*!
 * @typedef zone_pva_t
 *
 * @brief
 * Type used to point to a page virtual address in the zone allocator.
 *
 * @description
 * - Valid pages have the top bit set.
 * - 0 represents the "NULL" page.
 * - Non-zero values with the top bit cleared represent queue heads,
 *   indexed from the beginning of the __DATA section of the kernel
 *   (see zone_pageq_base).
 */
typedef struct zone_packed_virtual_address {
    uint32_t packed_address;
} zone_pva_t;
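
/*
 * Example (illustrative sketch only, not part of the interface):
 * classifying a zone_pva_t value according to the encoding above.
 *
 *     zone_pva_t pva = ...;
 *     bool is_null  = (pva.packed_address == 0);
 *     bool is_page  = (pva.packed_address & 0x80000000u) != 0;
 *     bool is_queue = !is_null && !is_page;
 */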

/*!
 * @struct zone_stats
 *
 * @abstract
 * Per-cpu structure used for basic zone stats.
 *
 * @discussion
 * The values aren't scaled for per-cpu zones.
 */
struct zone_stats {
    uint64_t zs_mem_allocated;
    uint64_t zs_mem_freed;
    uint32_t zs_alloc_rr; /* allocation rr bias */
};

STAILQ_HEAD(zone_depot, zone_magazine);

struct zone {
    /*
     * Readonly / rarely written fields
     */

    /*
     * The first 4 fields match a zone_view.
     *
     * z_self points back to the zone when the zone is initialized,
     * or is NULL otherwise.
     */
    struct zone *z_self;
    zone_stats_t z_stats;
    const char *z_name;
    struct zone_view *z_views;

    struct thread *z_expander;
    struct zone_cache *__zpercpu z_pcpu_cache;

    uint16_t z_chunk_pages; /* size used for more memory in pages */
    uint16_t z_chunk_elems; /* count of allocations per chunk */
    uint16_t z_elems_rsv; /* maintain a free reserve of elements */
    uint16_t z_elem_size; /* size of an element */

    uint64_t
    /*
     * Lifecycle state (Mutable after creation)
     */
    z_destroyed :1, /* zone is (being) destroyed */
    z_async_refilling :1, /* asynchronous allocation pending? */
    z_expanding_wait :1, /* is thread waiting for expansion? */
    z_expander_vm_priv :1, /* a vm privileged thread is expanding */

    /*
     * Behavior configuration bits
     */
    z_percpu :1, /* the zone is percpu */
    z_permanent :1, /* the zone allocations are permanent */
    z_nocaching :1, /* disallow zone caching for this zone */
    collectable :1, /* garbage collect empty pages */
    exhaustible :1, /* merely return if empty? */
    expandable :1, /* expand zone (with message)? */
    no_callout :1,
    z_destructible :1, /* zone can be zdestroy()ed */

    _reserved :37,

    /*
     * Debugging features
     */
    alignment_required :1, /* element alignment needs to be preserved */
    gzalloc_tracked :1, /* this zone is tracked by gzalloc */
    gzalloc_exempt :1, /* this zone doesn't participate with gzalloc */
    kasan_fakestacks :1,
    kasan_noquarantine :1, /* whether to use the kasan quarantine */
    z_tags_sizeclass :5, /* idx into zone_tags_sizeclasses to associate
                          * sizeclass for a particular kalloc tag */
    z_uses_tags :1,
    z_tags_inline :1,
    zleak_on :1, /* Are we collecting allocation information? */
    zone_logging :1, /* Enable zone logging for this zone. */
    z_tbi_tag :1; /* Zone supports tbi tagging */

    /*
     * often mutated fields
     */

    lck_spin_t z_lock;
    struct zone_depot z_recirc;

    /*
     * Page accounting (wired / VA)
     *
     * Those numbers are unscaled for z_percpu zones
     * (zone_scale_for_percpu() needs to be used to find the true value).
     */
    uint32_t z_wired_max; /* how large can this zone grow */
    uint32_t z_wired_hwm; /* z_wired_cur high watermark */
    uint32_t z_wired_cur; /* number of pages used by this zone */
    uint32_t z_wired_empty; /* pages collectable by GC */
    uint32_t z_va_cur; /* amount of VA used by this zone */

    /*
     * list of metadata structs, which maintain per-page free element lists
     */
    zone_pva_t z_pageq_empty; /* populated, completely empty pages */
    zone_pva_t z_pageq_partial; /* populated, partially filled pages */
    zone_pva_t z_pageq_full; /* populated, completely full pages */
    zone_pva_t z_pageq_va; /* non-populated VA pages */
    /*
     * Zone statistics
     *
     * z_contention_wma:
     *   weighted moving average of the number of contentions per second,
     *   in Z_CONTENTION_WMA_UNIT units (fixed point decimal).
     *
     * z_contention_cur:
     *   count of recorded contentions that will be fused in z_contention_wma
     *   at the next period.
     *
     * z_recirc_cur:
     *   number of magazines in the recirculation depot.
     *
     * z_elems_free:
     *   number of free elements in the zone.
     *
     * z_elems_{min,max}:
     *   tracks the low/high watermark of z_elems_free for the current
     *   weighted moving average period.
     *
     * z_elems_free_wss:
     *   weighted moving average of the (z_elems_free_max - z_elems_free_min)
     *   amplitude, which is used by the GC for trim operations.
     *
     * z_elems_avail:
     *   number of elements in the zone (at all).
     */
#define Z_CONTENTION_WMA_UNIT (1u << 8)
    uint32_t z_contention_wma;
    uint32_t z_contention_cur;
    uint32_t z_recirc_cur;
    uint32_t z_elems_free_max;
    uint32_t z_elems_free_wss;
    uint32_t z_elems_free; /* Number of free elements */
    uint32_t z_elems_avail; /* Number of elements available */
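
    /*
     * Example (illustrative): since Z_CONTENTION_WMA_UNIT is 1 << 8,
     * a z_contention_wma value of 384 (1.5 * 256) encodes a moving
     * average of roughly 1.5 lock contentions per second.
     */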

#if CONFIG_ZLEAKS
    uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */
#endif
#if CONFIG_GZALLOC
    gzalloc_data_t gz;
#endif
#if KASAN_ZALLOC
    uint32_t z_kasan_redzone;
    spl_t z_kasan_spl;
#endif
#if DEBUG || DEVELOPMENT || CONFIG_ZLEAKS
    /* zone logging structure to hold stacks and element references to those stacks. */
    btlog_t *zlog_btlog;
#endif
#if DEBUG || DEVELOPMENT
    struct zone *z_kt_next;
#endif
};

/*!
 * @typedef zone_security_flags_t
 *
 * @brief
 * Type used to store the immutable security properties of a zone.
 *
 * @description
 * These properties influence the security nature of a zone and can't be
 * modified after lockdown.
 */
typedef struct zone_security_flags {
    uint16_t
    /*
     * Security sensitive configuration bits
     */
    z_submap_idx :8, /* a Z_SUBMAP_IDX_* value */
    z_submap_from_end :1, /* allocate from the left or the right? */
    z_kheap_id :2, /* zone_kheap_id_t when part of a kalloc heap */
    z_allows_foreign :1, /* allow non-zalloc space */
    z_noencrypt :1, /* do not encrypt pages when hibernating */
    z_va_sequester :1, /* page sequester: no VA reuse with other zones */
    z_kalloc_type :1; /* zone does type-based segregation */
} zone_security_flags_t;


/*
 * Zsecurity config to enable sequestering VA of zones
 */
#if KASAN_ZALLOC || !defined(__LP64__)
#   define ZSECURITY_CONFIG_SEQUESTER OFF
#else
#   define ZSECURITY_CONFIG_SEQUESTER ON
#endif

/*
 * Zsecurity config to enable creating separate kalloc zones for
 * bags of bytes
 */
#if KASAN_ZALLOC || !defined(__LP64__)
#   define ZSECURITY_CONFIG_SUBMAP_USER_DATA OFF
#else
#   define ZSECURITY_CONFIG_SUBMAP_USER_DATA ON
#endif

/*
 * Leave kext heap on macOS for kalloc/kalloc_type callsites that aren't
 * in the BootKC.
 */
#if KASAN_ZALLOC || !defined(__LP64__)
#   define ZSECURITY_CONFIG_SEQUESTER_KEXT_KALLOC OFF
#elif PLATFORM_MacOSX
#   define ZSECURITY_CONFIG_SEQUESTER_KEXT_KALLOC ON
#else
#   define ZSECURITY_CONFIG_SEQUESTER_KEXT_KALLOC OFF
#endif

/*
 * Zsecurity config to enable strict free of iokit objects to the zone
 * or heap they were allocated from.
 *
 * Turn ZSECURITY_OPTIONS_STRICT_IOKIT_FREE off on x86 so as not
 * to break third party kexts that haven't yet been recompiled
 * to use the new iokit macros.
 */
#if PLATFORM_MacOSX && __x86_64__
#   define ZSECURITY_CONFIG_STRICT_IOKIT_FREE OFF
#else
#   define ZSECURITY_CONFIG_STRICT_IOKIT_FREE ON
#endif

/*
 * Zsecurity config to enable the read-only allocator
 */
#if KASAN_ZALLOC || !defined(__LP64__)
#   define ZSECURITY_CONFIG_READ_ONLY OFF
#else
#   define ZSECURITY_CONFIG_READ_ONLY ON
#endif

/*
 * Zsecurity config to enable making heap feng-shui
 * less reliable.
 */
#if KASAN_ZALLOC || !defined(__LP64__)
#   define ZSECURITY_CONFIG_SAD_FENG_SHUI OFF
#   define ZSECURITY_CONFIG_GENERAL_SUBMAPS 1
#else
#   define ZSECURITY_CONFIG_SAD_FENG_SHUI ON
#   define ZSECURITY_CONFIG_GENERAL_SUBMAPS 4
#endif

/*
 * Zsecurity options that can be toggled, as opposed to configs
 */
__options_decl(zone_security_options_t, uint64_t, {
    /*
     * Zsecurity option to enable the kernel and kalloc data maps.
     */
    ZSECURITY_OPTIONS_KERNEL_DATA_MAP = 0x00000020,
});

#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__OFF() 0
#define ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON() 1
#define ZSECURITY_CONFIG2(v) ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__##v()
#define ZSECURITY_CONFIG1(v) ZSECURITY_CONFIG2(v)
#define ZSECURITY_CONFIG(opt) ZSECURITY_CONFIG1(ZSECURITY_CONFIG_##opt)
#define ZSECURITY_ENABLED(opt) (zsecurity_options & ZSECURITY_OPTIONS_##opt)
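
/*
 * Illustrative expansion of the macro chain above, e.g. with
 * ZSECURITY_CONFIG_READ_ONLY defined to ON:
 *
 *     ZSECURITY_CONFIG(READ_ONLY)
 *      -> ZSECURITY_CONFIG1(ZSECURITY_CONFIG_READ_ONLY)
 *      -> ZSECURITY_CONFIG2(ON)
 *      -> ZSECURITY_NOT_A_COMPILE_TIME_CONFIG__ON()
 *      -> 1
 *
 * so compile-time configs are tested with #if ZSECURITY_CONFIG(READ_ONLY).
 * A config defined to anything other than ON/OFF fails to paste into one
 * of the __ON()/__OFF() helpers and breaks the build rather than silently
 * evaluating to 0.
 */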

__options_decl(kalloc_type_options_t, uint64_t, {
    /*
     * kalloc type option to switch default accounting to private.
     */
    KT_OPTIONS_ACCT = 0x00000001,
    /*
     * kalloc type option to turn on signature based type segregation.
     * The total number of zones to be used for this feature should be
     * provided with the kt_zbudget boot-arg.
     */
    KT_OPTIONS_ON = 0x00000002,
    /*
     * kalloc type option to print additional stats regarding zone
     * budget distribution and signatures.
     */
    KT_OPTIONS_DEBUG = 0x00000004,
});

/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VM
 * this map has the special property that its allocations
 * can be done without ever locking the submap, and doesn't use
 * VM entries in the map (which limits certain VM map operations on it).
 *
 * On ILP32 a single zone lives here (the vm_map_entry_reserved_zone).
 *
 * On LP64 it is also used to restrict VM allocations to the lower
 * part of the kernel VA space, for pointer packing purposes.
 *
 * Z_SUBMAP_IDX_GENERAL_{0,1,2,3}
 * used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_DATA
 * used to sequester bags of bytes from all other allocations and allow VA
 * reuse within the map
 *
 * Z_SUBMAP_IDX_READ_ONLY
 * used for the read-only allocator
 */
__enum_decl(zone_submap_idx_t, uint32_t, {
    Z_SUBMAP_IDX_VM,
    Z_SUBMAP_IDX_READ_ONLY,
    Z_SUBMAP_IDX_GENERAL_0,
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
    Z_SUBMAP_IDX_GENERAL_1,
    Z_SUBMAP_IDX_GENERAL_2,
    Z_SUBMAP_IDX_GENERAL_3,
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
    Z_SUBMAP_IDX_DATA,

    Z_SUBMAP_IDX_COUNT,
});

#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN)
#define KALLOC_DLUT_SIZE (2048 / KALLOC_MINALIGN)

struct kheap_zones {
    struct kalloc_zone_cfg *cfg;
    struct kalloc_heap *views;
    zone_kheap_id_t heap_id;
    uint16_t max_k_zone;
    uint8_t dlut[KALLOC_DLUT_SIZE]; /* table of indices into k_zone[] */
    uint8_t k_zindex_start;
    /* If there's no hit in the DLUT, then start searching from k_zindex_start. */
    zone_t *k_zone;
    vm_size_t kalloc_max;
};

#define MAX_ZONES 650
extern zone_security_options_t zsecurity_options;
extern zone_id_t _Atomic num_zones;
extern uint32_t zone_view_count;
extern struct zone zone_array[];
extern zone_security_flags_t zone_security_array[];
extern uint16_t zone_ro_elem_size[];
extern const char * const kalloc_heap_names[KHEAP_ID_COUNT];
extern mach_memory_info_t *panic_kext_memory_info;
extern vm_size_t panic_kext_memory_size;
extern unsigned int zone_map_jetsam_limit;

#define zone_index_foreach(i) \
    for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
    i < num_zones_##i; i++)

#define zone_foreach(z) \
    for (zone_t z = &zone_array[1], \
    last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
    z < last_zone_##z; z++)
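
/*
 * Example (illustrative): typical traversal of every initialized zone with
 * the iterators above.
 *
 *     zone_index_foreach(zid) {
 *         zone_t z = &zone_array[zid];
 *         printf("%s: %u free elements\n", z->z_name, z->z_elems_free);
 *     }
 *
 *     zone_foreach(z) {
 *         if (z->z_self == z) {
 *             (operate on the initialized zone)
 *         }
 *     }
 */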

struct zone_map_range {
    vm_offset_t min_address;
    vm_offset_t max_address;
} __attribute__((aligned(2 * sizeof(vm_offset_t))));

__abortlike
extern void zone_invalid_panic(zone_t zone);

__pure2
static inline zone_id_t
zone_index(zone_t z)
{
    zone_id_t zid = (zone_id_t)(z - zone_array);
    if (__improbable(zid >= MAX_ZONES)) {
        zone_invalid_panic(z);
    }
    return zid;
}

__pure2
static inline bool
zone_is_ro(zone_t zone)
{
    return zone >= &zone_array[ZONE_ID__FIRST_RO] &&
        zone <= &zone_array[ZONE_ID__LAST_RO];
}

__pure2
static inline vm_offset_t
zone_elem_size_ro(zone_id_t zid)
{
    return zone_ro_elem_size[zid];
}

__pure2
static inline vm_offset_t
zone_elem_size(zone_t zone)
{
    return zone->z_elem_size;
}

__pure2
static inline vm_offset_t
zone_elem_size_safe(zone_t zone)
{
    if (zone_is_ro(zone)) {
        zone_id_t zid = zone_index(zone);
        return zone_elem_size_ro(zid);
    }
    return zone_elem_size(zone);
}

__pure2
static inline zone_security_flags_t
zone_security_config(zone_t z)
{
    zone_id_t zid = zone_index(z);
    return zone_security_array[zid];
}

static inline uint32_t
zone_count_allocated(zone_t zone)
{
    return zone->z_elems_avail - zone->z_elems_free;
}

static inline vm_size_t
zone_scale_for_percpu(zone_t zone, vm_size_t size)
{
    if (zone->z_percpu) {
        size *= zpercpu_count();
    }
    return size;
}

static inline vm_size_t
zone_size_wired(zone_t zone)
{
    /*
     * This either requires the zone lock to be held,
     * or must be used for statistics purposes only.
     */
    vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed));
    return zone_scale_for_percpu(zone, size);
}

static inline vm_size_t
zone_size_free(zone_t zone)
{
    return zone_scale_for_percpu(zone,
        (vm_size_t)zone->z_elem_size * zone->z_elems_free);
}

/* Under KASAN builds, this also accounts for quarantined elements. */
static inline vm_size_t
zone_size_allocated(zone_t zone)
{
    return zone_scale_for_percpu(zone,
        (vm_size_t)zone->z_elem_size * zone_count_allocated(zone));
}

static inline vm_size_t
zone_size_wasted(zone_t zone)
{
    return zone_size_wired(zone) - zone_scale_for_percpu(zone,
        (vm_size_t)zone->z_elem_size * zone->z_elems_avail);
}

/*
 * For the sysctl kern.zones_collectable_bytes, used by memory_maintenance to
 * check if a userspace reboot is needed. The only other way to query for this
 * information is via mach_memory_info(), which is unavailable on release
 * kernels.
 */
extern uint64_t get_zones_collectable_bytes(void);

/*!
 * @enum zone_gc_level_t
 *
 * @const ZONE_GC_TRIM
 * Request a trimming GC: it will trim allocations in excess
 * of the working set size estimate only.
 *
 * @const ZONE_GC_DRAIN
 * Request a draining GC: this is an aggressive mode that will
 * cause all caches to be drained and all free pages returned to the system.
 *
 * @const ZONE_GC_JETSAM
 * Request to consider a jetsam, and then fall back to @c ZONE_GC_TRIM or
 * @c ZONE_GC_DRAIN depending on the state of the zone map.
 * To avoid deadlocks, only @c vm_pageout_garbage_collect() should ever
 * request a @c ZONE_GC_JETSAM level.
 */
__enum_closed_decl(zone_gc_level_t, uint32_t, {
    ZONE_GC_TRIM,
    ZONE_GC_DRAIN,
    ZONE_GC_JETSAM,
});

/*!
 * @function zone_gc
 *
 * @brief
 * Reduces memory used by zones by trimming caches and freelists.
 *
 * @discussion
 * @c zone_gc() is called:
 * - by the pageout daemon when the system needs more free pages.
 * - by the VM when contiguous page allocation requests get stuck
 *   (see vm_page_find_contiguous()).
 *
 * @param level The zone GC level requested.
 */
extern void zone_gc(zone_gc_level_t level);

extern void zone_gc_trim(void);
extern void zone_gc_drain(void);

#define ZONE_WSS_UPDATE_PERIOD 10
/*!
 * @function compute_zone_working_set_size
 *
 * @brief
 * Recomputes the working set size for every zone.
 *
 * @discussion
 * This runs about every @c ZONE_WSS_UPDATE_PERIOD seconds (10),
 * computing an exponential moving average with a weight of 75%,
 * so that the history of the last minute is the dominating factor.
 */
extern void compute_zone_working_set_size(void *);
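
/*
 * Sketch (assumed from the 75% weight described above; the exact update
 * lives in zalloc.c): each period, the free-element amplitude folds into
 * the working set size estimate as
 *
 *     wss = (3 * wss + (z_elems_free_max - z_elems_free_min)) / 4;
 *
 * so prior history retains a 75% weight per period.
 */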

/* Debug logging for zone-map-exhaustion jetsams. */
extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);

/* Bootstrap zone module (create zone zone) */
extern void zone_bootstrap(void);

/*!
 * @function zone_foreign_mem_init
 *
 * @brief
 * Steal memory from pmap (prior to initialization of zalloc)
 * for the special vm zones that allow foreign memory and store
 * the range so as to facilitate range checking in zfree.
 *
 * @param size the size to steal (must be a page multiple)
 * @param allow_meta_steal whether allocator metadata should be stolen too
 *        due to a non-natural config.
 */
__startup_func
extern vm_offset_t zone_foreign_mem_init(
    vm_size_t size,
    bool allow_meta_steal);

/*!
 * @function zone_get_foreign_alloc_size
 *
 * @brief
 * Compute the correct size (greater than @c ptoa(min_pages)) that is a multiple
 * of the allocation granule for the zone with the given creation flags and
 * element size.
 */
__startup_func
extern vm_size_t zone_get_foreign_alloc_size(
    const char *name __unused,
    vm_size_t elem_size,
    zone_create_flags_t flags,
    uint16_t min_pages);

/*!
 * @function zone_cram_foreign
 *
 * @brief
 * Cram memory allocated with @c zone_foreign_mem_init() into a zone.
 *
 * @param zone The zone to cram memory into.
 * @param newmem The base address for the memory to cram.
 * @param size The size of the memory to cram into the zone.
 */
__startup_func
extern void zone_cram_foreign(
    zone_t zone,
    vm_offset_t newmem,
    vm_size_t size);

extern bool zone_maps_owned(
    vm_address_t addr,
    vm_size_t size);

extern void zone_map_sizes(
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free);

extern bool zone_map_nearing_exhaustion(void);

#if defined(__LP64__)
#define ZONE_POISON 0xdeadbeefdeadbeef
#else
#define ZONE_POISON 0xdeadbeef
#endif

static inline vm_tag_t
zalloc_flags_get_tag(zalloc_flags_t flags)
{
    return (vm_tag_t)((flags & Z_VM_TAG_MASK) >> Z_VM_TAG_SHIFT);
}
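
/*
 * Example (illustrative; assumes the Z_VM_TAG() encoding helper from
 * <kern/zalloc.h>): a VM tag stored in zalloc flags round-trips through
 * zalloc_flags_get_tag().
 *
 *     zalloc_flags_t flags = Z_WAITOK | Z_VM_TAG(VM_KERN_MEMORY_KALLOC);
 *     assert(zalloc_flags_get_tag(flags) == VM_KERN_MEMORY_KALLOC);
 */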

extern void *zalloc_ext(
    zone_t zone,
    zone_stats_t zstats,
    zalloc_flags_t flags,
    vm_size_t elem_size);

extern void zfree_ext(
    zone_t zone,
    zone_stats_t zstats,
    void *addr,
    vm_size_t elem_size);

extern zone_id_t zone_id_for_native_element(
    void *addr,
    vm_size_t esize);

extern vm_size_t zone_element_size(
    void *addr,
    zone_t *z);

__attribute__((overloadable))
extern bool zone_range_contains(
    const struct zone_map_range *r,
    vm_offset_t addr);

__attribute__((overloadable))
extern bool zone_range_contains(
    const struct zone_map_range *r,
    vm_offset_t addr,
    vm_offset_t size);

extern vm_size_t zone_range_size(
    const struct zone_map_range *r);

/*!
 * @function zone_spans_ro_va
 *
 * @abstract
 * This function is used to check whether the specified address range
 * spans through the read-only zone range.
 *
 * @discussion
 * This only checks for the range specified within ZONE_ADDR_READONLY.
 * The parameters addr_start and addr_end are stripped of PAC bits
 * before the check is made.
 */
extern bool zone_spans_ro_va(
    vm_offset_t addr_start,
    vm_offset_t addr_end);

/*!
 * @function zone_owns
 *
 * @abstract
 * This function is a soft version of zone_require: it checks whether a given
 * pointer belongs to the specified zone. It should not be used outside
 * allocator code.
 *
 * @discussion
 * Note that zone_owns() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone the zone the address needs to belong to.
 * @param addr the element address to check.
 */
extern bool zone_owns(
    zone_t zone,
    void *addr);

/*!
 * @function zone_submap
 *
 * @param zsflags the security flags of a specified zone.
 * @returns the zone (sub)map this zone allocates from.
 */
__pure2
extern vm_map_t zone_submap(
    zone_security_flags_t zsflags);

/*
 * Structure for keeping track of a backtrace, used for leak detection.
 * This is in the .h file because it is used during panic, see kern/debug.c.
 * A non-zero size indicates that the trace is in use.
 */
struct ztrace {
    vm_size_t zt_size; /* How much memory are all the allocations referring to this trace taking up? */
    uint32_t zt_depth; /* depth of stack (0 to MAX_ZTRACE_DEPTH) */
    void *zt_stack[MAX_ZTRACE_DEPTH]; /* series of return addresses from OSBacktrace */
    uint32_t zt_collisions; /* How many times did a different stack land here while it was occupied? */
    uint32_t zt_hit_count; /* for determining effectiveness of hash function */
};

#ifndef VM_TAG_SIZECLASSES
#error "VM_TAG_SIZECLASSES is not defined"
#endif
#if VM_TAG_SIZECLASSES

extern uint16_t zone_index_from_tag_index(
    uint32_t tag_zone_index);

#endif /* VM_TAG_SIZECLASSES */

extern void kalloc_init_maps(
    vm_address_t min_address);

static inline void
zone_lock(zone_t zone)
{
#if KASAN_ZALLOC
    spl_t s = 0;
    if (zone->kasan_fakestacks) {
        s = splsched();
    }
#endif /* KASAN_ZALLOC */
    lck_spin_lock(&zone->z_lock);
#if KASAN_ZALLOC
    zone->z_kasan_spl = s;
#endif /* KASAN_ZALLOC */
}

static inline void
zone_unlock(zone_t zone)
{
#if KASAN_ZALLOC
    spl_t s = zone->z_kasan_spl;
    zone->z_kasan_spl = 0;
#endif /* KASAN_ZALLOC */
    lck_spin_unlock(&zone->z_lock);
#if KASAN_ZALLOC
    if (zone->kasan_fakestacks) {
        splx(s);
    }
#endif /* KASAN_ZALLOC */
}
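
/*
 * Example (illustrative): z_lock is a spinlock, so hold times must stay
 * short. A typical pattern to snapshot mutable counters consistently:
 *
 *     zone_lock(zone);
 *     uint32_t free_count  = zone->z_elems_free;
 *     uint32_t avail_count = zone->z_elems_avail;
 *     zone_unlock(zone);
 */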

#if CONFIG_GZALLOC
void gzalloc_init(vm_size_t);
void gzalloc_zone_init(zone_t);
void gzalloc_empty_free_cache(zone_t);
boolean_t gzalloc_enabled(void);

vm_offset_t gzalloc_alloc(zone_t, zone_stats_t zstats, zalloc_flags_t flags);
void gzalloc_free(zone_t, zone_stats_t zstats, void *);
boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *);
#endif /* CONFIG_GZALLOC */

#define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */

int track_this_zone(const char *zonename, const char *logname);
extern bool panic_include_kalloc_types;
extern zone_t kalloc_type_src_zone;
extern zone_t kalloc_type_dst_zone;

#if DEBUG || DEVELOPMENT
extern vm_size_t zone_element_info(void *addr, vm_tag_t *ptag);
extern bool zalloc_disable_copyio_check;
#else
#define zalloc_disable_copyio_check false
#endif /* DEBUG || DEVELOPMENT */

#pragma GCC visibility pop

__END_DECLS

#endif /* _KERN_ZALLOC_INTERNAL_H_ */