1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97
98 #include <prng/random.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_kern.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
106
107 #include <pexpert/pexpert.h>
108
109 #include <machine/machparam.h>
110 #include <machine/machine_routines.h> /* ml_cpu_get_info */
111
112 #include <os/atomic.h>
113
114 #include <libkern/OSDebug.h>
115 #include <libkern/OSAtomic.h>
116 #include <libkern/section_keywords.h>
117 #include <sys/kdebug.h>
118
119 #include <san/kasan.h>
120 #include <libsa/stdlib.h>
121 #include <sys/errno.h>
122
123 #include <IOKit/IOBSD.h>
124
125 #if DEBUG
126 #define z_debug_assert(expr) assert(expr)
127 #else
128 #define z_debug_assert(expr) (void)(expr)
129 #endif
130
131 /* Returns pid of the task with the largest number of VM map entries. */
132 extern pid_t find_largest_process_vm_map_entries(void);
133
134 /*
135 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
136 * For any other pid we try to kill that process synchronously.
137 */
138 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
139
140 extern zone_t vm_map_entry_zone;
141 extern zone_t vm_object_zone;
142 extern zone_t ipc_service_port_label_zone;
143
144 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
145 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
146
147 #if CONFIG_KERNEL_TBI && KASAN_TBI
148 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
149 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
150 #else /* CONFIG_KERNEL_TBI && KASAN_TBI */
151 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
152 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
153 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
154
155 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
156 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
157 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
158 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
159
160 __enum_closed_decl(zm_len_t, uint16_t, {
161 ZM_CHUNK_FREE = 0x0,
162 /* 1 through 8 are valid lengths */
163 ZM_CHUNK_LEN_MAX = 0x8,
164
165 /* PGZ magical values */
166 ZM_PGZ_FREE = 0x0,
167 ZM_PGZ_ALLOCATED = 0xa, /* [a]llocated */
168 ZM_PGZ_GUARD = 0xb, /* oo[b] */
169 ZM_PGZ_DOUBLE_FREE = 0xd, /* [d]ouble_free */
170
171 /* secondary page markers */
172 ZM_SECONDARY_PAGE = 0xe,
173 ZM_SECONDARY_PCPU_PAGE = 0xf,
174 });
175
176 struct zone_page_metadata {
177 union {
178 struct {
179 /* The index of the zone this metadata page belongs to */
180 zone_id_t zm_index : 11;
181
182 /*
183 * Whether `zm_bitmap` is an inline bitmap
184 * or a packed bitmap reference
185 */
186 uint16_t zm_inline_bitmap : 1;
187
188 /*
189 * Zones allocate in "chunks" of zone_t::z_chunk_pages
190 * consecutive pages, or zpercpu_count() pages if the
191 * zone is percpu.
192 *
193 * The first page of it has its metadata set with:
194 * - 0 if none of the pages are currently wired
195 * - the number of wired pages in the chunk
196 * (not scaled for percpu).
197 *
198 * Other pages in the chunk have their zm_chunk_len set
199 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
200 * depending on whether the zone is percpu or not.
201 * For those, zm_page_index holds the index of that page
202 * in the run.
203 *
204 * Metadata used for PGZ pages can have 3 values:
205 * - ZM_PGZ_FREE: slot is free
206 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
207 * at offset (zm_pgz_orig_addr & PAGE_MASK)
208 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
209 * (will panic).
210 */
211 zm_len_t zm_chunk_len : 4;
212 };
213 uint16_t zm_bits;
214 };
215
216 union {
217 #define ZM_ALLOC_SIZE_LOCK 1u
218 uint16_t zm_alloc_size; /* first page only */
219 uint16_t zm_page_index; /* secondary pages only */
220 uint16_t zm_oob_offs; /* in guard pages */
221 };
222 union {
223 uint32_t zm_bitmap; /* most zones */
224 uint32_t zm_bump; /* permanent zones */
225 };
226
227 union {
228 struct {
229 zone_pva_t zm_page_next;
230 zone_pva_t zm_page_prev;
231 };
232 vm_offset_t zm_pgz_orig_addr;
233 struct zone_page_metadata *zm_pgz_slot_next;
234 };
235 };
236 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
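/*
 * Illustrative example (not from the original source): for a hypothetical
 * zone whose chunks span 3 wired pages, the metadata layout described above
 * would look roughly like:
 *
 *   meta[0]: zm_index = <zone id>, zm_chunk_len = 3,
 *            zm_alloc_size / zm_bitmap tracking the whole chunk
 *   meta[1]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 1
 *   meta[2]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 2
 */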
237
238 __enum_closed_decl(zone_addr_kind_t, uint32_t, {
239 ZONE_ADDR_FOREIGN,
240 ZONE_ADDR_NATIVE,
241 ZONE_ADDR_READONLY
242 });
243 #define ZONE_ADDR_KIND_COUNT 3
244
245 static const char * const zone_map_range_names[] = {
246 [ZONE_ADDR_FOREIGN] = "Foreign",
247 [ZONE_ADDR_NATIVE] = "Native",
248 [ZONE_ADDR_READONLY] = "Readonly",
249 };
250
251 /*!
252 * @typedef zone_element_t
253 *
254 * @brief
255 * Type that represents a "resolved" zone element.
256 *
257 * @description
258 * This type encodes an element pointer as a pair of:
259 * { chunk base, element index }.
260 *
261 * The chunk base is extracted with @c trunc_page()
262 * as it is always page aligned, and occupies the bits above @c PAGE_SHIFT.
263 *
264 * The other bits encode the element index in the chunk rather than its address.
265 */
266 typedef struct zone_element {
267 vm_offset_t ze_value;
268 } zone_element_t;
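/*
 * Illustrative example (hypothetical address, assuming 16K pages):
 * element index 5 of the chunk based at 0xffffff8012340000 is encoded as
 *
 *   ze_value = 0xffffff8012340000 | 5
 *
 * and decoded with trunc_page(ze_value) for the chunk base and
 * (ze_value & PAGE_MASK) for the index, as the helpers further down do.
 */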
269
270 /*!
271 * @typedef zone_magazine_t
272 *
273 * @brief
274 * Magazine of cached allocations.
275 *
276 * @field zm_cur how many elements this magazine holds (unused while loaded).
277 * @field zm_link linkage used by magazine depots.
278 * @field zm_elems an array of @c zc_mag_size() elements.
279 */
280 typedef struct zone_magazine {
281 uint16_t zm_cur;
282 STAILQ_ENTRY(zone_magazine) zm_link;
283 zone_element_t zm_elems[0];
284 } *zone_magazine_t;
285
286 /*!
287 * @typedef zone_cache_t
288 *
289 * @brief
290  * Per-CPU cache of magazines used by the zone caching layer.
291 *
292 * @discussion
293 * Below is a diagram of the caching system. This design is inspired by the
294 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
295 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
296 * zone allocator (itself derived from this seminal work).
297 *
298 * It is divided into 3 layers:
299 * - the per-cpu layer,
300 * - the recirculation depot layer,
301 * - the Zone Allocator.
302 *
303 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
304 * which are stacks of up to @c zc_mag_size() elements.
305 *
306 * <h2>CPU layer</h2>
307 *
308 * The CPU layer (@c zone_cache_t) looks like this:
309 *
310  *  ╭─ a ─ f ─┬───────── zc_depot ──────────╮
311 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
312 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
313 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
314 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
315 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
316 * ╰─────────┴─────────────────────────────╯
317 *
318 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
319 * or free to. Serialization is achieved through disabling preemption, and only
320  * the current CPU can access those allocations. This is represented on the left
321 * hand side of the diagram above.
322 *
323  * The right hand side is the per-cpu depot. It consists of @c zc_depot_cur
324  * full magazines, and is protected by the @c zc_depot_lock for access.
325 * The lock is expected to absolutely never be contended, as only the local CPU
326 * tends to access the local per-cpu depot in regular operation mode.
327 *
328  * However, unlike UMA, our implementation allows the zone GC to reclaim
329  * per-CPU magazines aggressively, which is serialized with the @c zc_depot_lock.
330 *
331 *
332 * <h2>Recirculation Depot</h2>
333 *
334 * The recirculation depot layer is a list similar to the per-cpu depot,
335 * however it is different in two fundamental ways:
336 *
337 * - it is protected by the regular zone lock,
338 * - elements referenced by the magazines in that layer appear free
339 * to the zone layer.
340 *
341 *
342 * <h2>Magazine circulation and sizing</h2>
343 *
344 * The caching system sizes itself dynamically. Operations that allocate/free
345 * a single element call @c zone_lock_nopreempt_check_contention() which records
346 * contention on the lock by doing a trylock and recording its success.
347 *
348 * This information is stored in the @c z_contention_cur field of the zone,
349  * and a windowed moving average is maintained in @c z_contention_wma.
350 * Each time a CPU registers any contention, it will also allow its own per-cpu
351 * cache to grow, incrementing @c zc_depot_max, which is how the per-cpu layer
352 * might grow into using its local depot.
353 *
354  * Note that @c zc_depot_max assumes that the (a) and (f) pre-loaded magazines
355 * on average contain @c zc_mag_size() elements.
356 *
357 * When a per-cpu layer cannot hold more full magazines in its depot,
358 * then it will overflow about 1/3 of its depot into the recirculation depot
359  * (see @c zfree_cached_slow()). Conversely, when a depot is empty, it will
360 * refill its per-cpu depot to about 1/3 of its size from the recirculation
361 * depot (see @c zalloc_cached_slow()).
362 *
363  * Lastly, the zone layer keeps track of the high and low watermarks of how many
364  * elements have been free over a period of time (including being part of the
365 * recirculation depot) in the @c z_elems_free_min and @c z_elems_free_max
366 * fields. A weighted moving average of the amplitude of this is maintained in
367 * the @c z_elems_free_wss which informs the zone GC on how to gently trim
368 * zones without hurting performance.
369 *
370 *
371 * <h2>Security considerations</h2>
372 *
373 * The zone caching layer has been designed to avoid returning elements in
374 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
375 * and @c zfree() free to the (f) magazine, and only swap them when the
376 * requested operation cannot be fulfilled.
377 *
378 * The per-cpu overflow depot or the recirculation depots are similarly used
379 * in FIFO order.
380 *
381 * More importantly, when magazines flow through the recirculation depot,
382 * the elements they contain are marked as "free" in the zone layer bitmaps.
383 * Because allocations out of per-cpu caches verify the bitmaps at allocation
384 * time, this acts as a poor man's double-free quarantine. The magazines
385 * allow to avoid the cost of the bit-scanning involved in the zone-level
386 * @c zalloc_item() codepath.
387 *
388 *
389 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
390 * @field zc_free_cur denormalized number of elements in the (f) magazine
391 * @field zc_alloc_elems a pointer to the array of elements in (a)
392 * @field zc_free_elems a pointer to the array of elements in (f)
393 *
394 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
395 * @field zc_depot a list of @c zc_depot_cur full magazines
396 * @field zc_depot_cur number of magazines in @c zc_depot
397 * @field zc_depot_max the maximum number of elements in @c zc_depot,
398 * protected by the zone lock.
399 */
400 typedef struct zone_cache {
401 uint16_t zc_alloc_cur;
402 uint16_t zc_free_cur;
403 uint16_t zc_depot_cur;
404 uint16_t __zc_padding;
405 zone_element_t *zc_alloc_elems;
406 zone_element_t *zc_free_elems;
407 hw_lock_bit_t zc_depot_lock;
408 uint32_t zc_depot_max;
409 struct zone_depot zc_depot;
410 } *zone_cache_t;
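/*
 * Rough sketch of the per-CPU fast paths described above (hypothetical,
 * simplified pseudo-code; the real paths also handle depots and slow cases):
 *
 *   // zalloc: pop from (a), swapping (a) and (f) only if (a) is empty
 *   if (cache->zc_alloc_cur == 0 && cache->zc_free_cur != 0)
 *           swap((a), (f));
 *   elem = cache->zc_alloc_elems[--cache->zc_alloc_cur];
 *
 *   // zfree: push onto (f), swapping only if (f) is full
 *   cache->zc_free_elems[cache->zc_free_cur++] = elem;
 *
 * Preemption is disabled around these operations, which is what serializes
 * access to the current CPU's cache.
 */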
411
412 #if !__x86_64__
413 static
414 #endif
415 __security_const_late struct {
416 struct zone_map_range zi_map_range[ZONE_ADDR_KIND_COUNT];
417 struct zone_map_range zi_meta_range; /* debugging only */
418 struct zone_map_range zi_bits_range; /* bits buddy allocator */
419 struct zone_map_range zi_pgz_range;
420 struct zone_page_metadata *zi_pgz_meta;
421
422 /*
423 * The metadata lives within the zi_meta_range address range.
424 *
425 * The correct formula to find a metadata index is:
426 * absolute_page_index - page_index(MIN(zi_map_range[*].min_address))
427 *
428 * And then this index is used to dereference zi_meta_range.min_address
429 * as a `struct zone_page_metadata` array.
430 *
431  * To avoid doing that subtraction in the various fast-paths,
432  * zi_meta_base is pre-offset by that minimum page index so that the
433  * computation doesn't have to be redone each time.
434 *
435 * Do note that the array might have a hole punched in the middle,
436 * see zone_metadata_init().
437 */
438 struct zone_page_metadata *zi_meta_base;
439 } zone_info;
440
441 /*
442 * Initial array of metadata for stolen memory.
443 *
444 * The numbers here have to be kept in sync with vm_map_steal_memory()
445 * so that we have reserved enough metadata.
446 *
447 * After zone_init() has run (which happens while the kernel is still single
448 * threaded), the metadata is moved to its final dynamic location, and
449 * this array is unmapped with the rest of __startup_data at lockdown.
450 */
451 #define ZONE_FOREIGN_META_INLINE_COUNT 64
452 __startup_data
453 static struct zone_page_metadata
454 zone_foreign_meta_array_startup[ZONE_FOREIGN_META_INLINE_COUNT];
455 __startup_data
456 static struct zone_map_range zone_early_steal;
457
458 /*
459 * The zone_locks_grp allows for collecting lock statistics.
460  * All locks are associated with this group in zinit().
461 * Look at tools/lockstat for debugging lock contention.
462 */
463 static LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
464 static LCK_MTX_EARLY_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
465
466 /*
467 * The zone metadata lock protects:
468 * - metadata faulting,
469 * - VM submap VA allocations,
470 * - early gap page queue list
471 */
472 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
473 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
474
475 /*
476 * Exclude more than one concurrent garbage collection
477 */
478 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
479 static LCK_MTX_EARLY_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
480 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
481
482 /*
483 * Panic logging metadata
484 */
485 bool panic_include_zprint = false;
486 bool panic_include_kalloc_types = false;
487 zone_t kalloc_type_src_zone = ZONE_NULL;
488 zone_t kalloc_type_dst_zone = ZONE_NULL;
489 mach_memory_info_t *panic_kext_memory_info = NULL;
490 vm_size_t panic_kext_memory_size = 0;
491 vm_offset_t panic_fault_address = 0;
492
493 /*
494 * Protects zone_array, num_zones, num_zones_in_use, and
495 * zone_destroyed_bitmap
496 */
497 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
498 static zone_id_t num_zones_in_use;
499 zone_id_t _Atomic num_zones;
500 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
501
502 /*
503 * Initial globals for zone stats until we can allocate the real ones.
504 * Those get migrated inside the per-CPU ones during zone_init() and
505 * this array is unmapped with the rest of __startup_data at lockdown.
506 */
507
508 /* zone to allocate zone_magazine structs from */
509 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
510 /*
511  * Zone caching stays off until pid 1 is made, that is,
512  * until compute_zone_working_set_size() runs for the first time.
513 *
514 * -1 represents the "never enabled yet" value.
515 */
516 static int8_t zone_caching_disabled = -1;
517
518 __startup_data
519 static struct zone_cache zone_cache_startup[MAX_ZONES];
520 __startup_data
521 static struct zone_stats zone_stats_startup[MAX_ZONES];
522 struct zone zone_array[MAX_ZONES];
523 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
524 [0 ... MAX_ZONES - 1] = {
525 .z_allows_foreign = false,
526 .z_kheap_id = KHEAP_ID_NONE,
527 .z_noencrypt = false,
528 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
529 .z_kalloc_type = false,
530 .z_va_sequester = ZSECURITY_CONFIG(SEQUESTER),
531 },
532 };
533 SECURITY_READ_ONLY_LATE(uint16_t) zone_ro_elem_size[MAX_ZONES];
534
535 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
536 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
537
538 /* Used to keep track of destroyed slots in the zone_array */
539 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
540
541 /* number of zone mapped pages used by all zones */
542 static size_t _Atomic zone_pages_wired;
543 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
544
545 #if CONFIG_PROB_GZALLOC
546 static int32_t _Atomic zone_guard_pages;
547 #endif /* CONFIG_PROB_GZALLOC */
548
549 #define ZSECURITY_DEFAULT ( \
550 ZSECURITY_OPTIONS_KERNEL_DATA_MAP | \
551 0)
552 TUNABLE(zone_security_options_t, zsecurity_options, "zs", ZSECURITY_DEFAULT);
553
554 /* Time (in ms) after which we panic for zone exhaustions */
555 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
556
557 #if VM_TAG_SIZECLASSES
558 /* enable tags for zones that ask for it */
559 static TUNABLE(bool, zone_tagging_on, "-zt", false);
560 #endif /* VM_TAG_SIZECLASSES */
561
562 #if DEBUG || DEVELOPMENT
563 static int zalloc_simulate_vm_pressure;
564 TUNABLE(bool, zalloc_disable_copyio_check, "-no-copyio-zalloc-check", false);
565 #endif /* DEBUG || DEVELOPMENT */
566
567 /*
568 * Zone caching tunables
569 *
570 * zc_mag_size():
571 * size of magazines, larger to reduce contention at the expense of memory
572 *
573 * zc_auto_enable_threshold
574 * number of contentions per second after which zone caching engages
575 * automatically.
576 *
577 * 0 to disable.
578 *
579 * zc_grow_threshold
580  *   number of contentions per second after which the per-cpu depot layer
581 * grows at each newly observed contention without restriction.
582 *
583 * 0 to disable.
584 *
585 * zc_recirc_denom
586 * denominator of the fraction of per-cpu depot to migrate to/from
587 * the recirculation depot layer at a time. Default 3 (1/3).
588 *
589 * zc_defrag_ratio
590 * percentage of the working set to recirc size below which
591 * the zone is defragmented. Default is 66%.
592 *
593 * zc_defrag_threshold
594 * how much memory needs to be free before the auto-defrag is even considered.
595 * Default is 512k.
596 *
597 * zc_autogc_ratio
598 * percentage of the working set to min-free size below which
599 * the zone is auto-GCed to the working set size. Default is 20%.
600 *
601 * zc_autogc_threshold
602 * how much memory needs to be free before the auto-gc is even considered.
603 * Default is 4M.
604 *
605 * zc_free_batch_size
606 * The size of batches of frees/reclaim that can be done keeping
607 * the zone lock held (and preemption disabled).
608 */
609 static TUNABLE(uint16_t, zc_magazine_size, "zc_mag_size", 8);
610 static TUNABLE(uint32_t, zc_auto_threshold, "zc_auto_enable_threshold", 20);
611 static TUNABLE(uint32_t, zc_grow_threshold, "zc_grow_threshold", 8);
612 static TUNABLE(uint32_t, zc_recirc_denom, "zc_recirc_denom", 3);
613 static TUNABLE(uint32_t, zc_defrag_ratio, "zc_defrag_ratio", 66);
614 static TUNABLE(uint32_t, zc_defrag_threshold, "zc_defrag_threshold", 512u << 10);
615 static TUNABLE(uint32_t, zc_autogc_ratio, "zc_autogc_ratio", 20);
616 static TUNABLE(uint32_t, zc_autogc_threshold, "zc_autogc_threshold", 4u << 20);
617 static TUNABLE(uint32_t, zc_free_batch_size, "zc_free_batch_size", 256);
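/*
 * Example (hypothetical values): the tunables above are boot-args, so e.g.
 *
 *   zc_mag_size=16 zc_grow_threshold=4 zc_recirc_denom=4
 *
 * would use 16-element magazines, let per-CPU depots grow after 4 observed
 * contentions per second, and migrate 1/4 of a depot at a time to/from the
 * recirculation layer.
 */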
618
619 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
620 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
621 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_submaps[2];
622 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
623 [Z_SUBMAP_IDX_VM] = "VM",
624 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
625 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
626 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
627 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
628 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
629 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
630 #else
631 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
632 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
633 [Z_SUBMAP_IDX_DATA] = "DATA",
634 };
635
636 #if __x86_64__
637 #define ZONE_ENTROPY_CNT 8
638 #else
639 #define ZONE_ENTROPY_CNT 2
640 #endif
641 static struct zone_bool_gen {
642 struct bool_gen zbg_bg;
643 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
644 } zone_bool_gen[MAX_CPUS];
645
646 #if CONFIG_PROB_GZALLOC
647 /*
648 * Probabilistic gzalloc
649 * =====================
650 *
651 *
652 * Probabilistic guard zalloc samples allocations and will protect them by
653 * double-mapping the page holding them and returning the secondary virtual
654 * address to its callers.
655 *
656 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
657 * are set.
658 *
659 *
660 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
661 * most zalloc/kalloc features:
662 * - zone_require is functional
663 * - zone caching or zone tagging is compatible
664  * - non-blocking allocations work (they always return NULL with gzalloc).
665 *
666 * PGZ limitations:
667 * - VA sequestering isn't respected, as the slots (which are in limited
668  *   quantity) will be reused for any type; however, the PGZ quarantine
669 * somewhat mitigates the impact.
670 * - zones with elements larger than a page cannot be protected.
671 *
672 *
673 * Tunables:
674 * --------
675 *
676 * pgz=1:
677 * Turn on probabilistic guard malloc for all zones
678 *
679 * (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
680 *
681 * pgz_sample_rate=0 to 2^31
682 * average sample rate between two guarded allocations.
683 * 0 means every allocation.
684 *
685 * The default is a random number between 1000 and 10,000
686 *
687 * pgz_slots
688 * how many allocations to protect.
689 *
690 * Each costs:
691 * - a PTE in the pmap (when allocated)
692  *   - 2 zone page metas (every other page is a "guard" one, 32B total)
693  *   - 64 bytes per backtrace.
694 * On LP64 this is <16K per 100 slots.
695 *
696 * The default is ~200 slots per G of physical ram (32k / G)
697 *
698 * TODO:
699 * - try harder to allocate elements at the "end" to catch OOB more reliably.
700 *
701 * pgz_quarantine
702 * how many slots should be free at any given time.
703 *
704 * PGZ will round robin through free slots to be reused, but free slots are
705 * important to detect use-after-free by acting as a quarantine.
706 *
707  *   By default, PGZ will keep 33% of the slots around at all times.
708 *
709 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
710 * Specific zones for which to enable probabilistic guard malloc.
711 * There must be no numbering gap (names after the gap will be ignored).
712 */
713 #if DEBUG || DEVELOPMENT
714 static TUNABLE(bool, pgz_all, "pgz", true);
715 #else
716 static TUNABLE(bool, pgz_all, "pgz", false);
717 #endif
718 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
719 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
720 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
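/*
 * Example (hypothetical zone names): per the tunables documented above,
 *
 *   pgz1=<some.zone> pgz2=<another.zone> pgz_sample_rate=1000
 *
 * would enable PGZ only for those two zones, sampling on average one out of
 * every 1000 allocations (there must be no gap in the pgzN numbering).
 */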
721 #endif /* CONFIG_PROB_GZALLOC */
722
723 static zone_t zone_find_largest(uint64_t *zone_size);
724
725 #endif /* !ZALLOC_TEST */
726 #pragma mark Zone metadata
727 #if !ZALLOC_TEST
728
729 static inline bool
730 zone_has_index(zone_t z, zone_id_t zid)
731 {
732 return zone_array + zid == z;
733 }
734
735 static zone_element_t
736 zone_element_encode(vm_offset_t base, vm_offset_t eidx)
737 {
738 return (zone_element_t){ .ze_value = base | eidx };
739 }
740
741 static vm_offset_t
742 zone_element_base(zone_element_t ze)
743 {
744 return trunc_page(ze.ze_value);
745 }
746
747 static vm_offset_t
748 zone_element_idx(zone_element_t ze)
749 {
750 return ze.ze_value & PAGE_MASK;
751 }
752
753 static vm_offset_t
754 zone_element_addr(zone_t z, zone_element_t ze, vm_offset_t esize)
755 {
756 vm_offset_t offs = zone_oob_offs(z);
757
758 return offs + zone_element_base(ze) + esize * zone_element_idx(ze);
759 }
760
761 __abortlike
762 void
763 zone_invalid_panic(zone_t zone)
764 {
765 panic("zone %p isn't in the zone_array", zone);
766 }
767
768 __abortlike
769 static void
770 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
771 const char *kind)
772 {
773 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
774 kind, meta, zone_heap_name(zone), zone->z_name);
775 }
776
777 __abortlike
778 static void
779 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
780 {
781 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
782 (void *)addr, zone_heap_name(zone), zone->z_name);
783 }
784
785 __abortlike
786 static void
787 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
788 struct zone_page_metadata *meta)
789 {
790 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
791 zone_id_t zidx;
792 zone_t src_zone;
793
794 if (zsflags.z_kalloc_type) {
795 panic_include_kalloc_types = true;
796 kalloc_type_dst_zone = zone;
797 }
798
799 zidx = meta->zm_index;
800 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
801 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
802 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
803 zidx);
804 }
805
806 src_zone = &zone_array[zidx];
807 src_zsflags = zone_security_array[zidx];
808 if (src_zsflags.z_kalloc_type) {
809 panic_include_kalloc_types = true;
810 kalloc_type_src_zone = src_zone;
811 }
812
813 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
814 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
815 zone_heap_name(src_zone), src_zone->z_name, zidx);
816 }
817
818 __abortlike
819 static void
820 zone_page_metadata_native_queue_corruption(zone_t zone, zone_pva_t *queue)
821 {
822 panic("foreign metadata index %d enqueued in native head %p from zone %s%s",
823 queue->packed_address, queue, zone_heap_name(zone),
824 zone->z_name);
825 }
826
827 __abortlike
828 static void
829 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
830 {
831 panic("metadata list corruption through element %p detected in zone %s%s",
832 meta, zone_heap_name(zone), zone->z_name);
833 }
834
835 __abortlike __unused
836 static void
837 zone_invalid_foreign_addr_panic(zone_t zone, vm_offset_t addr)
838 {
839 panic("addr %p being freed to foreign zone %s%s not from foreign range",
840 (void *)addr, zone_heap_name(zone), zone->z_name);
841 }
842
843 __abortlike
844 static void
845 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
846 const char *kind)
847 {
848 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
849 zone_heap_name(zone), zone->z_name, meta);
850 }
851
852 __abortlike
853 static void
854 zone_meta_double_free_panic(zone_t zone, zone_element_t ze, const char *caller)
855 {
856 panic("%s: double free of %p to zone %s%s", caller,
857 (void *)zone_element_addr(zone, ze, zone_elem_size(zone)),
858 zone_heap_name(zone), zone->z_name);
859 }
860
861 __abortlike
862 static void
863 zone_accounting_panic(zone_t zone, const char *kind)
864 {
865 panic("accounting mismatch (%s) for zone %s%s", kind,
866 zone_heap_name(zone), zone->z_name);
867 }
868
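/*
 * Decrements a zone counter field, panicking if the subtraction would
 * wrap below zero, and evaluates to the new value.
 */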
869 #define zone_counter_sub(z, stat, value) ({ \
870 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
871 zone_accounting_panic(z, #stat " wrap-around"); \
872 } \
873 (z)->stat; \
874 })
875
876 static inline void
877 zone_elems_free_add(zone_t z, uint32_t count)
878 {
879 uint32_t n = (z->z_elems_free += count);
880 if (z->z_elems_free_max < n) {
881 z->z_elems_free_max = n;
882 }
883 }
884
885 static inline void
886 zone_elems_free_sub(zone_t z, uint32_t count)
887 {
888 uint32_t n = zone_counter_sub(z, z_elems_free, count);
889
890 if (z->z_elems_free_min > n) {
891 z->z_elems_free_min = n;
892 }
893 }
894
895 static inline uint16_t
896 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
897 vm_offset_t esize)
898 {
899 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
900 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
901 }
902 return m->zm_alloc_size;
903 }
904
905 static inline uint16_t
906 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
907 vm_offset_t esize)
908 {
909 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
910 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
911 }
912 return m->zm_alloc_size;
913 }
914
915 __abortlike
916 static void
917 zone_nofail_panic(zone_t zone)
918 {
919 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
920 zone_heap_name(zone), zone->z_name);
921 }
922
923 #if __arm64__
924 // <rdar://problem/48304934> arm64 doesn't use ldp when I'd expect it to
925 #define zone_range_load(r, rmin, rmax) \
926 asm("ldp %[rmin], %[rmax], [%[range]]" \
927 : [rmin] "=r"(rmin), [rmax] "=r"(rmax) \
928 : [range] "r"(r), "m"((r)->min_address), "m"((r)->max_address))
929 #else
930 #define zone_range_load(r, rmin, rmax) \
931 ({ rmin = (r)->min_address; rmax = (r)->max_address; })
932 #endif
933
934 __attribute__((overloadable))
935 __header_always_inline bool
936 zone_range_contains(const struct zone_map_range *r, vm_offset_t addr)
937 {
938 vm_offset_t rmin, rmax;
939
940 #if CONFIG_KERNEL_TBI
941 addr = VM_KERNEL_TBI_FILL(addr);
942 #endif /* CONFIG_KERNEL_TBI */
943
944 /*
945 * The `&` is not a typo: we really expect the check to pass,
946 * so encourage the compiler to eagerly load and test without branches
947 */
948 zone_range_load(r, rmin, rmax);
949 return (addr >= rmin) & (addr < rmax);
950 }
951
952 __attribute__((overloadable))
953 __header_always_inline bool
954 zone_range_contains(const struct zone_map_range *r, vm_offset_t addr, vm_offset_t size)
955 {
956 vm_offset_t rmin, rmax;
957
958 #if CONFIG_KERNEL_TBI
959 addr = VM_KERNEL_TBI_FILL(addr);
960 #endif /* CONFIG_KERNEL_TBI */
961
962 /*
963 * The `&` is not a typo: we really expect the check to pass,
964 * so encourage the compiler to eagerly load and test without branches
965 */
966 zone_range_load(r, rmin, rmax);
967 return (addr >= rmin) & (addr + size >= rmin) & (addr + size <= rmax);
968 }
969
970 __header_always_inline bool
971 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
972 {
973 vm_offset_t rmin, rmax;
974
975 #if CONFIG_KERNEL_TBI
976 addr_start = VM_KERNEL_STRIP_UPTR(addr_start);
977 addr_end = VM_KERNEL_STRIP_UPTR(addr_end);
978 #endif /* CONFIG_KERNEL_TBI */
979
980 zone_range_load(&zone_info.zi_map_range[ZONE_ADDR_READONLY], rmin, rmax);
981
982 /*
983 * Either the start and the end are leftward of the read-only range, or they
984 * are both completely rightward. If neither, then they span over the range.
985 */
986
987 if ((addr_start < rmin) && (addr_end < rmin)) {
988 /* Leftward */
989 return false;
990 } else if ((addr_start > rmax) && (addr_end > rmax)) {
991 /* Rightward */
992 return false;
993 }
994
995 return true;
996 }
997
998 __header_always_inline vm_size_t
999 zone_range_size(const struct zone_map_range *r)
1000 {
1001 vm_offset_t rmin, rmax;
1002
1003 zone_range_load(r, rmin, rmax);
1004 return rmax - rmin;
1005 }
1006
1007 #define from_zone_map(addr, size, kind) \
1008 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
1009 zone_range_contains(&zone_info.zi_map_range[kind], (vm_offset_t)(addr)), \
1010 zone_range_contains(&zone_info.zi_map_range[kind], \
1011 (vm_offset_t)(addr), size))
1012
1013 #define zone_native_size() \
1014 zone_range_size(&zone_info.zi_map_range[ZONE_ADDR_NATIVE])
1015
1016 #define zone_foreign_size() \
1017 zone_range_size(&zone_info.zi_map_range[ZONE_ADDR_FOREIGN])
1018
1019 #define zone_readonly_size() \
1020 zone_range_size(&zone_info.zi_map_range[ZONE_ADDR_READONLY])
1021
1022 __header_always_inline bool
1023 zone_pva_is_null(zone_pva_t page)
1024 {
1025 return page.packed_address == 0;
1026 }
1027
1028 __header_always_inline bool
1029 zone_pva_is_queue(zone_pva_t page)
1030 {
1031 // actual kernel pages have the top bit set
1032 return (int32_t)page.packed_address > 0;
1033 }
1034
1035 __header_always_inline bool
1036 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
1037 {
1038 return pva1.packed_address == pva2.packed_address;
1039 }
1040
1041 __header_always_inline zone_pva_t *
1042 zone_pageq_base(void)
1043 {
1044 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
1045
1046 /*
1047 * `-1` so that if the first __DATA variable is a page queue,
1048  * it gets a non-zero index
1049 */
1050 return data_seg_start - 1;
1051 }
1052
1053 __header_always_inline void
1054 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
1055 struct zone_page_metadata *meta)
1056 {
1057 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
1058
1059 if (!zone_pva_is_equal(*queue_head, oldv)) {
1060 zone_page_metadata_list_corruption(z, meta);
1061 }
1062 *queue_head = meta->zm_page_next;
1063 }
1064
1065 __header_always_inline zone_pva_t
1066 zone_queue_encode(zone_pva_t *headp)
1067 {
1068 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
1069 }
1070
1071 __header_always_inline zone_pva_t
1072 zone_pva_from_addr(vm_address_t addr)
1073 {
1074 // cannot use atop() because we want to maintain the sign bit
1075 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
1076 }
1077
1078 __header_always_inline zone_pva_t
1079 zone_pva_from_element(zone_element_t ze)
1080 {
1081 return zone_pva_from_addr(ze.ze_value);
1082 }
1083
1084 __header_always_inline vm_address_t
1085 zone_pva_to_addr(zone_pva_t page)
1086 {
1087 // cause sign extension so that we end up with the right address
1088 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
1089 }
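/*
 * Illustrative note (hypothetical address): a kernel pointer such as
 * 0xffffff8012340000 packs to a 32-bit value with the top bit set
 * (hence negative as int32_t), so zone_pva_is_queue() treats it as a real
 * page, while the small positive values produced by zone_queue_encode()
 * denote queue heads relative to zone_pageq_base().
 */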
1090
1091 __header_always_inline struct zone_page_metadata *
1092 zone_pva_to_meta(zone_pva_t page)
1093 {
1094 return &zone_info.zi_meta_base[page.packed_address];
1095 }
1096
1097 __header_always_inline zone_pva_t
1098 zone_pva_from_meta(struct zone_page_metadata *meta)
1099 {
1100 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
1101 }
1102
1103 __header_always_inline struct zone_page_metadata *
1104 zone_meta_from_addr(vm_offset_t addr)
1105 {
1106 return zone_pva_to_meta(zone_pva_from_addr(addr));
1107 }
1108
1109 __header_always_inline struct zone_page_metadata *
1110 zone_meta_from_element(zone_element_t ze)
1111 {
1112 return zone_pva_to_meta(zone_pva_from_element(ze));
1113 }
1114
1115 __header_always_inline zone_id_t
1116 zone_index_from_ptr(const void *ptr)
1117 {
1118 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
1119 }
1120
1121 __header_always_inline vm_offset_t
1122 zone_meta_to_addr(struct zone_page_metadata *meta)
1123 {
1124 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
1125 }
1126
1127 __header_always_inline void
1128 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
1129 struct zone_page_metadata *meta)
1130 {
1131 zone_pva_t head = *headp;
1132 zone_pva_t queue_pva = zone_queue_encode(headp);
1133 struct zone_page_metadata *tmp;
1134
1135 meta->zm_page_next = head;
1136 if (!zone_pva_is_null(head)) {
1137 tmp = zone_pva_to_meta(head);
1138 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
1139 zone_page_metadata_list_corruption(z, meta);
1140 }
1141 tmp->zm_page_prev = zone_pva_from_meta(meta);
1142 }
1143 meta->zm_page_prev = queue_pva;
1144 *headp = zone_pva_from_meta(meta);
1145 }
1146
1147 __header_always_inline struct zone_page_metadata *
1148 zone_meta_queue_pop_native(zone_t z, zone_pva_t *headp, vm_offset_t *page_addrp)
1149 {
1150 zone_pva_t head = *headp;
1151 struct zone_page_metadata *meta = zone_pva_to_meta(head);
1152 vm_offset_t page_addr = zone_pva_to_addr(head);
1153 struct zone_page_metadata *tmp;
1154
1155 if (!from_zone_map(page_addr, 1, ZONE_ADDR_NATIVE)) {
1156 zone_page_metadata_native_queue_corruption(z, headp);
1157 }
1158
1159 if (!zone_pva_is_null(meta->zm_page_next)) {
1160 tmp = zone_pva_to_meta(meta->zm_page_next);
1161 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1162 zone_page_metadata_list_corruption(z, meta);
1163 }
1164 tmp->zm_page_prev = meta->zm_page_prev;
1165 }
1166 *headp = meta->zm_page_next;
1167
1168 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1169 *page_addrp = page_addr;
1170
1171 if (!zone_has_index(z, meta->zm_index)) {
1172 zone_page_metadata_index_confusion_panic(z,
1173 zone_meta_to_addr(meta), meta);
1174 }
1175 return meta;
1176 }
1177
1178 __header_always_inline void
1179 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1180 {
1181 zone_pva_t meta_pva = zone_pva_from_meta(meta);
1182 struct zone_page_metadata *tmp;
1183
1184 if (!zone_pva_is_null(meta->zm_page_next)) {
1185 tmp = zone_pva_to_meta(meta->zm_page_next);
1186 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1187 zone_page_metadata_list_corruption(z, meta);
1188 }
1189 tmp->zm_page_prev = meta->zm_page_prev;
1190 }
1191 if (zone_pva_is_queue(meta->zm_page_prev)) {
1192 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1193 } else {
1194 tmp = zone_pva_to_meta(meta->zm_page_prev);
1195 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1196 zone_page_metadata_list_corruption(z, meta);
1197 }
1198 tmp->zm_page_next = meta->zm_page_next;
1199 }
1200
1201 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1202 }
1203
1204 __header_always_inline void
1205 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1206 struct zone_page_metadata *meta)
1207 {
1208 zone_meta_remqueue(z, meta);
1209 zone_meta_queue_push(z, headp, meta);
1210 }
1211
1212 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1213 static inline void
1214 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1215 {
1216 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1217
1218 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1219 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1220 zone_meta_requeue(z, &z->z_pageq_partial, m);
1221 zone_counter_sub(z, z_wired_empty, len);
1222 }
1223 }
1224
1225 /* allows a given metadata to reach the z_pageq_empty queue again */
1226 static inline void
1227 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1228 {
1229 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1230
1231 assert(new_size % sizeof(vm_offset_t) == 0);
1232 if (new_size == 0) {
1233 zone_meta_requeue(z, &z->z_pageq_empty, m);
1234 z->z_wired_empty += len;
1235 }
1236 }
1237
1238 /*
1239 * Routine to populate a page backing metadata in the zone_metadata_region.
1240 * Must be called without the zone lock held as it might potentially block.
1241 */
1242 static void
1243 zone_meta_populate(vm_offset_t base, vm_size_t size)
1244 {
1245 struct zone_page_metadata *from = zone_meta_from_addr(base);
1246 struct zone_page_metadata *to = from + atop(size);
1247 vm_offset_t page_addr = trunc_page(from);
1248 vm_map_t map;
1249
1250 if (page_addr < zone_meta_submaps[0]->max_offset) {
1251 map = zone_meta_submaps[0];
1252 } else {
1253 map = zone_meta_submaps[1];
1254 }
1255
1256 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1257 #if !KASAN_ZALLOC
1258 /*
1259 * This can race with another thread doing a populate on the same metadata
1260 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1261 * fault in the shadow when we first access the metadata page. Avoid this
1262 * by always synchronizing on the zone_metadata_region lock with KASan.
1263 */
1264 if (pmap_find_phys(kernel_pmap, page_addr)) {
1265 continue;
1266 }
1267 #endif
1268
1269 for (;;) {
1270 kern_return_t ret = KERN_SUCCESS;
1271
1272 /*
1273 * All updates to the zone_metadata_region are done
1274 * under the zone_metadata_region_lck
1275 */
1276 zone_meta_lock();
1277 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1278 ret = kernel_memory_populate(map, page_addr,
1279 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1280 VM_KERN_MEMORY_OSFMK);
1281 }
1282 zone_meta_unlock();
1283
1284 if (ret == KERN_SUCCESS) {
1285 break;
1286 }
1287
1288 /*
1289 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1290 * to bad system deadlocks, so if the allocation failed,
1291 * we need to do the VM_PAGE_WAIT() outside of the lock.
1292 */
1293 VM_PAGE_WAIT();
1294 }
1295 }
1296 }
1297
1298 __abortlike
1299 static void
1300 zone_invalid_element_panic(zone_t zone, vm_offset_t addr, bool cache)
1301 {
1302 struct zone_page_metadata *meta;
1303 vm_offset_t page, esize = zone_elem_size(zone);
1304 const char *from_cache = "";
1305
1306 if (cache) {
1307 zone_element_t ze = { .ze_value = addr };
1308 addr = zone_element_addr(zone, ze, esize);
1309 from_cache = " (from cache)";
1310
1311 if (zone_element_idx(ze) >= zone->z_chunk_elems) {
1312 panic("eidx %d for addr %p being freed to zone %s%s, is larger "
1313 "than number fo element in chunk (%d)", (int)zone_element_idx(ze),
1314 (void *)addr, zone_heap_name(zone), zone->z_name,
1315 zone->z_chunk_elems);
1316 }
1317 }
1318
1319 if (!from_zone_map(addr, esize, ZONE_ADDR_NATIVE) &&
1320 !from_zone_map(addr, esize, ZONE_ADDR_FOREIGN)) {
1321 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1322 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1323 }
1324 page = trunc_page(addr);
1325 meta = zone_meta_from_addr(addr);
1326
1327 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1328 panic("metadata %p corresponding to addr %p being freed to "
1329 "zone %s%s%s, is marked as secondary per cpu page",
1330 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1331 from_cache);
1332 }
1333 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1334 panic("metadata %p corresponding to addr %p being freed to "
1335 "zone %s%s%s, has chunk len greater than max",
1336 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1337 from_cache);
1338 }
1339
1340 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1341 page -= ptoa(meta->zm_page_index);
1342 }
1343
1344 if ((addr - page - zone_oob_offs(zone)) % esize) {
1345 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1346 "zone element size", (void *)addr, zone_heap_name(zone),
1347 zone->z_name, from_cache);
1348 }
1349
1350 zone_invalid_element_addr_panic(zone, addr);
1351 }
1352
1353 __header_always_inline
1354 struct zone_page_metadata *
1355 zone_element_validate(zone_t zone, zone_element_t ze)
1356 {
1357 struct zone_page_metadata *meta;
1358 vm_offset_t page = zone_element_base(ze);
1359 vm_offset_t addr;
1360
1361 if (!from_zone_map(page, 1, ZONE_ADDR_NATIVE) &&
1362 !from_zone_map(page, 1, ZONE_ADDR_FOREIGN)) {
1363 zone_invalid_element_panic(zone, ze.ze_value, true);
1364 }
1365 meta = zone_meta_from_addr(page);
1366
1367 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1368 zone_invalid_element_panic(zone, ze.ze_value, true);
1369 }
1370 if (zone_element_idx(ze) >= zone->z_chunk_elems) {
1371 zone_invalid_element_panic(zone, ze.ze_value, true);
1372 }
1373
1374 if (!zone_has_index(zone, meta->zm_index)) {
1375 addr = zone_element_addr(zone, ze, zone_elem_size(zone));
1376 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1377 }
1378
1379 return meta;
1380 }
1381
1382 __attribute__((always_inline))
1383 static struct zone_page_metadata *
1384 zone_element_resolve(zone_t zone, vm_offset_t addr, vm_offset_t esize,
1385 zone_element_t *ze)
1386 {
1387 struct zone_page_metadata *meta;
1388 vm_offset_t offs = zone_oob_offs(zone);
1389 vm_offset_t page, eidx;
1390
1391 if (!from_zone_map(addr, esize, ZONE_ADDR_NATIVE) &&
1392 !from_zone_map(addr, esize, ZONE_ADDR_FOREIGN)) {
1393 zone_invalid_element_panic(zone, addr, false);
1394 }
1395 page = trunc_page(addr);
1396 meta = zone_meta_from_addr(addr);
1397
1398 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1399 zone_invalid_element_panic(zone, addr, false);
1400 }
1401 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1402 page -= ptoa(meta->zm_page_index);
1403 meta -= meta->zm_page_index;
1404 }
1405
1406 eidx = (addr - page - offs) / esize;
1407 if ((addr - page - offs) % esize) {
1408 zone_invalid_element_panic(zone, addr, false);
1409 }
1410
1411 if (!zone_has_index(zone, meta->zm_index)) {
1412 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1413 }
1414
1415 *ze = zone_element_encode(page, eidx);
1416 return meta;
1417 }
1418
1419 #if CONFIG_PROB_GZALLOC
1420 void *
1421 zone_element_pgz_oob_adjust(void *elem, vm_size_t esize, vm_size_t req_size)
1422 {
1423 struct zone_page_metadata *meta;
1424 vm_offset_t addr = (vm_offset_t)elem;
1425 vm_offset_t offs_max = PAGE_SIZE - (1u << KALLOC_LOG2_MINALIGN);
1426 vm_offset_t offs;
1427
1428 if (esize - req_size >= offs_max) {
1429 /*
1430 		 * The code in zone_element_size() is trying to find
1431 * the guard page for the chunk using <addr + element_size>
1432 * and this needs to hit the guard page which holds
1433 * the metadata for the offset we applied.
1434 *
1435 * For this to work, when "esize" is larger than PAGE_SIZE,
1436 * it is important that the offset is sub-page, else we will
1437 * jump over the guard page and not find an offset.
1438 *
1439 		 * TODO: find a scheme to make this actually work for arbitrary
1440 * offsets without costing us having to fully resolve
1441 * the item.
1442 */
1443 offs = offs_max;
1444 } else if (req_size <= (1u << KALLOC_LOG2_MINALIGN)) {
1445 offs = esize - (1u << KALLOC_LOG2_MINALIGN);
1446 } else {
1447 offs = esize - roundup(req_size, 1u << KALLOC_LOG2_MINALIGN);
1448 }
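	/*
	 * Worked example (illustrative, assuming a 16-byte minimum kalloc
	 * alignment): with esize = 64 and req_size = 40,
	 * offs = 64 - roundup(40, 16) = 16, so the returned pointer is pushed
	 * 16 bytes to the right, moving the end of the requested bytes as close
	 * as the alignment allows to the guard page that follows the element.
	 */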
1449
1450 meta = zone_meta_from_addr(addr + esize);
1451 meta->zm_oob_offs = (uint16_t)offs;
1452 return (void *)(addr + offs);
1453 }
1454 #endif /* CONFIG_PROB_GZALLOC */
1455
1456 /*
1457 * Routine to get the size of a zone allocated address.
1458  * If the address doesn't belong to the zone maps, returns 0.
1459 */
1460 vm_size_t
1461 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1462 {
1463 vm_address_t addr = (vm_address_t)elem;
1464 struct zone_page_metadata *meta, *next;
1465 vm_size_t esize, offs;
1466 zone_t zone;
1467
1468 if (from_zone_map(addr, sizeof(void *), ZONE_ADDR_NATIVE) ||
1469 from_zone_map(addr, sizeof(void *), ZONE_ADDR_FOREIGN)) {
1470 meta = zone_meta_from_addr(addr);
1471 zone = &zone_array[meta->zm_index];
1472 esize = zone_elem_size_safe(zone);
1473 next = zone_meta_from_addr(addr + esize + 1);
1474 offs = 0;
1475
1476 #if CONFIG_PROB_GZALLOC
1477 /*
1478 * If the element is crossing a page and the metadata
1479 * for the next page is a guard, it very likely means
1480 * that it was allocated by kalloc_ext() and
1481 * zone_element_pgz_oob_adjust() was used.
1482 *
1483 * Adjust the element back with the info stashed
1484 * in that guard page metadata.
1485 *
1486 * Note: if clear_oob isn't set, then `elem` might
1487 * be a pointer inside the element, which makes
1488 * dereferencing next->zm_chunk_len unsafe
1489 * for large elements. This makes copyio checks less potent.
1490 */
1491 if (oob_offs && zone->z_pgz_use_guards &&
1492 (clear_oob || esize <= PAGE_SIZE) &&
1493 next->zm_chunk_len == ZM_PGZ_GUARD) {
1494 offs = next->zm_oob_offs;
1495 if (clear_oob) {
1496 next->zm_oob_offs = 0;
1497 }
1498 }
1499 #else
1500 (void)clear_oob;
1501 #endif /* CONFIG_PROB_GZALLOC */
1502 if (oob_offs) {
1503 *oob_offs = offs;
1504 }
1505 if (z) {
1506 *z = zone;
1507 }
1508 return esize;
1509 }
1510
1511 if (oob_offs) {
1512 *oob_offs = 0;
1513 }
1514 #if CONFIG_GZALLOC
1515 if (__improbable(gzalloc_enabled())) {
1516 vm_size_t gzsize;
1517 if (gzalloc_element_size(elem, z, &gzsize)) {
1518 return gzsize;
1519 }
1520 }
1521 #endif /* CONFIG_GZALLOC */
1522
1523 return 0;
1524 }
1525
1526 zone_id_t
1527 zone_id_for_native_element(void *addr, vm_size_t esize)
1528 {
1529 zone_id_t zid = ZONE_ID_INVALID;
1530 if (from_zone_map(addr, esize, ZONE_ADDR_NATIVE)) {
1531 zid = zone_index_from_ptr(addr);
1532 __builtin_assume(zid != ZONE_ID_INVALID);
1533 }
1534 return zid;
1535 }
1536
1537 /* This function just formats the reason for the panics by redoing the checks */
1538 __abortlike
1539 static void
1540 zone_require_panic(zone_t zone, void *addr)
1541 {
1542 uint32_t zindex;
1543 zone_t other;
1544
1545 if (!from_zone_map(addr, zone_elem_size(zone), ZONE_ADDR_NATIVE)) {
1546 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1547 }
1548
1549 zindex = zone_index_from_ptr(addr);
1550 other = &zone_array[zindex];
1551 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1552 panic("zone_require failed: invalid zone index %d "
1553 "(addr: %p, expected: %s%s)", zindex,
1554 addr, zone_heap_name(zone), zone->z_name);
1555 } else {
1556 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1557 "(addr: %p, expected: %s%s)",
1558 zindex, zone_heap_name(other), other->z_name,
1559 addr, zone_heap_name(zone), zone->z_name);
1560 }
1561 }
1562
1563 __abortlike
1564 static void
1565 zone_id_require_panic(zone_id_t zid, void *addr)
1566 {
1567 zone_require_panic(&zone_array[zid], addr);
1568 }
1569
1570 /*
1571 * Routines to panic if a pointer is not mapped to an expected zone.
1572 * This can be used as a means of pinning an object to the zone it is expected
1573 * to be a part of. Causes a panic if the address does not belong to any
1574 * specified zone, does not belong to any zone, has been freed and therefore
1575 * unmapped from the zone, or the pointer contains an uninitialized value that
1576 * does not belong to any zone.
1577 *
1578 * Note that this can only work with collectable zones without foreign pages.
1579 */
1580 void
1581 zone_require(zone_t zone, void *addr)
1582 {
1583 vm_size_t esize = zone_elem_size(zone);
1584
1585 if (__probable(from_zone_map(addr, esize, ZONE_ADDR_NATIVE))) {
1586 if (zone_has_index(zone, zone_index_from_ptr(addr))) {
1587 return;
1588 }
1589 #if CONFIG_GZALLOC
1590 } else if (__probable(zone->z_gzalloc_tracked)) {
1591 return;
1592 #endif
1593 }
1594 zone_require_panic(zone, addr);
1595 }
1596
1597 void
1598 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1599 {
1600 if (__probable(from_zone_map(addr, esize, ZONE_ADDR_NATIVE))) {
1601 if (zid == zone_index_from_ptr(addr)) {
1602 return;
1603 }
1604 #if CONFIG_GZALLOC
1605 } else if (__probable(zone_array[zid].z_gzalloc_tracked)) {
1606 return;
1607 #endif
1608 }
1609 zone_id_require_panic(zid, addr);
1610 }
1611
1612 void
1613 zone_id_require_allow_foreign(zone_id_t zid, vm_size_t esize, void *addr)
1614 {
1615 if (__probable(from_zone_map(addr, esize, ZONE_ADDR_NATIVE) ||
1616 from_zone_map(addr, esize, ZONE_ADDR_FOREIGN))) {
1617 if (zid == zone_index_from_ptr(addr)) {
1618 return;
1619 }
1620 #if CONFIG_GZALLOC
1621 } else if (__probable(zone_array[zid].z_gzalloc_tracked)) {
1622 return;
1623 #endif
1624 }
1625 zone_id_require_panic(zid, addr);
1626 }
1627
1628 bool
zone_owns(zone_t zone, void *addr)
1630 {
1631 vm_size_t esize = zone_elem_size_safe(zone);
1632
1633 if (__probable(from_zone_map(addr, esize, ZONE_ADDR_NATIVE))) {
1634 return zone_has_index(zone, zone_index_from_ptr(addr));
1635 #if CONFIG_GZALLOC
1636 } else if (__probable(zone->z_gzalloc_tracked)) {
1637 return true;
1638 #endif
1639 }
1640 return false;
1641 }
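
/*
 * Illustrative usage sketch for the pinning routines above. The zone and
 * element names below (widget_zone, widget_zone_id, struct widget) are
 * hypothetical stand-ins, not identifiers defined in this file:
 *
 *	widget_t w = (widget_t)zalloc(widget_zone);
 *	...
 *	zone_require(widget_zone, w);	// panics if w was freed or forged
 *	zone_id_require(widget_zone_id, sizeof(struct widget), w);
 */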
1642
1643 __startup_func
1644 static struct zone_map_range
zone_kmem_suballoc(
1646 vm_offset_t addr,
1647 vm_size_t size,
1648 int flags,
1649 vm_tag_t tag,
1650 vm_map_t *new_map)
1651 {
1652 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1653 struct zone_map_range r;
1654 kern_return_t kr;
1655
1656 vmk_flags.vmkf_permanent = TRUE;
1657 kr = kmem_suballoc(kernel_map, &addr, size,
1658 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1659 flags, vmk_flags, tag, new_map);
1660 if (kr != KERN_SUCCESS) {
1661 panic("kmem_suballoc(%p:%p) failed: %d",
1662 (void *)addr, (void *)(addr + size), kr);
1663 }
1664
1665 r.min_address = addr;
1666 r.max_address = addr + size;
1667 return r;
1668 }
1669
1670 #endif /* !ZALLOC_TEST */
1671 #pragma mark Zone bits allocator
1672
1673 /*!
1674 * @defgroup Zone Bitmap allocator
1675 * @{
1676 *
1677 * @brief
1678 * Functions implementing the zone bitmap allocator
1679 *
1680 * @discussion
1681 * The zone allocator maintains which elements are allocated or free in bitmaps.
1682 *
* When the number of elements per page is smaller than 32, the bitmap is
* stored inline on the @c zone_page_metadata structure (@c zm_inline_bitmap
* is set, and @c zm_bitmap used for storage).
1686 *
1687 * When the number of elements is larger, then a bitmap is allocated from
* a buddy allocator (implemented under the @c zba_* namespace). Pointers
1689 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1690 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1691 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1692 * cannot be larger than 1024 bytes (8192 bits).
1693 *
* This buddy allocator can actually accommodate allocations as large
1695 * as 8k on 16k systems and 2k on 4k systems.
1696 *
1697 * Note: @c zba_* functions are implementation details not meant to be used
1698 * outside of the allocation of the allocator itself. Interfaces to the rest of
1699 * the zone allocator are documented and not @c zba_* prefixed.
1700 */
1701
1702 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1703 #define ZBA_GRANULE sizeof(uint64_t)
1704 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1705 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1706 #define ZBA_MAX_ALLOC_ORDER 7
1707 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1708 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1709 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
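
/*
 * Worked example of the packed bitmap reference described above: with
 * ZBA_GRANULE == 8 bytes, a reference bref == 0x2a1 decodes as
 * order = (bref & 0x7) == 1, i.e. a 2-granule (128-bit) bitmap, located
 * ZBA_GRANULE * (bref >> 3) == 8 * 0x54 bytes past zba_slot_base()
 * (see zba_bits_ref_order() / zba_bits_ref_ptr() below).
 */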
1710
1711 struct zone_bits_chain {
1712 uint32_t zbc_next;
1713 uint32_t zbc_prev;
1714 } __attribute__((aligned(ZBA_GRANULE)));
1715
1716 struct zone_bits_head {
1717 uint32_t zbh_next;
1718 uint32_t zbh_unused;
1719 } __attribute__((aligned(ZBA_GRANULE)));
1720
1721 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1722 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1723
1724 struct zone_bits_allocator_meta {
1725 uint32_t zbam_chunks;
1726 uint32_t __zbam_padding;
1727 struct zone_bits_head zbam_lists[ZBA_MAX_ORDER + 1];
1728 };
1729
1730 struct zone_bits_allocator_header {
1731 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1732 };
1733
1734 #if ZALLOC_TEST
1735 static struct zalloc_bits_allocator_test_setup {
1736 vm_offset_t zbats_base;
1737 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1738 } zba_test_info;
1739
1740 static struct zone_bits_allocator_header *
zba_base_header(void)
1742 {
1743 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1744 }
1745
1746 static void
zba_populate(uint32_t n)
1748 {
1749 vm_address_t base = zba_test_info.zbats_base;
1750 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1751 }
1752 #else
1753 __startup_data
1754 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE]
1755 __attribute__((aligned(ZBA_CHUNK_SIZE)));
1756 static LCK_MTX_EARLY_DECLARE(zba_mtx, &zone_locks_grp);
1757
1758 static struct zone_bits_allocator_header *
zba_base_header(void)
1760 {
1761 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1762 }
1763
1764 static void
zba_lock(void)
1766 {
1767 lck_mtx_lock(&zba_mtx);
1768 }
1769
1770 static void
zba_unlock(void)
1772 {
1773 lck_mtx_unlock(&zba_mtx);
1774 }
1775
1776 static void
zba_populate(uint32_t n)
1778 {
1779 vm_size_t size = ZBA_CHUNK_SIZE;
1780 vm_address_t addr;
1781
1782 addr = zone_info.zi_bits_range.min_address + n * size;
1783 if (addr >= zone_info.zi_bits_range.max_address) {
1784 uint64_t zsize = 0;
1785 zone_t z = zone_find_largest(&zsize);
1786 panic("zba_populate: out of bitmap space, "
1787 "likely due to memory leak in zone [%s%s] "
1788 "(%luM, %d elements allocated)",
1789 zone_heap_name(z), zone_name(z),
1790 (unsigned long)zsize >> 20,
1791 zone_count_allocated(z));
1792 }
1793
1794 for (;;) {
1795 kern_return_t kr = KERN_SUCCESS;
1796
1797 if (0 == pmap_find_phys(kernel_pmap, addr)) {
1798 kr = kernel_memory_populate(zone_meta_submaps[0], addr, size,
1799 KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1800 VM_KERN_MEMORY_OSFMK);
1801 }
1802
1803 if (kr == KERN_SUCCESS) {
1804 return;
1805 }
1806
1807 zba_unlock();
1808 VM_PAGE_WAIT();
1809 zba_lock();
1810 }
1811 }
1812 #endif
1813
1814 __pure2
1815 static struct zone_bits_allocator_meta *
zba_meta(void)
1817 {
1818 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1819 }
1820
1821 __pure2
1822 static uint64_t *
zba_slot_base(void)
1824 {
1825 return (uint64_t *)zba_base_header();
1826 }
1827
1828 __pure2
1829 static vm_address_t
zba_page_addr(uint32_t n)
1831 {
1832 return (vm_address_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1833 }
1834
1835 __pure2
1836 static struct zone_bits_head *
zba_head(uint32_t order)
1838 {
1839 return &zba_meta()->zbam_lists[order];
1840 }
1841
1842 __pure2
1843 static uint32_t
zba_head_index(uint32_t order)
1845 {
1846 uint32_t hdr_size = sizeof(struct zone_bits_allocator_header) +
1847 offsetof(struct zone_bits_allocator_meta, zbam_lists);
1848 return (hdr_size / ZBA_GRANULE) + order;
1849 }
1850
1851 __pure2
1852 static struct zone_bits_chain *
zba_chain_for_index(uint32_t index)
1854 {
1855 return (struct zone_bits_chain *)(zba_slot_base() + index);
1856 }
1857
1858 __pure2
1859 static uint32_t
zba_chain_to_index(const struct zone_bits_chain *zbc)
1861 {
1862 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1863 }
1864
1865 __abortlike
1866 static void
zba_head_corruption_panic(uint32_t order)
1868 {
1869 panic("zone bits allocator head[%d:%p] is corrupt", order,
1870 zba_head(order));
1871 }
1872
1873 __abortlike
1874 static void
zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1876 {
1877 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1878 }
1879
1880 static void
zba_push_block(struct zone_bits_chain *zbc, uint32_t order)
1882 {
1883 struct zone_bits_head *hd = zba_head(order);
1884 uint32_t hd_index = zba_head_index(order);
1885 uint32_t index = zba_chain_to_index(zbc);
1886 struct zone_bits_chain *next;
1887
1888 if (hd->zbh_next) {
1889 next = zba_chain_for_index(hd->zbh_next);
1890 if (next->zbc_prev != hd_index) {
1891 zba_head_corruption_panic(order);
1892 }
1893 next->zbc_prev = index;
1894 }
1895 zbc->zbc_next = hd->zbh_next;
1896 zbc->zbc_prev = hd_index;
1897 hd->zbh_next = index;
1898 }
1899
1900 static void
zba_remove_block(struct zone_bits_chain *zbc)
1902 {
1903 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1904 uint32_t index = zba_chain_to_index(zbc);
1905
1906 if (prev->zbc_next != index) {
1907 zba_chain_corruption_panic(prev, zbc);
1908 }
1909 if ((prev->zbc_next = zbc->zbc_next)) {
1910 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1911 if (next->zbc_prev != index) {
1912 zba_chain_corruption_panic(zbc, next);
1913 }
1914 next->zbc_prev = zbc->zbc_prev;
1915 }
1916 }
1917
1918 static vm_address_t
zba_try_pop_block(uint32_t order)
1920 {
1921 struct zone_bits_head *hd = zba_head(order);
1922 struct zone_bits_chain *zbc;
1923
1924 if (hd->zbh_next == 0) {
1925 return 0;
1926 }
1927
1928 zbc = zba_chain_for_index(hd->zbh_next);
1929 zba_remove_block(zbc);
1930 return (vm_address_t)zbc;
1931 }
1932
1933 static struct zone_bits_allocator_header *
zba_header(vm_offset_t addr)
1935 {
1936 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1937 return (struct zone_bits_allocator_header *)addr;
1938 }
1939
1940 static size_t
zba_node_parent(size_t node)
1942 {
1943 return (node - 1) / 2;
1944 }
1945
1946 static size_t
zba_node_left_child(size_t node)
1948 {
1949 return node * 2 + 1;
1950 }
1951
1952 static size_t
zba_node_buddy(size_t node)
1954 {
1955 return ((node - 1) ^ 1) + 1;
1956 }
1957
1958 static size_t
zba_node(vm_offset_t addr, uint32_t order)
1960 {
1961 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1962 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1963 }
1964
1965 static struct zone_bits_chain *
zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1967 {
1968 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1969 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1970 }
1971
1972 static void
zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1974 {
1975 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1976 }
1977
1978 static bool
zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1980 {
1981 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1982 }
1983
1984 static void
zba_free(vm_offset_t addr, uint32_t order)
1986 {
1987 struct zone_bits_allocator_header *zbah = zba_header(addr);
1988 struct zone_bits_chain *zbc;
1989 size_t node = zba_node(addr, order);
1990
1991 while (node) {
1992 size_t parent = zba_node_parent(node);
1993
1994 zba_node_flip_split(zbah, parent);
1995 if (zba_node_is_split(zbah, parent)) {
1996 break;
1997 }
1998
1999 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
2000 zba_remove_block(zbc);
2001 order++;
2002 node = parent;
2003 }
2004
2005 zba_push_block(zba_chain_for_node(zbah, node, order), order);
2006 }
2007
2008 static vm_size_t
zba_chunk_header_size(uint32_t n)
2010 {
2011 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
2012 if (n == 0) {
2013 hdr_size += sizeof(struct zone_bits_allocator_meta);
2014 }
2015 return hdr_size;
2016 }
2017
2018 static void
zba_init_chunk(uint32_t n)
2020 {
2021 vm_size_t hdr_size = zba_chunk_header_size(n);
2022 vm_offset_t page = zba_page_addr(n);
2023 struct zone_bits_allocator_header *zbah = zba_header(page);
2024 vm_size_t size = ZBA_CHUNK_SIZE;
2025 size_t node;
2026
2027 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
2028 if (size < hdr_size + (ZBA_GRANULE << o)) {
2029 continue;
2030 }
2031 size -= ZBA_GRANULE << o;
2032 node = zba_node(page + size, o);
2033 zba_node_flip_split(zbah, zba_node_parent(node));
2034 zba_push_block(zba_chain_for_node(zbah, node, o), o);
2035 }
2036
2037 zba_meta()->zbam_chunks = n + 1;
2038 }
2039
2040 __attribute__((noinline))
2041 static void
zba_grow(void)
2043 {
2044 uint32_t chunk = zba_meta()->zbam_chunks;
2045
2046 zba_populate(chunk);
2047 if (zba_meta()->zbam_chunks == chunk) {
2048 zba_init_chunk(chunk);
2049 }
2050 }
2051
2052 static vm_offset_t
zba_alloc(uint32_t order)
2054 {
2055 struct zone_bits_allocator_header *zbah;
2056 uint32_t cur = order;
2057 vm_address_t addr;
2058 size_t node;
2059
2060 while ((addr = zba_try_pop_block(cur)) == 0) {
2061 if (cur++ >= ZBA_MAX_ORDER) {
2062 zba_grow();
2063 cur = order;
2064 }
2065 }
2066
2067 zbah = zba_header(addr);
2068 node = zba_node(addr, cur);
2069 zba_node_flip_split(zbah, zba_node_parent(node));
2070 while (cur > order) {
2071 cur--;
2072 zba_node_flip_split(zbah, node);
2073 node = zba_node_left_child(node);
2074 zba_push_block(zba_chain_for_node(zbah, node + 1, cur), cur);
2075 }
2076
2077 return addr;
2078 }
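
/*
 * Illustrative walk-through of zba_alloc(): a request for order 2 when the
 * smallest free block is order 4 pops that order-4 block, then splits it
 * twice on the way back down: the order-3 and order-2 buddies are pushed
 * back on their respective freelists and the remaining order-2 block is
 * returned.
 */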
2079
2080 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
2081 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
2082 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
2083 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
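
/*
 * For example, zba_map_bit(uint32_t, 5) == 0x20,
 * zba_map_mask_lt(uint32_t, 5) == 0x1f (bits 0..4 set) and
 * zba_map_mask_ge(uint32_t, 5) == 0xffffffe0 (bits 5..31 set).
 */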
2084
2085 #if !ZALLOC_TEST
2086 static uint32_t
zba_bits_ref_order(uint32_t bref)
2088 {
2089 return bref & 0x7;
2090 }
2091
2092 static bitmap_t *
zba_bits_ref_ptr(uint32_t bref)
2094 {
2095 return zba_slot_base() + (bref >> 3);
2096 }
2097
2098 static vm_offset_t
zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
2100 zalloc_flags_t flags, vm_offset_t eidx)
2101 {
2102 size_t i = eidx / 32;
2103 uint32_t map;
2104
2105 if (eidx % 32) {
2106 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2107 if (map) {
2108 eidx = __builtin_ctz(map);
2109 meta[i].zm_bitmap ^= 1u << eidx;
2110 return i * 32 + eidx;
2111 }
2112 i++;
2113 }
2114
2115 uint32_t chunk_len = meta->zm_chunk_len;
2116 if (flags & Z_PCPU) {
2117 chunk_len = zpercpu_count();
2118 }
2119 for (int j = 0; j < chunk_len; j++, i++) {
2120 if (i >= chunk_len) {
2121 i = 0;
2122 }
2123 if (__probable(map = meta[i].zm_bitmap)) {
2124 meta[i].zm_bitmap &= map - 1;
2125 return i * 32 + __builtin_ctz(map);
2126 }
2127 }
2128
2129 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2130 }
2131
2132 static vm_offset_t
zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2134 vm_offset_t eidx)
2135 {
2136 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2137 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2138 size_t i = eidx / 64;
2139 uint64_t map;
2140
2141 if (eidx % 64) {
2142 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2143 if (map) {
2144 eidx = __builtin_ctzll(map);
2145 bits[i] ^= 1ull << eidx;
2146 return i * 64 + eidx;
2147 }
2148 i++;
2149 }
2150
2151 for (int j = 0; j < bits_size; i++, j++) {
2152 if (i >= bits_size) {
2153 i = 0;
2154 }
2155 if (__probable(map = bits[i])) {
2156 bits[i] &= map - 1;
2157 return i * 64 + __builtin_ctzll(map);
2158 }
2159 }
2160
2161 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2162 }
2163
2164 /*!
2165 * @function zone_meta_find_and_clear_bit
2166 *
2167 * @brief
2168 * The core of the bitmap allocator: find a bit set in the bitmaps.
2169 *
2170 * @discussion
2171 * This method will round robin through available allocations,
* with a per-core memory of the last allocated element index.
2173 *
2174 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2175 * double-free bugs way too practical.
2176 *
2177 * @param zone The zone we're allocating from.
2178 * @param meta The main metadata for the chunk being allocated from.
2179 * @param flags the alloc flags (for @c Z_PCPU).
2180 */
2181 static vm_offset_t
zone_meta_find_and_clear_bit(zone_t zone, struct zone_page_metadata *meta,
2183 zalloc_flags_t flags)
2184 {
2185 zone_stats_t zs = zpercpu_get(zone->z_stats);
2186 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2187
2188 if (meta->zm_inline_bitmap) {
2189 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2190 } else {
2191 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2192 }
2193 zs->zs_alloc_rr = (uint16_t)eidx;
2194 return eidx;
2195 }
2196
2197 /*!
2198 * @function zone_meta_bits_init
2199 *
2200 * @brief
2201 * Initializes the zm_bitmap field(s) for a newly assigned chunk.
2202 *
2203 * @param meta The main metadata for the initialized chunk.
2204 * @param count The number of elements the chunk can hold
2205 * (which might be partial for partially populated chunks).
* @param nbits The maximum number of bits that will be used.
2207 */
2208 static void
zone_meta_bits_init(struct zone_page_metadata *meta,
2210 uint32_t count, uint32_t nbits)
2211 {
2212 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2213 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2214
2215 if (meta->zm_inline_bitmap) {
2216 /*
2217 * We're called with the metadata zm_bitmap fields already
2218 * zeroed out.
2219 */
2220 for (size_t i = 0; 32 * i < count; i++) {
2221 if (32 * i + 32 <= count) {
2222 meta[i].zm_bitmap = ~0u;
2223 } else {
2224 meta[i].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2225 }
2226 }
2227 } else {
2228 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2229 uint64_t *bits;
2230
2231 assert(order <= ZBA_MAX_ALLOC_ORDER);
2232 assert(count <= ZBA_GRANULE_BITS << order);
2233
2234 zba_lock();
2235 bits = (uint64_t *)zba_alloc(order);
2236 zba_unlock();
2237
2238 for (size_t i = 0; i < 1u << order; i++) {
2239 if (64 * i + 64 <= count) {
2240 bits[i] = ~0ull;
2241 } else if (64 * i < count) {
2242 bits[i] = zba_map_mask_lt(uint64_t, count);
2243 } else {
2244 bits[i] = 0ull;
2245 }
2246 }
2247
2248 meta->zm_bitmap = (uint32_t)((vm_offset_t)bits -
2249 (vm_offset_t)zba_slot_base()) + order;
2250 }
2251 }
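
/*
 * Worked example: for a chunk with count == nbits == 100 and an out-of-line
 * bitmap, order = flsll(99 / 64) == 1, so a 2-granule (128-bit) bitmap is
 * allocated; bits[0] is set to ~0ull (elements 0..63 free) and bits[1] to
 * zba_map_mask_lt(uint64_t, 100) == (1ull << 36) - 1 (elements 64..99 free,
 * the remaining bits permanently 0).
 */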
2252
2253 /*!
2254 * @function zone_meta_bits_merge
2255 *
2256 * @brief
2257 * Adds elements <code>[start, end)</code> to a chunk being extended.
2258 *
2259 * @param meta The main metadata for the extended chunk.
2260 * @param start The index of the first element to add to the chunk.
2261 * @param end The index of the last (exclusive) element to add.
2262 */
2263 static void
zone_meta_bits_merge(struct zone_page_metadata *meta,
2265 uint32_t start, uint32_t end)
2266 {
2267 if (meta->zm_inline_bitmap) {
2268 while (start < end) {
2269 size_t s_i = start / 32;
2270 size_t s_e = end / 32;
2271
2272 if (s_i == s_e) {
2273 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2274 zba_map_mask_ge(uint32_t, start);
2275 break;
2276 }
2277
2278 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2279 start += 32 - (start % 32);
2280 }
2281 } else {
2282 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2283
2284 while (start < end) {
2285 size_t s_i = start / 64;
2286 size_t s_e = end / 64;
2287
2288 if (s_i == s_e) {
2289 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2290 zba_map_mask_ge(uint64_t, start);
2291 break;
2292 }
2293 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2294 start += 64 - (start % 64);
2295 }
2296 }
2297 }
2298
2299 /*!
2300 * @function zone_bits_free
2301 *
2302 * @brief
2303 * Frees a bitmap to the zone bitmap allocator.
2304 *
2305 * @param bref
2306 * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2307 */
2308 static void
zone_bits_free(uint32_t bref)
2310 {
2311 zba_lock();
2312 zba_free((vm_offset_t)zba_bits_ref_ptr(bref), zba_bits_ref_order(bref));
2313 zba_unlock();
2314 }
2315
2316 /*!
2317 * @function zone_meta_is_free
2318 *
2319 * @brief
2320 * Returns whether a given element appears free.
2321 */
2322 static bool
zone_meta_is_free(struct zone_page_metadata *meta, zone_element_t ze)
2324 {
2325 vm_offset_t eidx = zone_element_idx(ze);
2326 if (meta->zm_inline_bitmap) {
2327 uint32_t bit = zba_map_bit(uint32_t, eidx);
2328 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2329 } else {
2330 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2331 uint64_t bit = zba_map_bit(uint64_t, eidx);
2332 return bits[zba_map_index(uint64_t, eidx)] & bit;
2333 }
2334 }
2335
2336 /*!
2337 * @function zone_meta_mark_free
2338 *
2339 * @brief
2340 * Marks an element as free and returns whether it was marked as used.
2341 */
2342 static bool
zone_meta_mark_free(struct zone_page_metadata *meta, zone_element_t ze)
2344 {
2345 vm_offset_t eidx = zone_element_idx(ze);
2346
2347 if (meta->zm_inline_bitmap) {
2348 uint32_t bit = zba_map_bit(uint32_t, eidx);
2349 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2350 return false;
2351 }
2352 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2353 } else {
2354 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2355 uint64_t bit = zba_map_bit(uint64_t, eidx);
2356 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2357 return false;
2358 }
2359 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2360 }
2361 return true;
2362 }
2363
2364 /*!
2365 * @function zone_meta_mark_used
2366 *
2367 * @brief
2368 * Marks an element as used and returns whether it was marked as free
2369 */
2370 static bool
zone_meta_mark_used(struct zone_page_metadata *meta, zone_element_t ze)
2372 {
2373 vm_offset_t eidx = zone_element_idx(ze);
2374
2375 if (meta->zm_inline_bitmap) {
2376 uint32_t bit = zba_map_bit(uint32_t, eidx);
2377 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2378 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2379 return true;
2380 }
2381 } else {
2382 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2383 uint64_t bit = zba_map_bit(uint64_t, eidx);
2384 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2385 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2386 return true;
2387 }
2388 }
2389 return false;
2390 }
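
/*
 * The two helpers above are what gives the free path its double-free check:
 * a simplified sketch of how a caller can use them (the diagnostic routine
 * named below is a hypothetical stand-in, not a function defined here) is
 *
 *	if (!zone_meta_mark_free(meta, ze)) {
 *		// bit was already set: the element was freed twice
 *		zone_invalid_element_panic(zone, addr);
 *	}
 */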
2391
2392 #endif /* !ZALLOC_TEST */
2393 /*! @} */
2394 #pragma mark ZTAGS
2395 #if !ZALLOC_TEST
2396 #if VM_TAG_SIZECLASSES
2397 /*
2398 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
2399 * zones only.
2400 *
2401 * There are 3 kinds of tags that can be used:
2402 * - pre-registered VM_KERN_MEMORY_*
2403 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
2404 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
2405 *
2406 * The VM tracks the statistics in lazily allocated structures.
2407 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
2408 *
2409 * If for some reason the requested tag cannot be accounted for,
2410 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
2411 *
2412 * Each allocated element also remembers the tag it was assigned,
2413 * in its ztSlot() which lets zalloc/zfree update statistics correctly.
2414 */
2415
2416 // for zones with tagging enabled:
2417
2418 // calculate a pointer to the tag base entry,
// holding either a uint32_t with the first tag offset for a page in the zone map,
2420 // or two uint16_t tags if the page can only hold one or two elements
2421
2422 #define ZTAGBASE(zone, element) \
2423 (&((uint32_t *)zone_tagbase_min)[atop((element) - \
2424 zone_info.zi_map_range[ZONE_ADDR_NATIVE].min_address)])
2425
2426 static vm_offset_t zone_tagbase_min;
2427 static vm_offset_t zone_tagbase_max;
2428 static vm_offset_t zone_tagbase_map_size;
2429 static vm_map_t zone_tagbase_map;
2430
2431 static vm_offset_t zone_tags_min;
2432 static vm_offset_t zone_tags_max;
2433 static vm_offset_t zone_tags_map_size;
2434 static vm_map_t zone_tags_map;
2435
2436 // simple heap allocator for allocating the tags for new memory
2437
2438 static LCK_MTX_EARLY_DECLARE(ztLock, &zone_locks_grp); /* heap lock */
2439
2440 /*
2441 * Array of all sizeclasses used by kalloc variants so that we can
2442 * have accounting per size class for each kalloc callsite
2443 */
2444 uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
2445
2446 enum{
2447 ztFreeIndexCount = 8,
2448 ztFreeIndexMax = (ztFreeIndexCount - 1),
2449 ztTagsPerBlock = 4
2450 };
2451
2452 struct ztBlock {
2453 #if __LITTLE_ENDIAN__
2454 uint64_t free:1,
2455 next:21,
2456 prev:21,
2457 size:21;
2458 #else
2459 // ztBlock needs free bit least significant
2460 #error !__LITTLE_ENDIAN__
2461 #endif
2462 };
2463 typedef struct ztBlock ztBlock;
2464
2465 static ztBlock * ztBlocks;
2466 static uint32_t ztBlocksCount;
2467 static uint32_t ztBlocksFree;
2468
2469 zalloc_flags_t
__zone_flags_mix_tag(zone_t z, zalloc_flags_t flags, vm_allocation_site_t *site)
2471 {
2472 if (__improbable(z->z_uses_tags)) {
2473 vm_tag_t tag = zalloc_flags_get_tag(flags);
2474 if (flags & Z_VM_TAG_BT_BIT) {
2475 tag = vm_tag_bt() ?: tag;
2476 }
2477 if (tag == VM_KERN_MEMORY_NONE && site) {
2478 tag = vm_tag_alloc(site);
2479 }
2480 if (tag != VM_KERN_MEMORY_NONE) {
2481 tag = vm_tag_will_update_zone(tag, z->z_tags_sizeclass,
2482 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
2483 }
2484 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
2485 }
2486
2487 return flags;
2488 }
2489
2490 static uint32_t
ztLog2up(uint32_t size)
2492 {
2493 if (1 == size) {
2494 size = 0;
2495 } else {
2496 size = 32 - __builtin_clz(size - 1);
2497 }
2498 return size;
2499 }
2500
2501 // pointer to the tag for an element
2502 static vm_tag_t *
ztSlot(zone_t zone, vm_offset_t element)
2504 {
2505 vm_tag_t *result;
2506 if (zone->z_tags_inline) {
2507 result = (vm_tag_t *)ZTAGBASE(zone, element);
2508 if ((PAGE_MASK & element) >= zone_elem_size(zone)) {
2509 result++;
2510 }
2511 } else {
2512 result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE(zone, element)[0] +
2513 (element & PAGE_MASK) / zone_elem_size(zone)];
2514 }
2515 return result;
2516 }
2517
2518 static uint32_t
ztLog2down(uint32_t size)
2520 {
2521 size = 31 - __builtin_clz(size);
2522 return size;
2523 }
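
/*
 * For example, ztLog2up(1) == 0, ztLog2up(5) == 3 and ztLog2down(5) == 2;
 * ztAlloc() and ztFree() use these to pick a free-queue index for a given
 * block size, clamped to ztFreeIndexMax.
 */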
2524
2525 static void
ztFault(vm_map_t map, const void * address, size_t size, uint32_t flags)
2527 {
2528 vm_map_offset_t addr = (vm_map_offset_t) address;
2529 vm_map_offset_t page, end;
2530
2531 page = trunc_page(addr);
2532 end = round_page(addr + size);
2533
2534 for (; page < end; page += page_size) {
2535 if (!pmap_find_phys(kernel_pmap, page)) {
2536 kern_return_t __unused
2537 ret = kernel_memory_populate(map, page, PAGE_SIZE,
2538 KMA_KOBJECT | flags, VM_KERN_MEMORY_DIAG);
2539 assert(ret == KERN_SUCCESS);
2540 }
2541 }
2542 }
2543
2544 static boolean_t
ztPresent(const void * address, size_t size)
2546 {
2547 vm_map_offset_t addr = (vm_map_offset_t) address;
2548 vm_map_offset_t page, end;
2549 boolean_t result;
2550
2551 page = trunc_page(addr);
2552 end = round_page(addr + size);
2553 for (result = TRUE; (page < end); page += page_size) {
2554 result = pmap_find_phys(kernel_pmap, page);
2555 if (!result) {
2556 break;
2557 }
2558 }
2559 return result;
2560 }
2561
2562
2563 void __unused
2564 ztDump(boolean_t sanity);
2565 void __unused
ztDump(boolean_t sanity)
2567 {
2568 uint32_t q, cq, p;
2569
2570 for (q = 0; q <= ztFreeIndexMax; q++) {
2571 p = q;
2572 do{
2573 if (sanity) {
2574 cq = ztLog2down(ztBlocks[p].size);
2575 if (cq > ztFreeIndexMax) {
2576 cq = ztFreeIndexMax;
2577 }
2578 if (!ztBlocks[p].free
2579 || ((p != q) && (q != cq))
2580 || (ztBlocks[ztBlocks[p].next].prev != p)
2581 || (ztBlocks[ztBlocks[p].prev].next != p)) {
2582 kprintf("zterror at %d", p);
2583 ztDump(FALSE);
2584 kprintf("zterror at %d", p);
2585 assert(FALSE);
2586 }
2587 continue;
2588 }
2589 kprintf("zt[%03d]%c %d, %d, %d\n",
2590 p, ztBlocks[p].free ? 'F' : 'A',
2591 ztBlocks[p].next, ztBlocks[p].prev,
2592 ztBlocks[p].size);
2593 p = ztBlocks[p].next;
2594 if (p == q) {
2595 break;
2596 }
2597 }while (p != q);
2598 if (!sanity) {
2599 printf("\n");
2600 }
2601 }
2602 if (!sanity) {
2603 printf("-----------------------\n");
2604 }
2605 }
2606
2607
2608
2609 #define ZTBDEQ(idx) \
2610 ztBlocks[ztBlocks[(idx)].prev].next = ztBlocks[(idx)].next; \
2611 ztBlocks[ztBlocks[(idx)].next].prev = ztBlocks[(idx)].prev;
2612
2613 static void
ztFree(zone_t zone __unused, uint32_t index, uint32_t count)
2615 {
2616 uint32_t q, w, p, size, merge;
2617
2618 assert(count);
2619 ztBlocksFree += count;
2620
// merge with the following block (at index + count)
2622 merge = (index + count);
2623 if ((merge < ztBlocksCount)
2624 && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
2625 && ztBlocks[merge].free) {
2626 ZTBDEQ(merge);
2627 count += ztBlocks[merge].size;
2628 }
2629
// merge with the preceding block (ending at index - 1)
2631 merge = (index - 1);
2632 if ((merge > ztFreeIndexMax)
2633 && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
2634 && ztBlocks[merge].free) {
2635 size = ztBlocks[merge].size;
2636 count += size;
2637 index -= size;
2638 ZTBDEQ(index);
2639 }
2640
2641 q = ztLog2down(count);
2642 if (q > ztFreeIndexMax) {
2643 q = ztFreeIndexMax;
2644 }
2645 w = q;
2646 // queue in order of size
2647 while (TRUE) {
2648 p = ztBlocks[w].next;
2649 if (p == q) {
2650 break;
2651 }
2652 if (ztBlocks[p].size >= count) {
2653 break;
2654 }
2655 w = p;
2656 }
2657 ztBlocks[p].prev = index;
2658 ztBlocks[w].next = index;
2659
2660 // fault in first
2661 ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
2662
2663 // mark first & last with free flag and size
2664 ztBlocks[index].free = TRUE;
2665 ztBlocks[index].size = count;
2666 ztBlocks[index].prev = w;
2667 ztBlocks[index].next = p;
2668 if (count > 1) {
2669 index += (count - 1);
2670 // fault in last
2671 ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
2672 ztBlocks[index].free = TRUE;
2673 ztBlocks[index].size = count;
2674 }
2675 }
2676
2677 static uint32_t
ztAlloc(zone_t zone, uint32_t count)
2679 {
2680 uint32_t q, w, p, leftover;
2681
2682 assert(count);
2683
2684 q = ztLog2up(count);
2685 if (q > ztFreeIndexMax) {
2686 q = ztFreeIndexMax;
2687 }
2688 do{
2689 w = q;
2690 while (TRUE) {
2691 p = ztBlocks[w].next;
2692 if (p == q) {
2693 break;
2694 }
2695 if (ztBlocks[p].size >= count) {
2696 // dequeue, mark both ends allocated
2697 ztBlocks[w].next = ztBlocks[p].next;
2698 ztBlocks[ztBlocks[p].next].prev = w;
2699 ztBlocks[p].free = FALSE;
2700 ztBlocksFree -= ztBlocks[p].size;
2701 if (ztBlocks[p].size > 1) {
2702 ztBlocks[p + ztBlocks[p].size - 1].free = FALSE;
2703 }
2704
2705 // fault all the allocation
2706 ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0);
2707 // mark last as allocated
2708 if (count > 1) {
2709 ztBlocks[p + count - 1].free = FALSE;
2710 }
2711 // free remainder
2712 leftover = ztBlocks[p].size - count;
2713 if (leftover) {
2714 ztFree(zone, p + ztBlocks[p].size - leftover, leftover);
2715 }
2716
2717 return p;
2718 }
2719 w = p;
2720 }
2721 q++;
2722 }while (q <= ztFreeIndexMax);
2723
2724 return -1U;
2725 }
2726
2727 __startup_func
2728 static void
zone_tagging_init(vm_size_t max_zonemap_size)
2730 {
2731 struct zone_map_range r;
2732
2733 // allocate submaps VM_KERN_MEMORY_DIAG
2734
2735 zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t);
2736 r = zone_kmem_suballoc(0, round_page(zone_tagbase_map_size),
2737 VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_DIAG, &zone_tagbase_map);
2738 zone_tagbase_min = r.min_address;
2739 zone_tagbase_max = r.max_address;
2740
2741 zone_tags_map_size = 2048 * 1024 * sizeof(vm_tag_t);
2742 r = zone_kmem_suballoc(0, round_page(zone_tags_map_size),
2743 VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_DIAG, &zone_tags_map);
2744 zone_tags_min = r.min_address;
2745 zone_tags_max = r.max_address;
2746
2747 ztBlocks = (ztBlock *) zone_tags_min;
2748 ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock));
2749
2750 // initialize the qheads
2751 lck_mtx_lock(&ztLock);
2752
2753 ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0);
2754 for (uint32_t idx = 0; idx < ztFreeIndexCount; idx++) {
2755 ztBlocks[idx].free = TRUE;
2756 ztBlocks[idx].next = idx;
2757 ztBlocks[idx].prev = idx;
2758 ztBlocks[idx].size = 0;
2759 }
2760 // free remaining space
2761 ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount);
2762
2763 lck_mtx_unlock(&ztLock);
2764 }
2765
2766 static void
ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size)
2768 {
2769 uint32_t * tagbase;
2770 uint32_t count, block, blocks, idx;
2771 size_t pages;
2772
2773 pages = atop(size);
2774 tagbase = ZTAGBASE(zone, mem);
2775
2776 lck_mtx_lock(&ztLock);
2777
2778 // fault tagbase
2779 ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0);
2780
2781 if (!zone->z_tags_inline) {
2782 // allocate tags
2783 count = (uint32_t)(size / zone_elem_size(zone));
2784 blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
2785 block = ztAlloc(zone, blocks);
2786 if (-1U == block) {
2787 ztDump(false);
2788 }
2789 assert(-1U != block);
2790 }
2791
2792 lck_mtx_unlock(&ztLock);
2793
2794 if (!zone->z_tags_inline) {
2795 // set tag base for each page
2796 block *= ztTagsPerBlock;
2797 for (idx = 0; idx < pages; idx++) {
2798 vm_offset_t esize = zone_elem_size(zone);
2799 tagbase[idx] = block + (uint32_t)((ptoa(idx) + esize - 1) / esize);
2800 }
2801 }
2802 }
2803
2804 static void
ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size)
2806 {
2807 uint32_t * tagbase;
2808 uint32_t count, block, blocks, idx;
2809 size_t pages;
2810
2811 // set tag base for each page
2812 pages = atop(size);
2813 tagbase = ZTAGBASE(zone, mem);
2814 block = tagbase[0];
2815 for (idx = 0; idx < pages; idx++) {
2816 tagbase[idx] = 0xFFFFFFFF;
2817 }
2818
2819 lck_mtx_lock(&ztLock);
2820 if (!zone->z_tags_inline) {
2821 count = (uint32_t)(size / zone_elem_size(zone));
2822 blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
2823 assert(block != 0xFFFFFFFF);
2824 block /= ztTagsPerBlock;
2825 ztFree(NULL /* zone is unlocked */, block, blocks);
2826 }
2827
2828 lck_mtx_unlock(&ztLock);
2829 }
2830
2831 uint16_t
zone_index_from_tag_index(uint32_t sizeclass_idx)
2833 {
2834 return zone_tags_sizeclasses[sizeclass_idx];
2835 }
2836
2837 #endif /* VM_TAG_SIZECLASSES */
2838 #endif /* !ZALLOC_TEST */
2839 #pragma mark zalloc helpers
2840 #if !ZALLOC_TEST
2841
2842 __pure2
2843 static inline uint16_t
zc_mag_size(void)
2845 {
2846 return zc_magazine_size;
2847 }
2848
2849 __attribute__((noinline, cold))
2850 static void
zone_lock_was_contended(zone_t zone, zone_cache_t zc)
2852 {
2853 lck_spin_lock_nopreempt(&zone->z_lock);
2854
2855 /*
2856 * If zone caching has been disabled due to memory pressure,
* then recording contention is not useful; give the system
2858 * time to recover.
2859 */
2860 if (__improbable(zone_caching_disabled)) {
2861 return;
2862 }
2863
2864 zone->z_contention_cur++;
2865
2866 if (zc == NULL || zc->zc_depot_max >= INT16_MAX * zc_mag_size()) {
2867 return;
2868 }
2869
2870 /*
2871 * Let the depot grow based on how bad the contention is,
2872 * and how populated the zone is.
2873 */
2874 if (zone->z_contention_wma < 2 * Z_CONTENTION_WMA_UNIT) {
2875 if (zc->zc_depot_max * zpercpu_count() * 20u >=
2876 zone->z_elems_avail) {
2877 return;
2878 }
2879 }
2880 if (zone->z_contention_wma < 4 * Z_CONTENTION_WMA_UNIT) {
2881 if (zc->zc_depot_max * zpercpu_count() * 10u >=
2882 zone->z_elems_avail) {
2883 return;
2884 }
2885 }
2886 if (!zc_grow_threshold || zone->z_contention_wma <
2887 zc_grow_threshold * Z_CONTENTION_WMA_UNIT) {
2888 return;
2889 }
2890
2891 zc->zc_depot_max++;
2892 }
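
/*
 * Worked example of the growth policy above (example numbers only): with
 * 8 CPUs and z_elems_avail == 10000, while z_contention_wma is below
 * 2 * Z_CONTENTION_WMA_UNIT the depot only grows as long as
 * zc_depot_max * 8 * 20 < 10000, i.e. while the per-CPU depots can cover
 * less than ~5% of the zone's available elements.
 */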
2893
2894 static inline void
zone_lock_nopreempt_check_contention(zone_t zone, zone_cache_t zc)
2896 {
2897 if (lck_spin_try_lock_nopreempt(&zone->z_lock)) {
2898 return;
2899 }
2900
2901 zone_lock_was_contended(zone, zc);
2902 }
2903
2904 static inline void
zone_lock_check_contention(zone_t zone, zone_cache_t zc)
2906 {
2907 disable_preemption();
2908 zone_lock_nopreempt_check_contention(zone, zc);
2909 }
2910
2911 static inline void
zone_unlock_nopreempt(zone_t zone)
2913 {
2914 lck_spin_unlock_nopreempt(&zone->z_lock);
2915 }
2916
2917 static inline void
zone_depot_lock_nopreempt(zone_cache_t zc)
2919 {
2920 hw_lock_bit_nopreempt(&zc->zc_depot_lock, 0, &zone_locks_grp);
2921 }
2922
2923 static inline void
zone_depot_unlock_nopreempt(zone_cache_t zc)
2925 {
2926 hw_unlock_bit_nopreempt(&zc->zc_depot_lock, 0);
2927 }
2928
2929 static inline void
zone_depot_lock(zone_cache_t zc)
2931 {
2932 hw_lock_bit(&zc->zc_depot_lock, 0, &zone_locks_grp);
2933 }
2934
2935 static inline void
zone_depot_unlock(zone_cache_t zc)
2937 {
2938 hw_unlock_bit(&zc->zc_depot_lock, 0);
2939 }
2940
2941 const char *
zone_name(zone_t z)
2943 {
2944 return z->z_name;
2945 }
2946
2947 const char *
zone_heap_name(zone_t z)
2949 {
2950 zone_security_flags_t zsflags = zone_security_config(z);
2951 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2952 return kalloc_heap_names[zsflags.z_kheap_id];
2953 }
2954 return "invalid";
2955 }
2956
2957 static uint32_t
zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2959 {
2960 vm_size_t elem_count, chunks;
2961
2962 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2963 zone_elem_size_safe(z);
2964 chunks = (max_elems + elem_count - 1) / elem_count;
2965
2966 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2967 }
2968
2969 static inline vm_size_t
zone_submaps_approx_size(void)
2971 {
2972 vm_size_t size = 0;
2973
2974 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2975 if (zone_submaps[idx] != VM_MAP_NULL) {
2976 size += zone_submaps[idx]->size;
2977 }
2978 }
2979
2980 return size;
2981 }
2982
2983 static void
zone_cache_swap_magazines(zone_cache_t cache)
2985 {
2986 uint16_t count_a = cache->zc_alloc_cur;
2987 uint16_t count_f = cache->zc_free_cur;
2988 zone_element_t *elems_a = cache->zc_alloc_elems;
2989 zone_element_t *elems_f = cache->zc_free_elems;
2990
2991 z_debug_assert(count_a <= zc_mag_size());
2992 z_debug_assert(count_f <= zc_mag_size());
2993
2994 cache->zc_alloc_cur = count_f;
2995 cache->zc_free_cur = count_a;
2996 cache->zc_alloc_elems = elems_f;
2997 cache->zc_free_elems = elems_a;
2998 }
2999
3000 /*!
3001 * @function zone_magazine_load
3002 *
3003 * @brief
3004 * Cache the value of @c zm_cur on the cache to avoid a dependent load
3005 * on the allocation fastpath.
3006 */
3007 static void
zone_magazine_load(uint16_t *count, zone_element_t **elems, zone_magazine_t mag)
3009 {
3010 z_debug_assert(mag->zm_cur <= zc_mag_size());
3011 *count = mag->zm_cur;
3012 *elems = mag->zm_elems;
3013 }
3014
3015 /*!
3016 * @function zone_magazine_replace
3017 *
3018 * @brief
* Unload a magazine and load a new one instead.
3020 */
3021 static zone_magazine_t
zone_magazine_replace(uint16_t *count, zone_element_t **elems,
3023 zone_magazine_t mag)
3024 {
3025 zone_magazine_t old;
3026
3027 old = (zone_magazine_t)((uintptr_t)*elems -
3028 offsetof(struct zone_magazine, zm_elems));
3029 old->zm_cur = *count;
3030 z_debug_assert(old->zm_cur <= zc_mag_size());
3031 zone_magazine_load(count, elems, mag);
3032
3033 return old;
3034 }
3035
3036 static zone_magazine_t
zone_magazine_alloc(zalloc_flags_t flags)
3038 {
3039 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
3040 }
3041
3042 static void
zone_magazine_free(zone_magazine_t mag)
3044 {
3045 (zfree)(zc_magazine_zone, mag);
3046 }
3047
3048 static void
zone_magazine_free_list(struct zone_depot *mags)
3050 {
3051 zone_magazine_t mag, tmp;
3052
3053 STAILQ_FOREACH_SAFE(mag, mags, zm_link, tmp) {
3054 zone_magazine_free(mag);
3055 }
3056
3057 STAILQ_INIT(mags);
3058 }
3059
3060 static void
zone_enable_caching(zone_t zone)
3062 {
3063 zone_cache_t caches;
3064
3065 caches = zalloc_percpu_permanent_type(struct zone_cache);
3066 zpercpu_foreach(zc, caches) {
3067 zone_magazine_load(&zc->zc_alloc_cur, &zc->zc_alloc_elems,
3068 zone_magazine_alloc(Z_WAITOK | Z_NOFAIL));
3069 zone_magazine_load(&zc->zc_free_cur, &zc->zc_free_elems,
3070 zone_magazine_alloc(Z_WAITOK | Z_NOFAIL));
3071 STAILQ_INIT(&zc->zc_depot);
3072 }
3073
3074 if (os_atomic_xchg(&zone->z_pcpu_cache, caches, release)) {
3075 panic("allocating caches for zone %s twice", zone->z_name);
3076 }
3077 }
3078
3079 bool
zone_maps_owned(vm_address_t addr, vm_size_t size)
3081 {
3082 return from_zone_map(addr, size, ZONE_ADDR_NATIVE);
3083 }
3084
3085 void
zone_map_sizes(
3087 vm_map_size_t *psize,
3088 vm_map_size_t *pfree,
3089 vm_map_size_t *plargest_free)
3090 {
3091 vm_map_size_t size, free, largest;
3092
3093 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
3094
3095 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
3096 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
3097 *psize += size;
3098 *pfree += free;
3099 *plargest_free = MAX(*plargest_free, largest);
3100 }
3101 }
3102
3103 __attribute__((always_inline))
3104 vm_map_t
zone_submap(zone_security_flags_t zsflags)
3106 {
3107 return zone_submaps[zsflags.z_submap_idx];
3108 }
3109
3110 unsigned
zpercpu_count(void)
3112 {
3113 return zpercpu_early_count;
3114 }
3115
3116 /*
* Returns a random number in the range [bound_min, bound_max)
3118 *
3119 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
3120 *
* This uses Intel's rdrand because random() uses FP registers,
* which causes FP faults and allocations, something we can't allow
* from within zalloc itself due to reentrancy problems.
*
* For pre-rdrand machines, we use a bad biased random generator
* that doesn't use FP. Such HW is no longer supported, but VMs of
* newer OSes on older bare metal are made to limp along
* (with reduced security) this way.
3129 */
3130 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
3131 static uint32_t
zalloc_random_uniform(uint32_t bound_min, uint32_t bound_max)
3133 {
3134 uint32_t bits = 32 - __builtin_clz(bound_max - bound_min);
3135 uint32_t mask = ~0u >> (32 - bits);
3136 uint32_t v;
3137
3138 do {
3139 #if __x86_64__
3140 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
3141 asm volatile ("1: rdrand %0; jnc 1b\n"
3142 : "=r" (v) :: "cc");
3143 } else {
3144 disable_preemption();
3145 int cpu = cpu_number();
3146 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3147 zone_bool_gen[cpu].zbg_entropy,
3148 ZONE_ENTROPY_CNT, bits);
3149 enable_preemption();
3150 }
3151 #else
3152 v = (uint32_t)early_random();
3153 #endif
3154 v = bound_min + (v & mask);
3155 } while (v >= bound_max);
3156
3157 return v;
3158 }
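
/*
 * Worked example: zalloc_random_uniform(3, 10) draws 3 random bits
 * (bits == 3, mask == 0x7), forms v = 3 + (raw & 0x7) in [3, 10] and
 * rejects v == 10, so the result is uniform over [3, 9].
 */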
3159 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
3160
3161 #if ZONE_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
3162 /*
3163 * Track all kalloc zones of specified size for zlog name
3164 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
3165 */
3166 static bool
track_kalloc_zones(zone_t z, const char *logname)
3168 {
3169 const char *prefix;
3170 size_t len;
3171 zone_security_flags_t zsflags = zone_security_config(z);
3172
3173 prefix = "kalloc.type.var.";
3174 len = strlen(prefix);
3175 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
3176 strncmp(logname, prefix, len) == 0) {
3177 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
3178
3179 return zone_elem_size(z) == sizeclass;
3180 }
3181
3182 prefix = "kalloc.type.";
3183 len = strlen(prefix);
3184 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
3185 strncmp(logname, prefix, len) == 0) {
3186 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
3187
3188 return zone_elem_size(z) == sizeclass;
3189 }
3190
3191 prefix = "kalloc.";
3192 len = strlen(prefix);
3193 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
3194 strncmp(logname, prefix, len) == 0) {
3195 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
3196
3197 return zone_elem_size(z) == sizeclass;
3198 }
3199
3200 return false;
3201 }
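
/*
 * For example, booting with zlog1=kalloc.type.48 enables logging for the
 * kalloc type zones whose element size is 48 bytes, while zlog1=kalloc.1024
 * matches the kalloc zones of element size 1024.
 */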
3202 #endif
3203
3204 int
track_this_zone(const char *zonename, const char *logname)
3206 {
3207 unsigned int len;
3208 const char *zc = zonename;
3209 const char *lc = logname;
3210
3211 /*
3212 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
3213 */
3214
3215 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
3216 /*
* If the current characters don't match, check for a space
* in the zone name and a corresponding period in the log name.
3219 * If that's not there, then the strings don't match.
3220 */
3221
3222 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
3223 break;
3224 }
3225
3226 /*
3227 * The strings are equal so far. If we're at the end, then it's a match.
3228 */
3229
3230 if (*zc == '\0') {
3231 return TRUE;
3232 }
3233 }
3234
3235 return FALSE;
3236 }
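
/*
 * For example, track_this_zone("vm pages", "vm.pages") returns TRUE:
 * a space in the zone name is allowed to match a period in the log name,
 * which keeps boot-args free of embedded spaces.
 */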
3237
3238 #if DEBUG || DEVELOPMENT
3239
3240 vm_size_t
zone_element_info(void *addr, vm_tag_t * ptag)
3242 {
3243 vm_size_t size = 0;
3244 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3245 struct zone *src_zone;
3246
3247 if (from_zone_map(addr, sizeof(void *), ZONE_ADDR_NATIVE) ||
3248 from_zone_map(addr, sizeof(void *), ZONE_ADDR_FOREIGN)) {
3249 src_zone = &zone_array[zone_index_from_ptr(addr)];
3250 #if VM_TAG_SIZECLASSES
3251 if (__improbable(src_zone->z_uses_tags)) {
3252 tag = *ztSlot(src_zone, (vm_offset_t)addr) >> 1;
3253 }
3254 #endif /* VM_TAG_SIZECLASSES */
3255 size = zone_elem_size_safe(src_zone);
3256 } else {
3257 #if CONFIG_GZALLOC
3258 gzalloc_element_size(addr, NULL, &size);
3259 #endif /* CONFIG_GZALLOC */
3260 }
3261 *ptag = tag;
3262 return size;
3263 }
3264
3265 #endif /* DEBUG || DEVELOPMENT */
3266 #endif /* !ZALLOC_TEST */
3267
3268 #pragma mark Zone zeroing and early random
3269 #if !ZALLOC_TEST
3270
3271 /*
3272 * Zone zeroing
3273 *
* All allocations from zones are zeroed on free and are additionally
* checked to still be zero on alloc. The check is
* always enabled on embedded devices. A perf regression was detected
* on Intel as we can't use the vectorized implementation of
* memcmp_zero_ptr_aligned due to cyclic dependencies between
* initialization and allocation. Therefore we perform the check
* on 20% of the allocations.
3281 */
3282 #if ZALLOC_ENABLE_ZERO_CHECK
3283 #if defined(__x86_64__) || defined(__arm__)
3284 /*
* Perform zero validation on every 5th allocation
3286 */
3287 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3288 static uint32_t PERCPU_DATA(zzc_decrementer);
3289 #endif /* defined(__x86_64__) || defined(__arm__) */
3290
3291 /*
3292 * Determine if zero validation for allocation should be skipped
3293 */
3294 static bool
zalloc_skip_zero_check(void)
3296 {
3297 #if defined(__x86_64__) || defined(__arm__)
3298 uint32_t *counterp, cnt;
3299
3300 counterp = PERCPU_GET(zzc_decrementer);
3301 cnt = *counterp;
3302 if (__probable(cnt > 0)) {
3303 *counterp = cnt - 1;
3304 return true;
3305 }
3306 *counterp = zzc_rate - 1;
#endif /* defined(__x86_64__) || defined(__arm__) */
3308 return false;
3309 }
3310
3311 __abortlike
3312 static void
zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3314 {
3315 uint32_t esize = (uint32_t)zone_elem_size(z);
3316 uint32_t first_offs = ~0u;
3317 uintptr_t first_bits = 0, v;
3318 char buf[1024];
3319 int pos = 0;
3320
3321 #if __LP64__
3322 #define ZPF "0x%016lx"
3323 #else
3324 #define ZPF "0x%08lx"
3325 #endif
3326
3327 buf[0] = '\0';
3328
3329 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3330 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3331 continue;
3332 }
3333 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3334 "%5d: "ZPF, o, v);
3335 if (first_offs > o) {
3336 first_offs = o;
3337 first_bits = v;
3338 }
3339 }
3340
3341 (panic)("[%s%s]: element modified after free "
3342 "(off:%d, val:"ZPF", sz:%d, ptr:%p)%s",
3343 zone_heap_name(z), zone_name(z),
3344 first_offs, first_bits, esize, (void *)elem, buf);
3345
3346 #undef ZPF
3347 }
3348
3349 static void
zalloc_validate_element(zone_t zone, vm_offset_t elem, vm_size_t size,
3351 zalloc_flags_t flags)
3352 {
3353 #if CONFIG_GZALLOC
3354 if (zone->z_gzalloc_tracked) {
3355 return;
3356 }
3357 #endif /* CONFIG_GZALLOC */
3358
3359 if (flags & Z_NOZZC) {
3360 return;
3361 }
3362 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3363 zalloc_uaf_panic(zone, elem, size);
3364 }
3365 if (flags & Z_PCPU) {
3366 for (size_t i = zpercpu_count(); --i > 0;) {
3367 elem += PAGE_SIZE;
3368 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3369 zalloc_uaf_panic(zone, elem, size);
3370 }
3371 }
3372 }
3373 }
3374
3375 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3376
3377 static void
zone_early_scramble_rr(zone_t zone, zone_stats_t zstats)
3379 {
3380 int cpu = cpu_number();
3381 zone_stats_t zs = zpercpu_get_cpu(zstats, cpu);
3382 uint32_t bits;
3383
3384 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3385 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3386
3387 zs->zs_alloc_rr += bits;
3388 zs->zs_alloc_rr %= zone->z_chunk_elems;
3389 }
3390
3391 #endif /* !ZALLOC_TEST */
3392 #pragma mark Zone Leak Detection
3393 #if !ZALLOC_TEST
3394 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
3395
3396 /*
3397 * Zone leak debugging code
3398 *
3399 * When enabled, this code keeps a log to track allocations to a particular
3400 * zone that have not yet been freed.
3401 *
3402 * Examining this log will reveal the source of a zone leak.
3403 *
3404 * The log is allocated only when logging is enabled (it is off by default),
3405 * so there is no effect on the system when it's turned off.
3406 *
3407 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3408 * zone name to log, with n starting at 1.
3409 *
3410 * Leaks debugging utilizes 2 tunables:
3411 * - zlsize (in kB) which describes how much "size" the record covers
3412 * (zones with smaller elements get more records, default is 4M).
3413 *
3414 * - zlfreq (in kB) which describes a sample rate in cumulative allocation
3415 * size at which automatic leak detection will sample allocations.
3416 * (default is 16k)
3417 *
3418 *
3419 * Zone corruption logging
3420 *
3421 * Logging can also be used to help identify the source of a zone corruption.
3422 *
3423 * First, identify the zone that is being corrupted,
3424 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3425 *
3426 * When -zc is used in conjunction with zlog,
3427 * it changes the logging style to track both allocations and frees to the zone.
3428 *
3429 * When the corruption is detected, examining the log will show you the stack
3430 * traces of the callers who last allocated and freed any particular element in
3431 * the zone.
3432 *
3433 * Corruption debugging logs will have zrecs records
3434 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3435 */
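
/*
 * For example, booting with
 *
 *	zlog1=vm.objects zlsize=8192 zlfreq=32
 *
 * enables leak logging for the "vm objects" zone, with enough records to
 * cover 8MB worth of elements and a sampling rate of one record per 32kB
 * allocated, while
 *
 *	-zc zlog1=vm.objects
 *
 * switches that zone to corruption-style logging of both allocations and
 * frees (zrecs records).
 */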
3436
3437 #define ZRECORDS_MAX (256u << 10)
3438 #define ZRECORDS_DEFAULT (16u << 10)
3439 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3440 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3441 static TUNABLE(uint32_t, zlfreq, "zlfreq", 16);
3442
3443 __startup_func
3444 static void
zone_leaks_init_zrecs(void)
3446 {
3447 /*
3448 * Don't allow more than ZRECORDS_MAX records,
3449 * even if the user asked for more.
3450 *
3451 * This prevents accidentally hogging too much kernel memory
3452 * and making the system unusable.
3453 */
3454 if (zrecs == 0) {
3455 zrecs = ZRECORDS_DEFAULT *
3456 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3457 }
3458 if (zrecs > ZRECORDS_MAX) {
3459 zrecs = ZRECORDS_MAX;
3460 }
3461 }
3462 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3463
3464 static uint32_t
zone_leaks_record_count(zone_t z)
3466 {
3467 uint32_t recs = (zlsize << 10) / zone_elem_size(z);
3468
3469 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3470 }
3471
3472 static uint32_t
zone_leaks_sample_rate(zone_t z)
3474 {
3475 return (zlfreq << 10) / zone_elem_size(z);
3476 }
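
/*
 * Worked example: for a zone with 256 byte elements and the default
 * tunables, zone_leaks_record_count() returns (4096 << 10) / 256 == 16384
 * records (clamped to [ZRECORDS_DEFAULT, ZRECORDS_MAX]) and
 * zone_leaks_sample_rate() returns (16 << 10) / 256 == 64, i.e. roughly
 * one sampled allocation out of every 64.
 */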
3477
3478 #if ZONE_ENABLE_LOGGING
3479 /* Log allocations and frees to help debug a zone element corruption */
3480 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3481
3482 /*
3483 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3484 */
3485 #define MAX_ZONES_LOG_REQUESTS 10
3486 /*
* Because all kalloc type zones of a specified size are logged by providing
* a single zlog boot-arg, the maximum number of zones that can be logged
* is higher than MAX_ZONES_LOG_REQUESTS.
3490 */
3491 #define MAX_ZONES_LOGGED 20
3492
3493 static int num_zones_logged = 0;
3494
3495 /**
3496 * @function zone_setup_logging
3497 *
3498 * @abstract
3499 * Optionally sets up a zone for logging.
3500 *
3501 * @discussion
* We recognize the following boot-args:
3503 *
3504 * zlog=<zone_to_log>
3505 * zrecs=<num_records_in_log>
3506 * zlsize=<memory to cover for leaks>
3507 *
3508 * The zlog arg is used to specify the zone name that should be logged,
3509 * and zrecs/zlsize is used to control the size of the log.
3510 */
3511 static void
3512 zone_setup_logging(zone_t z)
3513 {
3514 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3515 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3516 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3517 bool logging_on = false;
3518
3519 if (num_zones_logged >= MAX_ZONES_LOGGED) {
3520 return;
3521 }
3522
3523 /*
3524 * Append kalloc heap name to zone name (if zone is used by kalloc)
3525 */
3526 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3527
3528 /* zlog0 isn't allowed. */
3529 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3530 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3531
3532 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3533 if (track_this_zone(zone_name, zlog_val) ||
3534 track_kalloc_zones(z, zlog_val)) {
3535 logging_on = true;
3536 break;
3537 }
3538 }
3539 }
3540
3541 /*
3542 * Backwards compat. with the old boot-arg used to specify single zone
3543 	 * logging, i.e. "zlog". This needs to happen after the newer zlog<n> checks
3544 	 * because the "zlog" prefix will match all the zlog<n>
3545 	 * boot-args.
3546 */
3547 if (!logging_on &&
3548 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3549 if (track_this_zone(zone_name, zlog_val) ||
3550 track_kalloc_zones(z, zlog_val)) {
3551 logging_on = true;
3552 }
3553 }
3554
3555 /*
3556 * If we want to log a zone, see if we need to allocate buffer space for
3557 * the log.
3558 *
3559 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3560 * we have to defer allocation in that case.
3561 *
3562 * zone_init() will finish the job.
3563 *
3564 * If we want to log one of the VM related zones that's set up early on,
3565 * we will skip allocation of the log until zinit is called again later
3566 * on some other zone.
3567 */
3568 if (logging_on) {
3569 if (os_atomic_inc(&num_zones_logged, relaxed) >
3570 MAX_ZONES_LOGGED) {
3571 os_atomic_dec(&num_zones_logged, relaxed);
3572 return;
3573 }
3574
3575 if (corruption_debug_flag) {
3576 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3577 } else {
3578 z->z_btlog = btlog_create(BTLOG_HASH,
3579 zone_leaks_record_count(z), 0);
3580 }
3581 if (z->z_btlog) {
3582 z->z_log_on = true;
3583 printf("zone[%s%s]: logging enabled\n",
3584 zone_heap_name(z), z->z_name);
3585 } else {
3586 printf("zone[%s%s]: failed to enable logging\n",
3587 zone_heap_name(z), z->z_name);
3588 }
3589 }
3590 }
3591
3592 #endif /* ZONE_ENABLE_LOGGING */
3593 #if CONFIG_ZLEAKS
3594
3595 static thread_call_data_t zone_leaks_callout;
3596
3597 /*
3598 * The zone leak detector, abbreviated 'zleak', keeps track
3599 * of a subset of the currently outstanding allocations
3600 * made by the zone allocator.
3601 *
3602 * It will engage itself automatically if the zone map usage
3603 * goes above zleak_pages_global_wired_threshold pages.
3604 *
3605 * When that threshold is reached, zones who use more than
3606 * zleak_pages_per_zone_wired_threshold pages will get
3607 * a BTLOG_HASH btlog with sampling to minimize perf impact,
3608 * yet receive statistical data about the backtrace that is
3609 * the most likely to cause the leak.
3610 *
3611 * If the zone goes under the threshold enough, then the log
3612 * is disabled and backtraces freed. Data can be collected
3613 * from userspace with the zlog(1) command.
3614 */
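/*
 * Worked example (illustrative): with a 4 GiB zone map, zleak_init() below
 * sets zleak_global_tracking_threshold to 2 GiB and
 * zleak_per_zone_tracking_threshold to 256 MiB. zleak therefore engages once
 * wired zone memory crosses 2 GiB and disengages below 1 GiB, while an
 * individual zone starts logging around 256 MiB of wired memory and stops
 * below 128 MiB (see the zleak_should_* predicates below).
 */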
3615
3616 /* whether the zleaks subsystem thinks the map is under pressure */
3617 uint32_t zleak_active;
3618 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3619
3620 /* Size of zone map at which to start collecting data */
3621 static size_t zleak_pages_global_wired_threshold = ~0;
3622 vm_size_t zleak_global_tracking_threshold = ~0;
3623
3624 /* Size a zone will have before we will collect data on it */
3625 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3626 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3627
3628 static inline bool
3629 zleak_should_enable_for_zone(zone_t z)
3630 {
3631 if (z->z_log_on) {
3632 return false;
3633 }
3634 if (z->z_btlog) {
3635 return false;
3636 }
3637 if (!zleak_active) {
3638 return false;
3639 }
3640 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3641 }
3642
3643 static inline bool
3644 zleak_should_disable_for_zone(zone_t z)
3645 {
3646 if (z->z_log_on) {
3647 return false;
3648 }
3649 if (!z->z_btlog) {
3650 return false;
3651 }
3652 if (!zleak_active) {
3653 return true;
3654 }
3655 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3656 }
3657
3658 static inline bool
3659 zleak_should_activate(size_t pages)
3660 {
3661 return !zleak_active && pages >= zleak_pages_global_wired_threshold;
3662 }
3663
3664 static inline bool
3665 zleak_should_deactivate(size_t pages)
3666 {
3667 return zleak_active && pages < zleak_pages_global_wired_threshold / 2;
3668 }
3669
3670 static void
3671 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3672 {
3673 size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
3674 btlog_t log;
3675
3676 if (zleak_should_activate(pages)) {
3677 zleak_active = 1;
3678 } else if (zleak_should_deactivate(pages)) {
3679 zleak_active = 0;
3680 }
3681
3682 zone_foreach(z) {
3683 if (zleak_should_disable_for_zone(z)) {
3684 log = z->z_btlog;
3685 z->z_btlog = NULL;
3686 assert(z->z_btlog_disabled == NULL);
3687 btlog_disable(log);
3688 z->z_btlog_disabled = log;
3689 }
3690
3691 if (zleak_should_enable_for_zone(z)) {
3692 log = z->z_btlog_disabled;
3693 if (log == NULL) {
3694 log = btlog_create(BTLOG_HASH,
3695 zone_leaks_record_count(z),
3696 zone_leaks_sample_rate(z));
3697 } else {
3698 z->z_btlog_disabled = NULL;
3699 btlog_enable(log);
3700 }
3701 os_atomic_store(&z->z_btlog, log, release);
3702 }
3703 }
3704 }
3705
3706 __startup_func
3707 static void
3708 zleak_init(void)
3709 {
3710 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3711
3712 zleak_update_threshold(&zleak_global_tracking_threshold,
3713 zleak_max_zonemap_size / 2);
3714 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3715 zleak_global_tracking_threshold / 8);
3716
3717 thread_call_setup_with_options(&zone_leaks_callout,
3718 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3719 THREAD_CALL_OPTIONS_ONCE);
3720 }
3721 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3722
3723 kern_return_t
3724 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3725 {
3726 if (value >= zleak_max_zonemap_size) {
3727 return KERN_INVALID_VALUE;
3728 }
3729
3730 if (arg == &zleak_global_tracking_threshold) {
3731 zleak_global_tracking_threshold = (vm_size_t)value;
3732 zleak_pages_global_wired_threshold = atop(value);
3733 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3734 thread_call_enter(&zone_leaks_callout);
3735 }
3736 return KERN_SUCCESS;
3737 }
3738
3739 if (arg == &zleak_per_zone_tracking_threshold) {
3740 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3741 zleak_pages_per_zone_wired_threshold = atop(value);
3742 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3743 thread_call_enter(&zone_leaks_callout);
3744 }
3745 return KERN_SUCCESS;
3746 }
3747
3748 return KERN_INVALID_ARGUMENT;
3749 }
3750
3751 static void
3752 panic_display_zleaks(bool has_syms)
3753 {
3754 bool did_header = false;
3755 vm_address_t bt[BTLOG_MAX_DEPTH];
3756 uint32_t len, count;
3757
3758 zone_foreach(z) {
3759 btlog_t log = z->z_btlog;
3760
3761 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3762 continue;
3763 }
3764
3765 count = btlog_guess_top(log, bt, &len);
3766 if (count == 0) {
3767 continue;
3768 }
3769
3770 if (!did_header) {
3771 paniclog_append_noflush("Zone (suspected) leak report:\n");
3772 did_header = true;
3773 }
3774
3775 paniclog_append_noflush(" Zone: %s%s\n",
3776 zone_heap_name(z), zone_name(z));
3777 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3778 (long)count * zone_scale_for_percpu(z, zone_elem_size(z)));
3779 paniclog_append_noflush(" Size: %ld\n",
3780 (long)zone_size_wired(z));
3781 paniclog_append_noflush(" Top backtrace:\n");
3782 for (uint32_t i = 0; i < len; i++) {
3783 if (has_syms) {
3784 paniclog_append_noflush(" %p ", (void *)bt[i]);
3785 panic_print_symbol_name(bt[i]);
3786 paniclog_append_noflush("\n");
3787 } else {
3788 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3789 }
3790 }
3791
3792 kmod_panic_dump(bt, len);
3793 paniclog_append_noflush("\n");
3794 }
3795 }
3796 #endif /* CONFIG_ZLEAKS */
3797
3798 static void
3799 zalloc_log(btlog_t log, vm_offset_t addr, zalloc_flags_t flags, void *fp)
3800 {
3801 btref_t ref;
3802
3803 if (btlog_sample(log)) {
3804 ref = btref_get(fp, (flags & Z_NOWAIT) ? BTREF_GET_NOWAIT : 0);
3805 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3806 }
3807 }
3808
3809 static void
3810 zfree_log(btlog_t log, vm_offset_t addr, void *fp)
3811 {
3812 /*
3813 * See if we're doing logging on this zone.
3814 *
3815 * There are two styles of logging used depending on
3816 * whether we're trying to catch a leak or corruption.
3817 */
3818 if (btlog_get_type(log) == BTLOG_LOG) {
3819 /*
3820 * We're logging to catch a corruption.
3821 *
3822 * Add a record of this zfree operation to log.
3823 */
3824 btlog_record(log, (void *)addr, ZOP_FREE,
3825 btref_get(fp, BTREF_GET_NOWAIT));
3826 } else {
3827 /*
3828 * We're logging to catch a leak.
3829 *
3830 * Remove any record we might have for this element
3831 * since it's being freed. Note that we may not find it
3832 * if the buffer overflowed and that's OK.
3833 *
3834 * Since the log is of a limited size, old records get
3835 * overwritten if there are more zallocs than zfrees.
3836 */
3837 btlog_erase(log, (void *)addr);
3838 }
3839 }
3840
3841 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3842 #endif /* !ZALLOC_TEST */
3843 #pragma mark zone (re)fill
3844 #if !ZALLOC_TEST
3845
3846 /*!
3847 * @defgroup Zone Refill
3848 * @{
3849 *
3850 * @brief
3851  * Functions handling the zone refill machinery.
3852 *
3853 * @discussion
3854 * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3855 *
3856 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3857 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3858 *
3859 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3860 *
3861  * - wake up the asynchronous expansion thread call if the caller is not allowed
3862 * to block, or if the reserve becomes depleted.
3863 *
3864 *
3865 * <h2>Synchronous expansion</h2>
3866 *
3867 * This mechanism is actually the only one that may refill a zone, and all the
3868 * other ones funnel through this one eventually.
3869 *
3870 * @c zone_expand_locked() implements the core of the expansion mechanism,
3871 * and will do so while a caller specified predicate is true.
3872 *
3873 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3874 * - one VM privileged thread,
3875 * - one regular thread.
3876 *
3877 * Regular threads that refill will put down their identity in @c z_expander,
3878 * so that priority inversion avoidance can be implemented.
3879 *
3880 * However, VM privileged threads are allowed to use VM page reserves,
3881 * which allows for the system to recover from extreme memory pressure
3882 * situations, allowing for the few allocations that @c zone_gc() or
3883 * killing processes require.
3884 *
3885 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3886 * is set. @c z_expander is not necessarily the identity of this VM privileged
3887  * thread (it is when the VM privileged thread came in first; otherwise it
3888  * may be another thread, or even @c THREAD_NULL).
3889 *
3890 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3891 * spending a whole pointer on priority inheritance for VM privileged threads
3892 * (and other issues related to having two owners), we use the rwlock boost as
3893 * a stop gap to avoid priority inversions.
3894 *
3895 *
3896 * <h2>Chunk wiring policies</h2>
3897 *
3898 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3899  * to try to minimize fragmentation when element sizes do not divide a chunk
3900  * evenly. However, such an allocation can be large and hard to fulfill on
3901  * a system under a lot of memory pressure (chunks can be as large as 8 pages on
3902 * 4k page systems).
3903 *
3904 * This is why, when under memory pressure the system allows chunks to be
3905 * partially populated. The metadata of the first page in the chunk maintains
3906 * the count of actually populated pages.
3907 *
3908  * The metadata for addresses assigned to a zone is found on 4 queues:
3909 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3910 * elements (those can be targeted by @c zone_gc()),
3911 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3912 * used,
3913 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3914 * left,
3915 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3916 * the zone forever (if @c z_va_sequester is enabled), or the first secondary
3917 * metadata for a chunk whose corresponding page is not populated in the
3918 * chunk.
3919 *
3920 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3921 * queues are preferred.
3922 *
3923 *
3924 * <h2>Asynchronous expansion</h2>
3925 *
3926 * This mechanism allows for refilling zones used mostly with non blocking
3927 * callers. It relies on a thread call (@c zone_expand_callout) which will
3928 * iterate all zones and refill the ones marked with @c z_async_refilling.
3929 *
3930 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3931 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3932 *
3933 *
3934 * <h2>Dealing with zone allocations from the mach VM code</h2>
3935 *
3936 * The implementation of the mach VM itself uses the zone allocator
3937 * for things like the vm_map_entry data structure. In order to prevent
3938 * a recursion problem when adding more pages to a zone, the VM zones
3939 * use the Z_SUBMAP_IDX_VM submap which doesn't use kernel_memory_allocate()
3940 * or any VM map functions to allocate.
3941 *
3942 * Instead, a really simple coalescing first-fit allocator is used
3943 * for this submap, and no one else than zalloc can allocate from it.
3944 *
3945 * Memory is directly populated which doesn't require allocation of
3946 * VM map entries, and avoids recursion. The cost of this scheme however,
3947 * is that `vm_map_lookup_entry` will not function on those addresses
3948 * (nor any API relying on it).
3949 */
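/*
 * Minimal sketch of the refill policy described above (this is not the
 * actual zalloc_ext() code; the checks are simplified for illustration):
 *
 *     if (z->z_elems_free <= z->z_elems_rsv / 2) {
 *         if (flags & Z_NOWAIT) {
 *             zone_expand_async_schedule_if_needed(z);
 *         } else {
 *             zone_expand_locked(z, flags, zalloc_needs_refill);
 *         }
 *     }
 */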
3950
3951 static thread_call_data_t zone_expand_callout;
3952
3953 static inline kma_flags_t
3954 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3955 {
3956 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3957
3958 if (zsflags.z_noencrypt) {
3959 kmaflags |= KMA_NOENCRYPT;
3960 }
3961 if (flags & Z_NOPAGEWAIT) {
3962 kmaflags |= KMA_NOPAGEWAIT;
3963 }
3964 if (z->z_permanent || (!z->z_destructible && zsflags.z_va_sequester)) {
3965 kmaflags |= KMA_PERMANENT;
3966 }
3967 if (zsflags.z_submap_from_end) {
3968 kmaflags |= KMA_LAST_FREE;
3969 }
3970
3971 return kmaflags;
3972 }
3973
3974 static inline void
3975 zone_add_wired_pages(uint32_t pages)
3976 {
3977 size_t count = os_atomic_add(&zone_pages_wired, pages, relaxed);
3978
3979 #if CONFIG_ZLEAKS
3980 if (__improbable(zleak_should_activate(count) &&
3981 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3982 thread_call_enter(&zone_leaks_callout);
3983 }
3984 #else
3985 (void)count;
3986 #endif
3987 }
3988
3989 static inline void
3990 zone_remove_wired_pages(uint32_t pages)
3991 {
3992 size_t count = os_atomic_sub(&zone_pages_wired, pages, relaxed);
3993
3994 #if CONFIG_ZLEAKS
3995 if (__improbable(zleak_should_deactivate(count) &&
3996 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3997 thread_call_enter(&zone_leaks_callout);
3998 }
3999 #else
4000 (void)count;
4001 #endif
4002 }
4003
4004 /*!
4005 * @function zcram_and_lock()
4006 *
4007 * @brief
4008 * Prepare some memory for being usable for allocation purposes.
4009 *
4010 * @discussion
4011 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
4012 * to be usable in the zone.
4013 *
4014 * This function assumes the metadata is already populated for the range.
4015 *
4016 * Calling this function with @c pg_start being 0 means that the memory
4017 * is either a partial chunk, or a full chunk, that isn't published anywhere
4018 * and the initialization can happen without locks held.
4019 *
4020 * Calling this function with a non zero @c pg_start means that we are extending
4021 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
4022 * is already usable and published in the zone, so extending it requires holding
4023 * the zone lock.
4024 *
4025 * @param zone The zone to cram new populated pages into
4026 * @param addr The base address for the chunk(s)
4027 * @param pg_va_new The number of virtual pages newly assigned to the zone
4028 * @param pg_start The first newly populated page relative to @a addr.
4029 * @param pg_end The after-last newly populated page relative to @a addr.
4030 * @param kind The kind of memory assigned to the zone.
4031 */
4032 static void
4033 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
4034 uint32_t pg_start, uint32_t pg_end, zone_addr_kind_t kind)
4035 {
4036 zone_id_t zindex = zone_index(zone);
4037 vm_offset_t elem_size = zone_elem_size_safe(zone);
4038 uint32_t free_start = 0, free_end = 0;
4039 uint32_t oob_offs = zone_oob_offs(zone);
4040
4041 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
4042 uint32_t chunk_pages = zone->z_chunk_pages;
4043
4044 assert(pg_start < pg_end && pg_end <= chunk_pages);
4045
4046 if (pg_start == 0) {
4047 uint16_t chunk_len = (uint16_t)pg_end;
4048 uint16_t secondary_len = ZM_SECONDARY_PAGE;
4049 bool inline_bitmap = false;
4050
4051 if (zone->z_percpu) {
4052 chunk_len = 1;
4053 secondary_len = ZM_SECONDARY_PCPU_PAGE;
4054 assert(pg_end == zpercpu_count());
4055 }
4056 if (!zone->z_permanent) {
4057 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
4058 }
4059
4060 meta[0] = (struct zone_page_metadata){
4061 .zm_index = zindex,
4062 .zm_inline_bitmap = inline_bitmap,
4063 .zm_chunk_len = chunk_len,
4064 };
4065 if (kind == ZONE_ADDR_FOREIGN) {
4066 /* Never hit z_pageq_empty */
4067 meta[0].zm_alloc_size = ZM_ALLOC_SIZE_LOCK;
4068 }
4069
4070 for (uint16_t i = 1; i < chunk_pages; i++) {
4071 meta[i] = (struct zone_page_metadata){
4072 .zm_index = zindex,
4073 .zm_inline_bitmap = inline_bitmap,
4074 .zm_chunk_len = secondary_len,
4075 .zm_page_index = i,
4076 };
4077 }
4078
4079 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
4080 if (!zone->z_permanent) {
4081 zone_meta_bits_init(meta, free_end, zone->z_chunk_elems);
4082 }
4083 } else {
4084 assert(!zone->z_percpu && !zone->z_permanent);
4085
4086 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
4087 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
4088 }
4089
4090 #if VM_TAG_SIZECLASSES
4091 if (__improbable(zone->z_uses_tags)) {
4092 assert(kind == ZONE_ADDR_NATIVE && !zone->z_percpu);
4093 ztMemoryAdd(zone, addr + ptoa(pg_start),
4094 ptoa(pg_end - pg_start));
4095 }
4096 #endif /* VM_TAG_SIZECLASSES */
4097
4098 /*
4099 * Insert the initialized pages / metadatas into the right lists.
4100 */
4101
4102 zone_lock(zone);
4103 assert(zone->z_self == zone);
4104
4105 if (pg_start != 0) {
4106 assert(meta->zm_chunk_len == pg_start);
4107
4108 zone_meta_bits_merge(meta, free_start, free_end);
4109 meta->zm_chunk_len = (uint16_t)pg_end;
4110
4111 /*
4112 * consume the zone_meta_lock_in_partial()
4113 * done in zone_expand_locked()
4114 */
4115 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4116 zone_meta_remqueue(zone, meta);
4117 }
4118
4119 if (zone->z_permanent || meta->zm_alloc_size) {
4120 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4121 } else {
4122 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4123 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4124 }
4125 if (pg_end < chunk_pages) {
4126 /* push any non populated residual VA on z_pageq_va */
4127 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4128 }
4129
4130 zone_elems_free_add(zone, free_end - free_start);
4131 zone->z_elems_avail += free_end - free_start;
4132 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
4133 if (pg_va_new) {
4134 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4135 }
4136 if (zone->z_wired_hwm < zone->z_wired_cur) {
4137 zone->z_wired_hwm = zone->z_wired_cur;
4138 }
4139
4140 #if CONFIG_ZLEAKS
4141 if (__improbable(zleak_should_enable_for_zone(zone) &&
4142 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4143 thread_call_enter(&zone_leaks_callout);
4144 }
4145 #endif /* CONFIG_ZLEAKS */
4146
4147 zone_add_wired_pages(pg_end - pg_start);
4148 }
4149
4150 static void
4151 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, zone_addr_kind_t kind)
4152 {
4153 uint32_t chunk_pages = zone->z_chunk_pages;
4154
4155 assert(pages % chunk_pages == 0);
4156 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4157 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, kind);
4158 zone_unlock(zone);
4159 }
4160 }
4161
4162 void
4163 zone_cram_foreign(zone_t zone, vm_offset_t newmem, vm_size_t size)
4164 {
4165 uint32_t pages = (uint32_t)atop(size);
4166 zone_security_flags_t zsflags = zone_security_config(zone);
4167
4168 if (!from_zone_map(newmem, size, ZONE_ADDR_FOREIGN)) {
4169 panic("zone_cram_foreign: foreign memory [%p] being crammed is "
4170 "outside of expected range", (void *)newmem);
4171 }
4172 if (!zsflags.z_allows_foreign) {
4173 panic("zone_cram_foreign: foreign memory [%p] being crammed in "
4174 "zone '%s%s' not expecting it", (void *)newmem,
4175 zone_heap_name(zone), zone_name(zone));
4176 }
4177 if (size % ptoa(zone->z_chunk_pages)) {
4178 panic("zone_cram_foreign: foreign memory [%p] being crammed has "
4179 "invalid size %zx", (void *)newmem, (size_t)size);
4180 }
4181 if (startup_phase >= STARTUP_SUB_ZALLOC) {
4182 panic("zone_cram_foreign: foreign memory [%p] being crammed "
4183 "after zalloc is initialized", (void *)newmem);
4184 }
4185
4186 bzero((void *)newmem, size);
4187 zcram(zone, newmem, pages, ZONE_ADDR_FOREIGN);
4188 }
4189
4190 __attribute__((overloadable))
4191 static inline bool
4192 zone_submap_is_sequestered(zone_submap_idx_t idx)
4193 {
4194 switch (idx) {
4195 case Z_SUBMAP_IDX_READ_ONLY:
4196 case Z_SUBMAP_IDX_VM:
4197 return true;
4198 case Z_SUBMAP_IDX_DATA:
4199 return false;
4200 default:
4201 return ZSECURITY_CONFIG(SEQUESTER);
4202 }
4203 }
4204
4205 __attribute__((overloadable))
4206 static inline bool
4207 zone_submap_is_sequestered(zone_security_flags_t zsflags)
4208 {
4209 return zone_submap_is_sequestered(zsflags.z_submap_idx);
4210 }
4211
4212 /*!
4213 * @function zone_submap_alloc_sequestered_va
4214 *
4215 * @brief
4216 * Allocates VA without using vm_find_space().
4217 *
4218 * @discussion
4219 * Allocate VA quickly without using the slower vm_find_space() for cases
4220 * when the submaps are fully sequestered.
4221 *
4222 * The VM submap is used to implement the VM itself so it is always sequestered,
4223  * as it can't use kernel_memory_allocate(), which always needs to allocate VM
4224  * map entries. However, it can use vm_map_enter(), which coalesces entries;
4225  * this always succeeds here, so the map only ever needs 2 entries (one for each end).
4226 *
4227 * The RO submap is similarly always sequestered if it exists (as a non
4228 * sequestered RO submap makes very little sense).
4229 *
4230 * The allocator is a very simple bump-allocator
4231 * that allocates from either end.
4232 */
4233 static kern_return_t
4234 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4235 vm_offset_t *addrp)
4236 {
4237 vm_size_t size = ptoa(pages);
4238 vm_map_t map = zone_submap(zsflags);
4239 vm_map_entry_t first, last;
4240 vm_map_offset_t addr;
4241
4242 vm_map_lock(map);
4243
4244 first = vm_map_first_entry(map);
4245 last = vm_map_last_entry(map);
4246
4247 if (first->vme_end + size > last->vme_start) {
4248 vm_map_unlock(map);
4249 return KERN_NO_SPACE;
4250 }
4251
4252 if (zsflags.z_submap_from_end) {
4253 last->vme_start -= size;
4254 addr = last->vme_start;
4255 VME_OFFSET_SET(last, addr);
4256 } else {
4257 addr = first->vme_end;
4258 first->vme_end += size;
4259 }
4260 map->size += size;
4261
4262 vm_map_unlock(map);
4263
4264 *addrp = addr;
4265 return KERN_SUCCESS;
4266 }
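/*
 * Illustrative note: because a fully sequestered submap only ever keeps its
 * two boundary entries, allocating 4 pages from the "front" simply grows
 * first->vme_end by ptoa(4), while a z_submap_from_end zone shrinks
 * last->vme_start by the same amount, so no new VM map entries are created.
 */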
4267
4268 void
4269 zone_fill_initially(zone_t zone, vm_size_t nelems)
4270 {
4271 kma_flags_t kmaflags;
4272 kern_return_t kr;
4273 vm_offset_t addr;
4274 uint32_t pages;
4275 zone_security_flags_t zsflags = zone_security_config(zone);
4276
4277 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4278 assert(zone->z_elems_avail == 0);
4279
4280 kmaflags = zone_kma_flags(zone, zsflags, Z_WAITOK) | KMA_PERMANENT;
4281 pages = zone_alloc_pages_for_nelems(zone, nelems);
4282 if (zone_submap_is_sequestered(zsflags)) {
4283 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4284 if (kr == KERN_SUCCESS) {
4285 kr = kernel_memory_populate(zone_submap(zsflags), addr,
4286 ptoa(pages), kmaflags, VM_KERN_MEMORY_ZONE);
4287 }
4288 } else {
4289 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4290 kr = kernel_memory_allocate(zone_submap(zsflags), &addr,
4291 ptoa(pages), 0, kmaflags, VM_KERN_MEMORY_ZONE);
4292 }
4293
4294 if (kr != KERN_SUCCESS) {
4295 panic("kernel_memory_allocate() of %u pages failed", pages);
4296 }
4297
4298 zone_meta_populate(addr, ptoa(pages));
4299 zcram(zone, addr, pages, ZONE_ADDR_NATIVE);
4300 }
4301
4302 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4303 __attribute__((noinline))
4304 static void
4305 zone_scramble_va_and_unlock(
4306 zone_t z,
4307 struct zone_page_metadata *meta,
4308 uint32_t runs,
4309 uint32_t pages,
4310 uint32_t chunk_pages)
4311 {
4312 struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4313
4314 /*
4315 * Fisher–Yates shuffle, for an array with indices [0, n)
4316 *
4317 * for i from n−1 downto 1 do
4318 * j ← random integer such that 0 ≤ j ≤ i
4319 * exchange a[j] and a[i]
4320 *
4321 * The point here is that early allocations aren't at a fixed
4322 * distance from each other.
4323 */
4324 for (uint32_t i = 0, j = 0; i < runs; i++, j += chunk_pages) {
4325 arr[i] = meta + j;
4326 }
4327
4328 for (uint32_t i = runs - 1; i > 0; i--) {
4329 uint32_t j = zalloc_random_uniform(0, i + 1);
4330
4331 meta = arr[j];
4332 arr[j] = arr[i];
4333 arr[i] = meta;
4334 }
4335
4336 zone_lock(z);
4337
4338 for (uint32_t i = 0; i < runs; i++) {
4339 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4340 }
4341 z->z_va_cur += z->z_percpu ? runs : pages;
4342 }
4343 #endif
4344
4345 static void
4346 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4347 {
4348 zone_security_flags_t zsflags = zone_security_config(z);
4349 struct zone_page_metadata *meta;
4350 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4351 uint32_t chunk_pages = z->z_chunk_pages;
4352 uint32_t runs, pages, guards;
4353 kern_return_t kr;
4354 vm_offset_t addr;
4355
4356 zone_unlock(z);
4357
4358 pages = chunk_pages;
4359 guards = 0;
4360 runs = 1;
4361 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4362 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4363 pages = roundup(atop(ZONE_CHUNK_ALLOC_SIZE), chunk_pages);
4364 runs = pages / chunk_pages;
4365 }
4366 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4367 #if CONFIG_PROB_GZALLOC
4368 /*
4369 * For configuration with probabilistic guard zalloc,
4370 * in zones that opt-in (kalloc, ZC_PGZ_USE_GUARDS),
4371 * add a guard page after each chunk.
4372 *
4373 * Those guard pages are marked with the ZM_PGZ_GUARD
4374 * magical chunk len, and their zm_oob_offs field
4375 * is used to remember optional shift applied
4376 * to returned elements, in order to right-pad-them
4377 * as much as possible.
4378 *
4379 * (see kalloc_ext, zone_element_size, ...).
4380 */
4381 if (z->z_pgz_use_guards) {
4382 guards = runs;
4383 chunk_pages++;
4384 }
4385 #endif /* CONFIG_PROB_GZALLOC */
4386
4387 if (zone_submap_is_sequestered(zsflags)) {
4388 kr = zone_submap_alloc_sequestered_va(zsflags,
4389 pages + guards, &addr);
4390 } else {
4391 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4392 kr = kernel_memory_allocate(zone_submap(zsflags), &addr,
4393 ptoa(pages + guards), 0, kmaflags, VM_KERN_MEMORY_ZONE);
4394 }
4395
4396 if (kr != KERN_SUCCESS) {
4397 uint64_t zone_size = 0;
4398 zone_t zone_largest = zone_find_largest(&zone_size);
4399 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4400 "likely due to memory leak in zone [%s%s] "
4401 "(%luM, %d elements allocated)",
4402 kr, zone_heap_name(z), zone_name(z),
4403 zone_heap_name(zone_largest), zone_name(zone_largest),
4404 (unsigned long)zone_size >> 20,
4405 zone_count_allocated(zone_largest));
4406 }
4407
4408 meta = zone_meta_from_addr(addr);
4409 zone_meta_populate(addr, ptoa(pages + guards));
4410
4411 for (uint32_t i = 0; i < pages + guards; i++) {
4412 meta[i].zm_index = zone_index(z);
4413 }
4414 #if CONFIG_PROB_GZALLOC
4415 if (guards) {
4416 for (uint32_t i = 0; i < pages + guards; i += chunk_pages) {
4417 meta[i + chunk_pages - 1].zm_chunk_len = ZM_PGZ_GUARD;
4418 }
4419 os_atomic_add(&zone_guard_pages, guards, relaxed);
4420 }
4421 #endif /* CONFIG_PROB_GZALLOC */
4422
4423 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4424 if (__improbable(zone_caching_disabled < 0)) {
4425 return zone_scramble_va_and_unlock(z, meta, runs, pages, chunk_pages);
4426 }
4427 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4428
4429 zone_lock(z);
4430
4431 for (uint32_t i = 0; i < pages + guards; i += chunk_pages) {
4432 zone_meta_queue_push(z, &z->z_pageq_va, meta + i);
4433 }
4434 z->z_va_cur += z->z_percpu ? runs : pages;
4435 }
4436
4437 static bool
4438 zone_expand_pred_nope(__unused zone_t z)
4439 {
4440 return false;
4441 }
4442
4443 static inline void
4444 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4445 {
4446 #if DEBUG || DEVELOPMENT
4447 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4448 size, 0, 0, 0);
4449 #else
4450 (void)size;
4451 #endif
4452 }
4453
4454 static inline void
4455 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4456 {
4457 #if DEBUG || DEVELOPMENT
4458 task_t task = current_task_early();
4459 if (pages && task) {
4460 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4461 }
4462 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4463 pages, 0, 0, 0);
4464 #else
4465 (void)pages;
4466 #endif
4467 }
4468
4469 __attribute__((noinline))
4470 static void
4471 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4472 {
4473 uint64_t wait_start = 0;
4474 long mapped;
4475 zone_security_flags_t zsflags = zone_security_config(z);
4476
4477 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4478
4479 if (zsflags.z_allows_foreign || current_thread()->options & TH_OPT_VMPRIV) {
4480 /*
4481 * "allow foreign" zones are allowed to overcommit
4482 * because they're used to reclaim memory (VM support).
4483 */
4484 return;
4485 }
4486
4487 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4488
4489 /*
4490 * If the zone map is really exhausted, wait on the GC thread,
4491 * donating our priority (which is important because the GC
4492 * thread is at a rather low priority).
4493 */
4494 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4495 uint32_t wait_ms = n * (n + 1) / 2;
4496 uint64_t interval;
4497
4498 if (n == 1) {
4499 wait_start = mach_absolute_time();
4500 } else {
4501 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4502 }
4503 if (zone_exhausted_timeout > 0 &&
4504 wait_ms > zone_exhausted_timeout) {
4505 panic("zone map exhaustion: waited for %dms "
4506 "(pages: %ld, max: %ld, wanted: %d)",
4507 wait_ms, mapped, zone_pages_wired_max, pgs);
4508 }
4509
4510 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4511 &interval);
4512
4513 lck_spin_lock(&zone_exhausted_lock);
4514 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4515 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4516 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4517
4518 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4519 }
4520 }
4521
4522 static bool
4523 zone_expand_wait_for_pages(bool waited)
4524 {
4525 if (waited) {
4526 return false;
4527 }
4528 #if DEBUG || DEVELOPMENT
4529 if (zalloc_simulate_vm_pressure) {
4530 return false;
4531 }
4532 #endif /* DEBUG || DEVELOPMENT */
4533 return !vm_pool_low();
4534 }
4535
4536 static void
4537 zone_expand_locked(zone_t z, zalloc_flags_t flags, bool (*pred)(zone_t))
4538 {
4539 thread_t self = current_thread();
4540 bool vm_priv = (self->options & TH_OPT_VMPRIV);
4541 bool clear_vm_priv;
4542 thread_pri_floor_t token;
4543 zone_security_flags_t zsflags = zone_security_config(z);
4544
4545 for (;;) {
4546 if (!pred) {
4547 /* NULL pred means "try just once" */
4548 pred = zone_expand_pred_nope;
4549 } else if (!pred(z)) {
4550 return;
4551 }
4552
4553 if (vm_priv && !z->z_expander_vm_priv) {
4554 /*
4555 * Claim the vm priv overcommit slot
4556 *
4557 * We do not track exact ownership for VM privileged
4558 * threads, so use the rwlock boost as a stop-gap
4559 * just in case.
4560 */
4561 token = thread_priority_floor_start();
4562 z->z_expander_vm_priv = true;
4563 clear_vm_priv = true;
4564 } else {
4565 clear_vm_priv = false;
4566 }
4567
4568 if (z->z_expander == NULL) {
4569 z->z_expander = self;
4570 break;
4571 }
4572 if (clear_vm_priv) {
4573 break;
4574 }
4575
4576 if (flags & Z_NOPAGEWAIT) {
4577 return;
4578 }
4579
4580 z->z_expanding_wait = true;
4581 lck_spin_sleep_with_inheritor(&z->z_lock, LCK_SLEEP_DEFAULT,
4582 &z->z_expander, z->z_expander,
4583 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4584 }
4585
4586 do {
4587 struct zone_page_metadata *meta = NULL;
4588 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4589 vm_page_t page_list = NULL;
4590 vm_offset_t addr = 0;
4591 int waited = 0;
4592
4593 /*
4594 * While we hold the zone lock, look if there's VA we can:
4595 * - complete from partial pages,
4596 * - reuse from the sequester list.
4597 *
4598 * When the page is being populated we pretend we allocated
4599 * an extra element so that zone_gc() can't attempt to free
4600 * the chunk (as it could become empty while we wait for pages).
4601 */
4602 if (zone_pva_is_null(z->z_pageq_va)) {
4603 zone_allocate_va_locked(z, flags);
4604 }
4605
4606 meta = zone_meta_queue_pop_native(z, &z->z_pageq_va, &addr);
4607 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4608 cur_pages = meta->zm_page_index;
4609 meta -= cur_pages;
4610 addr -= ptoa(cur_pages);
4611 zone_meta_lock_in_partial(z, meta, cur_pages);
4612 }
4613 zone_unlock(z);
4614
4615 /*
4616 * And now allocate pages to populate our VA.
4617 */
4618 if (z->z_percpu) {
4619 min_pages = z->z_chunk_pages;
4620 } else {
4621 min_pages = (uint32_t)atop(round_page(zone_oob_offs(z) +
4622 zone_elem_size(z)));
4623 }
4624
4625 /*
4626 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4627 * if we're running out of zone memory
4628 */
4629 if (__improbable(zone_map_nearing_exhaustion())) {
4630 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4631 }
4632
4633 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4634
4635 while (pages < z->z_chunk_pages - cur_pages) {
4636 vm_page_t m = vm_page_grab();
4637
4638 if (m) {
4639 pages++;
4640 m->vmp_snext = page_list;
4641 page_list = m;
4642 vm_page_zero_fill(m);
4643 continue;
4644 }
4645
4646 if (pages >= min_pages &&
4647 !zone_expand_wait_for_pages(waited)) {
4648 break;
4649 }
4650
4651 if ((flags & Z_NOPAGEWAIT) == 0) {
4652 waited++;
4653 VM_PAGE_WAIT();
4654 continue;
4655 }
4656
4657 /*
4658 * Undo everything and bail out:
4659 *
4660 * - free pages
4661 * - undo the fake allocation if any
4662 * - put the VA back on the VA page queue.
4663 */
4664 vm_page_free_list(page_list, FALSE);
4665 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4666
4667 zone_lock(z);
4668
4669 if (cur_pages) {
4670 zone_meta_unlock_from_partial(z, meta, cur_pages);
4671 }
4672 if (meta) {
4673 zone_meta_queue_push(z, &z->z_pageq_va,
4674 meta + cur_pages);
4675 }
4676 goto page_shortage;
4677 }
4678
4679 kernel_memory_populate_with_pages(zone_submap(zsflags),
4680 addr + ptoa(cur_pages), ptoa(pages), page_list,
4681 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4682 (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY) ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);
4683
4684 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4685
4686 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages,
4687 ZONE_ADDR_NATIVE);
4688 } while (pred(z));
4689
4690 page_shortage:
4691 if (clear_vm_priv) {
4692 z->z_expander_vm_priv = false;
4693 thread_priority_floor_end(&token);
4694 }
4695 if (z->z_expander == self) {
4696 z->z_expander = THREAD_NULL;
4697 }
4698 if (z->z_expanding_wait) {
4699 z->z_expanding_wait = false;
4700 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4701 }
4702 }
4703
4704 static bool
4705 zalloc_needs_refill(zone_t zone)
4706 {
4707 if (zone->z_elems_free > zone->z_elems_rsv) {
4708 return false;
4709 }
4710 if (zone->z_wired_cur < zone->z_wired_max) {
4711 return true;
4712 }
4713 if (zone->exhaustible) {
4714 return false;
4715 }
4716 if (zone->expandable) {
4717 /*
4718 * If we're expandable, just don't go through this again.
4719 */
4720 zone->z_wired_max = ~0u;
4721 return true;
4722 }
4723 zone_unlock(zone);
4724
4725 panic("zone '%s%s' exhausted", zone_heap_name(zone), zone_name(zone));
4726 }
4727
4728 static void
4729 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4730 {
4731 zone_foreach(z) {
4732 if (z->no_callout) {
4733 /* z_async_refilling will never be set */
4734 continue;
4735 }
4736
4737 zone_lock(z);
4738 if (z->z_self && z->z_async_refilling) {
4739 z->z_async_refilling = false;
4740 zone_expand_locked(z, Z_WAITOK, zalloc_needs_refill);
4741 }
4742 zone_unlock(z);
4743 }
4744 }
4745
4746 static inline void
4747 zone_expand_async_schedule_if_needed(zone_t zone)
4748 {
4749 if (__improbable(startup_phase < STARTUP_SUB_THREAD_CALL)) {
4750 return;
4751 }
4752
4753 if (zone->z_elems_free > zone->z_elems_rsv || zone->z_async_refilling ||
4754 zone->no_callout) {
4755 return;
4756 }
4757
4758 if (!zone->expandable && zone->z_wired_cur >= zone->z_wired_max) {
4759 return;
4760 }
4761
4762 if (startup_phase < STARTUP_SUB_EARLY_BOOT) {
4763 return;
4764 }
4765
4766 if (zone->z_elems_free == 0 || !vm_pool_low()) {
4767 zone->z_async_refilling = true;
4768 thread_call_enter(&zone_expand_callout);
4769 }
4770 }
4771
4772 #endif /* !ZALLOC_TEST */
4773 #pragma mark zone jetsam integration
4774 #if !ZALLOC_TEST
4775
4776 /*
4777 * We're being very conservative here and picking a value of 95%. We might need to lower this if
4778 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4779 */
4780 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4781
4782 /*
4783 * Threshold above which largest zones should be included in the panic log
4784 */
4785 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4786
4787 /*
4788 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4789 * where X=zone_map_jetsam_limit.
4790 *
4791 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4792 */
4793 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4794 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
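/*
 * Example (illustrative): booting with zone_map_jetsam_limit=90 lowers the
 * trigger point to 90% of zone_pages_wired_max, assuming startup derives
 * zone_pages_jetsam_threshold from this tunable the same way
 * zone_map_jetsam_set_limit() below does when the limit changes at runtime.
 */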
4795
4796 kern_return_t
4797 zone_map_jetsam_set_limit(uint32_t value)
4798 {
4799 if (value <= 0 || value > 100) {
4800 return KERN_INVALID_VALUE;
4801 }
4802
4803 zone_map_jetsam_limit = value;
4804 os_atomic_store(&zone_pages_jetsam_threshold,
4805 zone_pages_wired_max * value / 100, relaxed);
4806 return KERN_SUCCESS;
4807 }
4808
4809 void
4810 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4811 {
4812 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4813 *current_size = ptoa_64(phys_pages);
4814 *capacity = ptoa_64(zone_pages_wired_max);
4815 }
4816
4817 void
4818 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4819 {
4820 zone_t largest_zone = zone_find_largest(zone_size);
4821
4822 /*
4823 * Append kalloc heap name to zone name (if zone is used by kalloc)
4824 */
4825 snprintf(zone_name, zone_name_len, "%s%s",
4826 zone_heap_name(largest_zone), largest_zone->z_name);
4827 }
4828
4829 static bool
4830 zone_map_nearing_threshold(unsigned int threshold)
4831 {
4832 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4833 return phys_pages * 100 > zone_pages_wired_max * threshold;
4834 }
4835
4836 bool
4837 zone_map_nearing_exhaustion(void)
4838 {
4839 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
4840
4841 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
4842 }
4843
4844
4845 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
4846
4847 /*
4848 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
4849 * to walk through the jetsam priority bands and kill processes.
4850 */
4851 static zone_t
4852 kill_process_in_largest_zone(void)
4853 {
4854 pid_t pid = -1;
4855 uint64_t zone_size = 0;
4856 zone_t largest_zone = zone_find_largest(&zone_size);
4857
4858 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
4859 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
4860 ptoa_64(zone_pages_wired_max),
4861 (uint64_t)zone_submaps_approx_size(),
4862 (uint64_t)(zone_foreign_size() + zone_native_size()),
4863 zone_map_jetsam_limit);
4864 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
4865 largest_zone->z_name, (uintptr_t)zone_size);
4866
4867 /*
4868 * We want to make sure we don't call this function from userspace.
4869 	 * Otherwise we could end up trying to synchronously kill the process
4870 * whose context we're in, causing the system to hang.
4871 */
4872 assert(current_task() == kernel_task);
4873
4874 /*
4875 * If vm_object_zone is the largest, check to see if the number of
4876 * elements in vm_map_entry_zone is comparable.
4877 *
4878 * If so, consider vm_map_entry_zone as the largest. This lets us target
4879 * a specific process to jetsam to quickly recover from the zone map
4880 * bloat.
4881 */
4882 if (largest_zone == vm_object_zone) {
4883 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
4884 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
4885 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
4886 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
4887 largest_zone = vm_map_entry_zone;
4888 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
4889 (uintptr_t)zone_size_wired(largest_zone));
4890 }
4891 }
4892
4893 /* TODO: Extend this to check for the largest process in other zones as well. */
4894 if (largest_zone == vm_map_entry_zone) {
4895 pid = find_largest_process_vm_map_entries();
4896 } else {
4897 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
4898 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
4899 largest_zone->z_name);
4900 }
4901 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
4902 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
4903 }
4904
4905 return largest_zone;
4906 }
4907
4908 #endif /* !ZALLOC_TEST */
4909 #pragma mark probabilistic gzalloc
4910 #if !ZALLOC_TEST
4911 #if CONFIG_PROB_GZALLOC
4912
4913 extern uint32_t random(void);
4914 struct pgz_backtrace {
4915 uint32_t pgz_depth;
4916 int32_t pgz_bt[MAX_ZTRACE_DEPTH];
4917 };
4918
4919 static int32_t PERCPU_DATA(pgz_sample_counter);
4920 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
4921 static uint32_t pgz_uses; /* number of zones using PGZ */
4922 static int32_t pgz_slot_avail;
4923 #if OS_ATOMIC_HAS_LLSC
4924 struct zone_page_metadata *pgz_slot_head;
4925 #else
4926 static struct pgz_slot_head {
4927 uint32_t psh_count;
4928 uint32_t psh_slot;
4929 } pgz_slot_head;
4930 #endif
4931 struct zone_page_metadata *pgz_slot_tail;
4932 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
4933
4934 static struct zone_page_metadata *
4935 pgz_meta(uint32_t index)
4936 {
4937 return &zone_info.zi_pgz_meta[2 * index + 1];
4938 }
4939
4940 static struct pgz_backtrace *
4941 pgz_bt(uint32_t slot, bool free)
4942 {
4943 return &pgz_backtraces[2 * slot + free];
4944 }
4945
4946 static void
4947 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
4948 {
4949 struct backtrace_control ctl = {
4950 .btc_frame_addr = (uintptr_t)fp,
4951 };
4952
4953 bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
4954 (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
4955 }
4956
4957 static uint32_t
4958 pgz_slot(vm_offset_t addr)
4959 {
4960 return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
4961 }
4962
4963 static vm_offset_t
4964 pgz_addr(uint32_t slot)
4965 {
4966 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
4967 }
4968
4969 static bool
4970 pgz_sample(zalloc_flags_t flags)
4971 {
4972 int32_t *counterp, cnt;
4973
4974 counterp = PERCPU_GET(pgz_sample_counter);
4975 cnt = *counterp;
4976 if (__probable(cnt > 0)) {
4977 *counterp = cnt - 1;
4978 return false;
4979 }
4980
4981 if (pgz_slot_avail <= 0) {
4982 return false;
4983 }
4984
4985 /*
4986 * zalloc_random_uniform() might block, so when the sampled allocation
4987 * requested Z_NOWAIT, set the counter to `-1` which will cause
4988 * the next allocation that can block to generate a new random value.
4989 * No allocation on this CPU will sample until then.
4990 */
4991 if (flags & Z_NOWAIT) {
4992 *counterp = -1;
4993 } else {
4994 enable_preemption();
4995 *counterp = zalloc_random_uniform(0, 2 * pgz_sample_rate);
4996 disable_preemption();
4997 }
4998
4999 return cnt == 0;
5000 }
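/*
 * Illustrative note: the per-CPU counter above is rearmed with a value drawn
 * uniformly in [0, 2 * pgz_sample_rate), so on average one allocation in
 * pgz_sample_rate per CPU is considered for protection (e.g. a rate of 1000
 * samples roughly 0.1% of eligible allocations).
 */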
5001
5002 static inline bool
5003 pgz_slot_alloc(uint32_t *slot)
5004 {
5005 struct zone_page_metadata *m;
5006 uint32_t tries = 100;
5007
5008 disable_preemption();
5009
5010 #if OS_ATOMIC_USE_LLSC
5011 int32_t ov, nv;
5012 os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5013 if (__improbable(ov <= 0)) {
5014 os_atomic_rmw_loop_give_up({
5015 enable_preemption();
5016 return false;
5017 });
5018 }
5019 nv = ov - 1;
5020 });
5021 #else
5022 if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5023 os_atomic_inc(&pgz_slot_avail, relaxed);
5024 enable_preemption();
5025 return false;
5026 }
5027 #endif
5028
5029 again:
5030 if (__improbable(tries-- == 0)) {
5031 /*
5032 * Too much contention,
5033 * extremely unlikely but do not stay stuck.
5034 */
5035 os_atomic_inc(&pgz_slot_avail, relaxed);
5036 enable_preemption();
5037 return false;
5038 }
5039
5040 #if OS_ATOMIC_HAS_LLSC
5041 do {
5042 m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5043 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5044 /*
5045 * Either we are waiting for an enqueuer (unlikely)
5046 * or we are competing with another core and
5047 * are looking at a popped element.
5048 */
5049 os_atomic_clear_exclusive();
5050 goto again;
5051 }
5052 } while (!os_atomic_store_exclusive(&pgz_slot_head,
5053 m->zm_pgz_slot_next, relaxed));
5054 #else
5055 struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5056 struct pgz_slot_head ov, nv;
5057 os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5058 m = &base[ov.psh_slot * 2];
5059 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5060 /*
5061 * Either we are waiting for an enqueuer (unlikely)
5062 * or we are competing with another core and
5063 * are looking at a popped element.
5064 */
5065 os_atomic_rmw_loop_give_up(goto again);
5066 }
5067 nv.psh_count = ov.psh_count + 1;
5068 nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5069 });
5070 #endif
5071
5072 enable_preemption();
5073
5074 m->zm_pgz_slot_next = NULL;
5075 *slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5076 return true;
5077 }
5078
5079 static inline bool
5080 pgz_slot_free(uint32_t slot)
5081 {
5082 struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5083 struct zone_page_metadata *t;
5084
5085 disable_preemption();
5086 t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5087 os_atomic_store(&t->zm_pgz_slot_next, m, release);
5088 os_atomic_inc(&pgz_slot_avail, relaxed);
5089 enable_preemption();
5090
5091 return true;
5092 }
5093
5094 /*!
5095 * @function pgz_protect()
5096 *
5097 * @brief
5098 * Try to protect an allocation with PGZ.
5099 *
5100 * @param zone The zone the allocation was made against.
5101 * @param addr An allocated element address to protect.
5102 * @param flags The @c zalloc_flags_t passed to @c zalloc.
5103 * @param fp The caller frame pointer (for the backtrace).
5104 * @returns The new address for the element, or @c addr.
5105 */
5106 __attribute__((noinline))
5107 static vm_offset_t
5108 pgz_protect(zone_t zone, vm_offset_t addr, zalloc_flags_t flags, void *fp)
5109 {
5110 kern_return_t kr;
5111 uint32_t slot;
5112
5113 if (!pgz_slot_alloc(&slot)) {
5114 return addr;
5115 }
5116
5117 /*
5118 * Try to double-map the page (may fail if Z_NOWAIT).
5119 	 * We will always find a PA because pgz_init() pre-expanded the pmap.
5120 */
5121 vm_offset_t new_addr = pgz_addr(slot);
5122 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5123
5124 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5125 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5126 (flags & Z_NOWAIT) ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5127
5128 if (__improbable(kr != KERN_SUCCESS)) {
5129 pgz_slot_free(slot);
5130 return addr;
5131 }
5132
5133 struct zone_page_metadata tmp = {
5134 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5135 .zm_index = zone_index(zone),
5136 };
5137 struct zone_page_metadata *meta = pgz_meta(slot);
5138
5139 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5140 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5141 pgz_backtrace(pgz_bt(slot, false), fp);
5142
5143 return new_addr + (addr & PAGE_MASK);
5144 }
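/*
 * Illustrative note: a protected element keeps its offset within the page.
 * If an element lives at offset 0x40 into its backing page and is assigned
 * slot 3, the caller is handed pgz_addr(3) + 0x40: the same bytes, now only
 * reachable through the guarded double mapping set up above.
 */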
5145
5146 /*!
5147 * @function pgz_unprotect()
5148 *
5149 * @brief
5150 * Release a PGZ slot and returns the original address of a freed element.
5151 *
5152 * @param addr A PGZ protected element address.
5153 * @param fp The caller frame pointer (for the backtrace).
5154 * @returns The non protected address for the element
5155 * that was passed to @c pgz_protect().
5156 */
5157 __attribute__((noinline))
5158 static vm_offset_t
5159 pgz_unprotect(vm_offset_t addr, void *fp)
5160 {
5161 struct zone_page_metadata *meta;
5162 struct zone_page_metadata tmp;
5163 uint32_t slot;
5164
5165 slot = pgz_slot(addr);
5166 meta = zone_meta_from_addr(addr);
5167 tmp = *meta;
5168 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5169 goto double_free;
5170 }
5171
5172 pmap_remove(kernel_pmap, trunc_page(addr), trunc_page(addr) + PAGE_SIZE);
5173
5174 pgz_backtrace(pgz_bt(slot, true), fp);
5175
5176 tmp.zm_chunk_len = ZM_PGZ_FREE;
5177 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5178 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5179 goto double_free;
5180 }
5181
5182 pgz_slot_free(slot);
5183 return tmp.zm_pgz_orig_addr;
5184
5185 double_free:
5186 panic_fault_address = addr;
5187 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5188 panic("probabilistic gzalloc double free: %p", (void *)addr);
5189 }
5190
5191 bool
5192 pgz_owned(mach_vm_address_t addr)
5193 {
5194 vm_offset_t rmin, rmax;
5195
5196 #if CONFIG_KERNEL_TBI
5197 addr = VM_KERNEL_TBI_FILL(addr);
5198 #endif /* CONFIG_KERNEL_TBI */
5199
5200 zone_range_load(&zone_info.zi_pgz_range, rmin, rmax);
5201
5202 return (addr >= rmin) & (addr < rmax);
5203 }
5204
5205
5206 __attribute__((always_inline))
5207 vm_offset_t
5208 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5209 {
5210 struct zone_page_metadata *meta;
5211
5212 if (__probable(!pgz_owned(addr))) {
5213 return (vm_offset_t)addr;
5214 }
5215
5216 if (zone_addr_size_crosses_page(addr, size)) {
5217 panic("invalid size for PGZ protected address %p:%p",
5218 (void *)addr, (void *)(addr + size));
5219 }
5220
5221 meta = zone_meta_from_addr((vm_offset_t)addr);
5222 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5223 panic_fault_address = (vm_offset_t)addr;
5224 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5225 }
5226
5227 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5228 }
5229
5230 __attribute__((always_inline))
5231 vm_offset_t
5232 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5233 {
5234 struct zone_page_metadata *meta;
5235
5236 if (__probable(!pgz_owned(addr))) {
5237 return addr;
5238 }
5239
5240 meta = zone_meta_from_addr(addr);
5241 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5242
5243 if (zid != ZONE_ID_ANY && zone_index_from_ptr((void *)addr) != zid) {
5244 return 0;
5245 }
5246
5247 return addr;
5248 }
5249
5250 static void
5251 pgz_zone_init(zone_t z)
5252 {
5253 char zn[MAX_ZONE_NAME];
5254 char zv[MAX_ZONE_NAME];
5255 char key[30];
5256
5257 if (zone_elem_size(z) > PAGE_SIZE) {
5258 return;
5259 }
5260
5261 if (zone_index(z) == ZONE_ID_SELECT_SET) {
5262 return;
5263 }
5264
5265 if (pgz_all) {
5266 os_atomic_inc(&pgz_uses, relaxed);
5267 z->z_pgz_tracked = true;
5268 return;
5269 }
5270
5271 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5272
5273 for (int i = 1;; i++) {
5274 snprintf(key, sizeof(key), "pgz%d", i);
5275 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5276 break;
5277 }
5278 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5279 os_atomic_inc(&pgz_uses, relaxed);
5280 z->z_pgz_tracked = true;
5281 break;
5282 }
5283 }
5284 }
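/*
 * Editorial note: the "pgz%d" keys parsed above mean PGZ can be pointed at
 * specific zones from the boot command line, e.g. (zone names hypothetical):
 *
 * <code>
 *	pgz1=ipc.ports pgz2=proc_task
 * </code>
 *
 * Each value is matched against the zone name via track_this_zone(), or
 * against kalloc size classes via track_kalloc_zones(), exactly as above.
 */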
5285
5286 __startup_func
5287 static vm_size_t
5288 pgz_get_size(void)
5289 {
5290 if (pgz_slots == UINT32_MAX) {
5291 /*
5292 * Scale with RAM size: roughly 250 slots per GiB (one per 4 MiB of RAM)
5293 */
5294 pgz_slots = (uint32_t)(sane_size >> 22);
5295 }
5296
5297 /*
5298 * Make sure that the slot allocation scheme works.
5299 * see pgz_slot_alloc() / pgz_slot_free();
5300 */
5301 if (pgz_slots < zpercpu_count() * 4) {
5302 pgz_slots = zpercpu_count() * 4;
5303 }
5304 if (pgz_slots >= UINT16_MAX) {
5305 pgz_slots = UINT16_MAX - 1;
5306 }
5307
5308 /*
5309 * Quarantine is 33% of slots by default, no more than 90%.
5310 */
5311 if (pgz_quarantine == 0) {
5312 pgz_quarantine = pgz_slots / 3;
5313 }
5314 if (pgz_quarantine > pgz_slots * 9 / 10) {
5315 pgz_quarantine = pgz_slots * 9 / 10;
5316 }
5317 pgz_slot_avail = pgz_slots - pgz_quarantine;
5318
5319 return ptoa(2 * pgz_slots + 1);
5320 }
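/*
 * Editorial worked example, using the defaults computed above: on a machine
 * with 8 GiB of usable RAM and pgz_slots left at UINT32_MAX:
 *
 * <code>
 *	pgz_slots      = 8 GiB >> 22         = 2048 slots
 *	pgz_quarantine = 2048 / 3            = 682 slots
 *	pgz_slot_avail = 2048 - 682          = 1366 slots
 *	mapped VA      = ptoa(2 * 2048 + 1)  = 4097 pages (slots + guard pages)
 * </code>
 */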
5321
5322 __startup_func
5323 static void
5324 pgz_init(void)
5325 {
5326 if (!pgz_uses) {
5327 return;
5328 }
5329
5330 if (pgz_sample_rate == 0) {
5331 /*
5332 * If no rate was provided, pick a random one that scales
5333 * with the number of protected zones.
5334 *
5335 * Average two uniform draws so the result is biased toward the
5336 * middle of the range, avoiding too many really fast sample rates.
5337 */
5338 uint32_t factor = MIN(pgz_uses, 10);
5339 uint32_t max_rate = 1000 * factor;
5340 uint32_t min_rate = 100 * factor;
5341
5342 pgz_sample_rate = (zalloc_random_uniform(min_rate, max_rate) +
5343 zalloc_random_uniform(min_rate, max_rate)) / 2;
5344 }
5345
5346 struct zone_map_range *r = &zone_info.zi_pgz_range;
5347 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5348 zone_meta_populate(r->min_address, zone_range_size(r));
5349
5350 for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5351 zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5352 }
5353
5354 for (size_t i = 1; i < pgz_slots; i++) {
5355 zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5356 &zone_info.zi_pgz_meta[2 * i + 1];
5357 }
5358 #if OS_ATOMIC_HAS_LLSC
5359 pgz_slot_head = &zone_info.zi_pgz_meta[1];
5360 #endif
5361 pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5362
5363 pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5364 2 * pgz_slots, ZALIGN_PTR);
5365
5366 /*
5367 * expand the pmap so that pmap_enter_options_addr()
5368 * in pgz_protect() never needs to call pmap_expand().
5369 */
5370 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5371 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5372 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5373 PMAP_OPTIONS_NOENTER, NULL);
5374 }
5375
5376 /* do this last as this will enable pgz */
5377 percpu_foreach(counter, pgz_sample_counter) {
5378 *counter = zalloc_random_uniform(0, 2 * pgz_sample_rate);
5379 }
5380 }
5381 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
5382
5383 static void
5384 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5385 {
5386 struct pgz_backtrace *bt = pgz_bt(slot, free);
5387 const char *what = free ? "Free" : "Allocation";
5388 uintptr_t buf[MAX_ZTRACE_DEPTH];
5389
5390 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5391 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5392 return;
5393 }
5394
5395 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5396 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5397
5398 paniclog_append_noflush(" %s Backtrace:\n", what);
5399 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5400 if (has_syms) {
5401 paniclog_append_noflush(" %p ", (void *)buf[i]);
5402 panic_print_symbol_name(buf[i]);
5403 paniclog_append_noflush("\n");
5404 } else {
5405 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5406 }
5407 }
5408 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5409 }
5410
5411 static void
5412 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5413 {
5414 struct zone_page_metadata *meta;
5415 vm_offset_t elem, esize;
5416 const char *type;
5417 const char *prob;
5418 uint32_t slot;
5419 zone_t z;
5420
5421 slot = pgz_slot(addr);
5422 meta = pgz_meta(slot);
5423 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5424
5425 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5426
5427 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5428 meta->zm_index &&
5429 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5430 z = &zone_array[meta->zm_index];
5431 } else {
5432 paniclog_append_noflush(" Zone : <unknown>\n");
5433 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5434 paniclog_append_noflush("\n");
5435 return;
5436 }
5437
5438 esize = zone_elem_size(z);
5439 paniclog_append_noflush(" Zone : %s%s\n",
5440 zone_heap_name(z), zone_name(z));
5441 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5442 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5443 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5444
5445 if (addr < elem) {
5446 type = "out-of-bounds(underflow) + use-after-free";
5447 prob = "low";
5448 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5449 type = "double-free";
5450 prob = "high";
5451 } else if (addr < elem + esize) {
5452 type = "use-after-free";
5453 prob = "high";
5454 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5455 type = "out-of-bounds + use-after-free";
5456 prob = "low";
5457 } else {
5458 type = "out-of-bounds";
5459 prob = "high";
5460 }
5461 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5462 type, prob);
5463 if (addr < elem) {
5464 paniclog_append_noflush(" Access : %d byte(s) before\n",
5465 (uint32_t)(elem - addr) + 1);
5466 } else if (addr < elem + esize) {
5467 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5468 (uint32_t)(addr - elem) + 1);
5469 } else {
5470 paniclog_append_noflush(" Access : %d byte(s) past\n",
5471 (uint32_t)(addr - (elem + esize)) + 1);
5472 }
5473
5474 panic_display_pgz_bt(has_syms, slot, false);
5475 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5476 panic_display_pgz_bt(has_syms, slot, true);
5477 }
5478
5479 paniclog_append_noflush("\n");
5480 }
5481
5482 #endif /* CONFIG_PROB_GZALLOC */
5483 #endif /* !ZALLOC_TEST */
5484 #pragma mark zfree
5485 #if !ZALLOC_TEST
5486
5487 /*!
5488 * @defgroup zfree
5489 * @{
5490 *
5491 * @brief
5492 * The codepath for zone frees.
5493 *
5494 * @discussion
5495 * There are 4 major ways to free memory back to the zone allocator:
5496 * - @c zfree()
5497 * - @c zfree_percpu()
5498 * - @c kfree*()
5499 * - @c zfree_permanent()
5500 *
5501 * While permanent zones have their own allocation scheme, all other codepaths
5502 * will eventually go through the @c zfree_ext() choking point.
5503 *
5504 * Ignoring the @c gzalloc_free() codepath, the decision tree looks like this:
5505 * <code>
5506 * zfree_ext()
5507 * ├───> zfree_cached() ────────────────╮
5508 * │ │ │
5509 * │ │ │
5510 * │ ├───> zfree_cached_slow() ───┤
5511 * │ │ │ │
5512 * │ │ v │
5513 * ╰───────┴───> zfree_item() ──────────┴───>
5514 * </code>
5515 *
5516 * @c zfree_ext() takes care of all the generic work to perform on an element
5517 * before it is freed (zeroing, logging, tagging, ...) then will hand it off to:
5518 * - @c zfree_item() if zone caching is off
5519 * - @c zfree_cached() if zone caching is on.
5520 *
5521 * @c zfree_cached() can take a number of decisions:
5522 * - a fast path if the (f) or (a) magazines have space (preemption disabled),
5523 * - using the cpu local or recirculation depot calling @c zfree_cached_slow(),
5524 * - falling back to @c zfree_item() when CPU caching has been disabled.
5525 */
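/*
 * Editorial sketch (not part of the original source): from a client's point
 * of view, the free path mirrors the allocation path. The zone and struct
 * names below are hypothetical:
 *
 * <code>
 *	zone_t thing_zone = zone_create("example.thing",
 *	    sizeof(struct thing), ZC_NONE);
 *
 *	struct thing *t = zalloc_flags(thing_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(thing_zone, t);	// funnels into zfree_ext() described above
 * </code>
 */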
5526
5527 #if KASAN_ZALLOC
5528 /*
5529 * Called from zfree() to add the element being freed to the KASan quarantine.
5530 *
5531 * Returns true if the newly-freed element made it into the quarantine without
5532 * displacing another, false otherwise. In the latter case, addrp points to the
5533 * address of the displaced element, which will be freed by the zone.
5534 */
5535 static bool
5536 kasan_quarantine_freed_element(
5537 zone_t *zonep, /* the zone the element is being freed to */
5538 void **addrp) /* address of the element being freed */
5539 {
5540 zone_t zone = *zonep;
5541 void *addr = *addrp;
5542
5543 /*
5544 * Resize back to the real allocation size and hand off to the KASan
5545 * quarantine. `addr` may then point to a different allocation, if the
5546 * current element replaced another in the quarantine. The zone then
5547 * takes ownership of the swapped out free element.
5548 */
5549 vm_size_t usersz = zone_elem_size(zone) - 2 * zone->z_kasan_redzone;
5550 vm_size_t sz = usersz;
5551
5552 if (addr && zone->z_kasan_redzone) {
5553 kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC);
5554 addr = (void *)kasan_dealloc((vm_address_t)addr, &sz);
5555 assert(sz == zone_elem_size(zone));
5556 }
5557 if (addr && !zone->kasan_noquarantine) {
5558 kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, zonep, usersz, true);
5559 if (!addr) {
5560 return TRUE;
5561 }
5562 }
5563 if (addr && zone->kasan_noquarantine) {
5564 kasan_unpoison(addr, zone_elem_size(zone));
5565 }
5566 *addrp = addr;
5567 return FALSE;
5568 }
5569 #endif /* KASAN_ZALLOC */
5570
5571 __header_always_inline void
5572 zfree_drop(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze,
5573 bool recirc)
5574 {
5575 vm_offset_t esize = zone_elem_size(zone);
5576
5577 if (zone_meta_mark_free(meta, ze) == recirc) {
5578 zone_meta_double_free_panic(zone, ze, __func__);
5579 }
5580
5581 vm_offset_t old_size = meta->zm_alloc_size;
5582 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5583 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5584
5585 if (new_size == 0) {
5586 /* whether the page was on the intermediate or all_used queue, move it to free */
5587 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5588 zone->z_wired_empty += meta->zm_chunk_len;
5589 } else if (old_size + esize > max_size) {
5590 /* first free element on page, move from all_used */
5591 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5592 }
5593 }
5594
5595 static void
5596 zfree_item(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze)
5597 {
5598 /* transfer preemption count to lock */
5599 zone_lock_nopreempt_check_contention(zone, NULL);
5600
5601 zfree_drop(zone, meta, ze, false);
5602 zone_elems_free_add(zone, 1);
5603
5604 zone_unlock(zone);
5605 }
5606
5607 __attribute__((noinline))
5608 static void
5609 zfree_cached_slow(zone_t zone, struct zone_page_metadata *meta,
5610 zone_element_t ze, zone_cache_t cache)
5611 {
5612 struct zone_depot mags;
5613 zone_magazine_t mag = NULL;
5614 uint32_t depot_max;
5615 uint16_t n_mags = 0;
5616
5617 if (zone_meta_is_free(meta, ze)) {
5618 zone_meta_double_free_panic(zone, ze, __func__);
5619 }
5620
5621 if (zone == zc_magazine_zone) {
5622 mag = (zone_magazine_t)zone_element_addr(zone, ze,
5623 zone_elem_size(zone));
5624 #if KASAN_ZALLOC
5625 kasan_poison_range((vm_offset_t)mag, zone_elem_size(zone),
5626 ASAN_VALID);
5627 #endif
5628 } else {
5629 mag = zone_magazine_alloc(Z_NOWAIT);
5630 if (__improbable(mag == NULL)) {
5631 return zfree_item(zone, meta, ze);
5632 }
5633 mag->zm_cur = 1;
5634 mag->zm_elems[0] = ze;
5635 }
5636
5637 mag = zone_magazine_replace(&cache->zc_free_cur,
5638 &cache->zc_free_elems, mag);
5639
5640 z_debug_assert(cache->zc_free_cur <= 1);
5641 z_debug_assert(mag->zm_cur == zc_mag_size());
5642
5643 /*
5644 * Depot growth policy:
5645 *
5646 * The zc_alloc and zc_free are on average half empty/full,
5647 * hence count for "1" unit of zc_mag_size().
5648 *
5649 * We use the local depot for each `zc_depot_max` extra `zc_mag_size()`
5650 * worth of elements we're allowed.
5651 *
5652 * If pushing the bucket puts us in excess of `zc_depot_max`,
5653 * then we trim (1/zc_recirc_denom) buckets out, in order
5654 * to amortize taking the zone lock.
5655 *
5656 * Note that `zc_depot_max` can be mutated by the GC concurrently,
5657 * so take a copy that we use throughout.
5658 */
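/*
 * Editorial worked example with hypothetical tunables: with
 * zc_mag_size() == 8, zc_depot_max == 64 and zc_recirc_denom == 3,
 * magazines are stashed locally while (zc_depot_cur + 1) * 8 <= 64,
 * i.e. up to 7 of them; pushing an 8th triggers a trim that moves
 * 3 magazines (24 elements, about 1/zc_recirc_denom of the allowance)
 * to the recirculation depot in a single batch.
 */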
5659 depot_max = os_atomic_load(&cache->zc_depot_max, relaxed);
5660 if (2 * zc_mag_size() <= depot_max) {
5661 zone_depot_lock_nopreempt(cache);
5662
5663 STAILQ_INSERT_TAIL(&cache->zc_depot, mag, zm_link);
5664 cache->zc_depot_cur++;
5665
5666 if (__probable((cache->zc_depot_cur + 1) * zc_mag_size() <=
5667 depot_max)) {
5668 return zone_depot_unlock(cache);
5669 }
5670
5671 n_mags = 1;
5672 STAILQ_FIRST(&mags) = mag = STAILQ_FIRST(&cache->zc_depot);
5673
5674 /* always leave at least one magazine behind */
5675 while (n_mags + 1 < cache->zc_depot_cur &&
5676 n_mags * zc_mag_size() * zc_recirc_denom < depot_max) {
5677 mag = STAILQ_NEXT(mag, zm_link);
5678 n_mags++;
5679 }
5680
5681 cache->zc_depot_cur -= n_mags;
5682 STAILQ_FIRST(&cache->zc_depot) = STAILQ_NEXT(mag, zm_link);
5683 STAILQ_NEXT(mag, zm_link) = NULL;
5684
5685 zone_depot_unlock(cache);
5686
5687 mags.stqh_last = &STAILQ_NEXT(mag, zm_link);
5688 } else {
5689 enable_preemption();
5690
5691 n_mags = 1;
5692 STAILQ_FIRST(&mags) = mag;
5693 mags.stqh_last = &STAILQ_NEXT(mag, zm_link);
5694 STAILQ_NEXT(mag, zm_link) = NULL;
5695 }
5696
5697 /*
5698 * Preflight validity of all the elements before we touch the zone
5699 * metadata, and then insert them into the recirculation depot.
5700 */
5701 STAILQ_FOREACH(mag, &mags, zm_link) {
5702 for (uint16_t i = 0; i < zc_mag_size(); i++) {
5703 zone_element_validate(zone, mag->zm_elems[i]);
5704 }
5705 }
5706
5707 zone_lock_check_contention(zone, cache);
5708
5709 STAILQ_FOREACH(mag, &mags, zm_link) {
5710 for (uint16_t i = 0; i < zc_mag_size(); i++) {
5711 zone_element_t e = mag->zm_elems[i];
5712
5713 if (!zone_meta_mark_free(zone_meta_from_element(e), e)) {
5714 zone_meta_double_free_panic(zone, e, __func__);
5715 }
5716 }
5717 }
5718 STAILQ_CONCAT(&zone->z_recirc, &mags);
5719 zone->z_recirc_cur += n_mags;
5720
5721 zone_elems_free_add(zone, n_mags * zc_mag_size());
5722
5723 zone_unlock(zone);
5724 }
5725
5726 static void
5727 zfree_cached(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze)
5728 {
5729 zone_cache_t cache = zpercpu_get(zone->z_pcpu_cache);
5730
5731 if (cache->zc_free_cur >= zc_mag_size()) {
5732 if (cache->zc_alloc_cur >= zc_mag_size()) {
5733 return zfree_cached_slow(zone, meta, ze, cache);
5734 }
5735 zone_cache_swap_magazines(cache);
5736 }
5737
5738 if (__improbable(cache->zc_alloc_elems == NULL)) {
5739 return zfree_item(zone, meta, ze);
5740 }
5741
5742 if (zone_meta_is_free(meta, ze)) {
5743 zone_meta_double_free_panic(zone, ze, __func__);
5744 }
5745
5746 uint16_t idx = cache->zc_free_cur++;
5747 if (idx >= zc_mag_size()) {
5748 zone_accounting_panic(zone, "zc_free_cur overflow");
5749 }
5750 cache->zc_free_elems[idx] = ze;
5751
5752 enable_preemption();
5753 }
5754
5755 /*
5756 * The function is noinline when zlog can be used so that the backtracing can
5757 * reliably skip the zfree_ext() and zfree_log()
5758 * boring frames.
5759 */
5760 #if ZONE_ENABLE_LOGGING
5761 __attribute__((noinline))
5762 #endif /* ZONE_ENABLE_LOGGING */
5763 void
5764 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, vm_size_t elem_size)
5765 {
5766 struct zone_page_metadata *page_meta;
5767 vm_offset_t elem = (vm_offset_t)addr;
5768 zone_element_t ze;
5769
5770 DTRACE_VM2(zfree, zone_t, zone, void*, addr);
5771
5772 #if CONFIG_KERNEL_TBI && KASAN_TBI
5773 if (zone->z_tbi_tag) {
5774 elem = kasan_tbi_tag_zfree(elem, elem_size, zone->z_percpu);
5775 /* addr is still consumed in the function: gzalloc_free */
5776 addr = (void *)elem;
5777 }
5778 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
5779 #if CONFIG_PROB_GZALLOC
5780 if (__improbable(pgz_owned(elem))) {
5781 elem = pgz_unprotect(elem, __builtin_frame_address(0));
5782 addr = (void *)elem;
5783 }
5784 #endif /* CONFIG_PROB_GZALLOC */
5785 #if VM_TAG_SIZECLASSES
5786 if (__improbable(zone->z_uses_tags)) {
5787 vm_tag_t tag = *ztSlot(zone, elem) >> 1;
5788 // set the tag with b0 clear so the block remains inuse
5789 *ztSlot(zone, elem) = 0xFFFE;
5790 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
5791 -(long)elem_size);
5792 }
5793 #endif /* VM_TAG_SIZECLASSES */
5794
5795 #if KASAN_ZALLOC
5796 /*
5797 * Call zone_element_resolve() and throw away the results in
5798 * order to validate the element and its zone membership.
5799 * Any validation panics need to happen now, while we're
5800 * still close to the caller.
5801 *
5802 * Note that elem has not been adjusted, so we have to remove the
5803 * redzone first.
5804 */
5805 zone_element_t ze_discard;
5806 vm_offset_t elem_actual = elem - zone->z_kasan_redzone;
5807 (void)zone_element_resolve(zone, elem_actual, elem_size, &ze_discard);
5808
5809 if (kasan_quarantine_freed_element(&zone, &addr)) {
5810 return;
5811 }
5812 /*
5813 * kasan_quarantine_freed_element() might return a different
5814 * {zone, addr} than the one being freed for kalloc heaps.
5815 *
5816 * Make sure we reload everything.
5817 */
5818 elem = (vm_offset_t)addr;
5819 elem_size = zone_elem_size(zone);
5820 #endif
5821 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
5822 if (__improbable(zone->z_btlog)) {
5823 zfree_log(zone->z_btlog, elem, __builtin_frame_address(0));
5824 }
5825 #endif /* ZONE_ENABLE_LOGGING */
5826 #if CONFIG_GZALLOC
5827 if (__improbable(zone->z_gzalloc_tracked)) {
5828 return gzalloc_free(zone, zstats, addr);
5829 }
5830 #endif /* CONFIG_GZALLOC */
5831
5832 page_meta = zone_element_resolve(zone, elem, elem_size, &ze);
5833 #if KASAN_ZALLOC
5834 if (zone->z_percpu) {
5835 zpercpu_foreach_cpu(i) {
5836 kasan_poison_range(elem + ptoa(i), elem_size,
5837 ASAN_HEAP_FREED);
5838 }
5839 } else {
5840 kasan_poison_range(elem, elem_size, ASAN_HEAP_FREED);
5841 }
5842 #endif
5843
5844 disable_preemption();
5845 zpercpu_get(zstats)->zs_mem_freed += elem_size;
5846
5847 if (zone->z_pcpu_cache) {
5848 return zfree_cached(zone, page_meta, ze);
5849 }
5850
5851 return zfree_item(zone, page_meta, ze);
5852 }
5853
5854 void
5855 (zfree)(union zone_or_view zov, void *addr)
5856 {
5857 zone_t zone = zov.zov_view->zv_zone;
5858 zone_stats_t zstats = zov.zov_view->zv_stats;
5859 vm_offset_t esize = zone_elem_size(zone);
5860
5861 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
5862 assert(!zone->z_percpu);
5863 #if !KASAN_KALLOC
5864 bzero(addr, esize);
5865 #endif /* !KASAN_KALLOC */
5866 zfree_ext(zone, zstats, addr, esize);
5867 }
5868
5869 __attribute__((noinline))
5870 void
5871 zfree_percpu(union zone_or_view zov, void *addr)
5872 {
5873 zone_t zone = zov.zov_view->zv_zone;
5874 zone_stats_t zstats = zov.zov_view->zv_stats;
5875 vm_offset_t esize = zone_elem_size(zone);
5876
5877 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
5878 assert(zone->z_percpu);
5879 addr = (void *)__zpcpu_demangle(addr);
5880 #if !KASAN_KALLOC
5881 zpercpu_foreach_cpu(i) {
5882 bzero((char *)addr + ptoa(i), esize);
5883 }
5884 #endif /* !KASAN_KALLOC */
5885 zfree_ext(zone, zstats, addr, esize);
5886 }
5887
5888 void
5889 (zfree_id)(zone_id_t zid, void *addr)
5890 {
5891 (zfree)(&zone_array[zid], addr);
5892 }
5893
5894 void
5895 (zfree_ro)(zone_id_t zid, void *addr)
5896 {
5897 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
5898 zone_t zone = &zone_array[zid];
5899 zone_stats_t zstats = zone->z_stats;
5900 vm_offset_t esize = zone_ro_elem_size[zid];
5901
5902 #if ZSECURITY_CONFIG(READ_ONLY)
5903 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
5904 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
5905 #elif !KASAN_KALLOC
5906 (void)zid;
5907 bzero(addr, esize);
5908 #endif /* !KASAN_KALLOC */
5909 zfree_ext(zone, zstats, addr, esize);
5910 }
5911
5912 /*! @} */
5913 #endif /* !ZALLOC_TEST */
5914 #pragma mark zalloc
5915 #if !ZALLOC_TEST
5916
5917 /*!
5918 * @defgroup zalloc
5919 * @{
5920 *
5921 * @brief
5922 * The codepath for zone allocations.
5923 *
5924 * @discussion
5925 * There are 4 major ways to allocate memory that end up in the zone allocator:
5926 * - @c zalloc(), @c zalloc_flags(), ...
5927 * - @c zalloc_percpu()
5928 * - @c kalloc*()
5929 * - @c zalloc_permanent()
5930 *
5931 * While permanent zones have their own allocation scheme, all other codepaths
5932 * will eventually go through the @c zalloc_ext() choking point.
5933 *
5934 * Ignoring the @c zalloc_gz() codepath, the decision tree looks like this:
5935 * <code>
5936 * zalloc_ext()
5937 * │
5938 * ├───> zalloc_cached() ──────> zalloc_cached_fast() ───╮
5939 * │ │ ^ │
5940 * │ │ │ │
5941 * │ ╰───> zalloc_cached_slow() ───╯ │
5942 * │ │ │
5943 * │<─────────────────╮ ├─────────────╮ │
5944 * │ │ │ │ │
5945 * │ │ v │ │
5946 * │<───────╮ ╭──> zalloc_item_slow() ────┤ │
5947 * │ │ │ │ │
5948 * │ │ │ v │
5949 * ╰───> zalloc_item() ──────────> zalloc_item_fast() ───┤
5950 * │
5951 * v
5952 * zalloc_return()
5953 * </code>
5954 *
5955 *
5956 * The @c zalloc_item() track is used when zone caching is off:
5957 * - @c zalloc_item_fast() is used when there are enough elements available,
5958 * - @c zalloc_item_slow() is used when a refill is needed, which can cause
5959 * the zone to grow. This is the only codepath that refills.
5960 *
5961 * This track uses the zone lock for serialization:
5962 * - taken in @c zalloc_item(),
5963 * - maintained during @c zalloc_item_slow() (possibly dropped and re-taken),
5964 * - dropped in @c zalloc_item_fast().
5965 *
5966 *
5967 * The @c zalloc_cached() track is used when zone caching is on:
5968 * - @c zalloc_cached_fast() is taken when the cache has elements,
5969 * - @c zalloc_cached_slow() is taken if a cache refill is needed.
5970 * It can choose among several strategies:
5971 * ~ @c zalloc_cached_from_depot() to try to reuse cpu stashed magazines,
5972 * ~ @c zalloc_cached_from_recirc() using the global recirculation depot
5973 * @c z_recirc,
5974 * ~ using zalloc_import() if the zone has enough elements,
5975 * ~ falling back to the @c zalloc_item() track if zone caching is disabled
5976 * due to VM pressure or the zone has no available elements.
5977 *
5978 * This track disables preemption for serialization:
5979 * - preemption is disabled in @c zalloc_ext(),
5980 * - kept disabled during @c zalloc_cached_slow(), converted into a zone lock
5981 * if switching to @c zalloc_item_slow(),
5982 * - preemption is reenabled in @c zalloc_cached_fast().
5983 *
5984 * @c zalloc_cached_from_depot() also takes depot locks (taken by the caller,
5985 * released by @c zalloc_cached_from_depot()).
5986 *
5987 * In general the @c zalloc_*_slow() codepaths deal with refilling and will
5988 * tail call into the @c zalloc_*_fast() code to perform the actual allocation.
5989 *
5990 * @c zalloc_return() is the final function everyone tail calls into,
5991 * which prepares the element for consumption by the caller and deals with
5992 * common treatment (zone logging, tags, kasan, validation, ...).
5993 */
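/*
 * Editorial sketch (not part of the original source): the per-CPU variant
 * follows the same decision tree. The zone below is hypothetical and would
 * have been created with ZC_PERCPU:
 *
 * <code>
 *	uint64_t *counters = zalloc_percpu(stats_zone, Z_WAITOK | Z_ZERO);
 *
 *	(*zpercpu_get(counters))++;	// this CPU's slot
 *	...
 *	zfree_percpu(stats_zone, counters);
 * </code>
 */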
5994
5995 /*!
5996 * @function zalloc_import
5997 *
5998 * @brief
5999 * Import @c n elements in the specified array, opposite of @c zfree_drop().
6000 *
6001 * @param zone The zone to import elements from
6002 * @param elems The array to import into
6003 * @param n The number of elements to import. Must be non-zero,
6004 * and smaller than @c zone->z_elems_free.
6005 */
6006 __header_always_inline void
6007 zalloc_import(zone_t zone, zone_element_t *elems, zalloc_flags_t flags,
6008 vm_size_t esize, uint32_t n)
6009 {
6010 uint32_t i = 0;
6011
6012 assertf(STAILQ_EMPTY(&zone->z_recirc),
6013 "Trying to import from zone %p [%s%s] with non empty recirc",
6014 zone, zone_heap_name(zone), zone_name(zone));
6015
6016 do {
6017 vm_offset_t page, eidx, size = 0;
6018 struct zone_page_metadata *meta;
6019
6020 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6021 meta = zone_pva_to_meta(zone->z_pageq_partial);
6022 page = zone_pva_to_addr(zone->z_pageq_partial);
6023 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6024 meta = zone_pva_to_meta(zone->z_pageq_empty);
6025 page = zone_pva_to_addr(zone->z_pageq_empty);
6026 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6027 } else {
6028 zone_accounting_panic(zone, "z_elems_free corruption");
6029 }
6030
6031 if (!zone_has_index(zone, meta->zm_index)) {
6032 zone_page_metadata_index_confusion_panic(zone, page, meta);
6033 }
6034
6035 vm_offset_t old_size = meta->zm_alloc_size;
6036 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6037
6038 do {
6039 eidx = zone_meta_find_and_clear_bit(zone, meta, flags);
6040 elems[i++] = zone_element_encode(page, eidx);
6041 size += esize;
6042 } while (i < n && old_size + size + esize <= max_size);
6043
6044 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6045
6046 if (new_size + esize > max_size) {
6047 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6048 } else if (old_size == 0) {
6049 /* remove from free, move to intermediate */
6050 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6051 }
6052 } while (i < n);
6053 }
6054
6055 /*!
6056 * @function zalloc_return
6057 *
6058 * @brief
6059 * Performs the tail-end of the work required on allocations before the caller
6060 * uses them.
6061 *
6062 * @discussion
6063 * This function is called without any zone lock held,
6064 * and with preemption restored to the state it had when @c zalloc_ext() was called.
6065 *
6066 * @param zone The zone we're allocating from.
6067 * @param ze The encoded element we just allocated.
6068 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6069 * @param elem_size The element size for this zone.
6070 */
6071 __attribute__((noinline))
6072 static void *
6073 zalloc_return(zone_t zone, zone_element_t ze, zalloc_flags_t flags __unused,
6074 vm_offset_t elem_size)
6075 {
6076 vm_offset_t addr = zone_element_addr(zone, ze, elem_size);
6077
6078 #if CONFIG_KERNEL_TBI && KASAN_TBI
6079 addr = kasan_tbi_fix_address_tag(addr);
6080 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
6081 #if ZALLOC_ENABLE_ZERO_CHECK
6082 zalloc_validate_element(zone, addr, elem_size, flags);
6083 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6084 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
6085 if (__improbable(zone->z_btlog)) {
6086 zalloc_log(zone->z_btlog, addr, flags,
6087 __builtin_frame_address(0));
6088 }
6089 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
6090 #if KASAN_ZALLOC
6091 if (zone->z_kasan_redzone) {
6092 addr = kasan_alloc(addr, elem_size,
6093 elem_size - 2 * zone->z_kasan_redzone,
6094 zone->z_kasan_redzone);
6095 elem_size -= 2 * zone->z_kasan_redzone;
6096 }
6097 if (flags & Z_PCPU) {
6098 zpercpu_foreach_cpu(i) {
6099 kasan_poison_range(addr + ptoa(i), elem_size, ASAN_VALID);
6100 __nosan_bzero((char *)addr + ptoa(i), elem_size);
6101 }
6102 } else {
6103 kasan_poison_range(addr, elem_size, ASAN_VALID);
6104 __nosan_bzero((char *)addr, elem_size);
6105 }
6106 #endif /* KASAN_ZALLOC */
6107
6108 #if VM_TAG_SIZECLASSES
6109 if (__improbable(zone->z_uses_tags)) {
6110 vm_tag_t tag = zalloc_flags_get_tag(flags);
6111 if (tag == VM_KERN_MEMORY_NONE) {
6112 zone_security_flags_t zsflags = zone_security_config(zone);
6113 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6114 tag = VM_KERN_MEMORY_KALLOC_DATA;
6115 } else {
6116 tag = VM_KERN_MEMORY_KALLOC;
6117 }
6118 }
6119 // set the tag with b0 clear so the block remains inuse
6120 *ztSlot(zone, addr) = (vm_tag_t)(tag << 1);
6121 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6122 (long)elem_size);
6123 }
6124 #endif /* VM_TAG_SIZECLASSES */
6125 #if CONFIG_PROB_GZALLOC
6126 if ((flags & Z_PGZ) && !zone_addr_size_crosses_page(addr, elem_size)) {
6127 addr = pgz_protect(zone, addr, flags,
6128 __builtin_frame_address(0));
6129 }
6130 #endif
6131
6132 #if CONFIG_KERNEL_TBI && KASAN_TBI
6133 if (__probable(zone->z_tbi_tag)) {
6134 addr = kasan_tbi_tag_zalloc(addr, elem_size, (flags & Z_PCPU));
6135 } else {
6136 addr = kasan_tbi_tag_zalloc_default(addr, elem_size, (flags & Z_PCPU));
6137 }
6138 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
6139
6140 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6141 return (void *)addr;
6142 }
6143
6144 #if CONFIG_GZALLOC
6145 /*!
6146 * @function zalloc_gz
6147 *
6148 * @brief
6149 * Performs allocations for zones using gzalloc.
6150 *
6151 * @discussion
6152 * This function is noinline so that it doesn't affect the codegen
6153 * of the fastpath.
6154 */
6155 __attribute__((noinline))
6156 static void *
6157 zalloc_gz(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags, vm_size_t esize)
6158 {
6159 vm_offset_t addr = gzalloc_alloc(zone, zstats, flags);
6160 return zalloc_return(zone, zone_element_encode(addr, 0),
6161 flags, esize);
6162 }
6163 #endif /* CONFIG_GZALLOC */
6164
6165 __attribute__((noinline))
6166 static void *
6167 zalloc_item_fast(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
6168 vm_size_t esize)
6169 {
6170 zone_element_t ze;
6171
6172 zalloc_import(zone, &ze, flags, esize, 1);
6173 zone_elems_free_sub(zone, 1);
6174 zpercpu_get(zstats)->zs_mem_allocated += esize;
6175 zone_unlock(zone);
6176
6177 return zalloc_return(zone, ze, flags, esize);
6178 }
6179
6180 static inline bool
6181 zalloc_item_slow_should_schedule_async(zone_t zone, zalloc_flags_t flags)
6182 {
6183 /*
6184 * If we can't wait, then async it is.
6185 */
6186 if (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) {
6187 return true;
6188 }
6189
6190 if (zone->z_elems_free == 0) {
6191 return false;
6192 }
6193
6194 /*
6195 * Early boot gets to tap into foreign reserves
6196 */
6197 if (startup_phase < STARTUP_SUB_EARLY_BOOT) {
6198 return true;
6199 }
6200
6201 /*
6202 * Allow threads to consume up to 3/4 of the reserve while only scheduling async refills.
6203 * Note that reserve-less zones will always say "true" here.
6204 */
6205 if (zone->z_elems_free >= zone->z_elems_rsv / 4) {
6206 return true;
6207 }
6208
6209 /*
6210 * After this, only VM and GC threads get to tap into the reserve.
6211 */
6212 return current_thread()->options & (TH_OPT_ZONE_PRIV | TH_OPT_VMPRIV);
6213 }
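/*
 * Editorial note: with a hypothetical z_elems_rsv of 4000, a regular
 * Z_WAITOK caller keeps deferring to the async expansion thread until
 * fewer than 1000 free elements remain (rsv / 4); below that, only
 * TH_OPT_ZONE_PRIV / TH_OPT_VMPRIV threads keep drawing down the
 * reserve, and everyone else expands the zone synchronously.
 */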
6214
6215 /*!
6216 * @function zalloc_item_slow
6217 *
6218 * @brief
6219 * Performs allocations when the zone is out of elements.
6220 *
6221 * @discussion
6222 * This function might drop the lock and reenable preemption,
6223 * which means the per-CPU caching layer or recirculation depot
6224 * might have received elements.
6225 */
6226 __attribute__((noinline))
6227 static void *
6228 zalloc_item_slow(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
6229 vm_size_t esize)
6230 {
6231 if (zalloc_item_slow_should_schedule_async(zone, flags)) {
6232 zone_expand_async_schedule_if_needed(zone);
6233 } else {
6234 zone_expand_locked(zone, flags, zalloc_needs_refill);
6235 }
6236 if (__improbable(zone->z_elems_free == 0)) {
6237 zone_unlock(zone);
6238 if (__improbable(flags & Z_NOFAIL)) {
6239 zone_nofail_panic(zone);
6240 }
6241 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6242 return NULL;
6243 }
6244
6245 /*
6246 * We might have changed core or got preempted/blocked while expanding
6247 * the zone. Allocating from the zone when the recirculation depot
6248 * is not empty is not allowed.
6249 *
6250 * It will be rare but possible for the depot to refill while we were
6251 * waiting for pages. If that happens we need to start over.
6252 */
6253 if (!STAILQ_EMPTY(&zone->z_recirc)) {
6254 zone_unlock(zone);
6255 return zalloc_ext(zone, zstats, flags, esize);
6256 }
6257
6258 return zalloc_item_fast(zone, zstats, flags, esize);
6259 }
6260
6261 /*!
6262 * @function zalloc_item
6263 *
6264 * @brief
6265 * Performs allocations when zone caching is off.
6266 *
6267 * @discussion
6268 * This function calls @c zalloc_item_slow() when refilling the zone
6269 * is needed, or @c zalloc_item_fast() if the zone has enough free elements.
6270 */
6271 static void *
6272 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
6273 vm_size_t esize)
6274 {
6275 zone_lock_nopreempt_check_contention(zone, NULL);
6276
6277 /*
6278 * When we committed to the zalloc_item() path,
6279 * zone caching might have been flipped/enabled.
6280 *
6281 * If we got preempted for long enough, the recirculation layer
6282 * may have been populated, and allocating from the zone would be
6283 * incorrect.
6284 *
6285 * So double check for this extremely rare race here.
6286 */
6287 if (__improbable(!STAILQ_EMPTY(&zone->z_recirc))) {
6288 zone_unlock(zone);
6289 return zalloc_ext(zone, zstats, flags, esize);
6290 }
6291
6292 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6293 return zalloc_item_slow(zone, zstats, flags, esize);
6294 }
6295
6296 return zalloc_item_fast(zone, zstats, flags, esize);
6297 }
6298
6299 __attribute__((always_inline))
6300 static void *
6301 zalloc_cached_fast(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
6302 vm_size_t esize, zone_cache_t cache, zone_magazine_t freemag)
6303 {
6304 zone_element_t ze;
6305 uint32_t index;
6306
6307 index = --cache->zc_alloc_cur;
6308 if (index >= zc_mag_size()) {
6309 zone_accounting_panic(zone, "zc_alloc_cur wrap around");
6310 }
6311 ze = cache->zc_alloc_elems[index];
6312 cache->zc_alloc_elems[index].ze_value = 0;
6313
6314 zpercpu_get(zstats)->zs_mem_allocated += esize;
6315 enable_preemption();
6316
6317 if (zone_meta_is_free(zone_meta_from_element(ze), ze)) {
6318 zone_meta_double_free_panic(zone, ze, __func__);
6319 }
6320
6321 if (freemag) {
6322 zone_magazine_free(freemag);
6323 }
6324 return zalloc_return(zone, ze, flags, esize);
6325 }
6326
6327 __attribute__((noinline))
6328 static void *
6329 zalloc_cached_from_depot(
6330 zone_t zone,
6331 zone_stats_t zstats,
6332 zalloc_flags_t flags,
6333 vm_size_t esize,
6334 zone_cache_t cache)
6335 {
6336 zone_magazine_t mag = STAILQ_FIRST(&cache->zc_depot);
6337
6338 STAILQ_REMOVE_HEAD(&cache->zc_depot, zm_link);
6339 STAILQ_NEXT(mag, zm_link) = NULL;
6340
6341 if (cache->zc_depot_cur-- == 0) {
6342 zone_accounting_panic(zone, "zc_depot_cur wrap-around");
6343 }
6344 zone_depot_unlock_nopreempt(cache);
6345
6346 mag = zone_magazine_replace(&cache->zc_alloc_cur,
6347 &cache->zc_alloc_elems, mag);
6348
6349 z_debug_assert(cache->zc_alloc_cur == zc_mag_size());
6350 z_debug_assert(mag->zm_cur == 0);
6351
6352 if (zone == zc_magazine_zone) {
6353 enable_preemption();
6354 bzero(mag, esize);
6355 return mag;
6356 }
6357
6358 return zalloc_cached_fast(zone, zstats, flags, esize, cache, mag);
6359 }
6360
6361 __attribute__((noinline))
6362 static void *
6363 zalloc_cached_import(
6364 zone_t zone,
6365 zone_stats_t zstats,
6366 zalloc_flags_t flags,
6367 vm_size_t esize,
6368 zone_cache_t cache)
6369 {
6370 uint16_t n_elems = zc_mag_size();
6371
6372 if (zone->z_elems_free < n_elems + zone->z_elems_rsv / 2 &&
6373 os_sub_overflow(zone->z_elems_free,
6374 zone->z_elems_rsv / 2, &n_elems)) {
6375 n_elems = 0;
6376 }
6377
6378 z_debug_assert(n_elems <= zc_mag_size());
6379
6380 if (__improbable(n_elems == 0)) {
6381 /*
6382 * If importing elements would deplete the zone,
6383 * call zalloc_item_slow()
6384 */
6385 return zalloc_item_slow(zone, zstats, flags, esize);
6386 }
6387
6388 if (__improbable(zone_caching_disabled)) {
6389 if (__improbable(zone_caching_disabled < 0)) {
6390 /*
6391 * In the first 10s after boot, mess with
6392 * the scan position in order to make early
6393 * allocation patterns less predictable.
6394 */
6395 zone_early_scramble_rr(zone, zstats);
6396 }
6397 return zalloc_item_fast(zone, zstats, flags, esize);
6398 }
6399
6400 zalloc_import(zone, cache->zc_alloc_elems, flags, esize, n_elems);
6401
6402 cache->zc_alloc_cur = n_elems;
6403 zone_elems_free_sub(zone, n_elems);
6404
6405 zone_unlock_nopreempt(zone);
6406
6407 return zalloc_cached_fast(zone, zstats, flags, esize, cache, NULL);
6408 }
6409
6410 static void *
6411 zalloc_cached_from_recirc(
6412 zone_t zone,
6413 zone_stats_t zstats,
6414 zalloc_flags_t flags,
6415 vm_size_t esize,
6416 zone_cache_t cache)
6417 {
6418 struct zone_depot mags;
6419 zone_magazine_t mag;
6420 uint16_t n_mags = 1;
6421
6422 STAILQ_FIRST(&mags) = mag = STAILQ_FIRST(&zone->z_recirc);
6423
6424 for (;;) {
6425 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6426 zone_element_t e = mag->zm_elems[i];
6427
6428 if (!zone_meta_mark_used(zone_meta_from_element(e), e)) {
6429 zone_meta_double_free_panic(zone, e, __func__);
6430 }
6431 }
6432
6433 if (n_mags >= zone->z_recirc_cur) {
6434 STAILQ_INIT(&zone->z_recirc);
6435 assert(STAILQ_NEXT(mag, zm_link) == NULL);
6436 break;
6437 }
6438
6439 if (n_mags * zc_mag_size() * zc_recirc_denom >=
6440 cache->zc_depot_max) {
6441 STAILQ_FIRST(&zone->z_recirc) = STAILQ_NEXT(mag, zm_link);
6442 STAILQ_NEXT(mag, zm_link) = NULL;
6443 break;
6444 }
6445
6446 n_mags++;
6447 mag = STAILQ_NEXT(mag, zm_link);
6448 }
6449
6450 zone_elems_free_sub(zone, n_mags * zc_mag_size());
6451 zone_counter_sub(zone, z_recirc_cur, n_mags);
6452
6453 zone_unlock_nopreempt(zone);
6454
6455 mags.stqh_last = &STAILQ_NEXT(mag, zm_link);
6456
6457 /*
6458 * And then incorporate everything into our per-cpu layer.
6459 */
6460
6461 mag = STAILQ_FIRST(&mags);
6462
6463 if (n_mags > 1) {
6464 STAILQ_FIRST(&mags) = STAILQ_NEXT(mag, zm_link);
6465 STAILQ_NEXT(mag, zm_link) = NULL;
6466
6467 zone_depot_lock_nopreempt(cache);
6468
6469 cache->zc_depot_cur += n_mags - 1;
6470 STAILQ_CONCAT(&cache->zc_depot, &mags);
6471
6472 zone_depot_unlock_nopreempt(cache);
6473 }
6474
6475 mag = zone_magazine_replace(&cache->zc_alloc_cur,
6476 &cache->zc_alloc_elems, mag);
6477 z_debug_assert(cache->zc_alloc_cur == zc_mag_size());
6478 z_debug_assert(mag->zm_cur == 0);
6479
6480 return zalloc_cached_fast(zone, zstats, flags, esize, cache, mag);
6481 }
6482
6483 __attribute__((noinline))
6484 static void *
6485 zalloc_cached_slow(
6486 zone_t zone,
6487 zone_stats_t zstats,
6488 zalloc_flags_t flags,
6489 vm_size_t esize,
6490 zone_cache_t cache)
6491 {
6492 /*
6493 * Try to allocate from our local depot, if there's one.
6494 */
6495 if (STAILQ_FIRST(&cache->zc_depot)) {
6496 zone_depot_lock_nopreempt(cache);
6497
6498 if (STAILQ_FIRST(&cache->zc_depot)) {
6499 return zalloc_cached_from_depot(zone, zstats, flags,
6500 esize, cache);
6501 }
6502
6503 zone_depot_unlock_nopreempt(cache);
6504 }
6505
6506 zone_lock_nopreempt_check_contention(zone, cache);
6507
6508 /*
6509 * If the recirculation depot is empty, we'll need to import.
6510 * The system is tuned for this to be extremely rare.
6511 */
6512 if (__improbable(STAILQ_EMPTY(&zone->z_recirc))) {
6513 return zalloc_cached_import(zone, zstats, flags, esize, cache);
6514 }
6515
6516 /*
6517 * If the recirculation depot has elements, then try to fill
6518 * the local per-cpu depot up to (1 / zc_recirc_denom) of zc_depot_max.
6519 */
6520 return zalloc_cached_from_recirc(zone, zstats, flags, esize, cache);
6521 }
6522
6523 /*!
6524 * @function zalloc_cached
6525 *
6526 * @brief
6527 * Performs allocations when zone caching is on.
6528 *
6529 * @discussion
6530 * This function calls @c zalloc_cached_fast() when the caches have elements
6531 * ready.
6532 *
6533 * Else it will call @c zalloc_cached_slow() so that the cache is refilled,
6534 * which might switch to the @c zalloc_item_slow() track when the backing zone
6535 * needs to be refilled.
6536 */
6537 static void *
6538 zalloc_cached(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
6539 vm_size_t esize)
6540 {
6541 zone_cache_t cache;
6542
6543 cache = zpercpu_get(zone->z_pcpu_cache);
6544
6545 if (cache->zc_alloc_cur == 0) {
6546 if (__improbable(cache->zc_free_cur == 0)) {
6547 return zalloc_cached_slow(zone, zstats, flags, esize, cache);
6548 }
6549 zone_cache_swap_magazines(cache);
6550 }
6551
6552 return zalloc_cached_fast(zone, zstats, flags, esize, cache, NULL);
6553 }
6554
6555 /*!
6556 * @function zalloc_ext
6557 *
6558 * @brief
6559 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6560 */
6561 void *
6562 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags, vm_size_t esize)
6563 {
6564 /*
6565 * KASan uses zalloc() for fakestack, which can be called anywhere.
6566 * However, we make sure these calls can never block.
6567 */
6568 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6569 #if KASAN_ZALLOC
6570 zone->kasan_fakestacks ||
6571 #endif /* KASAN_ZALLOC */
6572 ml_get_interrupts_enabled() ||
6573 ml_is_quiescing() ||
6574 debug_mode_active(),
6575 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6576
6577 /*
6578 * Make sure Z_NOFAIL was not obviously misused
6579 */
6580 if (flags & Z_NOFAIL) {
6581 assert(!zone->exhaustible &&
6582 (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6583 }
6584
6585 #if CONFIG_GZALLOC
6586 if (__improbable(zone->z_gzalloc_tracked)) {
6587 return zalloc_gz(zone, zstats, flags, esize);
6588 }
6589 #endif /* CONFIG_GZALLOC */
6590
6591 disable_preemption();
6592
6593 #if ZALLOC_ENABLE_ZERO_CHECK
6594 if (zalloc_skip_zero_check()) {
6595 flags |= Z_NOZZC;
6596 }
6597 #endif
6598 #if CONFIG_PROB_GZALLOC
6599 if (zone->z_pgz_tracked && pgz_sample(flags)) {
6600 flags |= Z_PGZ;
6601 }
6602 #endif /* CONFIG_PROB_GZALLOC */
6603
6604 if (zone->z_pcpu_cache) {
6605 return zalloc_cached(zone, zstats, flags, esize);
6606 }
6607
6608 return zalloc_item(zone, zstats, flags, esize);
6609 }
6610
6611 __attribute__((always_inline))
6612 void *
6613 zalloc(union zone_or_view zov)
6614 {
6615 return zalloc_flags(zov, Z_WAITOK);
6616 }
6617
6618 __attribute__((always_inline))
6619 void *
6620 zalloc_noblock(union zone_or_view zov)
6621 {
6622 return zalloc_flags(zov, Z_NOWAIT);
6623 }
6624
6625 void *
6626 zalloc_flags(union zone_or_view zov, zalloc_flags_t flags)
6627 {
6628 zone_t zone = zov.zov_view->zv_zone;
6629 zone_stats_t zstats = zov.zov_view->zv_stats;
6630 vm_size_t esize = zone_elem_size(zone);
6631
6632 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6633 assert(!zone->z_percpu);
6634 return zalloc_ext(zone, zstats, flags, esize);
6635 }
6636
6637 __attribute__((always_inline))
6638 void *
6639 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6640 {
6641 return zalloc_flags(&zone_array[zid], flags);
6642 }
6643
6644 void *
6645 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6646 {
6647 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6648 zone_t zone = &zone_array[zid];
6649 zone_stats_t zstats = zone->z_stats;
6650 vm_size_t esize = zone_ro_elem_size[zid];
6651 void *elem;
6652
6653 assert(!zone->z_percpu);
6654 elem = zalloc_ext(zone, zstats, flags, esize);
6655 #if ZSECURITY_CONFIG(READ_ONLY)
6656 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6657 if (elem) {
6658 zone_require_ro(zid, esize, elem);
6659 }
6660 #endif
6661 return elem;
6662 }
6663
6664 #if ZSECURITY_CONFIG(READ_ONLY)
6665
6666 __attribute__((always_inline))
6667 static bool
6668 from_current_stack(const vm_offset_t addr, vm_size_t size)
6669 {
6670 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6671 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6672 return (addr >= start) && (addr + size < end);
6673 }
6674
6675 #if XNU_MONITOR
6676 /*
6677 * Check if an address is from const memory, i.e. the TEXT or DATA_CONST segments,
6678 * or the SECURITY_READ_ONLY_LATE section.
6679 */
6680 __attribute__((always_inline))
6681 static bool
6682 from_const_memory(const vm_offset_t addr, vm_size_t size)
6683 {
6684 extern uint8_t text_start[] __SEGMENT_START_SYM("__TEXT");
6685 extern uint8_t text_end[] __SEGMENT_END_SYM("__TEXT");
6686
6687 extern uint8_t data_const_start[] __SEGMENT_START_SYM("__DATA_CONST");
6688 extern uint8_t data_const_end[] __SEGMENT_END_SYM("__DATA_CONST");
6689
6690 extern uint8_t security_start[] __SECTION_START_SYM(SECURITY_SEGMENT_NAME,
6691 SECURITY_SECTION_NAME);
6692 extern uint8_t security_end[] __SECTION_END_SYM(SECURITY_SEGMENT_NAME,
6693 SECURITY_SECTION_NAME);
6694
6695 const uint8_t *_addr = (const uint8_t *) addr;
6696
6697 return (_addr >= text_start && _addr + size <= text_end) ||
6698 (_addr >= data_const_start && _addr + size <= data_const_end) ||
6699 (_addr >= security_start && _addr + size <= security_end);
6700 }
6701 #else
6702 __attribute__((always_inline))
6703 static bool
6704 from_const_memory(const vm_offset_t addr, vm_size_t size)
6705 {
6706 (void) addr;
6707 (void) size;
6708 return true;
6709 }
6710 #endif /* XNU_MONITOR */
6711
6712 __abortlike
6713 static void
6714 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6715 const vm_offset_t src, vm_size_t src_size)
6716 {
6717 if (from_zone_map(src, src_size, ZONE_ADDR_READONLY)) {
6718 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6719 zone_t dst_zone = &zone_array[zid];
6720 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
6721 " (expected: %s, actual: %s", (void *)src, elem, src_zone->z_name,
6722 dst_zone->z_name);
6723 }
6724 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6725 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6726 panic("zalloc_ro_mut failed: source (%p) neither from RO zone map nor from"
6727 " current stack (%p - %p)\n", (void *)src, (void *)start, (void *)end);
6728 }
6729
6730 __attribute__((always_inline))
6731 static void
6732 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6733 const vm_offset_t src, vm_size_t src_size)
6734 {
6735 if (from_current_stack(src, src_size) ||
6736 (from_zone_map(src, src_size, ZONE_ADDR_READONLY) &&
6737 zid == zone_index_from_ptr((void *)src)) ||
6738 from_const_memory(src, src_size)) {
6739 return;
6740 }
6741 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6742 }
6743
6744 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6745
6746 __attribute__((noinline))
6747 void
6748 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
6749 const void *new_data, vm_size_t new_data_size)
6750 {
6751 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6752
6753 #if ZSECURITY_CONFIG(READ_ONLY)
6754 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
6755 new_data_size);
6756 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
6757 (vm_offset_t) new_data, new_data_size);
6758 #else
6759 (void)zid;
6760 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
6761 #endif
6762 }
6763
6764 __attribute__((noinline))
6765 uint64_t
6766 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
6767 zro_atomic_op_t op, uint64_t value)
6768 {
6769 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6770
6771 #if ZSECURITY_CONFIG(READ_ONLY)
6772 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
6773 #else
6774 (void)zid;
6775 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
6776 #endif
6777 return value;
6778 }
6779
6780 void
6781 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
6782 {
6783 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6784 #if ZSECURITY_CONFIG(READ_ONLY)
6785 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
6786 #else
6787 (void)zid;
6788 bzero((void *)((uintptr_t)elem + offset), size);
6789 #endif
6790 }
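/*
 * Editorial sketch (not part of the original source): read-only zone elements
 * are never written through the returned pointer; all mutations go through
 * the helpers above so that the protected pmap performs the write. The zone
 * id and struct below are hypothetical:
 *
 * <code>
 *	struct cred *c = zalloc_ro(ZONE_ID_EXAMPLE_RO, Z_WAITOK | Z_ZERO);
 *
 *	uint32_t uid = 501;
 *	zalloc_ro_mut(ZONE_ID_EXAMPLE_RO, c,
 *	    offsetof(struct cred, cr_uid), &uid, sizeof(uid));
 *	...
 *	zfree_ro(ZONE_ID_EXAMPLE_RO, c);
 * </code>
 */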
6791
6792 /*
6793 * This function will run in the PPL and needs to be robust
6794 * against an attacker with arbitrary kernel write.
6795 */
6796
6797 #if ZSECURITY_CONFIG(READ_ONLY)
6798
6799 __abortlike
6800 static void
6801 zone_id_require_ro_panic(zone_id_t zid, vm_size_t esize, void *addr)
6802 {
6803 vm_offset_t va = (vm_offset_t)addr;
6804 uint32_t zindex;
6805 zone_t other;
6806 zone_t zone = &zone_array[zid];
6807
6808 if (!from_zone_map(addr, 1, ZONE_ADDR_READONLY)) {
6809 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
6810 }
6811
6812 if (zone_addr_size_crosses_page(va, esize)) {
6813 panic("zone_require_ro failed: address crosses a page (addr: %p)", addr);
6814 }
6815
6816 zindex = zone_index_from_ptr(addr);
6817 other = &zone_array[zindex];
6818 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
6819 panic("zone_require_ro failed: invalid zone index %d "
6820 "(addr: %p, expected: %s%s)", zindex,
6821 addr, zone_heap_name(zone), zone->z_name);
6822 } else {
6823 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
6824 "(addr: %p, expected: %s%s)",
6825 zindex, zone_heap_name(other), other->z_name,
6826 addr, zone_heap_name(zone), zone->z_name);
6827 }
6828 }
6829
6830 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6831
6832 __attribute__((always_inline))
6833 void
6834 zone_require_ro(zone_id_t zid, vm_size_t esize, void *addr)
6835 {
6836 #if ZSECURITY_CONFIG(READ_ONLY)
6837 vm_offset_t va = (vm_offset_t)addr;
6838 struct zone_page_metadata *meta = zone_meta_from_addr(va);
6839
6840 /*
6841 * Check that:
6842 * - the first byte of the element is in the map
6843 * - the element doesn't cross a page (implies it is wholly in the map)
6844 * - the zone ID matches
6845 *
6846 * The code is weirdly written to minimize instruction count.
6847 */
6848 if (!from_zone_map(addr, 1, ZONE_ADDR_READONLY) ||
6849 zone_addr_size_crosses_page(va, esize) ||
6850 zid != meta->zm_index) {
6851 zone_id_require_ro_panic(zid, esize, addr);
6852 }
6853 #else
6854 #pragma unused(zid, esize, addr)
6855 #endif
6856 }
6857
6858 void
6859 zone_require_ro_range_contains(zone_id_t zid, void *addr)
6860 {
6861 vm_size_t esize = zone_ro_elem_size[zid];
6862 vm_offset_t va = (vm_offset_t)addr;
6863
6864 /* this is called by the pmap, and for those calls we know the RO submap is on */
6865 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6866
6867 if (!from_zone_map(addr, esize, ZONE_ADDR_READONLY)) {
6868 zone_t zone = &zone_array[zid];
6869 zone_invalid_element_addr_panic(zone, va);
6870 }
6871 }
6872
6873 void *
6874 zalloc_percpu(union zone_or_view zov, zalloc_flags_t flags)
6875 {
6876 zone_t zone = zov.zov_view->zv_zone;
6877 zone_stats_t zstats = zov.zov_view->zv_stats;
6878 vm_size_t esize = zone_elem_size(zone);
6879
6880 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6881 assert(zone->z_percpu);
6882 flags |= Z_PCPU;
6883 return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags, esize));
6884 }
6885
6886 static void *
6887 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
6888 {
6889 struct zone_page_metadata *page_meta;
6890 vm_offset_t offs, addr;
6891 zone_pva_t pva;
6892
6893 assert(ml_get_interrupts_enabled() ||
6894 ml_is_quiescing() ||
6895 debug_mode_active() ||
6896 startup_phase < STARTUP_SUB_EARLY_BOOT);
6897
6898 size = (size + mask) & ~mask;
6899 assert(size <= PAGE_SIZE);
6900
6901 zone_lock(zone);
6902 assert(zone->z_self == zone);
6903
6904 for (;;) {
6905 pva = zone->z_pageq_partial;
6906 while (!zone_pva_is_null(pva)) {
6907 page_meta = zone_pva_to_meta(pva);
6908 if (page_meta->zm_bump + size <= PAGE_SIZE) {
6909 goto found;
6910 }
6911 pva = page_meta->zm_page_next;
6912 }
6913
6914 zone_expand_locked(zone, Z_WAITOK, NULL);
6915 }
6916
6917 found:
6918 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
6919 page_meta->zm_bump = (uint16_t)(offs + size);
6920 page_meta->zm_alloc_size += size;
6921 zone->z_elems_free -= size;
6922 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
6923
6924 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
6925 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
6926 }
6927
6928 zone_unlock(zone);
6929
6930 addr = offs + zone_pva_to_addr(pva);
6931
6932 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6933 return (void *)addr;
6934 }
6935
6936 static void *
6937 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
6938 {
6939 kern_return_t kr;
6940 vm_offset_t addr;
6941
6942 kr = kernel_memory_allocate(kernel_map, &addr, size, mask,
6943 KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
6944 if (kr != 0) {
6945 panic("%s: unable to allocate %zd bytes (%d)",
6946 __func__, (size_t)size, kr);
6947 }
6948 return (void *)addr;
6949 }
6950
6951 void *
6952 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
6953 {
6954 if (size <= PAGE_SIZE) {
6955 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
6956 return _zalloc_permanent(zone, size, mask);
6957 }
6958 return _zalloc_permanent_large(size, mask, tag);
6959 }
6960
6961 void *
6962 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
6963 {
6964 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
6965 return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
6966 }
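/*
 * Editorial note: permanent allocations have no free path, which is why they
 * never reach zfree_ext(). A typical boot-time use mirrors the call made in
 * pgz_init() above (identifiers hypothetical):
 *
 * <code>
 *	table = zalloc_permanent(n_entries * sizeof(struct entry), ZALIGN_PTR);
 * </code>
 */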
6967
6968 /*! @} */
6969 #endif /* !ZALLOC_TEST */
6970 #pragma mark zone GC / trimming
6971 #if !ZALLOC_TEST
6972
6973 static thread_call_data_t zone_defrag_callout;
6974
6975 static void
6976 zone_reclaim_chunk(zone_t z, struct zone_page_metadata *meta,
6977 uint32_t free_count, struct zone_depot *mags)
6978 {
6979 vm_address_t page_addr;
6980 vm_size_t size_to_free;
6981 uint32_t bitmap_ref;
6982 uint32_t page_count;
6983 zone_security_flags_t zsflags = zone_security_config(z);
6984 bool sequester = zsflags.z_va_sequester && !z->z_destroyed;
6985 uint16_t oob_guards = 0;
6986
6987 #if CONFIG_PROB_GZALLOC
6988 /*
6989 * See zone_allocate_va_locked: we added a guard page
6990 * at the end of chunks, so if we are going to kmem_free,
6991 * we want to return that one dude too.
6992 */
6993 oob_guards = z->z_pgz_use_guards;
6994 #endif /* CONFIG_PROB_GZALLOC */
6995
6996 if (zone_submap_is_sequestered(zsflags)) {
6997 /*
6998 * If the entire map is sequestered, we can't return the VA.
6999 * It stays pinned to the zone forever.
7000 */
7001 sequester = true;
7002 }
7003
7004 zone_meta_queue_pop_native(z, &z->z_pageq_empty, &page_addr);
7005
7006 page_count = meta->zm_chunk_len;
7007
7008 if (meta->zm_alloc_size) {
7009 zone_metadata_corruption(z, meta, "alloc_size");
7010 }
7011 if (z->z_percpu) {
7012 if (page_count != 1) {
7013 zone_metadata_corruption(z, meta, "page_count");
7014 }
7015 size_to_free = ptoa(z->z_chunk_pages);
7016 zone_remove_wired_pages(z->z_chunk_pages);
7017 } else {
7018 if (page_count > z->z_chunk_pages) {
7019 zone_metadata_corruption(z, meta, "page_count");
7020 }
7021 if (page_count < z->z_chunk_pages) {
7022 /* Dequeue non populated VA from z_pageq_va */
7023 zone_meta_remqueue(z, meta + page_count);
7024 }
7025 size_to_free = ptoa(page_count);
7026 zone_remove_wired_pages(page_count);
7027 }
7028
7029 zone_counter_sub(z, z_elems_free, free_count);
7030 zone_counter_sub(z, z_elems_avail, free_count);
7031 zone_counter_sub(z, z_wired_empty, page_count);
7032 zone_counter_sub(z, z_wired_cur, page_count);
7033 if (z->z_elems_free_min < free_count) {
7034 z->z_elems_free_min = 0;
7035 } else {
7036 z->z_elems_free_min -= free_count;
7037 }
7038 if (z->z_elems_free_max < free_count) {
7039 z->z_elems_free_max = 0;
7040 } else {
7041 z->z_elems_free_max -= free_count;
7042 }
7043
7044 bitmap_ref = 0;
7045 if (sequester) {
7046 if (meta->zm_inline_bitmap) {
7047 for (int i = 0; i < meta->zm_chunk_len; i++) {
7048 meta[i].zm_bitmap = 0;
7049 }
7050 } else {
7051 bitmap_ref = meta->zm_bitmap;
7052 meta->zm_bitmap = 0;
7053 }
7054 meta->zm_chunk_len = 0;
7055 } else {
7056 if (!meta->zm_inline_bitmap) {
7057 bitmap_ref = meta->zm_bitmap;
7058 }
7059 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7060 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guards));
7061 }
7062
7063 #if CONFIG_ZLEAKS
7064 if (__improbable(zleak_should_disable_for_zone(z) &&
7065 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7066 thread_call_enter(&zone_leaks_callout);
7067 }
7068 #endif /* CONFIG_ZLEAKS */
7069
7070 zone_unlock(z);
7071
7072 if (bitmap_ref) {
7073 zone_bits_free(bitmap_ref);
7074 }
7075
7076 /* Free the pages for metadata and account for them */
7077 #if KASAN_ZALLOC
7078 kasan_poison_range(page_addr, size_to_free, ASAN_VALID);
7079 #endif
7080 #if VM_TAG_SIZECLASSES
7081 if (z->z_uses_tags) {
7082 ztMemoryRemove(z, page_addr, size_to_free);
7083 }
7084 #endif /* VM_TAG_SIZECLASSES */
7085
7086 if (sequester) {
7087 kernel_memory_depopulate(zone_submap(zsflags), page_addr,
7088 size_to_free, KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7089 } else {
7090 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7091 kmem_free(zone_submap(zsflags), page_addr,
7092 ptoa(z->z_chunk_pages + oob_guards));
7093 #if CONFIG_PROB_GZALLOC
7094 if (z->z_pgz_use_guards) {
7095 os_atomic_dec(&zone_guard_pages, relaxed);
7096 }
7097 #endif /* CONFIG_PROB_GZALLOC */
7098 }
7099
7100 zone_magazine_free_list(mags);
7101 thread_yield_to_preemption();
7102
7103 zone_lock(z);
7104
7105 if (sequester) {
7106 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7107 }
7108 }
7109
7110 static uint16_t
7111 zone_reclaim_elements(zone_t z, uint16_t *count, zone_element_t *elems)
7112 {
7113 uint16_t n = *count;
7114
7115 z_debug_assert(n <= zc_mag_size());
7116
7117 for (uint16_t i = 0; i < n; i++) {
7118 zone_element_t ze = elems[i];
7119 elems[i].ze_value = 0;
7120 zfree_drop(z, zone_element_validate(z, ze), ze, false);
7121 }
7122
7123 *count = 0;
7124 return n;
7125 }
7126
7127 static uint16_t
7128 zone_reclaim_recirc_magazine(zone_t z, struct zone_depot *mags)
7129 {
7130 zone_magazine_t mag = STAILQ_FIRST(&z->z_recirc);
7131
7132 STAILQ_REMOVE_HEAD(&z->z_recirc, zm_link);
7133 STAILQ_INSERT_TAIL(mags, mag, zm_link);
7134 zone_counter_sub(z, z_recirc_cur, 1);
7135
7136 z_debug_assert(mag->zm_cur == zc_mag_size());
7137
7138 for (uint16_t i = 0; i < zc_mag_size(); i++) {
7139 zone_element_t ze = mag->zm_elems[i];
7140 mag->zm_elems[i].ze_value = 0;
7141 zfree_drop(z, zone_element_validate(z, ze), ze, true);
7142 }
7143
7144 mag->zm_cur = 0;
7145
7146 return zc_mag_size();
7147 }
7148
7149 static void
7150 zone_depot_trim(zone_cache_t zc, struct zone_depot *head)
7151 {
7152 zone_magazine_t mag;
7153
7154 if (zc->zc_depot_cur == 0 ||
7155 2 * (zc->zc_depot_cur + 1) * zc_mag_size() <= zc->zc_depot_max) {
7156 return;
7157 }
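	/*
	 * The early return above keeps the depot whenever it holds no more
	 * than roughly half of zc_depot_max elements.  With illustrative
	 * values (not necessarily the defaults) of zc_mag_size() == 16 and
	 * zc_depot_max == 128, three magazines (2 * 4 * 16 == 128) are left
	 * alone, while a fourth would exceed the threshold and be trimmed
	 * into "head" below.
	 */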
7158
7159 zone_depot_lock(zc);
7160
7161 while (zc->zc_depot_cur &&
7162 2 * (zc->zc_depot_cur + 1) * zc_mag_size() > zc->zc_depot_max) {
7163 mag = STAILQ_FIRST(&zc->zc_depot);
7164 STAILQ_REMOVE_HEAD(&zc->zc_depot, zm_link);
7165 STAILQ_INSERT_TAIL(head, mag, zm_link);
7166 zc->zc_depot_cur--;
7167 }
7168
7169 zone_depot_unlock(zc);
7170 }
7171
7172 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7173 ZONE_RECLAIM_TRIM,
7174 ZONE_RECLAIM_DRAIN,
7175 ZONE_RECLAIM_DESTROY,
7176 });
7177
7178 /*!
7179 * @function zone_reclaim
7180 *
7181 * @brief
7182  * Drains or trims the zone.
7183 *
7184 * @discussion
7185  * Draining the zone frees all of its elements.
7186 *
7187 * Trimming the zone tries to respect the working set size, and avoids draining
7188 * the depot when it's not necessary.
7189 *
7190 * @param z The zone to reclaim from
7191 * @param mode The purpose of this reclaim.
7192 */
7193 static void
7194 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7195 {
7196 struct zone_depot mags = STAILQ_HEAD_INITIALIZER(mags);
7197 zone_magazine_t mag;
7198 zone_security_flags_t zsflags = zone_security_config(z);
7199
7200 zone_lock(z);
7201
7202 if (mode == ZONE_RECLAIM_DESTROY) {
7203 if (!z->z_destructible || z->z_elems_rsv ||
7204 zsflags.z_allows_foreign) {
7205 panic("zdestroy: Zone %s%s isn't destructible",
7206 zone_heap_name(z), z->z_name);
7207 }
7208
7209 if (!z->z_self || z->z_expander || z->z_expander_vm_priv ||
7210 z->z_async_refilling || z->z_expanding_wait) {
7211 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7212 zone_heap_name(z), z->z_name);
7213 }
7214
7215 #if !KASAN_ZALLOC
7216 /*
7217 * Unset the valid bit. We'll hit an assert failure on further
7218 * operations on this zone, until zinit() is called again.
7219 *
7220 * Leave the zone valid for KASan as we will see zfree's on
7221 * quarantined free elements even after the zone is destroyed.
7222 */
7223 z->z_self = NULL;
7224 #endif
7225 z->z_destroyed = true;
7226 } else if (z->z_destroyed) {
7227 return zone_unlock(z);
7228 } else if (z->z_elems_free <= z->z_elems_rsv) {
7229 		/* If the zone is at or below its reserve level, leave it alone. */
7230 return zone_unlock(z);
7231 }
7232
7233 if (z->z_pcpu_cache) {
7234 if (mode != ZONE_RECLAIM_TRIM) {
7235 zpercpu_foreach(zc, z->z_pcpu_cache) {
7236 zc->zc_depot_max /= 2;
7237 }
7238 } else {
7239 zpercpu_foreach(zc, z->z_pcpu_cache) {
7240 if (zc->zc_depot_max > 0) {
7241 zc->zc_depot_max--;
7242 }
7243 }
7244 }
7245
7246 zone_unlock(z);
7247
7248 if (mode == ZONE_RECLAIM_TRIM) {
7249 zpercpu_foreach(zc, z->z_pcpu_cache) {
7250 zone_depot_trim(zc, &mags);
7251 }
7252 } else {
7253 zpercpu_foreach(zc, z->z_pcpu_cache) {
7254 zone_depot_lock(zc);
7255 STAILQ_CONCAT(&mags, &zc->zc_depot);
7256 zc->zc_depot_cur = 0;
7257 zone_depot_unlock(zc);
7258 }
7259 }
7260
7261 zone_lock(z);
7262
7263 uint32_t freed = 0;
7264
7265 STAILQ_FOREACH(mag, &mags, zm_link) {
7266 freed += zone_reclaim_elements(z,
7267 &mag->zm_cur, mag->zm_elems);
7268
7269 if (freed >= zc_free_batch_size) {
7270 z->z_elems_free_min += freed;
7271 z->z_elems_free_max += freed;
7272 z->z_elems_free += freed;
7273 zone_unlock(z);
7274 thread_yield_to_preemption();
7275 zone_lock(z);
7276 freed = 0;
7277 }
7278 }
7279
7280 if (mode == ZONE_RECLAIM_DESTROY) {
7281 zpercpu_foreach(zc, z->z_pcpu_cache) {
7282 freed += zone_reclaim_elements(z,
7283 &zc->zc_alloc_cur, zc->zc_alloc_elems);
7284 freed += zone_reclaim_elements(z,
7285 &zc->zc_free_cur, zc->zc_free_elems);
7286 }
7287
7288 z->z_elems_free_wss = 0;
7289 z->z_elems_free_min = 0;
7290 z->z_elems_free_max = 0;
7291 z->z_contention_cur = 0;
7292 z->z_contention_wma = 0;
7293 } else {
7294 z->z_elems_free_min += freed;
7295 z->z_elems_free_max += freed;
7296 }
7297 z->z_elems_free += freed;
7298 }
7299
7300 for (;;) {
7301 struct zone_page_metadata *meta;
7302 uint32_t count, goal, freed = 0;
7303
7304 goal = z->z_elems_rsv;
7305 if (mode == ZONE_RECLAIM_TRIM) {
7306 /*
7307 * When trimming, only free elements in excess
7308 * of the working set estimate.
7309 *
7310 			 * However, if the working set estimate is clearly growing,
7311 			 * ignore the estimate: the next working set update will grow
7312 			 * it anyway, and freeing down to it now would only cause
7313 			 * churn.
7314 */
7315 goal = MAX(goal, MAX(z->z_elems_free_wss,
7316 z->z_elems_free - z->z_elems_free_min));
7317
7318 /*
7319 * Add some slop to account for "the last partial chunk in flight"
7320 * so that we do not deplete the recirculation depot too harshly.
7321 */
7322 goal += z->z_chunk_elems / 2;
7323 }
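		/*
		 * Illustrative numbers (not defaults): with z_elems_rsv == 0,
		 * z_elems_free_wss == 500, z_elems_free == 800 and
		 * z_elems_free_min == 200, the goal above becomes
		 * MAX(500, 800 - 200) == 600 plus half a chunk of slop, and
		 * only elements beyond that are reclaimed below.
		 */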
7324
7325 if (z->z_elems_free <= goal) {
7326 break;
7327 }
7328
7329 /*
7330 * If we're above target, but we have no free page, then drain
7331 * the recirculation depot until we get a free chunk or exhaust
7332 * the depot.
7333 *
7334 		 * This is rather abrupt, but it also tends to reduce
7335 		 * fragmentation, and the zone will import fresh pages again
7336 		 * over time if it needs them.
7337 */
7338 while (z->z_recirc_cur && zone_pva_is_null(z->z_pageq_empty)) {
7339 if (freed >= zc_free_batch_size) {
7340 zone_unlock(z);
7341 zone_magazine_free_list(&mags);
7342 thread_yield_to_preemption();
7343 zone_lock(z);
7344 freed = 0;
7345 				/* we dropped the lock, need to reassess */
7346 continue;
7347 }
7348 freed += zone_reclaim_recirc_magazine(z, &mags);
7349 }
7350
7351 if (zone_pva_is_null(z->z_pageq_empty)) {
7352 break;
7353 }
7354
7355 meta = zone_pva_to_meta(z->z_pageq_empty);
7356 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_size(z);
7357
7358 if (z->z_elems_free - count < goal) {
7359 break;
7360 }
7361
7362 zone_reclaim_chunk(z, meta, count, &mags);
7363 }
7364
7365 zone_unlock(z);
7366
7367 zone_magazine_free_list(&mags);
7368 }
7369
7370 static void
7371 zone_reclaim_all(zone_reclaim_mode_t mode)
7372 {
7373 /*
7374 * Start with zones with VA sequester since depopulating
7375 * pages will not need to allocate vm map entries for holes,
7376 * which will give memory back to the system faster.
7377 */
7378 zone_index_foreach(zid) {
7379 zone_t z = &zone_array[zid];
7380 if (z == zc_magazine_zone) {
7381 continue;
7382 }
7383 if (zone_security_array[zid].z_va_sequester && z->collectable) {
7384 zone_reclaim(z, mode);
7385 }
7386 }
7387
7388 zone_index_foreach(zid) {
7389 zone_t z = &zone_array[zid];
7390 if (z == zc_magazine_zone) {
7391 continue;
7392 }
7393 if (!zone_security_array[zid].z_va_sequester && z->collectable) {
7394 zone_reclaim(z, mode);
7395 }
7396 }
7397
7398 zone_reclaim(zc_magazine_zone, mode);
7399 }
7400
7401 void
7402 zone_userspace_reboot_checks(void)
7403 {
7404 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7405 if (label_zone_size != 0) {
7406 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7407 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7408 }
7409 }
7410
7411 void
7412 zone_gc(zone_gc_level_t level)
7413 {
7414 zone_reclaim_mode_t mode;
7415 zone_t largest_zone = NULL;
7416
7417 switch (level) {
7418 case ZONE_GC_TRIM:
7419 mode = ZONE_RECLAIM_TRIM;
7420 break;
7421 case ZONE_GC_DRAIN:
7422 mode = ZONE_RECLAIM_DRAIN;
7423 break;
7424 case ZONE_GC_JETSAM:
7425 largest_zone = kill_process_in_largest_zone();
7426 mode = ZONE_RECLAIM_TRIM;
7427 break;
7428 }
7429
7430 current_thread()->options |= TH_OPT_ZONE_PRIV;
7431 lck_mtx_lock(&zone_gc_lock);
7432
7433 zone_reclaim_all(mode);
7434
7435 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7436 /*
7437 * If we possibly killed a process, but we're still critical,
7438 * we need to drain harder.
7439 */
7440 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7441 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7442 }
7443
7444 lck_mtx_unlock(&zone_gc_lock);
7445 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7446 }
7447
7448 void
7449 zone_gc_trim(void)
7450 {
7451 zone_gc(ZONE_GC_TRIM);
7452 }
7453
7454 void
7455 zone_gc_drain(void)
7456 {
7457 zone_gc(ZONE_GC_DRAIN);
7458 }
7459
7460 static bool
7461 zone_defrag_needed(zone_t z)
7462 {
7463 uint32_t recirc_size = z->z_recirc_cur * zc_mag_size();
7464
7465 if (recirc_size <= z->z_chunk_elems / 2) {
7466 return false;
7467 }
7468 if (recirc_size * z->z_elem_size <= zc_defrag_threshold) {
7469 return false;
7470 }
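	/*
	 * The comparison below is a scaled percentage test: defragment only
	 * when recirc_size exceeds z_elems_free_wss * 100 / zc_defrag_ratio.
	 * For example, if zc_defrag_ratio were 50 (illustrative, not
	 * necessarily the default), the depot would need to hold more than
	 * twice the working set estimate.
	 */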
7471 return recirc_size * zc_defrag_ratio > z->z_elems_free_wss * 100;
7472 }
7473
7474 /*!
7475 * @function zone_defrag
7476 *
7477 * @brief
7478 * Resize the recirculation depot to match the working set size.
7479 *
7480 * @discussion
7481 * When zones grow very large due to a spike in usage, and then some of those
7482 * elements get freed, the elements in magazines in the recirculation depot
7483 * are in no particular order.
7484 *
7485  * In order to control fragmentation, we need to detect "empty" pages so that
7486  * they land on the @c z_pageq_empty freelist, where allocations re-pack
7487  * naturally.
7488  *
7489  * This is done very gently, never freeing more than the working set plus some slop.
7490 */
7491 static bool
7492 zone_autogc_needed(zone_t z)
7493 {
7494 uint32_t free_min = z->z_elems_free_min;
7495
7496 if (free_min * z->z_elem_size <= zc_autogc_threshold) {
7497 return false;
7498 }
7499
7500 return free_min * zc_autogc_ratio > z->z_elems_free_wss * 100;
7501 }
7502
7503 static void
7504 zone_defrag(zone_t z)
7505 {
7506 struct zone_depot mags = STAILQ_HEAD_INITIALIZER(mags);
7507 zone_magazine_t mag, tmp;
7508 uint32_t freed = 0, goal = 0;
7509
7510 zone_lock(z);
7511
7512 goal = z->z_elems_free_wss + z->z_chunk_elems / 2 +
7513 zc_mag_size() - 1;
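	/*
	 * The goal above is the working set estimate plus half a chunk of
	 * slop, rounded up to a whole magazine by the "+ zc_mag_size() - 1"
	 * term, since the loop below can only release whole magazines from
	 * the recirculation depot.
	 */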
7514
7515 while (z->z_recirc_cur * zc_mag_size() > goal) {
7516 if (freed >= zc_free_batch_size) {
7517 zone_unlock(z);
7518 thread_yield_to_preemption();
7519 zone_lock(z);
7520 freed = 0;
7521 			/* we dropped the lock, need to reassess */
7522 continue;
7523 }
7524 freed += zone_reclaim_recirc_magazine(z, &mags);
7525 }
7526
7527 zone_unlock(z);
7528
7529 STAILQ_FOREACH_SAFE(mag, &mags, zm_link, tmp) {
7530 zone_magazine_free(mag);
7531 }
7532 }
7533
7534 static void
7535 zone_defrag_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7536 {
7537 zone_foreach(z) {
7538 if (!z->collectable || z == zc_magazine_zone) {
7539 continue;
7540 }
7541
7542 if (zone_autogc_needed(z)) {
7543 current_thread()->options |= TH_OPT_ZONE_PRIV;
7544 lck_mtx_lock(&zone_gc_lock);
7545 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7546 lck_mtx_unlock(&zone_gc_lock);
7547 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7548 } else if (zone_defrag_needed(z)) {
7549 zone_defrag(z);
7550 }
7551 }
7552
7553 if (zone_autogc_needed(zc_magazine_zone)) {
7554 current_thread()->options |= TH_OPT_ZONE_PRIV;
7555 lck_mtx_lock(&zone_gc_lock);
7556 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7557 lck_mtx_unlock(&zone_gc_lock);
7558 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7559 } else if (zone_defrag_needed(zc_magazine_zone)) {
7560 zone_defrag(zc_magazine_zone);
7561 }
7562 }
7563
7564 void
7565 compute_zone_working_set_size(__unused void *param)
7566 {
7567 uint32_t zc_auto = zc_auto_threshold;
7568 bool kick_defrag = false;
7569
7570 /*
7571 * Keep zone caching disabled until the first proc is made.
7572 */
7573 if (__improbable(zone_caching_disabled < 0)) {
7574 return;
7575 }
7576
7577 zone_caching_disabled = vm_pool_low();
7578
7579 if (os_mul_overflow(zc_auto, Z_CONTENTION_WMA_UNIT, &zc_auto)) {
7580 zc_auto = 0;
7581 }
7582
7583 zone_foreach(z) {
7584 uint32_t wma;
7585 bool needs_caching = false;
7586
7587 if (z->z_self != z) {
7588 continue;
7589 }
7590
7591 zone_lock(z);
7592
7593 wma = z->z_elems_free_max - z->z_elems_free_min;
7594 wma = (3 * wma + z->z_elems_free_wss) / 4;
7595 z->z_elems_free_max = z->z_elems_free_min = z->z_elems_free;
7596 z->z_elems_free_wss = wma;
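		/*
		 * z_elems_free_wss is an exponential moving average of the
		 * per-period swing in free elements (max - min), weighting the
		 * newest observation 3/4 and the previous estimate 1/4: for
		 * example, an old estimate of 100 and a new swing of 200 yield
		 * (3 * 200 + 100) / 4 == 175.
		 */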
7597
7598 if (!kick_defrag &&
7599 (zone_defrag_needed(z) || zone_autogc_needed(z))) {
7600 kick_defrag = true;
7601 }
7602
7603 /* fixed point decimal of contentions per second */
7604 wma = z->z_contention_cur * Z_CONTENTION_WMA_UNIT /
7605 ZONE_WSS_UPDATE_PERIOD;
7606 z->z_contention_cur = 0;
7607 z->z_contention_wma = (3 * wma + z->z_contention_wma) / 4;
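		/*
		 * z_contention_wma tracks lock contentions per second in fixed
		 * point (Z_CONTENTION_WMA_UNIT is the scale), smoothed with the
		 * same 3/4 weighting on the latest period as the working set
		 * estimate above.
		 */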
7608
7609 /*
7610 * If the zone seems to be very quiet,
7611 * gently lower its cpu-local depot size.
7612 */
7613 if (z->z_pcpu_cache && wma < Z_CONTENTION_WMA_UNIT / 2 &&
7614 z->z_contention_wma < Z_CONTENTION_WMA_UNIT / 2) {
7615 zpercpu_foreach(zc, z->z_pcpu_cache) {
7616 if (zc->zc_depot_max > zc_mag_size()) {
7617 zc->zc_depot_max--;
7618 }
7619 }
7620 }
7621
7622 /*
7623 * If the zone has been contending like crazy for two periods,
7624 * and is eligible, maybe it's time to enable caching.
7625 */
7626 if (!z->z_nocaching && !z->z_pcpu_cache && !z->exhaustible &&
7627 zc_auto && z->z_contention_wma >= zc_auto && wma >= zc_auto) {
7628 needs_caching = true;
7629 }
7630
7631 zone_unlock(z);
7632
7633 if (needs_caching) {
7634 zone_enable_caching(z);
7635 }
7636 }
7637
7638 if (kick_defrag) {
7639 thread_call_enter(&zone_defrag_callout);
7640 }
7641 }
7642
7643 #endif /* !ZALLOC_TEST */
7644 #pragma mark vm integration, MIG routines
7645 #if !ZALLOC_TEST
7646
7647 extern unsigned int stack_total;
7648 #if defined (__x86_64__)
7649 extern unsigned int inuse_ptepages_count;
7650 #endif
7651
7652 static void
7653 panic_print_types_in_zone(zone_t z, const char* debug_str)
7654 {
7655 kalloc_type_view_t kt_cur = NULL;
7656 const char *prev_type = "";
7657 size_t skip_over_site = sizeof("site.") - 1;
7658 paniclog_append_noflush("kalloc types in zone, %s (%s):\n",
7659 debug_str, z->z_name);
7660 kt_cur = (kalloc_type_view_t) z->z_views;
7661 while (kt_cur) {
7662 struct zone_view kt_zv = kt_cur->kt_zv;
7663 const char *typename = kt_zv.zv_name + skip_over_site;
7664 if (strcmp(typename, prev_type) != 0) {
7665 paniclog_append_noflush("\t%-50s\n", typename);
7666 prev_type = typename;
7667 }
7668 kt_cur = (kalloc_type_view_t) kt_zv.zv_next;
7669 }
7670 paniclog_append_noflush("\n");
7671 }
7672
7673 static void
7674 panic_display_kalloc_types(void)
7675 {
7676 if (kalloc_type_src_zone) {
7677 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7678 }
7679 if (kalloc_type_dst_zone) {
7680 panic_print_types_in_zone(kalloc_type_dst_zone,
7681 "addr is being freed to");
7682 }
7683 }
7684
7685 static void
7686 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7687 uint64_t *zone_size)
7688 {
7689 zone_index_foreach(zid) {
7690 zone_t z = &zone_array[zid];
7691 vm_offset_t size = zone_size_wired(z);
7692
7693 if (zid == ZONE_ID_VM_PAGES) {
7694 continue;
7695 }
7696 for (uint32_t i = 0; i < n; i++) {
7697 if (size > zone_size[i]) {
7698 largest_zones[i] = z;
7699 zone_size[i] = size;
7700 break;
7701 }
7702 }
7703 }
7704 }
7705
7706 #define NUM_LARGEST_ZONES 5
7707 static void
7708 panic_display_largest_zones(void)
7709 {
7710 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
7711 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
7712
7713 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
7714 (uint64_t *) &largest_size);
7715
7716 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
7717 "Zone Name", "Cur Size", "Free Size");
7718 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
7719 zone_t z = largest_zones[i];
7720 paniclog_append_noflush("%-8s%-20s %9luM %9luK\n",
7721 zone_heap_name(z), z->z_name,
7722 (uintptr_t)largest_size[i] >> 20,
7723 (uintptr_t)zone_size_free(z) >> 10);
7724 }
7725 }
7726
7727 static void
7728 panic_display_zprint(void)
7729 {
7730 panic_display_largest_zones();
7731 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
7732 (uintptr_t)(kernel_stack_size * stack_total));
7733 #if defined (__x86_64__)
7734 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
7735 (uintptr_t)ptoa(inuse_ptepages_count));
7736 #endif
7737 paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large",
7738 (uintptr_t)kalloc_large_total);
7739
7740 if (panic_kext_memory_info) {
7741 mach_memory_info_t *mem_info = panic_kext_memory_info;
7742
7743 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
7744 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
7745 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
7746 continue;
7747 }
7748 if (mem_info[i].size > (1024 * 1024)) {
7749 paniclog_append_noflush("%-5lld %10lld\n",
7750 mem_info[i].site, mem_info[i].size);
7751 }
7752 }
7753 }
7754 }
7755
7756 static void
7757 panic_display_zone_info(void)
7758 {
7759 paniclog_append_noflush("Zone info:\n");
7760 for (uint32_t i = 0; i < ZONE_ADDR_KIND_COUNT; i++) {
7761 paniclog_append_noflush(" %-8s: %p - %p\n",
7762 zone_map_range_names[i],
7763 (void *) zone_info.zi_map_range[i].min_address,
7764 (void *) zone_info.zi_map_range[i].max_address);
7765 }
7766 paniclog_append_noflush(" Metadata: %p - %p\n"
7767 " Bitmaps : %p - %p\n"
7768 "\n",
7769 (void *) zone_info.zi_meta_range.min_address,
7770 (void *) zone_info.zi_meta_range.max_address,
7771 (void *) zone_info.zi_bits_range.min_address,
7772 (void *) zone_info.zi_bits_range.max_address);
7773 }
7774
7775 static void
7776 panic_display_zone_fault(vm_offset_t addr)
7777 {
7778 struct zone_page_metadata meta = { };
7779 vm_map_t map = VM_MAP_NULL;
7780 vm_offset_t oob_offs = 0, size = 0;
7781 int map_idx = -1;
7782 zone_t z = NULL;
7783 	const char *kind = "wild deref";
7784 bool oob = false;
7785
7786 /*
7787 * First: look if we bumped into guard pages between submaps
7788 */
7789 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7790 map = zone_submaps[i];
7791 if (map == VM_MAP_NULL) {
7792 continue;
7793 }
7794
7795 if (addr >= map->min_offset && addr < map->max_offset) {
7796 map_idx = i;
7797 break;
7798 }
7799 }
7800
7801 if (map_idx == -1) {
7802 /* this really shouldn't happen, submaps are back to back */
7803 return;
7804 }
7805
7806 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
7807
7808 /*
7809 * Second: look if there's just no metadata at all
7810 */
7811 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
7812 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
7813 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
7814 zone_array[meta.zm_index].z_self == NULL) {
7815 paniclog_append_noflush(" Zone : <unknown>\n");
7816 kind = "wild deref, missing or invalid metadata";
7817 } else {
7818 z = &zone_array[meta.zm_index];
7819 paniclog_append_noflush(" Zone : %s%s\n",
7820 zone_heap_name(z), zone_name(z));
7821 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
7822 kind = "out-of-bounds (high confidence)";
7823 oob = true;
7824 size = zone_element_size((void *)addr,
7825 &z, false, &oob_offs);
7826 } else {
7827 kind = "use-after-free (medium confidence)";
7828 }
7829 }
7830
7831 paniclog_append_noflush(" Address : %p\n", (void *)addr);
7832 if (oob) {
7833 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
7834 (void *)(trunc_page(addr) - (size - oob_offs)),
7835 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
7836 }
7837 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
7838 zone_submaps_names[map_idx],
7839 (void *)map->min_offset, (void *)map->max_offset);
7840 paniclog_append_noflush(" Kind : %s\n", kind);
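	/*
	 * For the out-of-bounds case the element ends at the page boundary
	 * preceding the guard page, so the faulting address' offset within
	 * the guard page, plus one, is how many bytes past the end the
	 * access reached.
	 */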
7841 if (oob) {
7842 paniclog_append_noflush(" Access : %d byte(s) past\n",
7843 (uint32_t)(addr & PAGE_MASK) + 1);
7844 }
7845 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
7846 "0x%04x 0x%08x 0x%08x 0x%08x\n",
7847 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
7848 meta.zm_alloc_size, meta.zm_bitmap,
7849 meta.zm_page_next.packed_address,
7850 meta.zm_page_prev.packed_address);
7851 paniclog_append_noflush("\n");
7852 }
7853
7854 void
7855 panic_display_zalloc(void)
7856 {
7857 bool keepsyms = false;
7858
7859 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
7860
7861 panic_display_zone_info();
7862
7863 if (panic_fault_address) {
7864 #if CONFIG_PROB_GZALLOC
7865 if (pgz_owned(panic_fault_address)) {
7866 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
7867 } else
7868 #endif /* CONFIG_PROB_GZALLOC */
7869 if (zone_maps_owned(panic_fault_address, 1)) {
7870 panic_display_zone_fault(panic_fault_address);
7871 }
7872 }
7873
7874 if (panic_include_zprint) {
7875 panic_display_zprint();
7876 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
7877 panic_display_largest_zones();
7878 }
7879 #if CONFIG_ZLEAKS
7880 if (zleak_active) {
7881 panic_display_zleaks(keepsyms);
7882 }
7883 #endif
7884 if (panic_include_kalloc_types) {
7885 panic_display_kalloc_types();
7886 }
7887 }
7888
7889 /*
7890 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
7891 * requesting zone information.
7892  * Frees unused pages towards the end of the region, and zeroes out unused
7893 * space on the last page.
7894 */
7895 static vm_map_copy_t
7896 create_vm_map_copy(
7897 vm_offset_t start_addr,
7898 vm_size_t total_size,
7899 vm_size_t used_size)
7900 {
7901 kern_return_t kr;
7902 vm_offset_t end_addr;
7903 vm_size_t free_size;
7904 vm_map_copy_t copy;
7905
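	/*
	 * For example, if total_size were 4 pages but only 1.5 pages are
	 * used, round_page(end_addr) lands at the 2-page mark: the trailing
	 * 2 pages are returned to ipc_kernel_map and the unused half of the
	 * second page is zeroed so no stale kernel data is copied out.
	 */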
7906 if (used_size != total_size) {
7907 end_addr = start_addr + used_size;
7908 free_size = total_size - (round_page(end_addr) - start_addr);
7909
7910 if (free_size >= PAGE_SIZE) {
7911 kmem_free(ipc_kernel_map,
7912 round_page(end_addr), free_size);
7913 }
7914 bzero((char *) end_addr, round_page(end_addr) - end_addr);
7915 }
7916
7917 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
7918 	    (vm_map_size_t)used_size, TRUE, &copy);
7919 assert(kr == KERN_SUCCESS);
7920
7921 return copy;
7922 }
7923
7924 static boolean_t
7925 get_zone_info(
7926 zone_t z,
7927 mach_zone_name_t *zn,
7928 mach_zone_info_t *zi)
7929 {
7930 struct zone zcopy;
7931 vm_size_t cached = 0;
7932
7933 assert(z != ZONE_NULL);
7934 zone_lock(z);
7935 if (!z->z_self) {
7936 zone_unlock(z);
7937 return FALSE;
7938 }
7939 zcopy = *z;
7940 if (z->z_pcpu_cache) {
7941 zpercpu_foreach(zc, z->z_pcpu_cache) {
7942 cached += zc->zc_alloc_cur + zc->zc_free_cur;
7943 cached += zc->zc_depot_cur * zc_mag_size();
7944 }
7945 }
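	/*
	 * "cached" counts elements sitting in the per-CPU alloc/free
	 * magazines and depots; they are subtracted from mzi_count below so
	 * that consumers see elements actually handed out, not ones merely
	 * staged in caches.
	 */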
7946 zone_unlock(z);
7947
7948 if (zn != NULL) {
7949 /*
7950 * Append kalloc heap name to zone name (if zone is used by kalloc)
7951 */
7952 char temp_zone_name[MAX_ZONE_NAME] = "";
7953 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
7954 zone_heap_name(z), z->z_name);
7955
7956 /* assuming here the name data is static */
7957 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
7958 strlen(temp_zone_name) + 1);
7959 }
7960
7961 if (zi != NULL) {
7962 *zi = (mach_zone_info_t) {
7963 .mzi_count = zone_count_allocated(&zcopy) - cached,
7964 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
7965 // max_size for zprint is now high-watermark of pages used
7966 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
7967 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
7968 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
7969 .mzi_exhaustible = (uint64_t)zcopy.exhaustible,
7970 };
7971 zpercpu_foreach(zs, zcopy.z_stats) {
7972 zi->mzi_sum_size += zs->zs_mem_allocated;
7973 }
7974 if (zcopy.collectable) {
7975 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
7976 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
7977 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
7978 }
7979 }
7980
7981 return TRUE;
7982 }
7983
7984 kern_return_t
7985 task_zone_info(
7986 __unused task_t task,
7987 __unused mach_zone_name_array_t *namesp,
7988 __unused mach_msg_type_number_t *namesCntp,
7989 __unused task_zone_info_array_t *infop,
7990 __unused mach_msg_type_number_t *infoCntp)
7991 {
7992 return KERN_FAILURE;
7993 }
7994
7995
7996 /* mach_memory_info entitlement */
7997 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
7998
7999 /* macro needed to rate-limit mach_memory_info */
8000 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8001
8002 /* declarations necessary to call kauth_cred_issuser() */
8003 struct ucred;
8004 extern int kauth_cred_issuser(struct ucred *);
8005 extern struct ucred *kauth_cred_get(void);
8006
8007 static kern_return_t
8008 mach_memory_info_security_check(void)
8009 {
8010 /* If not root or does not have the memoryinfo entitlement, fail */
8011 if (!kauth_cred_issuser(kauth_cred_get())) {
8012 return KERN_NO_ACCESS;
8013 }
8014
8015 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8016 if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8017 return KERN_DENIED;
8018 }
8019
8020 /*
8021 * On release non-mac arm devices, allow mach_memory_info
8022 * to be called twice per day per boot. memorymaintenanced
8023 * calls it once per day, which leaves room for a sysdiagnose.
8024 */
8025 static uint64_t first_call, second_call = 0;
8026 uint64_t now = 0;
8027 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8028
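	/*
	 * Sliding window: the first two calls after boot (or after the
	 * window resets) are recorded and allowed, a third call within
	 * NSEC_DAY of the first is denied, and once a full day has elapsed
	 * since the first recorded call the window resets and counting
	 * starts over.
	 */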
8029 if (!first_call) {
8030 first_call = now;
8031 } else if (!second_call) {
8032 second_call = now;
8033 } else if (first_call + NSEC_DAY > now) {
8034 return KERN_DENIED;
8035 } else if (first_call + NSEC_DAY < now) {
8036 first_call = now;
8037 second_call = 0;
8038 }
8039 #endif
8040
8041 return KERN_SUCCESS;
8042 }
8043
8044 kern_return_t
8045 mach_zone_info(
8046 host_priv_t host,
8047 mach_zone_name_array_t *namesp,
8048 mach_msg_type_number_t *namesCntp,
8049 mach_zone_info_array_t *infop,
8050 mach_msg_type_number_t *infoCntp)
8051 {
8052 return mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8053 }
8054
8055
8056 kern_return_t
8057 mach_memory_info(
8058 host_priv_t host,
8059 mach_zone_name_array_t *namesp,
8060 mach_msg_type_number_t *namesCntp,
8061 mach_zone_info_array_t *infop,
8062 mach_msg_type_number_t *infoCntp,
8063 mach_memory_info_array_t *memoryInfop,
8064 mach_msg_type_number_t *memoryInfoCntp)
8065 {
8066 mach_zone_name_t *names;
8067 vm_offset_t names_addr;
8068 vm_size_t names_size;
8069
8070 mach_zone_info_t *info;
8071 vm_offset_t info_addr;
8072 vm_size_t info_size;
8073
8074 mach_memory_info_t *memory_info;
8075 vm_offset_t memory_info_addr;
8076 vm_size_t memory_info_size;
8077 vm_size_t memory_info_vmsize;
8078 unsigned int num_info;
8079
8080 unsigned int max_zones, used_zones, i;
8081 mach_zone_name_t *zn;
8082 mach_zone_info_t *zi;
8083 kern_return_t kr;
8084
8085 uint64_t zones_collectable_bytes = 0;
8086
8087 if (host == HOST_NULL) {
8088 return KERN_INVALID_HOST;
8089 }
8090
8091 kr = mach_memory_info_security_check();
8092 if (kr != KERN_SUCCESS) {
8093 return kr;
8094 }
8095
8096 /*
8097 * We assume that zones aren't freed once allocated.
8098 * We won't pick up any zones that are allocated later.
8099 */
8100
8101 max_zones = os_atomic_load(&num_zones, relaxed);
8102
8103 names_size = round_page(max_zones * sizeof *names);
8104 kr = kmem_alloc_pageable(ipc_kernel_map,
8105 &names_addr, names_size, VM_KERN_MEMORY_IPC);
8106 if (kr != KERN_SUCCESS) {
8107 return kr;
8108 }
8109 names = (mach_zone_name_t *) names_addr;
8110
8111 info_size = round_page(max_zones * sizeof *info);
8112 kr = kmem_alloc_pageable(ipc_kernel_map,
8113 &info_addr, info_size, VM_KERN_MEMORY_IPC);
8114 if (kr != KERN_SUCCESS) {
8115 kmem_free(ipc_kernel_map,
8116 names_addr, names_size);
8117 return kr;
8118 }
8119 info = (mach_zone_info_t *) info_addr;
8120
8121 zn = &names[0];
8122 zi = &info[0];
8123
8124 used_zones = max_zones;
8125 for (i = 0; i < max_zones; i++) {
8126 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8127 used_zones--;
8128 continue;
8129 }
8130 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8131 zn++;
8132 zi++;
8133 }
8134
8135 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8136 *namesCntp = used_zones;
8137
8138 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8139 *infoCntp = used_zones;
8140
8141 num_info = 0;
8142 memory_info_addr = 0;
8143
8144 if (memoryInfop && memoryInfoCntp) {
8145 vm_map_copy_t copy;
8146 num_info = vm_page_diagnose_estimate();
8147 memory_info_size = num_info * sizeof(*memory_info);
8148 memory_info_vmsize = round_page(memory_info_size);
8149 kr = kmem_alloc_pageable(ipc_kernel_map,
8150 &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
8151 if (kr != KERN_SUCCESS) {
8152 return kr;
8153 }
8154
8155 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8156 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8157 assert(kr == KERN_SUCCESS);
8158
8159 memory_info = (mach_memory_info_t *) memory_info_addr;
8160 vm_page_diagnose(memory_info, num_info, zones_collectable_bytes);
8161
8162 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8163 assert(kr == KERN_SUCCESS);
8164
8165 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8166 		    (vm_map_size_t)memory_info_size, TRUE, &copy);
8167 assert(kr == KERN_SUCCESS);
8168
8169 *memoryInfop = (mach_memory_info_t *) copy;
8170 *memoryInfoCntp = num_info;
8171 }
8172
8173 return KERN_SUCCESS;
8174 }
8175
8176 kern_return_t
8177 mach_zone_info_for_zone(
8178 host_priv_t host,
8179 mach_zone_name_t name,
8180 mach_zone_info_t *infop)
8181 {
8182 zone_t zone_ptr;
8183
8184 if (host == HOST_NULL) {
8185 return KERN_INVALID_HOST;
8186 }
8187
8188 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8189 if (!PE_i_can_has_debugger(NULL)) {
8190 return KERN_INVALID_HOST;
8191 }
8192 #endif
8193
8194 if (infop == NULL) {
8195 return KERN_INVALID_ARGUMENT;
8196 }
8197
8198 zone_ptr = ZONE_NULL;
8199 zone_foreach(z) {
8200 /*
8201 * Append kalloc heap name to zone name (if zone is used by kalloc)
8202 */
8203 char temp_zone_name[MAX_ZONE_NAME] = "";
8204 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8205 zone_heap_name(z), z->z_name);
8206
8207 /* Find the requested zone by name */
8208 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8209 zone_ptr = z;
8210 break;
8211 }
8212 }
8213
8214 /* No zones found with the requested zone name */
8215 if (zone_ptr == ZONE_NULL) {
8216 return KERN_INVALID_ARGUMENT;
8217 }
8218
8219 if (get_zone_info(zone_ptr, NULL, infop)) {
8220 return KERN_SUCCESS;
8221 }
8222 return KERN_FAILURE;
8223 }
8224
8225 kern_return_t
8226 mach_zone_info_for_largest_zone(
8227 host_priv_t host,
8228 mach_zone_name_t *namep,
8229 mach_zone_info_t *infop)
8230 {
8231 if (host == HOST_NULL) {
8232 return KERN_INVALID_HOST;
8233 }
8234
8235 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8236 if (!PE_i_can_has_debugger(NULL)) {
8237 return KERN_INVALID_HOST;
8238 }
8239 #endif
8240
8241 if (namep == NULL || infop == NULL) {
8242 return KERN_INVALID_ARGUMENT;
8243 }
8244
8245 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8246 return KERN_SUCCESS;
8247 }
8248 return KERN_FAILURE;
8249 }
8250
8251 uint64_t
8252 get_zones_collectable_bytes(void)
8253 {
8254 uint64_t zones_collectable_bytes = 0;
8255 mach_zone_info_t zi;
8256
8257 zone_foreach(z) {
8258 if (get_zone_info(z, NULL, &zi)) {
8259 zones_collectable_bytes +=
8260 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8261 }
8262 }
8263
8264 return zones_collectable_bytes;
8265 }
8266
8267 kern_return_t
8268 mach_zone_get_zlog_zones(
8269 host_priv_t host,
8270 mach_zone_name_array_t *namesp,
8271 mach_msg_type_number_t *namesCntp)
8272 {
8273 #if ZONE_ENABLE_LOGGING
8274 unsigned int max_zones, logged_zones, i;
8275 kern_return_t kr;
8276 zone_t zone_ptr;
8277 mach_zone_name_t *names;
8278 vm_offset_t names_addr;
8279 vm_size_t names_size;
8280
8281 if (host == HOST_NULL) {
8282 return KERN_INVALID_HOST;
8283 }
8284
8285 if (namesp == NULL || namesCntp == NULL) {
8286 return KERN_INVALID_ARGUMENT;
8287 }
8288
8289 max_zones = os_atomic_load(&num_zones, relaxed);
8290
8291 names_size = round_page(max_zones * sizeof *names);
8292 kr = kmem_alloc_pageable(ipc_kernel_map,
8293 &names_addr, names_size, VM_KERN_MEMORY_IPC);
8294 if (kr != KERN_SUCCESS) {
8295 return kr;
8296 }
8297 names = (mach_zone_name_t *) names_addr;
8298
8299 zone_ptr = ZONE_NULL;
8300 logged_zones = 0;
8301 for (i = 0; i < max_zones; i++) {
8302 zone_t z = &(zone_array[i]);
8303 assert(z != ZONE_NULL);
8304
8305 /* Copy out the zone name if zone logging is enabled */
8306 if (z->z_btlog) {
8307 get_zone_info(z, &names[logged_zones], NULL);
8308 logged_zones++;
8309 }
8310 }
8311
8312 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8313 *namesCntp = logged_zones;
8314
8315 return KERN_SUCCESS;
8316
8317 #else /* ZONE_ENABLE_LOGGING */
8318 #pragma unused(host, namesp, namesCntp)
8319 return KERN_FAILURE;
8320 #endif /* ZONE_ENABLE_LOGGING */
8321 }
8322
8323 kern_return_t
8324 mach_zone_get_btlog_records(
8325 host_priv_t host,
8326 mach_zone_name_t name,
8327 zone_btrecord_array_t *recsp,
8328 mach_msg_type_number_t *numrecs)
8329 {
8330 #if ZONE_ENABLE_LOGGING
8331 zone_btrecord_t *recs;
8332 kern_return_t kr;
8333 vm_address_t addr;
8334 vm_size_t size;
8335 zone_t zone_ptr;
8336 vm_map_copy_t copy;
8337
8338 if (host == HOST_NULL) {
8339 return KERN_INVALID_HOST;
8340 }
8341
8342 if (recsp == NULL || numrecs == NULL) {
8343 return KERN_INVALID_ARGUMENT;
8344 }
8345
8346 zone_ptr = ZONE_NULL;
8347 zone_foreach(z) {
8348 /*
8349 * Append kalloc heap name to zone name (if zone is used by kalloc)
8350 */
8351 char temp_zone_name[MAX_ZONE_NAME] = "";
8352 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8353 zone_heap_name(z), z->z_name);
8354
8355 /* Find the requested zone by name */
8356 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8357 zone_ptr = z;
8358 break;
8359 }
8360 }
8361
8362 /* No zones found with the requested zone name */
8363 if (zone_ptr == ZONE_NULL) {
8364 return KERN_INVALID_ARGUMENT;
8365 }
8366
8367 /* Logging not turned on for the requested zone */
8368 if (!zone_ptr->z_btlog) {
8369 return KERN_FAILURE;
8370 }
8371
8372 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8373 if (kr != KERN_SUCCESS) {
8374 return kr;
8375 }
8376
8377 addr = (vm_address_t)recs;
8378 size = sizeof(zone_btrecord_t) * *numrecs;
8379
8380 	kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8381 assert(kr == KERN_SUCCESS);
8382
8383 *recsp = (zone_btrecord_t *)copy;
8384 return KERN_SUCCESS;
8385
8386 #else /* !ZONE_ENABLE_LOGGING */
8387 #pragma unused(host, name, recsp, numrecs)
8388 return KERN_FAILURE;
8389 #endif /* !ZONE_ENABLE_LOGGING */
8390 }
8391
8392
8393 #if DEBUG || DEVELOPMENT
8394
8395 kern_return_t
8396 mach_memory_info_check(void)
8397 {
8398 mach_memory_info_t * memory_info;
8399 mach_memory_info_t * info;
8400 unsigned int num_info;
8401 vm_offset_t memory_info_addr;
8402 kern_return_t kr;
8403 size_t memory_info_size, memory_info_vmsize;
8404 uint64_t top_wired, zonestotal, total;
8405
8406 num_info = vm_page_diagnose_estimate();
8407 memory_info_size = num_info * sizeof(*memory_info);
8408 memory_info_vmsize = round_page(memory_info_size);
8409 kr = kmem_alloc(kernel_map, &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_DIAG);
8410 assert(kr == KERN_SUCCESS);
8411
8412 memory_info = (mach_memory_info_t *) memory_info_addr;
8413 vm_page_diagnose(memory_info, num_info, 0);
8414
8415 top_wired = total = zonestotal = 0;
8416 zone_foreach(z) {
8417 zonestotal += zone_size_wired(z);
8418 }
8419
8420 for (uint32_t idx = 0; idx < num_info; idx++) {
8421 info = &memory_info[idx];
8422 if (!info->size) {
8423 continue;
8424 }
8425 if (VM_KERN_COUNT_WIRED == info->site) {
8426 top_wired = info->size;
8427 }
8428 if (VM_KERN_SITE_HIDE & info->flags) {
8429 continue;
8430 }
8431 if (!(VM_KERN_SITE_WIRED & info->flags)) {
8432 continue;
8433 }
8434 total += info->size;
8435 }
8436 total += zonestotal;
8437
8438 printf("vm_page_diagnose_check %qd of %qd, zones %qd, short 0x%qx\n",
8439 total, top_wired, zonestotal, top_wired - total);
8440
8441 kmem_free(kernel_map, memory_info_addr, memory_info_vmsize);
8442
8443 return kr;
8444 }
8445
8446 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8447
8448 #endif /* DEBUG || DEVELOPMENT */
8449
8450 kern_return_t
8451 mach_zone_force_gc(
8452 host_t host)
8453 {
8454 if (host == HOST_NULL) {
8455 return KERN_INVALID_HOST;
8456 }
8457
8458 #if DEBUG || DEVELOPMENT
8459 /* Callout to buffer cache GC to drop elements in the apfs zones */
8460 if (consider_buffer_cache_collect != NULL) {
8461 (void)(*consider_buffer_cache_collect)(0);
8462 }
8463 zone_gc(ZONE_GC_DRAIN);
8464 #endif /* DEBUG || DEVELOPMENT */
8465 return KERN_SUCCESS;
8466 }
8467
8468 zone_t
8469 zone_find_largest(uint64_t *zone_size)
8470 {
8471 zone_t largest_zone = 0;
8472 uint64_t largest_zone_size = 0;
8473 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8474 if (zone_size) {
8475 *zone_size = largest_zone_size;
8476 }
8477 return largest_zone;
8478 }
8479
8480 #endif /* !ZALLOC_TEST */
8481 #pragma mark zone creation, configuration, destruction
8482 #if !ZALLOC_TEST
8483
8484 static zone_t
8485 zone_init_defaults(zone_id_t zid)
8486 {
8487 zone_t z = &zone_array[zid];
8488
8489 z->z_wired_max = ~0u;
8490 z->collectable = true;
8491 z->expandable = true;
8492
8493 lck_spin_init(&z->z_lock, &zone_locks_grp, LCK_ATTR_NULL);
8494 STAILQ_INIT(&z->z_recirc);
8495 return z;
8496 }
8497
8498 static bool
8499 zone_is_initializing(zone_t z)
8500 {
8501 return !z->z_self && !z->z_destroyed;
8502 }
8503
8504 void
8505 zone_set_noexpand(zone_t zone, vm_size_t nelems)
8506 {
8507 if (!zone_is_initializing(zone)) {
8508 panic("%s: called after zone_create()", __func__);
8509 }
8510 zone->expandable = false;
8511 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8512 }
8513
8514 void
8515 zone_set_exhaustible(zone_t zone, vm_size_t nelems)
8516 {
8517 if (!zone_is_initializing(zone)) {
8518 panic("%s: called after zone_create()", __func__);
8519 }
8520 zone->expandable = false;
8521 zone->exhaustible = true;
8522 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8523 }
8524
8525 /**
8526 * @function zone_create_find
8527 *
8528 * @abstract
8529 * Finds an unused zone for the given name and element size.
8530 *
8531 * @param name the zone name
8532 * @param size the element size (including redzones, ...)
8533 * @param flags the flags passed to @c zone_create*
8534 * @param zid_inout the desired zone ID or ZONE_ID_ANY
8535 *
8536 * @returns a zone to initialize further.
8537 */
8538 static zone_t
8539 zone_create_find(
8540 const char *name,
8541 vm_size_t size,
8542 zone_create_flags_t flags,
8543 zone_id_t *zid_inout)
8544 {
8545 zone_id_t nzones, zid = *zid_inout;
8546 zone_t z;
8547
8548 simple_lock(&all_zones_lock, &zone_locks_grp);
8549
8550 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8551 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8552
8553 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8554 /*
8555 * The first time around, make sure the reserved zone IDs
8556 * have an initialized lock as zone_index_foreach() will
8557 * enumerate them.
8558 */
8559 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8560 zone_init_defaults(nzones++);
8561 }
8562
8563 os_atomic_store(&num_zones, nzones, release);
8564 }
8565
8566 if (zid != ZONE_ID_ANY) {
8567 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8568 panic("zone_create: invalid desired zone ID %d for %s",
8569 zid, name);
8570 }
8571 if (flags & ZC_DESTRUCTIBLE) {
8572 panic("zone_create: ID %d (%s) must be permanent", zid, name);
8573 }
8574 if (zone_array[zid].z_self) {
8575 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
8576 }
8577 z = &zone_array[zid];
8578 } else {
8579 if (flags & ZC_DESTRUCTIBLE) {
8580 /*
8581 * If possible, find a previously zdestroy'ed zone in the
8582 * zone_array that we can reuse.
8583 */
8584 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
8585 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
8586 z = &zone_array[i];
8587
8588 /*
8589 * If the zone name and the element size are the
8590 * same, we can just reuse the old zone struct.
8591 */
8592 if (strcmp(z->z_name, name) || zone_elem_size(z) != size) {
8593 continue;
8594 }
8595 bitmap_clear(zone_destroyed_bitmap, i);
8596 z->z_destroyed = false;
8597 z->z_self = z;
8598 zid = (zone_id_t)i;
8599 goto out;
8600 }
8601 }
8602
8603 zid = nzones++;
8604 z = zone_init_defaults(zid);
8605
8606 /*
8607 * The release barrier pairs with the acquire in
8608 * zone_index_foreach() and makes sure that enumeration loops
8609 * always see an initialized zone lock.
8610 */
8611 os_atomic_store(&num_zones, nzones, release);
8612 }
8613
8614 out:
8615 num_zones_in_use++;
8616 simple_unlock(&all_zones_lock);
8617
8618 *zid_inout = zid;
8619 return z;
8620 }
8621
8622 __abortlike
8623 static void
8624 zone_create_panic(const char *name, const char *f1, const char *f2)
8625 {
8626 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
8627 name, f1, f2);
8628 }
8629 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
8630 if ((flags) & forbidden_flag) { \
8631 zone_create_panic(name, #current_flag, #forbidden_flag); \
8632 }
8633
8634 /*
8635 * Adjusts the size of the element based on minimum size, alignment
8636 * and kasan redzones
8637 */
8638 static vm_size_t
8639 zone_elem_adjust_size(
8640 const char *name __unused,
8641 vm_size_t elem_size,
8642 zone_create_flags_t flags __unused,
8643 uint32_t *redzone __unused)
8644 {
8645 vm_size_t size;
8646 /*
8647 * Adjust element size for minimum size and pointer alignment
8648 */
8649 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
8650 if (size < ZONE_MIN_ELEM_SIZE) {
8651 size = ZONE_MIN_ELEM_SIZE;
8652 }
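	/*
	 * The "(x + align - 1) & -align" expression above is the usual
	 * power-of-two round-up: assuming ZONE_ALIGN_SIZE were 8, a 13-byte
	 * element would round up to 16 bytes, and anything smaller than
	 * ZONE_MIN_ELEM_SIZE is then bumped to that floor.
	 */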
8653
8654 #if KASAN_ZALLOC
8655 /*
8656 * Expand the zone allocation size to include the redzones.
8657 *
8658 * For page-multiple zones add a full guard page because they
8659 * likely require alignment.
8660 */
8661 uint32_t redzone_tmp;
8662 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU)) {
8663 redzone_tmp = 0;
8664 } else if ((size & PAGE_MASK) == 0) {
8665 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
8666 			panic("zone_create: zone %s can't provide more than PAGE_SIZE "
8667 "alignment", name);
8668 }
8669 redzone_tmp = PAGE_SIZE;
8670 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
8671 redzone_tmp = 0;
8672 } else {
8673 redzone_tmp = KASAN_GUARD_SIZE;
8674 }
8675 size += redzone_tmp * 2;
8676 if (redzone) {
8677 *redzone = redzone_tmp;
8678 }
8679 #endif
8680 return size;
8681 }
8682
8683 /*
8684  * Returns the allocation chunk size that has the least fragmentation
8685 */
8686 static vm_size_t
8687 zone_get_min_alloc_granule(
8688 vm_size_t elem_size,
8689 zone_create_flags_t flags)
8690 {
8691 vm_size_t alloc_granule = PAGE_SIZE;
8692 if (flags & ZC_PERCPU) {
8693 alloc_granule = PAGE_SIZE * zpercpu_count();
8694 if (PAGE_SIZE % elem_size > 256) {
8695 panic("zone_create: per-cpu zone has too much fragmentation");
8696 }
8697 } else if (flags & ZC_READONLY) {
8698 alloc_granule = PAGE_SIZE;
8699 } else if ((elem_size & PAGE_MASK) == 0) {
8700 /* zero fragmentation by definition */
8701 alloc_granule = elem_size;
8702 } else if (alloc_granule % elem_size == 0) {
8703 /* zero fragmentation by definition */
8704 } else {
8705 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
8706 vm_size_t alloc_tmp = PAGE_SIZE;
8707 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
8708 /*
8709 * Increase chunk size to 48K for sizes larger than 4K on 16k
8710 		 * machines, so as to reduce internal fragmentation for kalloc
8711 * zones with sizes 12K and 24K.
8712 */
8713 #if __ARM_16K_PG__
8714 if (elem_size > 4 * 1024) {
8715 max_chunk_size = 48 * 1024;
8716 }
8717 #endif
8718 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
8719 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
8720 if (frag_tmp < frag) {
8721 frag = frag_tmp;
8722 alloc_granule = alloc_tmp;
8723 }
8724 }
8725 }
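	/*
	 * Illustrative example (not a real zone): a 3KB element on 4KB
	 * pages wastes 1KB (25%) per page and 2KB of 8KB (25%) for a
	 * two-page chunk, but a 12KB chunk fits four elements exactly, so
	 * the loop above would settle on 12KB, subject to max_chunk_size.
	 */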
8726 return alloc_granule;
8727 }
8728
8729 vm_size_t
8730 zone_get_foreign_alloc_size(
8731 const char *name __unused,
8732 vm_size_t elem_size,
8733 zone_create_flags_t flags,
8734 uint16_t min_pages)
8735 {
8736 vm_size_t adjusted_size = zone_elem_adjust_size(name, elem_size, flags,
8737 NULL);
8738 vm_size_t alloc_granule = zone_get_min_alloc_granule(adjusted_size,
8739 flags);
8740 vm_size_t min_size = min_pages * PAGE_SIZE;
8741 /*
8742 * Round up min_size to a multiple of alloc_granule
8743 */
8744 return ((min_size + alloc_granule - 1) / alloc_granule)
8745 * alloc_granule;
8746 }
8747
8748 zone_t
8749 zone_create_ext(
8750 const char *name,
8751 vm_size_t size,
8752 zone_create_flags_t flags,
8753 zone_id_t zid,
8754 void (^extra_setup)(zone_t))
8755 {
8756 vm_size_t alloc;
8757 uint32_t redzone;
8758 zone_t z;
8759 zone_security_flags_t *zsflags;
8760
8761 if (size > ZONE_MAX_ALLOC_SIZE) {
8762 panic("zone_create: element size too large: %zd", (size_t)size);
8763 }
8764
8765 if (size < 2 * sizeof(vm_size_t)) {
8766 /* Elements are too small for kasan. */
8767 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
8768 }
8769
8770 size = zone_elem_adjust_size(name, size, flags, &redzone);
8771 /*
8772 * Allocate the zone slot, return early if we found an older match.
8773 */
8774 z = zone_create_find(name, size, flags, &zid);
8775 if (__improbable(z->z_self)) {
8776 /* We found a zone to reuse */
8777 return z;
8778 }
8779
8780 /*
8781 * Initialize the zone properly.
8782 */
8783
8784 /*
8785 * If the kernel is post lockdown, copy the zone name passed in.
8786 * Else simply maintain a pointer to the name string as it can only
8787 * be a core XNU zone (no unloadable kext exists before lockdown).
8788 */
8789 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
8790 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
8791 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
8792 strlcpy(buf, name, nsz);
8793 z->z_name = buf;
8794 } else {
8795 z->z_name = name;
8796 }
8797 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
8798 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
8799 } else {
8800 /*
8801 * zone_init() hasn't run yet, use the storage provided by
8802 * zone_stats_startup(), and zone_init() will replace it
8803 * with the final value once the PERCPU zone exists.
8804 */
8805 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
8806 }
8807
8808 alloc = zone_get_min_alloc_granule(size, flags);
8809
8810 z->z_elem_size = (uint16_t)size;
8811 z->z_chunk_pages = (uint16_t)atop(alloc);
8812 if (flags & ZC_PERCPU) {
8813 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / z->z_elem_size);
8814 } else {
8815 z->z_chunk_elems = (uint16_t)(alloc / z->z_elem_size);
8816 }
8817 if (zone_element_idx(zone_element_encode(0,
8818 z->z_chunk_elems - 1)) != z->z_chunk_elems - 1) {
8819 panic("zone_element_encode doesn't work for zone [%s]", name);
8820 }
8821
8822 #if KASAN_ZALLOC
8823 z->z_kasan_redzone = redzone;
8824 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
8825 z->kasan_fakestacks = true;
8826 }
8827 #endif
8828
8829 /*
8830 * Handle KPI flags
8831 */
8832 zsflags = &zone_security_array[zid];
8833 /*
8834 * Some zones like ipc ports and procs rely on sequestering for
8835 * correctness, so explicitly turn on sequestering despite the
8836 * configuration in zsecurity_options.
8837 */
8838 if (flags & ZC_SEQUESTER) {
8839 zsflags->z_va_sequester = true;
8840 }
8841
8842 /* ZC_CACHING applied after all configuration is done */
8843 if (flags & ZC_NOCACHING) {
8844 z->z_nocaching = true;
8845 }
8846
8847 if (flags & ZC_READONLY) {
8848 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
8849 #if ZSECURITY_CONFIG(READ_ONLY)
8850 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
8851 zsflags->z_va_sequester = true;
8852 #endif
8853 zone_ro_elem_size[zid] = (uint16_t)size;
8854 assert(size <= PAGE_SIZE);
8855 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
8856 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
8857 (uint32_t)size, zone_heap_name(z), z->z_name);
8858 }
8859 }
8860
8861 if (flags & ZC_PERCPU) {
8862 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_ALLOW_FOREIGN);
8863 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
8864 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
8865 z->z_percpu = true;
8866 z->z_nogzalloc = true;
8867 }
8868 if (flags & ZC_NOGC) {
8869 z->collectable = false;
8870 }
8871 /*
8872 * Handle ZC_NOENCRYPT from xnu only
8873 */
8874 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
8875 zsflags->z_noencrypt = true;
8876 }
8877 if (flags & ZC_ALIGNMENT_REQUIRED) {
8878 z->alignment_required = true;
8879 }
8880 if (flags & (ZC_NOGZALLOC | ZC_READONLY)) {
8881 z->z_nogzalloc = true;
8882 }
8883 if (flags & ZC_NOCALLOUT) {
8884 z->no_callout = true;
8885 }
8886 if (flags & ZC_DESTRUCTIBLE) {
8887 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_ALLOW_FOREIGN);
8888 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
8889 z->z_destructible = true;
8890 }
8891 /*
8892 * Handle Internal flags
8893 */
8894 #if CONFIG_PROB_GZALLOC
8895 if (flags & (ZC_PGZ_USE_GUARDS | ZC_KALLOC_HEAP | ZC_KALLOC_TYPE)) {
8896 /*
8897 * Try to turn on guard pages only for zones
8898 * with a chance of OOB.
8899 */
8900 z->z_pgz_use_guards = true;
8901 z->z_pgz_oob_offs = (uint16_t)(alloc -
8902 z->z_chunk_elems * z->z_elem_size);
8903 }
8904 #endif
8905 if (!(flags & ZC_NOTBITAG)) {
8906 z->z_tbi_tag = true;
8907 }
8908 if (flags & ZC_KALLOC_TYPE) {
8909 zsflags->z_kalloc_type = true;
8910 }
8911 if (flags & ZC_ALLOW_FOREIGN) {
8912 zone_create_assert_not_both(name, flags,
8913 ZC_ALLOW_FOREIGN, ZC_PGZ_USE_GUARDS);
8914 zsflags->z_allows_foreign = true;
8915 }
8916 if (flags & ZC_VM) {
8917 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
8918 zsflags->z_va_sequester = true;
8919 }
8920 if (flags & ZC_KASAN_NOQUARANTINE) {
8921 z->kasan_noquarantine = true;
8922 }
8923 /* ZC_KASAN_NOREDZONE already handled */
8924
8925 /*
8926 * Then if there's extra tuning, do it
8927 */
8928 if (extra_setup) {
8929 extra_setup(z);
8930 }
8931
8932 /*
8933 * Configure debugging features
8934 */
8935 #if CONFIG_GZALLOC
8936 if (!z->z_nogzalloc && (flags & ZC_VM) == 0) {
8937 gzalloc_zone_init(z); /* might set z->z_gzalloc_tracked */
8938 if (z->z_gzalloc_tracked) {
8939 z->z_nocaching = true;
8940 }
8941 }
8942 #endif
8943 #if CONFIG_PROB_GZALLOC
8944 if (!z->z_gzalloc_tracked && (flags & (ZC_READONLY | ZC_PERCPU)) == 0) {
8945 pgz_zone_init(z);
8946 }
8947 #endif
8948 #if ZONE_ENABLE_LOGGING
8949 if (!z->z_gzalloc_tracked && startup_phase >= STARTUP_SUB_ZALLOC) {
8950 /*
8951 * Check for and set up zone leak detection
8952 * if requested via boot-args.
8953 */
8954 zone_setup_logging(z);
8955 }
8956 #endif /* ZONE_ENABLE_LOGGING */
8957
8958 #if VM_TAG_SIZECLASSES
8959 if (!z->z_gzalloc_tracked && (zsflags->z_kheap_id || zsflags->z_kalloc_type)
8960 && zone_tagging_on) {
8961 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
8962 static uint16_t sizeclass_idx;
8963 z->z_uses_tags = true;
8964 z->z_tags_inline = (((page_size + size - 1) / size) <=
8965 (sizeof(uint32_t) / sizeof(uint16_t)));
8966 if (zsflags->z_kheap_id == KHEAP_ID_DEFAULT) {
8967 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
8968 z->z_tags_sizeclass = sizeclass_idx++;
8969 } else {
8970 uint16_t i = 0;
8971 for (; i < sizeclass_idx; i++) {
8972 if (size == zone_tags_sizeclasses[i]) {
8973 z->z_tags_sizeclass = i;
8974 break;
8975 }
8976 }
8977 /*
8978 * Size class wasn't found, add it to zone_tags_sizeclasses
8979 */
8980 if (i == sizeclass_idx) {
8981 assert(i < VM_TAG_SIZECLASSES);
8982 zone_tags_sizeclasses[i] = (uint16_t)size;
8983 z->z_tags_sizeclass = sizeclass_idx++;
8984 }
8985 }
8986 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
8987 }
8988 #endif
8989
8990 /*
8991 * Finally, fixup properties based on security policies, boot-args, ...
8992 */
8993 #if ZSECURITY_CONFIG(SUBMAP_USER_DATA)
8994 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
8995 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
8996 zsflags->z_va_sequester = false;
8997 }
8998 #endif
8999
9000 if ((flags & ZC_CACHING) && !z->z_nocaching) {
9001 /*
9002 * If zcache hasn't been initialized yet, remember our decision.
9003 *
9004 * zone_enable_caching() will be called again by
9005 * zone_cache_bootstrap(), while the system is still single
9006 * threaded, to build the missing caches.
9007 */
9008 if (__probable(zc_magazine_zone)) {
9009 zone_enable_caching(z);
9010 } else {
9011 z->z_pcpu_cache =
9012 __zpcpu_mangle_for_boot(&zone_cache_startup[zid]);
9013 }
9014 }
9015
9016 zone_lock(z);
9017 z->z_self = z;
9018 zone_unlock(z);
9019
9020 return z;
9021 }
9022
9023 __startup_func
9024 void
9025 zone_create_startup(struct zone_create_startup_spec *spec)
9026 {
9027 zone_t z;
9028
9029 z = zone_create_ext(spec->z_name, spec->z_size,
9030 spec->z_flags, spec->z_zid, spec->z_setup);
9031 if (spec->z_var) {
9032 *spec->z_var = z;
9033 }
9034 }
9035
9036 /*
9037 * The first 4 fields of a zone_view and a zone alias, so that the zone_or_view_t
9038 * union works. Trust, but verify.
9039 */
9040 #define zalloc_check_zov_alias(f1, f2) \
9041 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9042 zalloc_check_zov_alias(z_self, zv_zone);
9043 zalloc_check_zov_alias(z_stats, zv_stats);
9044 zalloc_check_zov_alias(z_name, zv_name);
9045 zalloc_check_zov_alias(z_views, zv_next);
9046 #undef zalloc_check_zov_alias
9047
9048 __startup_func
9049 void
9050 zone_view_startup_init(struct zone_view_startup_spec *spec)
9051 {
9052 struct kalloc_heap *heap = NULL;
9053 zone_view_t zv = spec->zv_view;
9054 zone_t z;
9055 zone_security_flags_t zsflags;
9056
9057 switch (spec->zv_heapid) {
9058 case KHEAP_ID_DEFAULT:
9059 panic("%s: Use KALLOC_TYPE_DEFINE for zone view %s instead "
9060 "of ZONE_VIEW_DEFINE as it is from default kalloc heap",
9061 __func__, zv->zv_name);
9062 __builtin_unreachable();
9063 case KHEAP_ID_DATA_BUFFERS:
9064 heap = KHEAP_DATA_BUFFERS;
9065 break;
9066 case KHEAP_ID_KEXT:
9067 heap = KHEAP_KEXT;
9068 break;
9069 default:
9070 heap = NULL;
9071 }
9072
9073 if (heap) {
9074 z = kalloc_heap_zone_for_size(heap, spec->zv_size);
9075 } else {
9076 z = *spec->zv_zone;
9077 assert(spec->zv_size <= zone_elem_size(z));
9078 }
9079
9080 assert(z);
9081
9082 zv->zv_zone = z;
9083 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9084 zv->zv_next = z->z_views;
9085 zsflags = zone_security_config(z);
9086 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9087 /*
9088 * Count the raw view for zones not in a heap;
9089 * kalloc_heap_init() already counts it for its members.
9090 */
9091 zone_view_count += 2;
9092 } else {
9093 zone_view_count += 1;
9094 }
9095 z->z_views = zv;
9096 }
9097
9098 zone_t
9099 zone_create(
9100 const char *name,
9101 vm_size_t size,
9102 zone_create_flags_t flags)
9103 {
9104 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9105 }
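/*
 * Minimal usage sketch (not part of the build; mirrors how the tests at
 * the end of this file exercise the API):
 *
 *	static zone_t my_zone;
 *
 *	my_zone = zone_create("my_subsystem.obj", sizeof(struct my_obj),
 *	    ZC_ZFREE_CLEARMEM);
 *	struct my_obj *o = zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(my_zone, o);
 *
 * "my_subsystem.obj" and struct my_obj are hypothetical names used purely
 * for illustration.
 */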
9106
9107 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9108
9109 zone_id_t
9110 zone_create_ro(
9111 const char *name,
9112 vm_size_t size,
9113 zone_create_flags_t flags,
9114 zone_create_ro_id_t zc_ro_id)
9115 {
9116 assert(zc_ro_id <= ZC_RO_ID__LAST);
9117 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9118 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9119 return reserved_zid;
9120 }
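/*
 * Read-only zone usage sketch (illustrative only; zone_ro_basic_test_run()
 * below shows the authoritative pattern). Callers hold on to the returned
 * zone id rather than a zone_t:
 *
 *	zone_id_t zid = zone_create_ro("my_ro_zone", sizeof(uint32_t),
 *	    ZC_NONE, MY_RO_ID);
 *	uint32_t *p = zalloc_ro(zid, Z_WAITOK);
 *	uint32_t v = 4;
 *	zalloc_ro_mut(zid, p, 0, &v, sizeof(v));
 *	zfree_ro(zid, p);
 *
 * "my_ro_zone", ZC_NONE and MY_RO_ID stand in for a real zone name,
 * creation flags and a reserved zone_create_ro_id_t value.
 */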
9121
9122 zone_t
9123 zinit(
9124 vm_size_t size, /* the size of an element */
9125 vm_size_t max, /* maximum memory to use */
9126 vm_size_t alloc __unused, /* allocation size */
9127 const char *name) /* a name for the zone */
9128 {
9129 zone_t z = zone_create(name, size, ZC_DESTRUCTIBLE);
9130 z->z_wired_max = zone_alloc_pages_for_nelems(z, max / size);
9131 return z;
9132 }
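/*
 * Legacy interface sketch (illustrative; zone_basic_test_run() below uses
 * exactly this shape). zinit() is a thin wrapper over zone_create() that
 * also derives z_wired_max from the requested maximum memory:
 *
 *	zone_t z = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t),
 *	    sizeof(uint64_t), "test_zone_sysctl");
 *	void *e = zalloc_flags(z, Z_WAITOK | Z_NOFAIL);
 *	zfree(z, e);
 *	zdestroy(z);
 */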
9133
9134 void
9135 zdestroy(zone_t z)
9136 {
9137 unsigned int zindex = zone_index(z);
9138 zone_security_flags_t zsflags = zone_security_array[zindex];
9139
9140 current_thread()->options |= TH_OPT_ZONE_PRIV;
9141 lck_mtx_lock(&zone_gc_lock);
9142
9143 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9144
9145 lck_mtx_unlock(&zone_gc_lock);
9146 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9147
9148 #if CONFIG_GZALLOC
9149 if (__improbable(z->z_gzalloc_tracked)) {
9150 /* If the zone is gzalloc managed dump all the elements in the free cache */
9151 gzalloc_empty_free_cache(z);
9152 }
9153 #endif
9154
9155 zone_lock(z);
9156
9157 if (!zone_submap_is_sequestered(zsflags)) {
9158 while (!zone_pva_is_null(z->z_pageq_va)) {
9159 struct zone_page_metadata *meta;
9160 vm_offset_t free_addr;
9161
9162 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9163 meta = zone_meta_queue_pop_native(z, &z->z_pageq_va, &free_addr);
9164 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9165 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9166 zone_unlock(z);
9167 kmem_free(zone_submap(zsflags), free_addr, ptoa(z->z_chunk_pages));
9168 zone_lock(z);
9169 }
9170 }
9171
9172 #if !KASAN_ZALLOC
9173 /* Assert that all counts are zero */
9174 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9175 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9176 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9177 zone_heap_name(z), z->z_name);
9178 }
9179
9180 /* consistency check: make sure everything is indeed empty */
9181 assert(zone_pva_is_null(z->z_pageq_empty));
9182 assert(zone_pva_is_null(z->z_pageq_partial));
9183 assert(zone_pva_is_null(z->z_pageq_full));
9184 if (!zone_submap_is_sequestered(zsflags)) {
9185 assert(zone_pva_is_null(z->z_pageq_va));
9186 }
9187 #endif
9188
9189 zone_unlock(z);
9190
9191 simple_lock(&all_zones_lock, &zone_locks_grp);
9192
9193 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9194 /* Mark the zone as empty in the bitmap */
9195 bitmap_set(zone_destroyed_bitmap, zindex);
9196 num_zones_in_use--;
9197 assert(num_zones_in_use > 0);
9198
9199 simple_unlock(&all_zones_lock);
9200 }
9201
9202 #endif /* !ZALLOC_TEST */
9203 #pragma mark zalloc module init
9204 #if !ZALLOC_TEST
9205
9206 /*
9207 * Initialize the "zone of zones" which uses fixed memory allocated
9208 * earlier in memory initialization. zone_bootstrap is called
9209 * before zone_init.
9210 */
9211 __startup_func
9212 void
9213 zone_bootstrap(void)
9214 {
9215 #if DEBUG || DEVELOPMENT
9216 #if __x86_64__
9217 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9218 /*
9219 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
9220 * the fp faulting code gets triggered and deadlocks.
9221 */
9222 zone_caching_disabled = 1;
9223 }
9224 #endif /* __x86_64__ */
9225 #endif /* DEBUG || DEVELOPMENT */
9226
9227 /* Validate struct zone_packed_virtual_address expectations */
9228 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9229 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9230 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9231 }
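/*
 * Worked example (numbers illustrative): with 16K pages (PAGE_SHIFT == 14),
 * the 31-bit packed page number covers up to 14 + 31 = 45 significant
 * kernel VA bits; a platform exposing more significant bits than that
 * would trip the panic above.
 */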
9232
9233 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9234
9235 /*
9236 * Initialize random used to scramble early allocations
9237 */
9238 zpercpu_foreach_cpu(cpu) {
9239 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9240 }
9241
9242 #if CONFIG_PROB_GZALLOC
9243 /*
9244 * Set pgz_sample_counter on the boot CPU so that we do not sample
9245 * any allocation until PGZ has been properly set up (in pgz_init()).
9246 */
9247 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9248 #endif /* CONFIG_PROB_GZALLOC */
9249
9250 /*
9251 * The KASAN quarantine for kalloc doesn't understand heaps
9252 * and trips the heap confusion panics. At the end of the day,
9253 * all these security measures do double duty with KASAN.
9254 *
9255 * On 32bit kernels, these protections are just too expensive.
9256 */
9257 #if !defined(__LP64__) || KASAN_ZALLOC
9258 zsecurity_options &= ~ZSECURITY_OPTIONS_KERNEL_DATA_MAP;
9259 #endif
9260
9261 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9262 /*
9263 * Randomly assign zones to one of the 4 general submaps,
9264 * and pick whether they allocate from the beginning
9265 * or the end of it.
9266 *
9267 * A lot of OOB exploitation relies on precise interleaving
9268 * of specific types in the heap.
9269 *
9270 * Whoops, you can't guarantee that anymore.
9271 */
9272 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9273 uint32_t r = zalloc_random_uniform(0,
9274 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9275
9276 zone_security_array[i].z_submap_from_end = (r & 1);
9277 zone_security_array[i].z_submap_idx += (r >> 1);
9278 }
9279 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9280
9281 thread_call_setup_with_options(&zone_expand_callout,
9282 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9283 THREAD_CALL_OPTIONS_ONCE);
9284
9285 thread_call_setup_with_options(&zone_defrag_callout,
9286 zone_defrag_async, NULL, THREAD_CALL_PRIORITY_USER,
9287 THREAD_CALL_OPTIONS_ONCE);
9288 }
9289
9290 #define ZONE_GUARD_SIZE (64UL << 10)
9291
9292 #if __LP64__
9293 static inline vm_offset_t
9294 zone_restricted_va_max(void)
9295 {
9296 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
9297 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
9298
9299 return trunc_page(MIN(compressor_max, vm_page_max));
9300 }
9301 #endif
9302
9303 __startup_func
9304 static void
9305 zone_tunables_fixup(void)
9306 {
9307 int wdt = 0;
9308
9309 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9310 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9311 PE_parse_boot_argn("pgz1", NULL, 0)) {
9312 /*
9313 * if pgz1= was used, but pgz= was not,
9314 * then the more specific pgz1 takes precedence.
9315 */
9316 pgz_all = false;
9317 }
9318 #endif
9319
9320 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9321 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9322 }
9323 if (zc_magazine_size > PAGE_SIZE / ZONE_MIN_ELEM_SIZE) {
9324 zc_magazine_size = (uint16_t)(PAGE_SIZE / ZONE_MIN_ELEM_SIZE);
9325 }
9326 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9327 !PE_parse_boot_argn("zet", NULL, 0)) {
9328 zone_exhausted_timeout = -1;
9329 }
9330 }
9331 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9332
9333 __startup_func
9334 static vm_size_t
9335 zone_phys_size_max(void)
9336 {
9337 vm_size_t zsize;
9338 vm_size_t zsizearg;
9339
9340 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
9341 zsize = zsizearg * (1024ULL * 1024);
9342 } else {
9343 /* Set target zone size as 1/4 of physical memory */
9344 zsize = (vm_size_t)(sane_size >> 2);
9345 #if defined(__LP64__)
9346 zsize += zsize >> 1;
9347 #endif /* __LP64__ */
9348 }
9349
9350 if (zsize < CONFIG_ZONE_MAP_MIN) {
9351 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
9352 }
9353 if (zsize > sane_size >> 1) {
9354 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
9355 }
9356 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
9357 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
9358 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
9359 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
9360 zsize = ZONE_MAP_MAX;
9361 }
9362
9363 return (vm_size_t)trunc_page(zsize);
9364 }
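/*
 * Worked example (illustrative, ignoring the zsize= boot-arg): with 8G of
 * usable memory, zsize starts at 2G (1/4 of RAM), grows to 3G on LP64,
 * stays under the half-of-RAM clamp (4G), and is then only limited by the
 * platform ZONE_MAP_MAX.
 */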
9365
9366 __startup_func
9367 static struct zone_map_range
9368 zone_init_allocate_va(vm_map_address_t addr, vm_size_t size, bool random)
9369 {
9370 int vm_alloc_flags = VM_FLAGS_ANYWHERE;
9371 struct zone_map_range r;
9372 kern_return_t kr;
9373 vm_map_entry_t entry;
9374
9375 if (random) {
9376 vm_alloc_flags |= VM_FLAGS_RANDOM_ADDR;
9377 }
9378
9379 vm_object_reference(kernel_object);
9380
9381 kr = vm_map_enter(kernel_map, &addr, size, 0,
9382 vm_alloc_flags, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_ZONE,
9383 kernel_object, addr, FALSE, VM_PROT_NONE, VM_PROT_NONE,
9384 VM_INHERIT_NONE);
9385
9386 if (KERN_SUCCESS != kr) {
9387 panic("vm_map_enter(0x%zx) failed: %d", (size_t)size, kr);
9388 }
9389
9390 vm_map_lookup_entry(kernel_map, addr, &entry);
9391 VME_OFFSET_SET(entry, addr);
9392
9393 r.min_address = (vm_offset_t)addr;
9394 r.max_address = (vm_offset_t)addr + size;
9395 return r;
9396 }
9397
9398 __startup_func
9399 static void
9400 zone_submap_init(
9401 vm_offset_t *submap_min,
9402 zone_submap_idx_t idx,
9403 uint64_t zone_sub_map_numer,
9404 uint64_t *remaining_denom,
9405 vm_offset_t *remaining_size)
9406 {
9407 vm_map_address_t addr;
9408 vm_offset_t submap_start, submap_end;
9409 vm_size_t submap_size;
9410 vm_map_t submap;
9411 vm_prot_t prot = VM_PROT_DEFAULT;
9412 kern_return_t kr;
9413
9414 submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9415 *remaining_denom);
9416 submap_start = *submap_min;
9417 submap_end = submap_start + submap_size;
9418
9419 #if defined(__LP64__)
9420 if (idx == Z_SUBMAP_IDX_VM) {
9421 vm_offset_t restricted_va_max = zone_restricted_va_max();
9422 if (submap_end > restricted_va_max) {
9423 #if DEBUG || DEVELOPMENT
9424 printf("zone_init: submap[%d] clipped to %zdM of %zdM\n", idx,
9425 (size_t)(restricted_va_max - submap_start) >> 20,
9426 (size_t)submap_size >> 20);
9427 #endif /* DEBUG || DEVELOPMENT */
9428 *remaining_size -= submap_end - restricted_va_max;
9429 submap_end = restricted_va_max;
9430 submap_size = restricted_va_max - submap_start;
9431 }
9432
9433 vm_packing_verify_range("vm_compressor",
9434 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9435 vm_packing_verify_range("vm_page",
9436 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9437 }
9438 #endif /* defined(__LP64__) */
9439
9440 zone_kmem_suballoc(*submap_min, submap_size,
9441 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9442 VM_KERN_MEMORY_ZONE, &submap);
9443
9444 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9445 zone_info.zi_map_range[ZONE_ADDR_READONLY].min_address = submap_start;
9446 zone_info.zi_map_range[ZONE_ADDR_READONLY].max_address = submap_end;
9447 prot = VM_PROT_NONE;
9448 }
9449
9450 addr = submap_start;
9451 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9452 VM_FLAGS_FIXED | VM_FLAGS_PERMANENT,
9453 VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_ZONE,
9454 kernel_object, addr, FALSE, prot, prot, VM_INHERIT_NONE);
9455 if (kr != KERN_SUCCESS) {
9456 panic("ksubmap[%d]: failed to make first entry (%d)", idx, kr);
9457 }
9458
9459 addr = submap_end - ZONE_GUARD_SIZE / 2;
9460 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9461 VM_FLAGS_FIXED | VM_FLAGS_PERMANENT,
9462 VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_ZONE,
9463 kernel_object, addr, FALSE, prot, prot, VM_INHERIT_NONE);
9464 if (kr != KERN_SUCCESS) {
9465 panic("ksubmap[%d]: failed to make last entry (%d)", idx, kr);
9466 }
9467
9468 #if DEBUG || DEVELOPMENT
9469 printf("zone_init: submap[%d] %p:%p (%zuM)\n",
9470 idx, (void *)submap_start, (void *)submap_end,
9471 (size_t)submap_size >> 20);
9472 #endif /* DEBUG || DEVELOPMENT */
9473
9474 zone_submaps[idx] = submap;
9475 *submap_min = submap_end;
9476 *remaining_size -= submap_size;
9477 *remaining_denom -= zone_sub_map_numer;
9478 }
9479
9480 /*
9481 * Allocate metadata array and migrate foreign initial metadata.
9482 *
9483 * So that foreign pages and native pages have the same scheme,
9484 * we allocate VA space that covers both foreign and native pages.
9485 */
9486 __startup_func
9487 static void
9488 zone_metadata_init(void)
9489 {
9490 struct zone_map_range r0 = zone_info.zi_map_range[0];
9491 struct zone_map_range r1 = zone_info.zi_map_range[1];
9492 struct zone_map_range mr, br;
9493 vm_size_t meta_size, bits_size, foreign_base;
9494 vm_offset_t hstart, hend;
9495
9496 if (r0.min_address > r1.min_address) {
9497 r0 = zone_info.zi_map_range[1];
9498 r1 = zone_info.zi_map_range[0];
9499 }
9500
9501 meta_size = round_page(atop(r1.max_address - r0.min_address) *
9502 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
9503
9504 /*
9505 * Allocations can't be smaller than 8 bytes, which means at most 128 bitmap
9506 * bits (16 bytes) per 1K of physical memory, i.e. 16M per 1G.
9507 *
9508 * Let's preallocate for the worst to avoid weird panics.
9509 */
9510 bits_size = round_page(16 * (ptoa(zone_pages_wired_max) >> 10));
9511
9512 /*
9513 * Compute the size of the "hole" in the middle of the range.
9514 *
9515 * If it is smaller than 256k, just leave it be, with this layout:
9516 *
9517 * [G][ r0 meta ][ hole ][ r1 meta ][ bits ][G]
9518 *
9519 * else punch a hole with guard pages around the hole, and place the
9520 * bits in the hole if it fits, or after r1 otherwise, yielding either
9521 * of the following layouts:
9522 *
9523 * |__________________hend____________|
9524 * |__hstart_| |
9525 * [G][ r0 meta ][ bits ][G]..........[G][ r1 meta ][G]
9526 * [G][ r0 meta ][G]..................[G][ r1 meta ][ bits ][G]
9527 */
9528 hstart = round_page(atop(r0.max_address - r0.min_address) *
9529 sizeof(struct zone_page_metadata));
9530 hend = trunc_page(atop(r1.min_address - r0.min_address) *
9531 sizeof(struct zone_page_metadata));
9532
9533 if (hstart >= hend || hend - hstart < (256ul << 10)) {
9534 mr = zone_kmem_suballoc(0, meta_size + bits_size,
9535 VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR,
9536 VM_KERN_MEMORY_ZONE, &zone_meta_submaps[0]);
9537 mr.min_address += ZONE_GUARD_SIZE;
9538 mr.max_address -= ZONE_GUARD_SIZE;
9539 br.max_address = mr.max_address;
9540 mr.max_address -= bits_size;
9541 br.min_address = mr.max_address;
9542
9543 #if DEBUG || DEVELOPMENT
9544 printf("zone_init: metadata %p:%p (%zuK)\n",
9545 (void *)mr.min_address, (void *)mr.max_address,
9546 (size_t)zone_range_size(&mr) >> 10);
9547 printf("zone_init: metabits %p:%p (%zuK)\n",
9548 (void *)br.min_address, (void *)br.max_address,
9549 (size_t)zone_range_size(&br) >> 10);
9550 #endif /* DEBUG || DEVELOPMENT */
9551 } else {
9552 vm_size_t size, alloc_size = meta_size;
9553 vm_offset_t base;
9554 bool bits_in_middle = true;
9555
9556 if (hend - hstart - 2 * ZONE_GUARD_SIZE < bits_size) {
9557 alloc_size += bits_size;
9558 bits_in_middle = false;
9559 }
9560
9561 mr = zone_init_allocate_va(0, alloc_size, true);
9562
9563 base = mr.min_address;
9564 size = ZONE_GUARD_SIZE + hstart + ZONE_GUARD_SIZE;
9565 if (bits_in_middle) {
9566 size += bits_size;
9567 br.min_address = base + ZONE_GUARD_SIZE + hstart;
9568 br.max_address = br.min_address + bits_size;
9569 }
9570 zone_kmem_suballoc(base, size,
9571 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9572 VM_KERN_MEMORY_ZONE, &zone_meta_submaps[0]);
9573
9574 base += size;
9575 size = mr.min_address + hend - base;
9576 kmem_free(kernel_map, base, size);
9577
9578 base = mr.min_address + hend;
9579 size = mr.max_address - base;
9580 zone_kmem_suballoc(base, size,
9581 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_FLAGS_PERMANENT,
9582 VM_KERN_MEMORY_ZONE, &zone_meta_submaps[1]);
9583
9584 mr.min_address += ZONE_GUARD_SIZE;
9585 mr.max_address -= ZONE_GUARD_SIZE;
9586 if (!bits_in_middle) {
9587 br.max_address = mr.max_address;
9588 mr.max_address -= bits_size;
9589 br.min_address = mr.max_address;
9590 }
9591
9592 #if DEBUG || DEVELOPMENT
9593 printf("zone_init: metadata0 %p:%p (%zuK)\n",
9594 (void *)mr.min_address, (void *)(mr.min_address + hstart),
9595 (size_t)hstart >> 10);
9596 printf("zone_init: metadata1 %p:%p (%zuK)\n",
9597 (void *)(mr.min_address + hend), (void *)mr.max_address,
9598 (size_t)(zone_range_size(&mr) - hend) >> 10);
9599 printf("zone_init: metabits %p:%p (%zuK)\n",
9600 (void *)br.min_address, (void *)br.max_address,
9601 (size_t)zone_range_size(&br) >> 10);
9602 #endif /* DEBUG || DEVELOPMENT */
9603 }
9604
9605 br.min_address = (br.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9606 br.max_address = br.max_address & -ZBA_CHUNK_SIZE;
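/*
 * The two statements above are the usual power-of-two alignment idiom
 * (assuming ZBA_CHUNK_SIZE is a power of two): round min_address up and
 * max_address down to a ZBA_CHUNK_SIZE boundary. For example, with a 4K
 * chunk size, 0x1234 rounds up to 0x2000 and down to 0x1000.
 */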
9607
9608 zone_info.zi_meta_range = mr;
9609 zone_info.zi_bits_range = br;
9610
9611 /*
9612 * Migrate the original static metadata into its new location.
9613 */
9614 struct zone_page_metadata *early_meta = zone_foreign_meta_array_startup;
9615
9616 if (zone_early_steal.min_address) {
9617 early_meta = (void *)zone_early_steal.min_address;
9618 }
9619
9620 zone_info.zi_meta_base = (struct zone_page_metadata *)mr.min_address -
9621 zone_pva_from_addr(r0.min_address).packed_address;
9622 foreign_base = zone_info.zi_map_range[ZONE_ADDR_FOREIGN].min_address;
9623 zone_meta_populate(foreign_base, zone_foreign_size());
9624 memcpy(zone_meta_from_addr(foreign_base), early_meta,
9625 atop(zone_foreign_size()) * sizeof(struct zone_page_metadata));
9626
9627 if (zone_early_steal.min_address) {
9628 pmap_remove(kernel_pmap, zone_early_steal.min_address,
9629 zone_early_steal.max_address);
9630 }
9631
9632 zba_populate(0);
9633 memcpy(zba_base_header(), zba_chunk_startup,
9634 sizeof(zba_chunk_startup));
9635 }
9636
9637 /*
9638 * Global initialization of Zone Allocator.
9639 * Runs after zone_bootstrap.
9640 */
9641 __startup_func
9642 static void
9643 zone_init(void)
9644 {
9645 vm_size_t zone_map_size;
9646 vm_size_t remaining_size;
9647 vm_offset_t submap_min = 0;
9648 uint64_t denom = 0;
9649 uint32_t submap_count = 0;
9650 uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
9651 #if ZSECURITY_CONFIG(READ_ONLY)
9652 [Z_SUBMAP_IDX_VM] = 15,
9653 [Z_SUBMAP_IDX_READ_ONLY] = 5,
9654 #else
9655 [Z_SUBMAP_IDX_VM] = 20,
9656 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
9657 #if ZSECURITY_CONFIG(SUBMAP_USER_DATA) && ZSECURITY_CONFIG(SAD_FENG_SHUI)
9658 [Z_SUBMAP_IDX_GENERAL_0] = 15,
9659 [Z_SUBMAP_IDX_GENERAL_1] = 15,
9660 [Z_SUBMAP_IDX_GENERAL_2] = 15,
9661 [Z_SUBMAP_IDX_GENERAL_3] = 15,
9662 [Z_SUBMAP_IDX_DATA] = 20,
9663 #elif ZSECURITY_CONFIG(SUBMAP_USER_DATA)
9664 [Z_SUBMAP_IDX_GENERAL_0] = 40,
9665 [Z_SUBMAP_IDX_DATA] = 40,
9666 #elif ZSECURITY_CONFIG(SAD_FENG_SHUI)
9667 #error invalid configuration: SAD_FENG_SHUI requires SUBMAP_USER_DATA
9668 #else
9669 [Z_SUBMAP_IDX_GENERAL_0] = 80,
9670 #endif /* ZSECURITY_CONFIG(SUBMAP_USER_DATA) && ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9671 };
9672
9673 zone_pages_wired_max = (uint32_t)atop(zone_phys_size_max());
9674
9675 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9676 denom += submap_ratios[idx];
9677 if (submap_ratios[idx] != 0) {
9678 submap_count++;
9679 }
9680 }
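/*
 * Worked example (illustrative): with READ_ONLY, SUBMAP_USER_DATA and
 * SAD_FENG_SHUI all enabled, the ratios above are 15 + 5 + 4*15 + 20,
 * so denom == 100 and submap_count == 7; each submap then receives
 * ratio/denom of the zone map VA carved out below.
 */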
9681
9682 #if __LP64__
9683 zone_map_size = ZONE_MAP_VA_SIZE_LP64;
9684 #else
9685 zone_map_size = ptoa(zone_pages_wired_max *
9686 (denom + submap_ratios[Z_SUBMAP_IDX_VM]) / denom);
9687 #endif
9688
9689 /*
9690 * And now allocate the various pieces of VA and submaps.
9691 *
9692 * Make a first allocation of contiguous VA that we'll deallocate,
9693 * then carve out memory in that range again linearly.
9694 * The kernel is still single threaded at this stage.
9695 */
9696
9697 struct zone_map_range *map_range =
9698 &zone_info.zi_map_range[ZONE_ADDR_NATIVE];
9699
9700 *map_range = zone_init_allocate_va(0, zone_map_size, false);
9701 submap_min = map_range->min_address;
9702 remaining_size = zone_map_size;
9703
9704 #if CONFIG_PROB_GZALLOC
9705 vm_size_t pgz_size = pgz_get_size();
9706
9707 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
9708 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9709 VM_KERN_MEMORY_ZONE, &pgz_submap);
9710
9711 submap_min += pgz_size;
9712 remaining_size -= pgz_size;
9713 #if DEBUG || DEVELOPMENT
9714 printf("zone_init: pgzalloc %p:%p (%zuM) [%d slots]\n",
9715 (void *)zone_info.zi_pgz_range.min_address,
9716 (void *)zone_info.zi_pgz_range.max_address,
9717 (size_t)pgz_size >> 20, pgz_slots);
9718 #endif /* DEBUG || DEVELOPMENT */
9719 #endif /* CONFIG_PROB_GZALLOC */
9720
9721 /*
9722 * Allocate the submaps
9723 */
9724 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9725 if (submap_ratios[idx] == 0) {
9726 zone_submaps[idx] = VM_MAP_NULL;
9727 } else {
9728 zone_submap_init(&submap_min, idx, submap_ratios[idx],
9729 &denom, &remaining_size);
9730 }
9731 }
9732
9733 assert(submap_min == map_range->max_address);
9734
9735 /*
9736 * kalloc_init_maps() needs to be done before zone_metadata_init(), which
9737 * occupies random space in the kernel, and the kalloc maps need a HUGE range.
9738 */
9739 kalloc_init_maps(map_range->max_address);
9740
9741 zone_metadata_init();
9742
9743 #if VM_TAG_SIZECLASSES
9744 if (zone_tagging_on) {
9745 zone_tagging_init(zone_map_size);
9746 }
9747 #endif
9748 #if CONFIG_GZALLOC
9749 gzalloc_init(zone_map_size);
9750 #endif
9751
9752 zone_create_flags_t kma_flags = ZC_NOCACHING |
9753 ZC_NOGC | ZC_NOGZALLOC | ZC_NOCALLOUT |
9754 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM_LP64;
9755
9756 (void)zone_create_ext("vm.permanent", 1, kma_flags,
9757 ZONE_ID_PERMANENT, ^(zone_t z) {
9758 z->z_permanent = true;
9759 z->z_elem_size = 1;
9760 });
9761 (void)zone_create_ext("vm.permanent.percpu", 1,
9762 kma_flags | ZC_PERCPU, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
9763 z->z_permanent = true;
9764 z->z_elem_size = 1;
9765 });
9766
9767 /*
9768 * Now migrate the startup statistics into their final storage.
9769 */
9770 int cpu = cpu_number();
9771 zone_index_foreach(idx) {
9772 zone_t tz = &zone_array[idx];
9773
9774 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
9775 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
9776
9777 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
9778 tz->z_stats = zs;
9779 }
9780 }
9781
9782 #if VM_TAG_SIZECLASSES
9783 if (zone_tagging_on) {
9784 vm_allocation_zones_init();
9785 }
9786 #endif
9787 }
9788 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
9789
9790 __startup_func
9791 static void
9792 zone_cache_bootstrap(void)
9793 {
9794 zone_t magzone;
9795
9796 magzone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
9797 zc_mag_size() * sizeof(zone_element_t),
9798 ZC_NOGZALLOC | ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE |
9799 ZC_SEQUESTER | ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
9800 magzone->z_elems_rsv = (uint16_t)(2 * zpercpu_count());
9801
9802 os_atomic_store(&zc_magazine_zone, magzone, compiler_acq_rel);
9803
9804 /*
9805 * Now that we are initialized, we can enable zone caching for zones that
9806 * were made before zone_cache_bootstrap() was called.
9807 *
9808 * The system is still single threaded so we don't need to take the lock.
9809 */
9810 zone_index_foreach(i) {
9811 zone_t z = &zone_array[i];
9812 if (z->z_pcpu_cache) {
9813 z->z_pcpu_cache = NULL;
9814 zone_enable_caching(z);
9815 }
9816 #if ZONE_ENABLE_LOGGING
9817 if (!z->z_gzalloc_tracked && z->z_self == z) {
9818 zone_setup_logging(z);
9819 }
9820 #endif /* ZONE_ENABLE_LOGGING */
9821 }
9822 }
9823 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, zone_cache_bootstrap);
9824
9825 void
9826 zalloc_first_proc_made(void)
9827 {
9828 zone_caching_disabled = 0;
9829 }
9830
9831 __startup_func
9832 vm_offset_t
9833 zone_foreign_mem_init(vm_size_t size, bool allow_meta_steal)
9834 {
9835 struct zone_page_metadata *base;
9836 vm_offset_t mem;
9837
9838 if (atop(size) <= ZONE_FOREIGN_META_INLINE_COUNT) {
9839 base = zone_foreign_meta_array_startup;
9840 } else if (allow_meta_steal) {
9841 vm_size_t steal_size;
9842
9843 printf("zinit: not enough early foreigh metadata "
9844 "(%d > %d) stealing from the pmap\n",
9845 (int)atop(size), ZONE_FOREIGN_META_INLINE_COUNT);
9846 steal_size = round_page(atop(size) * sizeof(*base));
9847 base = pmap_steal_memory(steal_size);
9848 zone_early_steal.min_address = (vm_offset_t)base;
9849 zone_early_steal.max_address = (vm_offset_t)base + steal_size;
9850 } else {
9851 panic("ZONE_FOREIGN_META_INLINE_COUNT has become too small: "
9852 "%d > %d", (int)atop(size), ZONE_FOREIGN_META_INLINE_COUNT);
9853 }
9854
9855 mem = (vm_offset_t)pmap_steal_memory(size);
9856
9857 zone_info.zi_meta_base = base - zone_pva_from_addr(mem).packed_address;
9858 zone_info.zi_map_range[ZONE_ADDR_FOREIGN].min_address = mem;
9859 zone_info.zi_map_range[ZONE_ADDR_FOREIGN].max_address = mem + size;
9860
9861 zone_info.zi_bits_range = (struct zone_map_range){
9862 .min_address = (vm_offset_t)zba_chunk_startup,
9863 .max_address = (vm_offset_t)zba_chunk_startup +
9864 sizeof(zba_chunk_startup),
9865 };
9866 zba_init_chunk(0);
9867
9868 return mem;
9869 }
9870
9871 #endif /* !ZALLOC_TEST */
9872 #pragma mark - tests
9873 #if DEBUG || DEVELOPMENT
9874
9875 /*
9876 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
9877 * thread goes through at a time.
9878 *
9879 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
9880 * before zdestroy()), which could lead us to run out of zones.
9881 */
9882 static bool any_zone_test_running = FALSE;
9883
9884 static uintptr_t *
9885 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
9886 {
9887 vm_offset_t elem_size = zone_elem_size(z);
9888 vm_offset_t base;
9889 struct zone_page_metadata *meta;
9890
9891 while (!zone_pva_is_null(page_index)) {
9892 base = zone_pva_to_addr(page_index) + zone_oob_offs(z);
9893 meta = zone_pva_to_meta(page_index);
9894
9895 if (meta->zm_inline_bitmap) {
9896 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
9897 uint32_t map = meta[i].zm_bitmap;
9898
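/*
 * map &= map - 1 clears the lowest set bit, so this inner
 * loop runs exactly once per bit set in the bitmap word.
 */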
9899 for (; map; map &= map - 1) {
9900 *elems++ = INSTANCE_PUT(base +
9901 elem_size * __builtin_clz(map));
9902 }
9903 base += elem_size * 32;
9904 }
9905 } else {
9906 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
9907 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
9908 for (size_t i = 0; i < (1u << order); i++) {
9909 uint64_t map = bits[i];
9910
9911 for (; map; map &= map - 1) {
9912 *elems++ = INSTANCE_PUT(base +
9913 elem_size * __builtin_clzll(map));
9914 }
9915 base += elem_size * 64;
9916 }
9917 }
9918
9919 page_index = meta->zm_page_next;
9920 }
9921 return elems;
9922 }
9923
9924 kern_return_t
9925 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
9926 {
9927 zone_t zone = NULL;
9928 uintptr_t * array;
9929 uintptr_t * next;
9930 uintptr_t element;
9931 uint32_t idx, count, found;
9932 uint32_t nobtcount;
9933 uint32_t elemSize;
9934 size_t maxElems;
9935 kern_return_t kr;
9936
9937 zone_foreach(z) {
9938 if (!strncmp(zoneName, z->z_name, nameLen)) {
9939 zone = z;
9940 break;
9941 }
9942 }
9943 if (zone == NULL) {
9944 return KERN_INVALID_NAME;
9945 }
9946
9947 elemSize = (uint32_t)zone_elem_size(zone);
9948 maxElems = (zone->z_elems_avail + 1) & ~1ul;
9949
9950 kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array,
9951 maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG);
9952 if (KERN_SUCCESS != kr) {
9953 return kr;
9954 }
9955
9956 zone_lock(zone);
9957
9958 next = array;
9959 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
9960 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
9961 count = (uint32_t)(next - array);
9962
9963 zone_unlock(zone);
9964
9965 zone_leaks_scan(array, count, (uint32_t)zone_elem_size(zone), &found);
9966 assert(found <= count);
9967
9968 for (idx = 0; idx < count; idx++) {
9969 element = array[idx];
9970 if (kInstanceFlagReferenced & element) {
9971 continue;
9972 }
9973 element = INSTANCE_PUT(element) & ~kInstanceFlags;
9974 }
9975
9976 #if ZONE_ENABLE_LOGGING
9977 if (zone->z_btlog && !corruption_debug_flag) {
9978 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
9979 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
9980 btlog_copy_backtraces_for_elements(zone->z_btlog,
9981 (vm_address_t *)array, &count, elemSize, proc);
9982 }
9983 #endif /* ZONE_ENABLE_LOGGING */
9984
9985 for (nobtcount = idx = 0; idx < count; idx++) {
9986 element = array[idx];
9987 if (!element) {
9988 continue;
9989 }
9990 if (kInstanceFlagReferenced & element) {
9991 continue;
9992 }
9993 nobtcount++;
9994 }
9995 if (nobtcount) {
9996 proc(nobtcount, elemSize, BTREF_NULL);
9997 }
9998
9999 kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t));
10000
10001 return KERN_SUCCESS;
10002 }
10003
10004 static int
10005 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10006 {
10007 zone_security_flags_t zsflags;
10008 uint32_t x = 4;
10009 uint32_t *test_ptr;
10010
10011 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10012 printf("zone_ro_basic_test: Test already running.\n");
10013 return EALREADY;
10014 }
10015
10016 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10017
10018 for (int i = 0; i < 3; i++) {
10019 #if ZSECURITY_CONFIG(READ_ONLY)
10020 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10021 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10022 printf("zone_ro_basic_test: create a sub-page size zone\n");
10023
10024 printf("zone_ro_basic_test: verify flags were set\n");
10025 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10026
10027 printf("zone_ro_basic_test: zalloc an element\n");
10028 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10029 assert(test_ptr);
10030
10031 printf("zone_ro_basic_test: verify elem in the right submap\n");
10032 zone_require_ro_range_contains(ZONE_ID__FIRST_RO, test_ptr);
10033
10034 printf("zone_ro_basic_test: verify we can't write to it\n");
10035 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10036
10037 x = 4;
10038 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10039 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10040 assert(test_ptr);
10041 assert(*(uint32_t*)test_ptr == x);
10042
10043 x = 5;
10044 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10045 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10046 assert(test_ptr);
10047 assert(*(uint32_t*)test_ptr == x);
10048
10049 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10050 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10051
10052 printf("zone_ro_basic_test: free elem\n");
10053 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10054 assert(!test_ptr);
10055 #else
10056 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10057
10058 printf("zone_ro_basic_test: verify flags were set\n");
10059 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10060
10061 printf("zone_ro_basic_test: zalloc an element\n");
10062 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10063 assert(test_ptr);
10064
10065 x = 4;
10066 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10067 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10068 assert(test_ptr);
10069 assert(*(uint32_t*)test_ptr == x);
10070
10071 x = 5;
10072 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10073 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10074 assert(test_ptr);
10075 assert(*(uint32_t*)test_ptr == x);
10076
10077 printf("zone_ro_basic_test: free elem\n");
10078 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10079 assert(!test_ptr);
10080 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10081 }
10082
10083 printf("zone_ro_basic_test: garbage collection\n");
10084 zone_gc(ZONE_GC_DRAIN);
10085
10086 printf("zone_ro_basic_test: Test passed\n");
10087
10088 *out = 1;
10089 os_atomic_store(&any_zone_test_running, false, relaxed);
10090 return 0;
10091 }
10092 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10093
10094 static int
10095 zone_basic_test_run(__unused int64_t in, int64_t *out)
10096 {
10097 static zone_t test_zone_ptr = NULL;
10098
10099 unsigned int i = 0, max_iter = 5;
10100 void * test_ptr;
10101 zone_t test_zone;
10102 int rc = 0;
10103
10104 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10105 printf("zone_basic_test: Test already running.\n");
10106 return EALREADY;
10107 }
10108
10109 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10110
10111 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10112 do {
10113 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10114 assert(test_zone);
10115
10116 #if KASAN_ZALLOC
10117 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10118 #else
10119 if (test_zone->z_elems_free != 0)
10120 #endif
10121 {
10122 printf("zone_basic_test: free count is not zero\n");
10123 rc = EIO;
10124 goto out;
10125 }
10126
10127 if (test_zone_ptr == NULL) {
10128 /* Stash the zone pointer returned on the first zinit */
10129 printf("zone_basic_test: zone created for the first time\n");
10130 test_zone_ptr = test_zone;
10131 } else if (test_zone != test_zone_ptr) {
10132 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10133 rc = EIO;
10134 goto out;
10135 }
10136
10137 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10138 zfree(test_zone, test_ptr);
10139
10140 zdestroy(test_zone);
10141 i++;
10142
10143 printf("zone_basic_test: Iteration %d successful\n", i);
10144 } while (i < max_iter);
10145
10146 /* test Z_VA_SEQUESTER */
10147 #if ZSECURITY_CONFIG(SEQUESTER)
10148 {
10149 zone_t test_pcpu_zone;
10150 kern_return_t kr;
10151 int idx, num_allocs = 8;
10152 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10153 void *allocs[num_allocs];
10154 void **allocs_pcpu;
10155 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10156
10157 test_zone = zone_create("test_zone_sysctl", elem_size,
10158 ZC_DESTRUCTIBLE);
10159 assert(test_zone);
10160 assert(zone_security_config(test_zone).z_va_sequester);
10161
10162 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10163 ZC_DESTRUCTIBLE | ZC_PERCPU);
10164 assert(test_pcpu_zone);
10165 assert(zone_security_config(test_pcpu_zone).z_va_sequester);
10166
10167 for (idx = 0; idx < num_allocs; idx++) {
10168 allocs[idx] = zalloc(test_zone);
10169 assert(NULL != allocs[idx]);
10170 printf("alloc[%d] %p\n", idx, allocs[idx]);
10171 }
10172 for (idx = 0; idx < num_allocs; idx++) {
10173 zfree(test_zone, allocs[idx]);
10174 }
10175 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10176
10177 kr = kernel_memory_allocate(kernel_map,
10178 (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10179 0, KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10180 assert(kr == KERN_SUCCESS);
10181
10182 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10183 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10184 Z_WAITOK | Z_ZERO);
10185 assert(NULL != allocs_pcpu[idx]);
10186 }
10187 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10188 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10189 }
10190 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10191
10192 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10193 vm_page_wire_count, vm_page_free_count,
10194 100L * phys_pages / zone_pages_wired_max);
10195 zone_gc(ZONE_GC_DRAIN);
10196 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10197 vm_page_wire_count, vm_page_free_count,
10198 100L * phys_pages / zone_pages_wired_max);
10199
10200 unsigned int allva = 0;
10201
10202 zone_foreach(z) {
10203 zone_lock(z);
10204 allva += z->z_wired_cur;
10205 if (zone_pva_is_null(z->z_pageq_va)) {
10206 zone_unlock(z);
10207 continue;
10208 }
10209 unsigned count = 0;
10210 uint64_t size;
10211 zone_pva_t pg = z->z_pageq_va;
10212 struct zone_page_metadata *page_meta;
10213 while (pg.packed_address) {
10214 page_meta = zone_pva_to_meta(pg);
10215 count += z->z_percpu ? 1 : z->z_chunk_pages;
10216 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10217 count -= page_meta->zm_page_index;
10218 }
10219 pg = page_meta->zm_page_next;
10220 }
10221 size = zone_size_wired(z);
10222 if (!size) {
10223 size = 1;
10224 }
10225 printf("%s%s: seq %d, res %d, %qd %%\n",
10226 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10227 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10228 zone_unlock(z);
10229 }
10230
10231 printf("total va: %d\n", allva);
10232
10233 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10234 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10235 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10236 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10237 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10238 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10239
10240 for (idx = 0; idx < num_allocs; idx++) {
10241 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10242 }
10243
10244 /* make sure the zone is still usable after a GC */
10245
10246 for (idx = 0; idx < num_allocs; idx++) {
10247 allocs[idx] = zalloc(test_zone);
10248 assert(allocs[idx]);
10249 printf("alloc[%d] %p\n", idx, allocs[idx]);
10250 }
10251 for (idx = 0; idx < num_allocs; idx++) {
10252 zfree(test_zone, allocs[idx]);
10253 }
10254
10255 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10256 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10257 Z_WAITOK | Z_ZERO);
10258 assert(NULL != allocs_pcpu[idx]);
10259 }
10260 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10261 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10262 }
10263
10264 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10265
10266 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10267
10268 zdestroy(test_zone);
10269 zdestroy(test_pcpu_zone);
10270 }
10271 #else
10272 printf("zone_basic_test: skipping sequester test (not enabled)\n");
10273 #endif /* ZSECURITY_CONFIG(SEQUESTER) */
10274
10275 printf("zone_basic_test: Test passed\n");
10276
10277
10278 *out = 1;
10279 out:
10280 os_atomic_store(&any_zone_test_running, false, relaxed);
10281 return rc;
10282 }
10283 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10284
10285 struct zone_stress_obj {
10286 TAILQ_ENTRY(zone_stress_obj) zso_link;
10287 };
10288
10289 struct zone_stress_ctx {
10290 thread_t zsc_leader;
10291 lck_mtx_t zsc_lock;
10292 zone_t zsc_zone;
10293 uint64_t zsc_end;
10294 uint32_t zsc_workers;
10295 };
10296
10297 static void
10298 zone_stress_worker(void *arg, wait_result_t __unused wr)
10299 {
10300 struct zone_stress_ctx *ctx = arg;
10301 bool leader = ctx->zsc_leader == current_thread();
10302 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10303 struct zone_bool_gen bg = { };
10304 struct zone_stress_obj *obj;
10305 uint32_t allocs = 0;
10306
10307 random_bool_init(&bg.zbg_bg);
10308
10309 do {
10310 for (int i = 0; i < 2000; i++) {
10311 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10312 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10313 switch (what) {
10314 case 0:
10315 case 1:
10316 if (allocs < 10000) {
10317 obj = zalloc(ctx->zsc_zone);
10318 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10319 allocs++;
10320 }
10321 break;
10322 case 2:
10323 case 3:
10324 if (allocs < 10000) {
10325 obj = zalloc(ctx->zsc_zone);
10326 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10327 allocs++;
10328 }
10329 break;
10330 case 4:
10331 if (leader) {
10332 zone_gc(ZONE_GC_DRAIN);
10333 }
10334 break;
10335 case 5:
10336 case 6:
10337 if (!TAILQ_EMPTY(&head)) {
10338 obj = TAILQ_FIRST(&head);
10339 TAILQ_REMOVE(&head, obj, zso_link);
10340 zfree(ctx->zsc_zone, obj);
10341 allocs--;
10342 }
10343 break;
10344 case 7:
10345 if (!TAILQ_EMPTY(&head)) {
10346 obj = TAILQ_LAST(&head, zone_stress_head);
10347 TAILQ_REMOVE(&head, obj, zso_link);
10348 zfree(ctx->zsc_zone, obj);
10349 allocs--;
10350 }
10351 break;
10352 }
10353 }
10354 } while (mach_absolute_time() < ctx->zsc_end);
10355
10356 while (!TAILQ_EMPTY(&head)) {
10357 obj = TAILQ_FIRST(&head);
10358 TAILQ_REMOVE(&head, obj, zso_link);
10359 zfree(ctx->zsc_zone, obj);
10360 }
10361
10362 lck_mtx_lock(&ctx->zsc_lock);
10363 if (--ctx->zsc_workers == 0) {
10364 thread_wakeup(ctx);
10365 } else if (leader) {
10366 while (ctx->zsc_workers) {
10367 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10368 THREAD_UNINT);
10369 }
10370 }
10371 lck_mtx_unlock(&ctx->zsc_lock);
10372
10373 if (!leader) {
10374 thread_terminate_self();
10375 __builtin_unreachable();
10376 }
10377 }
10378
10379 static int
10380 zone_stress_test_run(__unused int64_t in, int64_t *out)
10381 {
10382 struct zone_stress_ctx ctx = {
10383 .zsc_leader = current_thread(),
10384 .zsc_workers = 3,
10385 };
10386 kern_return_t kr;
10387 thread_t th;
10388
10389 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10390 printf("zone_stress_test: Test already running.\n");
10391 return EALREADY;
10392 }
10393
10394 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10395 ctx.zsc_zone = zone_create("test_zone_344", 344,
10396 ZC_DESTRUCTIBLE | ZC_NOCACHING);
10397 assert(ctx.zsc_zone->z_chunk_pages > 1);
10398
10399 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10400
10401 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10402
10403 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10404
10405 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10406 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10407 BASEPRI_DEFAULT, &th);
10408 if (kr == KERN_SUCCESS) {
10409 printf("zone_stress_test: thread %d: %p\n", i, th);
10410 thread_deallocate(th);
10411 } else {
10412 ctx.zsc_workers--;
10413 }
10414 }
10415
10416 zone_stress_worker(&ctx, 0);
10417
10418 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10419
10420 zdestroy(ctx.zsc_zone);
10421
10422 printf("zone_stress_test: Done\n");
10423
10424 *out = 1;
10425 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10426 os_atomic_store(&any_zone_test_running, false, relaxed);
10427 return 0;
10428 }
10429 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10430
10431 /*
10432 * Routines to test that zone garbage collection and zone replenish threads
10433 * running at the same time don't cause problems.
10434 */
10435
10436 static int
10437 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
10438 {
10439 zone_gc(ZONE_GC_DRAIN);
10440 *out = 1;
10441 return 0;
10442 }
10443 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
10444
10445 static int
10446 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
10447 {
10448 zone_t z = vm_map_entry_zone;
10449 struct data { struct data *next; } *node, *list = NULL;
10450
10451 if (z == NULL) {
10452 printf("Couldn't find a replenish zone\n");
10453 return EIO;
10454 }
10455
10456 /* big enough to go past replenishment */
10457 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
10458 node = zalloc(z);
10459 node->next = list;
10460 list = node;
10461 }
10462
10463 /*
10464 * release the memory we allocated
10465 */
10466 while (list != NULL) {
10467 node = list;
10468 list = list->next;
10469 zfree(z, node);
10470 }
10471
10472 *out = 1;
10473 return 0;
10474 }
10475 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
10476
10477 #endif /* DEBUG || DEVELOPMENT */
10478