/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/zalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Zone-based memory allocator. A zone is a collection of fixed size
 *	data blocks for which quick allocation/deallocation is possible.
 */

#define ZALLOC_ALLOW_DEPRECATED 1
#if !ZALLOC_TEST
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <machine/machine_routines.h>
#include <mach/vm_map.h>
#include <mach/sdt.h>
#if __x86_64__
#include <i386/cpuid.h>
#endif

#include <kern/bits.h>
#include <kern/btlog.h>
#include <kern/startup.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <kern/debug.h>

#include <prng/random.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */

#include <pexpert/pexpert.h>

#include <machine/machparam.h>
#include <machine/machine_routines.h> /* ml_cpu_get_info */

#include <os/atomic.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>
#include <sys/kdebug.h>

#include <san/kasan.h>
#include <libsa/stdlib.h>
#include <sys/errno.h>

#include <IOKit/IOBSD.h>
#include <arm64/amcc_rorgn.h>

#if DEBUG
#define z_debug_assert(expr)    assert(expr)
#else
#define z_debug_assert(expr)    (void)(expr)
#endif

/* Returns pid of the task with the largest number of VM map entries. */
extern pid_t find_largest_process_vm_map_entries(void);

/*
 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do
 * asynchronous kills. For any other pid we try to kill that process
 * synchronously.
 */
extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);

extern zone_t vm_object_zone;
extern zone_t ipc_service_port_label_zone;

ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
    ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);

#if CONFIG_KERNEL_TBI && KASAN_TBI
#define ZONE_MIN_ELEM_SIZE      (sizeof(uint64_t) * 2)
#define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
#else /* CONFIG_KERNEL_TBI && KASAN_TBI */
#define ZONE_MIN_ELEM_SIZE      sizeof(uint64_t)
#define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

#define ZONE_MAX_ALLOC_SIZE     (32 * 1024)
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
#define ZONE_CHUNK_ALLOC_SIZE   (256 * 1024)
#define ZONE_GUARD_DENSE        (32 * 1024)
#define ZONE_GUARD_SPARSE       (64 * 1024)
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */

#if XNU_PLATFORM_MacOSX
#define ZONE_MAP_MAX            (32ULL << 30)
#define ZONE_MAP_VA_SIZE_LP64   (128ULL << 30)
#else /* XNU_PLATFORM_MacOSX */
#define ZONE_MAP_MAX            (8ULL << 30)
#define ZONE_MAP_VA_SIZE_LP64   (24ULL << 30)
#endif /* !XNU_PLATFORM_MacOSX */

__enum_closed_decl(zm_len_t, uint16_t, {
	ZM_CHUNK_FREE           = 0x0,
	/* 1 through 8 are valid lengths */
	ZM_CHUNK_LEN_MAX        = 0x8,

	/* PGZ magical values */
	ZM_PGZ_FREE             = 0x0,
	ZM_PGZ_ALLOCATED        = 0xa, /* [a]llocated */
	ZM_PGZ_GUARD            = 0xb, /* oo[b] */
	ZM_PGZ_DOUBLE_FREE      = 0xd, /* [d]ouble_free */

	/* secondary page markers */
	ZM_SECONDARY_PAGE       = 0xe,
	ZM_SECONDARY_PCPU_PAGE  = 0xf,
});

static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");

struct zone_page_metadata {
	union {
		struct {
			/* The index of the zone this metadata page belongs to */
			zone_id_t       zm_index : 10;

			/*
			 * This chunk ends with a guard page.
			 */
			uint16_t        zm_guarded : 1;

			/*
			 * Whether `zm_bitmap` is an inline bitmap
			 * or a packed bitmap reference
			 */
			uint16_t        zm_inline_bitmap : 1;

			/*
			 * Zones allocate in "chunks" of zone_t::z_chunk_pages
			 * consecutive pages, or zpercpu_count() pages if the
			 * zone is percpu.
			 *
			 * The first page of it has its metadata set with:
			 * - 0 if none of the pages are currently wired
			 * - the number of wired pages in the chunk
			 *   (not scaled for percpu).
			 *
			 * Other pages in the chunk have their zm_chunk_len set
			 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
			 * depending on whether the zone is percpu or not.
			 * For those, zm_page_index holds the index of that page
			 * in the run, and zm_subchunk_len the remaining length
			 * within the chunk.
			 *
			 * Metadata used for PGZ pages can have 3 values:
			 * - ZM_PGZ_FREE: slot is free
			 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
			 *   at offset (zm_pgz_orig_addr & PAGE_MASK)
			 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
			 *   (will panic).
			 */
			zm_len_t        zm_chunk_len : 4;
		};
		uint16_t zm_bits;
	};

	union {
#define ZM_ALLOC_SIZE_LOCK      1u
		uint16_t zm_alloc_size; /* first page only */
		struct {
			uint8_t zm_page_index;   /* secondary pages only */
			uint8_t zm_subchunk_len; /* secondary pages only */
		};
		uint16_t zm_oob_offs;   /* in guard pages */
	};
	union {
		uint32_t zm_bitmap;     /* most zones */
		uint32_t zm_bump;       /* permanent zones */
	};

	union {
		struct {
			zone_pva_t      zm_page_next;
			zone_pva_t      zm_page_prev;
		};
		vm_offset_t             zm_pgz_orig_addr;
		struct zone_page_metadata *zm_pgz_slot_next;
	};
};
static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
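
/*
 * Illustrative example (not from the original source): for a fully wired
 * 4-page chunk, the per-page metadata described above would look like:
 *
 *	meta[0]: zm_chunk_len = 4 (wired pages), zm_alloc_size, zm_bitmap, ...
 *	meta[1]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 1, zm_subchunk_len = 3
 *	meta[2]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 2, zm_subchunk_len = 2
 *	meta[3]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 3, zm_subchunk_len = 1
 */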

/*!
 * @typedef zone_element_t
 *
 * @brief
 * Type that represents a "resolved" zone element.
 *
 * @description
 * This type encodes an element pointer as a pair of:
 * { chunk base, element index }.
 *
 * The chunk base is extracted with @c trunc_page()
 * as it is always page aligned, and occupies the bits above @c PAGE_SHIFT.
 *
 * The other bits encode the element index in the chunk rather than its address.
 */
typedef struct zone_element {
	vm_offset_t ze_value;
} zone_element_t;

/*!
 * @typedef zone_magazine_t
 *
 * @brief
 * Magazine of cached allocations.
 *
 * @field zm_cur    how many elements this magazine holds (unused while loaded).
 * @field zm_link   linkage used by magazine depots.
 * @field zm_elems  an array of @c zc_mag_size() elements.
 */
typedef struct zone_magazine {
	uint16_t zm_cur;
	STAILQ_ENTRY(zone_magazine) zm_link;
	zone_element_t zm_elems[0];
} *zone_magazine_t;

/*!
 * @typedef zone_cache_t
 *
 * @brief
 * Per-CPU cache of zone allocations.
 *
 * @discussion
 * Below is a diagram of the caching system. This design is inspired by the
 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
 * zone allocator (itself derived from this seminal work).
 *
 * It is divided into 3 layers:
 * - the per-cpu layer,
 * - the recirculation depot layer,
 * - the Zone Allocator.
 *
 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
 * which are stacks of up to @c zc_mag_size() elements.
 *
 * <h2>CPU layer</h2>
 *
 * The CPU layer (@c zone_cache_t) looks like this:
 *
 *	╭─ a ─ f ─┬───────── zm_depot ──────────╮
 *	│ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮         │
 *	│ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│         │
 *	│ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│         │
 *	│ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│         │
 *	│ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯         │
 *	╰─────────┴─────────────────────────────╯
 *
 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
 * or free to. Serialization is achieved through disabling preemption, and only
 * the current CPU can access those allocations. This is represented on the left
 * hand side of the diagram above.
 *
 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
 * full magazines, and is protected by the @c zm_depot_lock for access.
 * The lock is expected to absolutely never be contended, as only the local CPU
 * tends to access the local per-cpu depot in regular operation mode.
 *
 * However unlike UMA, our implementation allows for the zone GC to reclaim
 * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
 *
 *
 * <h2>Recirculation Depot</h2>
 *
 * The recirculation depot layer is a list similar to the per-cpu depot,
 * however it is different in two fundamental ways:
 *
 * - it is protected by the regular zone lock,
 * - elements referenced by the magazines in that layer appear free
 *   to the zone layer.
 *
 *
 * <h2>Magazine circulation and sizing</h2>
 *
 * The caching system sizes itself dynamically. Operations that allocate/free
 * a single element call @c zone_lock_nopreempt_check_contention() which records
 * contention on the lock by doing a trylock and recording its success.
 *
 * This information is stored in the @c z_contention_cur field of the zone,
 * and a windowed moving average is maintained in @c z_contention_wma.
 * Each time a CPU registers any contention, it will also allow its own per-cpu
 * cache to grow, incrementing @c zc_depot_max, which is how the per-cpu layer
 * might grow into using its local depot.
 *
 * Note that @c zc_depot_max assumes that the (a) and (f) pre-loaded magazines
 * on average contain @c zc_mag_size() elements.
 *
 * When a per-cpu layer cannot hold more full magazines in its depot,
 * then it will overflow about 1/3 of its depot into the recirculation depot
 * (see @c zfree_cached_slow()). Conversely, when a depot is empty, then it will
 * refill its per-cpu depot to about 1/3 of its size from the recirculation
 * depot (see @c zalloc_cached_slow()).
 *
 * Lastly, the zone layer keeps track of the high and low watermark of how many
 * elements are free over a period of time (including being part of the
 * recirculation depot) in the @c z_elems_free_min and @c z_elems_free_max
 * fields. A weighted moving average of the amplitude of this is maintained in
 * the @c z_elems_free_wss which informs the zone GC on how to gently trim
 * zones without hurting performance.
 *
 *
 * <h2>Security considerations</h2>
 *
 * The zone caching layer has been designed to avoid returning elements in
 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
 * and @c zfree() free to the (f) magazine, and only swap them when the
 * requested operation cannot be fulfilled.
 *
 * The per-cpu overflow depot or the recirculation depots are similarly used
 * in FIFO order.
 *
 * More importantly, when magazines flow through the recirculation depot,
 * the elements they contain are marked as "free" in the zone layer bitmaps.
 * Because allocations out of per-cpu caches verify the bitmaps at allocation
 * time, this acts as a poor man's double-free quarantine. The magazines
 * also avoid the cost of the bit-scanning involved in the zone-level
 * @c zalloc_item() codepath.
 *
 *
 * @field zc_alloc_cur      denormalized number of elements in the (a) magazine
 * @field zc_free_cur       denormalized number of elements in the (f) magazine
 * @field zc_alloc_elems    a pointer to the array of elements in (a)
 * @field zc_free_elems     a pointer to the array of elements in (f)
 *
 * @field zc_depot_lock     a lock to access @c zc_depot, @c zc_depot_cur.
 * @field zc_depot          a list of @c zc_depot_cur full magazines
 * @field zc_depot_cur      number of magazines in @c zc_depot
 * @field zc_depot_max      the maximum number of elements in @c zc_depot,
 *                          protected by the zone lock.
 */
typedef struct zone_cache {
	uint16_t                zc_alloc_cur;
	uint16_t                zc_free_cur;
	uint16_t                zc_depot_cur;
	uint16_t                __zc_padding;
	zone_element_t         *zc_alloc_elems;
	zone_element_t         *zc_free_elems;
	hw_lck_ticket_t         zc_depot_lock;
	uint32_t                zc_depot_max;
	struct zone_depot       zc_depot;
} *zone_cache_t;
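
/*
 * Illustrative sketch (not the actual implementation): the intended interplay
 * of the (a) and (f) magazines on the free path, written against a
 * hypothetical zc_swap_magazines() helper, would roughly be:
 *
 *	if (zc->zc_free_cur >= zc_mag_size()) {
 *		if (zc->zc_alloc_cur < zc_mag_size())
 *			zc_swap_magazines(zc);    // (f) becomes (a), retry
 *		else
 *			zfree_cached_slow(...);   // spill into the depot(s)
 *	}
 *	zc->zc_free_elems[zc->zc_free_cur++] = ze;
 *
 * Swapping only when the requested operation cannot be fulfilled is what
 * avoids strict LIFO reuse, per the security notes above.
 */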

#if !__x86_64__
static
#endif
__security_const_late struct {
	struct mach_vm_range    zi_map_range;   /* all zone submaps     */
	struct mach_vm_range    zi_ro_range;    /* read-only range      */
	struct mach_vm_range    zi_meta_range;  /* debugging only       */
	struct mach_vm_range    zi_bits_range;  /* bits buddy allocator */
	struct mach_vm_range    zi_pgz_range;
	struct zone_page_metadata *zi_pgz_meta;

	/*
	 * The metadata lives within the zi_meta_range address range.
	 *
	 * The correct formula to find a metadata index is:
	 *     absolute_page_index - page_index(zi_map_range.min_address)
	 *
	 * And then this index is used to dereference zi_meta_range.min_address
	 * as a `struct zone_page_metadata` array.
	 *
	 * To avoid doing that subtraction in the various fast-paths,
	 * zi_meta_base is pre-offset with that minimum page index so the
	 * math doesn't have to be redone every time.
	 */
	struct zone_page_metadata *zi_meta_base;
} zone_info;
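
/*
 * Example (illustrative): thanks to the pre-offset base, resolving the
 * metadata for any address inside the zone map is a single indexed load,
 * with no subtraction on the fast path:
 *
 *	meta = &zone_info.zi_meta_base[atop(addr)];
 *
 * which is exactly what zone_pva_to_meta()/zone_meta_from_addr() below do.
 */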

__startup_data
static struct mach_vm_range zone_map_range;
__startup_data
vm_map_size_t zone_map_size;
__startup_data
static vm_map_size_t zone_meta_size;
__startup_data
static vm_map_size_t zone_bits_size;

/*
 * Initial array of metadata for stolen memory.
 *
 * The numbers here have to be kept in sync with vm_map_steal_memory()
 * so that we have reserved enough metadata.
 *
 * After zone_init() has run (which happens while the kernel is still single
 * threaded), the metadata is moved to its final dynamic location, and
 * this array is unmapped with the rest of __startup_data at lockdown.
 */
#define ZONE_EARLY_META_INLINE_COUNT    64
__startup_data
static struct zone_page_metadata
    zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];

#if __x86_64__
/*
 * On Intel we can't "free" pmap stolen pages,
 * so instead we use a static array in __KLDDATA
 * which gets reclaimed at lockdown time.
 */
__startup_data __attribute__((aligned(PAGE_SIZE)))
static uint8_t zone_early_pages_to_cram[PAGE_SIZE * 16];
#endif

/*
 * The zone_locks_grp allows for collecting lock statistics.
 * All locks are associated with this group in zinit.
 * Look at tools/lockstat for debugging lock contention.
 */
LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);

/*
 * The zone metadata lock protects:
 * - metadata faulting,
 * - VM submap VA allocations,
 * - early gap page queue list
 */
#define zone_meta_lock()   lck_mtx_lock(&zone_metadata_region_lck);
#define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);

/*
 * Exclude more than one concurrent garbage collection
 */
static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);

/*
 * Panic logging metadata
 */
bool panic_include_zprint = false;
bool panic_include_kalloc_types = false;
zone_t kalloc_type_src_zone = ZONE_NULL;
zone_t kalloc_type_dst_zone = ZONE_NULL;
mach_memory_info_t *panic_kext_memory_info = NULL;
vm_size_t panic_kext_memory_size = 0;
vm_offset_t panic_fault_address = 0;

/*
 * Protects zone_array, num_zones, num_zones_in_use, and
 * zone_destroyed_bitmap
 */
static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
static zone_id_t num_zones_in_use;
zone_id_t _Atomic num_zones;
SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;

/*
 * Initial globals for zone stats until we can allocate the real ones.
 * Those get migrated inside the per-CPU ones during zone_init() and
 * this array is unmapped with the rest of __startup_data at lockdown.
 */

/* zone to allocate zone_magazine structs from */
static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
/*
 * Zone caching stays off until pid 1 is made, i.e. until
 * compute_zone_working_set_size() runs for the first time.
 *
 * -1 represents the "never enabled yet" value.
 */
static int8_t zone_caching_disabled = -1;

__startup_data
static struct zone_cache zone_cache_startup[MAX_ZONES];
__startup_data
static struct zone_stats zone_stats_startup[MAX_ZONES];
struct zone zone_array[MAX_ZONES];
SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
	[0 ... MAX_ZONES - 1] = {
		.z_kheap_id     = KHEAP_ID_NONE,
		.z_noencrypt    = false,
		.z_submap_idx   = Z_SUBMAP_IDX_GENERAL_0,
		.z_kalloc_type  = false,
		.z_va_sequester = ZSECURITY_CONFIG(SEQUESTER),
	},
};
SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];

/* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;

/* Used to keep track of destroyed slots in the zone_array */
static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];

/* number of zone mapped pages used by all zones */
static size_t _Atomic zone_pages_jetsam_threshold = ~0;
size_t zone_pages_wired;
size_t zone_guard_pages;

/* Time in (ms) after which we panic for zone exhaustions */
TUNABLE(int, zone_exhausted_timeout, "zet", 5000);

#if VM_TAG_SIZECLASSES
/* enable tags for zones that ask for it */
static TUNABLE(bool, zone_tagging_on, "-zt", false);
#endif /* VM_TAG_SIZECLASSES */

#if DEBUG || DEVELOPMENT
static int zalloc_simulate_vm_pressure;
#endif /* DEBUG || DEVELOPMENT */

/*
 * Zone caching tunables
 *
 * zc_mag_size():
 *   size of magazines, larger to reduce contention at the expense of memory
 *
 * zc_auto_enable_threshold
 *   number of contentions per second after which zone caching engages
 *   automatically.
 *
 *   0 to disable.
 *
 * zc_grow_threshold
 *   number of contentions per second after which the per-cpu depot layer
 *   grows at each newly observed contention without restriction.
 *
 *   0 to disable.
 *
 * zc_recirc_batch
 *   how many magazines to transfer at most from/to the recirculation depot.
 *   Default 4.
 *
 * zc_defrag_ratio
 *   percentage of the working set to recirc size below which
 *   the zone is defragmented. Default is 66%.
 *
 * zc_defrag_threshold
 *   how much memory needs to be free before the auto-defrag is even considered.
 *   Default is 512k.
 *
 * zc_autogc_ratio
 *   percentage of the working set to min-free size below which
 *   the zone is auto-GCed to the working set size. Default is 20%.
 *
 * zc_autogc_threshold
 *   how much memory needs to be free before the auto-gc is even considered.
 *   Default is 4M.
 *
 * zc_free_batch_size
 *   The size of batches of frees/reclaim that can be done keeping
 *   the zone lock held (and preemption disabled).
 */
static TUNABLE(uint16_t, zc_magazine_size, "zc_mag_size", 8);
static TUNABLE(uint32_t, zc_auto_threshold, "zc_auto_enable_threshold", 20);
static TUNABLE(uint32_t, zc_grow_threshold, "zc_grow_threshold", 8);
static TUNABLE(uint16_t, zc_recirc_batch, "zc_recirc_batch", 4);
static TUNABLE(uint32_t, zc_defrag_ratio, "zc_defrag_ratio", 66);
static TUNABLE(uint32_t, zc_defrag_threshold, "zc_defrag_threshold", 512u << 10);
static TUNABLE(uint32_t, zc_autogc_ratio, "zc_autogc_ratio", 20);
static TUNABLE(uint32_t, zc_autogc_threshold, "zc_autogc_threshold", 4u << 20);
static TUNABLE(uint32_t, zc_free_batch_size, "zc_free_batch_size", 256);
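
/*
 * Example (illustrative): the tunables above are boot-args, so a test setup
 * trading memory for less zone lock contention on a wide machine could boot
 * with e.g.:
 *
 *	zc_mag_size=16 zc_grow_threshold=4 zc_free_batch_size=512
 */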

static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
	[Z_SUBMAP_IDX_VM]           = "VM",
	[Z_SUBMAP_IDX_READ_ONLY]    = "RO",
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
	[Z_SUBMAP_IDX_GENERAL_0]    = "GEN0",
	[Z_SUBMAP_IDX_GENERAL_1]    = "GEN1",
	[Z_SUBMAP_IDX_GENERAL_2]    = "GEN2",
	[Z_SUBMAP_IDX_GENERAL_3]    = "GEN3",
#else
	[Z_SUBMAP_IDX_GENERAL_0]    = "GEN",
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
	[Z_SUBMAP_IDX_DATA]         = "DATA",
};

#if __x86_64__
#define ZONE_ENTROPY_CNT 8
#else
#define ZONE_ENTROPY_CNT 2
#endif
static struct zone_bool_gen {
	struct bool_gen zbg_bg;
	uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
} zone_bool_gen[MAX_CPUS];

#if CONFIG_PROB_GZALLOC
/*
 * Probabilistic gzalloc
 * =====================
 *
 *
 * Probabilistic guard zalloc samples allocations and will protect them by
 * double-mapping the page holding them and returning the secondary virtual
 * address to its callers.
 *
 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
 * are set.
 *
 *
 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
 * most zalloc/kalloc features:
 * - zone_require is functional
 * - zone caching or zone tagging is compatible
 * - non-blocking allocations work (they would always return NULL with gzalloc).
 *
 * PGZ limitations:
 * - VA sequestering isn't respected, as the slots (which are in limited
 *   quantity) will be reused for any type, however the PGZ quarantine
 *   somewhat mitigates the impact.
 * - zones with elements larger than a page cannot be protected.
 *
 *
 * Tunables:
 * --------
 *
 * pgz=1:
 *   Turn on probabilistic guard malloc for all zones
 *
 *   (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
 *
 * pgz_sample_rate=0 to 2^31
 *   average sample rate between two guarded allocations.
 *   0 means every allocation.
 *
 *   The default is a random number between 1000 and 10,000.
 *
 * pgz_slots
 *   how many allocations to protect.
 *
 *   Each costs:
 *   - a PTE in the pmap (when allocated)
 *   - 2 zone page metas (every other page is a "guard" one, 32B total)
 *   - 64 bytes per backtrace.
 *   On LP64 this is <16K per 100 slots.
 *
 *   The default is ~200 slots per G of physical ram (32k / G).
 *
 *   TODO:
 *   - try harder to allocate elements at the "end" to catch OOB more reliably.
 *
 * pgz_quarantine
 *   how many slots should be free at any given time.
 *
 *   PGZ will round robin through free slots to be reused, but free slots are
 *   important to detect use-after-free by acting as a quarantine.
 *
 *   By default, PGZ will keep 33% of the slots around at all times.
 *
 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
 *   Specific zones for which to enable probabilistic guard malloc.
 *   There must be no numbering gap (names after the gap will be ignored).
 */
#if DEBUG || DEVELOPMENT
static TUNABLE(bool, pgz_all, "pgz", true);
#else
static TUNABLE(bool, pgz_all, "pgz", false);
#endif
static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
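
/*
 * Example boot-args (illustrative; the zone names are placeholders): enable
 * PGZ for two specific zones, sampling on average every ~2000 allocations:
 *
 *	pgz_sample_rate=2000 pgz1=data.kalloc.48 pgz2=vm.objects
 *
 * Remember that any gap in the pgz1..pgzn numbering stops the scan.
 */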
#endif /* CONFIG_PROB_GZALLOC */

static zone_t zone_find_largest(uint64_t *zone_size);

#endif /* !ZALLOC_TEST */
#pragma mark Zone metadata
#if !ZALLOC_TEST

static inline bool
zone_has_index(zone_t z, zone_id_t zid)
{
	return zone_array + zid == z;
}

static zone_element_t
zone_element_encode(vm_offset_t base, vm_offset_t eidx)
{
	return (zone_element_t){ .ze_value = base | eidx };
}

static vm_offset_t
zone_element_base(zone_element_t ze)
{
	return trunc_page(ze.ze_value);
}

static vm_offset_t
zone_element_idx(zone_element_t ze)
{
	return ze.ze_value & PAGE_MASK;
}

static vm_offset_t
zone_element_addr(zone_t z, zone_element_t ze, vm_offset_t esize)
{
	vm_offset_t offs = zone_elem_offs(z);

	return offs + zone_element_base(ze) + esize * zone_element_idx(ze);
}
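
/*
 * Worked example (illustrative): for a zone with 64-byte elements and a zero
 * element offset, element #3 of the chunk based at 0xffffff8012340000 is
 * encoded as ze_value == 0xffffff8012340003 (base | eidx), and
 * zone_element_addr() recovers 0xffffff8012340000 + 3 * 64.
 */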

__abortlike
void
zone_invalid_panic(zone_t zone)
{
	panic("zone %p isn't in the zone_array", zone);
}

__abortlike
static void
zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
    const char *kind)
{
	panic("zone metadata corruption: %s (meta %p, zone %s%s)",
	    kind, meta, zone_heap_name(zone), zone->z_name);
}

__abortlike
static void
zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
{
	panic("zone element pointer validation failed (addr: %p, zone %s%s)",
	    (void *)addr, zone_heap_name(zone), zone->z_name);
}

__abortlike
static void
zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
    struct zone_page_metadata *meta)
{
	zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
	zone_id_t zidx;
	zone_t src_zone;

	if (zsflags.z_kalloc_type) {
		panic_include_kalloc_types = true;
		kalloc_type_dst_zone = zone;
	}

	zidx = meta->zm_index;
	if (zidx >= os_atomic_load(&num_zones, relaxed)) {
		panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
		    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
		    zidx);
	}

	src_zone = &zone_array[zidx];
	src_zsflags = zone_security_array[zidx];
	if (src_zsflags.z_kalloc_type) {
		panic_include_kalloc_types = true;
		kalloc_type_src_zone = src_zone;
	}

	panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
	    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
	    zone_heap_name(src_zone), src_zone->z_name, zidx);
}

__abortlike
static void
zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
{
	panic("metadata list corruption through element %p detected in zone %s%s",
	    meta, zone_heap_name(zone), zone->z_name);
}

__abortlike
static void
zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
    const char *kind)
{
	panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
	    zone_heap_name(zone), zone->z_name, meta);
}

__abortlike
static void
zone_meta_double_free_panic(zone_t zone, zone_element_t ze, const char *caller)
{
	panic("%s: double free of %p to zone %s%s", caller,
	    (void *)zone_element_addr(zone, ze, zone_elem_size(zone)),
	    zone_heap_name(zone), zone->z_name);
}

__abortlike
static void
zone_accounting_panic(zone_t zone, const char *kind)
{
	panic("accounting mismatch (%s) for zone %s%s", kind,
	    zone_heap_name(zone), zone->z_name);
}

#define zone_counter_sub(z, stat, value) ({ \
	if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
	        zone_accounting_panic(z, #stat " wrap-around"); \
	} \
	(z)->stat; \
})

static inline void
zone_elems_free_add(zone_t z, uint32_t count)
{
	uint32_t n = (z->z_elems_free += count);
	if (z->z_elems_free_max < n) {
		z->z_elems_free_max = n;
	}
}

static inline void
zone_elems_free_sub(zone_t z, uint32_t count)
{
	uint32_t n = zone_counter_sub(z, z_elems_free, count);

	if (z->z_elems_free_min > n) {
		z->z_elems_free_min = n;
	}
}

static inline uint16_t
zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
    vm_offset_t esize)
{
	if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
	}
	return m->zm_alloc_size;
}

static inline uint16_t
zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
    vm_offset_t esize)
{
	if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
	}
	return m->zm_alloc_size;
}

__abortlike
static void
zone_nofail_panic(zone_t zone)
{
	panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
	    zone_heap_name(zone), zone->z_name);
}

__header_always_inline bool
zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
{
	const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
	struct mach_vm_range r = { addr_start, addr_end };

	return mach_vm_range_intersects(ro_r, &r);
}

#define from_range(r, addr, size) \
	__builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
	    mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
	    mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))

#define from_ro_map(addr, size) \
	from_range(&zone_info.zi_ro_range, addr, size)

#define from_zone_map(addr, size) \
	from_range(&zone_info.zi_map_range, addr, size)

__header_always_inline bool
zone_pva_is_null(zone_pva_t page)
{
	return page.packed_address == 0;
}

__header_always_inline bool
zone_pva_is_queue(zone_pva_t page)
{
	// actual kernel pages have the top bit set
	return (int32_t)page.packed_address > 0;
}

__header_always_inline bool
zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
{
	return pva1.packed_address == pva2.packed_address;
}

__header_always_inline zone_pva_t *
zone_pageq_base(void)
{
	extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");

	/*
	 * `-1` so that if the first __DATA variable is a page queue,
	 * it gets a non 0 index
	 */
	return data_seg_start - 1;
}

__header_always_inline void
zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
    struct zone_page_metadata *meta)
{
	zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];

	if (!zone_pva_is_equal(*queue_head, oldv)) {
		zone_page_metadata_list_corruption(z, meta);
	}
	*queue_head = meta->zm_page_next;
}

__header_always_inline zone_pva_t
zone_queue_encode(zone_pva_t *headp)
{
	return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
}

__header_always_inline zone_pva_t
zone_pva_from_addr(vm_address_t addr)
{
	// cannot use atop() because we want to maintain the sign bit
	return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
}

__header_always_inline zone_pva_t
zone_pva_from_element(zone_element_t ze)
{
	return zone_pva_from_addr(ze.ze_value);
}

__header_always_inline vm_address_t
zone_pva_to_addr(zone_pva_t page)
{
	// cause sign extension so that we end up with the right address
	return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
}
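
/*
 * Worked example (illustrative, 16K pages): for a kernel address like
 * 0xfffffff012340000, addr >> 14 truncated to 32 bits is 0xffc048d0,
 * which has its top bit set (so zone_pva_is_queue() is false), while
 * queue heads encode as small positive __DATA indexes. Sign-extending
 * 0xffc048d0 back to 64 bits and shifting left by 14 recovers the
 * original page address exactly.
 */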

__header_always_inline struct zone_page_metadata *
zone_pva_to_meta(zone_pva_t page)
{
	return &zone_info.zi_meta_base[page.packed_address];
}

__header_always_inline zone_pva_t
zone_pva_from_meta(struct zone_page_metadata *meta)
{
	return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
}

__header_always_inline struct zone_page_metadata *
zone_meta_from_addr(vm_offset_t addr)
{
	return zone_pva_to_meta(zone_pva_from_addr(addr));
}

__header_always_inline struct zone_page_metadata *
zone_meta_from_element(zone_element_t ze)
{
	return zone_pva_to_meta(zone_pva_from_element(ze));
}

__header_always_inline zone_id_t
zone_index_from_ptr(const void *ptr)
{
	return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
}

__header_always_inline vm_offset_t
zone_meta_to_addr(struct zone_page_metadata *meta)
{
	return ptoa((int32_t)(meta - zone_info.zi_meta_base));
}

__attribute__((overloadable))
__header_always_inline void
zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
{
	if (!zone_has_index(z, meta->zm_index)) {
		zone_page_metadata_index_confusion_panic(z, addr, meta);
	}
}

__attribute__((overloadable))
__header_always_inline void
zone_meta_validate(zone_t z, struct zone_page_metadata *meta, zone_element_t ze)
{
	zone_meta_validate(z, meta, zone_element_addr(z, ze, zone_elem_size(z)));
}

__attribute__((overloadable))
__header_always_inline void
zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
{
	zone_meta_validate(z, meta, zone_meta_to_addr(meta));
}

__header_always_inline void
zone_meta_queue_push(zone_t z, zone_pva_t *headp,
    struct zone_page_metadata *meta)
{
	zone_pva_t head = *headp;
	zone_pva_t queue_pva = zone_queue_encode(headp);
	struct zone_page_metadata *tmp;

	meta->zm_page_next = head;
	if (!zone_pva_is_null(head)) {
		tmp = zone_pva_to_meta(head);
		if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
			zone_page_metadata_list_corruption(z, meta);
		}
		tmp->zm_page_prev = zone_pva_from_meta(meta);
	}
	meta->zm_page_prev = queue_pva;
	*headp = zone_pva_from_meta(meta);
}

__header_always_inline struct zone_page_metadata *
zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
{
	zone_pva_t head = *headp;
	struct zone_page_metadata *meta = zone_pva_to_meta(head);
	struct zone_page_metadata *tmp;

	zone_meta_validate(z, meta);

	if (!zone_pva_is_null(meta->zm_page_next)) {
		tmp = zone_pva_to_meta(meta->zm_page_next);
		if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
			zone_page_metadata_list_corruption(z, meta);
		}
		tmp->zm_page_prev = meta->zm_page_prev;
	}
	*headp = meta->zm_page_next;

	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };

	return meta;
}

__header_always_inline void
zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
{
	zone_pva_t meta_pva = zone_pva_from_meta(meta);
	struct zone_page_metadata *tmp;

	if (!zone_pva_is_null(meta->zm_page_next)) {
		tmp = zone_pva_to_meta(meta->zm_page_next);
		if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
			zone_page_metadata_list_corruption(z, meta);
		}
		tmp->zm_page_prev = meta->zm_page_prev;
	}
	if (zone_pva_is_queue(meta->zm_page_prev)) {
		zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
	} else {
		tmp = zone_pva_to_meta(meta->zm_page_prev);
		if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
			zone_page_metadata_list_corruption(z, meta);
		}
		tmp->zm_page_next = meta->zm_page_next;
	}

	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
}

__header_always_inline void
zone_meta_requeue(zone_t z, zone_pva_t *headp,
    struct zone_page_metadata *meta)
{
	zone_meta_remqueue(z, meta);
	zone_meta_queue_push(z, headp, meta);
}

/* prevents a given metadata from ever reaching the z_pageq_empty queue */
static inline void
zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
{
	uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);

	assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
	if (new_size == ZM_ALLOC_SIZE_LOCK) {
		zone_meta_requeue(z, &z->z_pageq_partial, m);
		zone_counter_sub(z, z_wired_empty, len);
	}
}

/* allows a given metadata to reach the z_pageq_empty queue again */
static inline void
zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
{
	uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);

	assert(new_size % sizeof(vm_offset_t) == 0);
	if (new_size == 0) {
		zone_meta_requeue(z, &z->z_pageq_empty, m);
		z->z_wired_empty += len;
	}
}

/*
 * Routine to populate a page backing metadata in the zone_metadata_region.
 * Must be called without the zone lock held as it might potentially block.
 */
static void
zone_meta_populate(vm_offset_t base, vm_size_t size)
{
	struct zone_page_metadata *from = zone_meta_from_addr(base);
	struct zone_page_metadata *to = from + atop(size);
	vm_offset_t page_addr = trunc_page(from);

	for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
#if !KASAN
		/*
		 * This can race with another thread doing a populate on the same metadata
		 * page, where we see an updated pmap but unmapped KASan shadow, causing a
		 * fault in the shadow when we first access the metadata page. Avoid this
		 * by always synchronizing on the zone_metadata_region lock with KASan.
		 */
		if (pmap_find_phys(kernel_pmap, page_addr)) {
			continue;
		}
#endif

		for (;;) {
			kern_return_t ret = KERN_SUCCESS;

			/*
			 * All updates to the zone_metadata_region are done
			 * under the zone_metadata_region_lck
			 */
			zone_meta_lock();
			if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
				ret = kernel_memory_populate(page_addr,
				    PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
				    VM_KERN_MEMORY_OSFMK);
			}
			zone_meta_unlock();

			if (ret == KERN_SUCCESS) {
				break;
			}

			/*
			 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
			 * to bad system deadlocks, so if the allocation failed,
			 * we need to do the VM_PAGE_WAIT() outside of the lock.
			 */
			VM_PAGE_WAIT();
		}
	}
}

__abortlike
static void
zone_invalid_element_panic(zone_t zone, vm_offset_t addr, bool cache)
{
	struct zone_page_metadata *meta;
	vm_offset_t page, esize = zone_elem_size(zone);
	const char *from_cache = "";

	if (cache) {
		zone_element_t ze = { .ze_value = addr };
		addr = zone_element_addr(zone, ze, esize);
		from_cache = " (from cache)";

		if (zone_element_idx(ze) >= zone->z_chunk_elems) {
			panic("eidx %d for addr %p being freed to zone %s%s, is larger "
			    "than the number of elements per chunk (%d)",
			    (int)zone_element_idx(ze),
			    (void *)addr, zone_heap_name(zone), zone->z_name,
			    zone->z_chunk_elems);
		}
	}

	if (!from_zone_map(addr, esize)) {
		panic("addr %p being freed to zone %s%s%s, isn't from zone map",
		    (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
	}
	page = trunc_page(addr);
	meta = zone_meta_from_addr(addr);

	if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
		panic("metadata %p corresponding to addr %p being freed to "
		    "zone %s%s%s, is marked as secondary per cpu page",
		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
		    from_cache);
	}
	if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
		panic("metadata %p corresponding to addr %p being freed to "
		    "zone %s%s%s, has chunk len greater than max",
		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
		    from_cache);
	}

	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
		page -= ptoa(meta->zm_page_index);
	}

	if ((addr - page - zone_elem_offs(zone)) % esize) {
		panic("addr %p being freed to zone %s%s%s, isn't aligned to "
		    "zone element size", (void *)addr, zone_heap_name(zone),
		    zone->z_name, from_cache);
	}

	zone_invalid_element_addr_panic(zone, addr);
}

__header_always_inline
struct zone_page_metadata *
zone_element_validate(zone_t zone, zone_element_t ze)
{
	struct zone_page_metadata *meta;
	vm_offset_t page = zone_element_base(ze);

	if (!from_zone_map(page, 1)) {
		zone_invalid_element_panic(zone, ze.ze_value, true);
	}
	meta = zone_meta_from_addr(page);

	if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
		zone_invalid_element_panic(zone, ze.ze_value, true);
	}
	if (zone_element_idx(ze) >= zone->z_chunk_elems) {
		zone_invalid_element_panic(zone, ze.ze_value, true);
	}

	zone_meta_validate(zone, meta, ze);

	return meta;
}

__attribute__((always_inline))
static struct zone_page_metadata *
zone_element_resolve(zone_t zone, vm_offset_t addr, zone_element_t *ze)
{
	struct zone_page_metadata *meta;
	vm_offset_t offs, page, eidx;

	if (!from_zone_map(addr, 1)) {
		zone_invalid_element_panic(zone, addr, false);
	}
	page = trunc_page(addr);
	meta = zone_meta_from_addr(addr);
	zone_meta_validate(zone, meta, addr);

	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
		page -= ptoa(meta->zm_page_index);
		meta -= meta->zm_page_index;
	}

	offs = addr - page - zone_elem_offs(zone);
	eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
	if (eidx * zone_elem_size(zone) != offs) {
		zone_invalid_element_panic(zone, addr, false);
	}

	*ze = zone_element_encode(page, eidx);
	return meta;
}

#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
void *
zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
{
	vm_offset_t addr = (vm_offset_t)ptr;
	vm_offset_t end = addr + elem_size;
	vm_offset_t offs;

	/*
	 * 0-sized allocations in a KALLOC_MINSIZE bucket
	 * would be offset to the next allocation which is incorrect.
	 */
	req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);

	/*
	 * Given how chunks work, for a zone with PGZ guards on,
	 * there's a single element which ends precisely
	 * at the page boundary: the last one.
	 */
	if (req_size == elem_size ||
	    (end & PAGE_MASK) ||
	    !zone_meta_from_addr(addr)->zm_guarded) {
		return ptr;
	}

	offs = elem_size - req_size;
	zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;

	return (char *)addr + offs;
}
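
/*
 * Worked example (illustrative): a 160-byte request satisfied from a
 * 192-byte zone, whose element ends exactly at a guarded page boundary,
 * is shifted right by offs = 192 - 160 = 32 bytes. The returned buffer
 * then ends flush against the guard page, so the first out-of-bounds
 * byte faults instead of silently landing in the 32 bytes of slack.
 */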
#endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */

__abortlike
static void
zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
{
	struct zone_page_metadata *meta;
	vm_offset_t offs, size, page;
	zone_t zone;

	page = trunc_page(addr);
	meta = zone_meta_from_addr(addr);
	zone = &zone_array[meta->zm_index];

	if (zone->z_percpu) {
		panic("zone bound checks: address %p is a per-cpu allocation",
		    (void *)addr);
	}

	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
		page -= ptoa(meta->zm_page_index);
		meta -= meta->zm_page_index;
	}

	size = zone_elem_size(zone);
	offs = Z_FAST_MOD(addr - page + size - zone_elem_offs(zone),
	    zone->z_quo_magic, size);
	panic("zone bound checks: buffer %p of length %zd overflows "
	    "object %p of size %zd in zone %p[%s%s]",
	    (void *)addr, len, (void *)(addr - offs), size,
	    zone, zone_heap_name(zone), zone_name(zone));
}

void
zone_element_bounds_check(vm_address_t addr, vm_size_t len)
{
	struct zone_page_metadata *meta;
	vm_offset_t offs, page, size;
	zone_t zone;

	if (!from_zone_map(addr, 1)) {
		return;
	}

#if CONFIG_PROB_GZALLOC
	if (__improbable(pgz_owned(addr))) {
		meta = zone_meta_from_addr(addr);
		addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
	}
#endif /* CONFIG_PROB_GZALLOC */
	page = trunc_page(addr);
	meta = zone_meta_from_addr(addr);
	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
		page -= ptoa(meta->zm_page_index);
	}
	zone = zone_by_id(meta->zm_index);

	if (zone->z_percpu) {
		zone_element_bounds_check_panic(addr, len);
	}

	if (zone->z_permanent) {
		/* We don't know bounds for those */
		return;
	}

	size = zone_elem_size(zone);
	offs = Z_FAST_MOD(addr - page + size - zone_elem_offs(zone),
	    zone->z_quo_magic, size);
	if (len > size - offs) {
		zone_element_bounds_check_panic(addr, len);
	}
}

/*
 * Routine to get the size of a zone allocated address.
 * If the address doesn't belong to the zone maps, returns 0.
 */
vm_size_t
zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
{
	vm_address_t addr = (vm_address_t)elem;
	struct zone_page_metadata *meta;
	vm_size_t esize, offs, end;
	zone_t zone;

	if (from_zone_map(addr, sizeof(void *))) {
		meta = zone_meta_from_addr(addr);
		zone = zone_by_id(meta->zm_index);
		esize = zone_elem_size(zone);
		end = addr + esize;
		offs = 0;

#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
		/*
		 * If the chunk uses guards, and (addr + esize) either
		 * crosses a page boundary or sits exactly at one,
		 * we need to look harder.
		 */
		if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
			/*
			 * Because in the vast majority of cases the element
			 * size is sub-page and meta[1] must already be
			 * faulted in, we can quickly peek at whether it's
			 * a guard.
			 *
			 * For elements larger than a page, finding the guard
			 * page requires a little more effort.
			 */
			if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
				offs = meta[1].zm_oob_offs;
				if (clear_oob) {
					meta[1].zm_oob_offs = 0;
				}
			} else if (esize > PAGE_SIZE) {
				struct zone_page_metadata *gmeta;

				if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
					gmeta = meta + meta->zm_subchunk_len;
				} else {
					gmeta = meta + zone->z_chunk_pages;
				}
				assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);

				if (end >= zone_meta_to_addr(gmeta)) {
					offs = gmeta->zm_oob_offs;
					if (clear_oob) {
						gmeta->zm_oob_offs = 0;
					}
				}
			}
		}
#else
#pragma unused(end, clear_oob)
#endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */

		if (oob_offs) {
			*oob_offs = offs;
		}
		if (z) {
			*z = zone;
		}
		return esize;
	}

	if (oob_offs) {
		*oob_offs = 0;
	}

	return 0;
}

zone_id_t
zone_id_for_element(void *addr, vm_size_t esize)
{
	zone_id_t zid = ZONE_ID_INVALID;
	if (from_zone_map(addr, esize)) {
		zid = zone_index_from_ptr(addr);
		__builtin_assume(zid != ZONE_ID_INVALID);
	}
	return zid;
}

/* This function just formats the reason for the panics by redoing the checks */
__abortlike
static void
zone_require_panic(zone_t zone, void *addr)
{
	uint32_t zindex;
	zone_t other;

	if (!from_zone_map(addr, zone_elem_size(zone))) {
		panic("zone_require failed: address not in a zone (addr: %p)", addr);
	}

	zindex = zone_index_from_ptr(addr);
	other = &zone_array[zindex];
	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
		panic("zone_require failed: invalid zone index %d "
		    "(addr: %p, expected: %s%s)", zindex,
		    addr, zone_heap_name(zone), zone->z_name);
	} else {
		panic("zone_require failed: address in unexpected zone id %d (%s%s) "
		    "(addr: %p, expected: %s%s)",
		    zindex, zone_heap_name(other), other->z_name,
		    addr, zone_heap_name(zone), zone->z_name);
	}
}

__abortlike
static void
zone_id_require_panic(zone_id_t zid, void *addr)
{
	zone_require_panic(&zone_array[zid], addr);
}

/*
 * Routines to panic if a pointer is not mapped to an expected zone.
 * This can be used as a means of pinning an object to the zone it is expected
 * to be a part of. Causes a panic if the address does not belong to any
 * specified zone, does not belong to any zone, has been freed and therefore
 * unmapped from the zone, or the pointer contains an uninitialized value that
 * does not belong to any zone.
 */
void
zone_require(zone_t zone, void *addr)
{
	vm_size_t esize = zone_elem_size(zone);

	if (from_zone_map(addr, esize) &&
	    zone_has_index(zone, zone_index_from_ptr(addr))) {
		return;
	}
	zone_require_panic(zone, addr);
}

void
zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
{
	if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
		return;
	}
	zone_id_require_panic(zid, addr);
}
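
/*
 * Example (illustrative): a subsystem can pin a pointer to its expected
 * zone before trusting it, e.g.:
 *
 *	zone_require(vm_object_zone, object);
 *
 * which returns quietly if `object` lives in vm_object_zone and panics
 * otherwise.
 */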

bool
zone_owns(zone_t zone, void *addr)
{
	vm_size_t esize = zone_elem_size(zone);

	if (from_zone_map(addr, esize)) {
		return zone_has_index(zone, zone_index_from_ptr(addr));
	}
	return false;
}

static inline struct mach_vm_range
zone_kmem_suballoc(
	mach_vm_offset_t        addr,
	vm_size_t               size,
	int                     flags,
	vm_tag_t                tag,
	vm_map_t               *new_map)
{
	struct mach_vm_range r;

	*new_map = kmem_suballoc(kernel_map, &addr, size,
	    VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
	    flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;

	r.min_address = addr;
	r.max_address = addr + size;
	return r;
}

#endif /* !ZALLOC_TEST */
#pragma mark Zone bits allocator

1617 /*!
1618 * @defgroup Zone Bitmap allocator
1619 * @{
1620 *
1621 * @brief
1622 * Functions implementing the zone bitmap allocator
1623 *
1624 * @discussion
1625 * The zone allocator maintains which elements are allocated or free in bitmaps.
1626 *
1627 * When the number of elements per page is smaller than 32, it is stored inline
1628 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1629 * and @c zm_bitmap used for storage).
1630 *
1631 * When the number of elements is larger, then a bitmap is allocated from
1632 * a buddy allocator (impelemented under the @c zba_* namespace). Pointers
1633 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1634 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1635 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1636 * cannot be larger than 1024 bytes (8192 bits).
1637 *
1638 * This buddy allocator can actually accommodate allocations as large
1639 * as 8k on 16k systems and 2k on 4k systems.
1640 *
1641 * Note: @c zba_* functions are implementation details not meant to be used
1642 * outside of the implementation of the allocator itself. Interfaces to the rest of
1643 * the zone allocator are documented and not @c zba_* prefixed.
1644 */
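/*
 * Editorial illustration (not part of the original source): decoding a
 * packed bitmap reference with the zba_bits_ref_order() and
 * zba_bits_ref_ptr() helpers defined further down.
 *
 *   bref  = 0x52 (0b1010010)
 *   order = bref & 0x7 = 2   -> 1 << 2 = 4 granules = 32 bytes = 256 bits
 *   slot  = bref >> 3  = 10  -> bitmap lives at zba_slot_base() + 10
 *
 * With ZBA_MAX_ALLOC_ORDER == 7, the largest encodable bitmap is
 * 1 << 7 = 128 granules, i.e. 1024 bytes (8192 bits), matching the limit
 * stated above.
 */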
1645
1646 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1647 #define ZBA_GRANULE sizeof(uint64_t)
1648 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1649 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1650 #define ZBA_MAX_ALLOC_ORDER 7
1651 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1652 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1653 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
1654
1655 struct zone_bits_chain {
1656 uint32_t zbc_next;
1657 uint32_t zbc_prev;
1658 } __attribute__((aligned(ZBA_GRANULE)));
1659
1660 struct zone_bits_head {
1661 uint32_t zbh_next;
1662 uint32_t zbh_unused;
1663 } __attribute__((aligned(ZBA_GRANULE)));
1664
1665 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1666 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1667
1668 struct zone_bits_allocator_meta {
1669 uint32_t zbam_chunks;
1670 uint32_t __zbam_padding;
1671 struct zone_bits_head zbam_lists[ZBA_MAX_ORDER + 1];
1672 };
1673
1674 struct zone_bits_allocator_header {
1675 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1676 };
1677
1678 #if ZALLOC_TEST
1679 static struct zalloc_bits_allocator_test_setup {
1680 vm_offset_t zbats_base;
1681 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1682 } zba_test_info;
1683
1684 static struct zone_bits_allocator_header *
1685 zba_base_header(void)
1686 {
1687 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1688 }
1689
1690 static void
1691 zba_populate(uint32_t n)
1692 {
1693 vm_address_t base = zba_test_info.zbats_base;
1694 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1695 }
1696 #else
1697 __startup_data
1698 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE]
1699 __attribute__((aligned(ZBA_CHUNK_SIZE)));
1700 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1701
1702 static struct zone_bits_allocator_header *
1703 zba_base_header(void)
1704 {
1705 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1706 }
1707
1708 static void
1709 zba_lock(void)
1710 {
1711 lck_mtx_lock(&zba_mtx);
1712 }
1713
1714 static void
1715 zba_unlock(void)
1716 {
1717 lck_mtx_unlock(&zba_mtx);
1718 }
1719
1720 static void
1721 zba_populate(uint32_t n)
1722 {
1723 vm_size_t size = ZBA_CHUNK_SIZE;
1724 vm_address_t addr;
1725
1726 addr = zone_info.zi_bits_range.min_address + n * size;
1727 if (addr >= zone_info.zi_bits_range.max_address) {
1728 uint64_t zsize = 0;
1729 zone_t z = zone_find_largest(&zsize);
1730 panic("zba_populate: out of bitmap space, "
1731 "likely due to memory leak in zone [%s%s] "
1732 "(%u%c, %d elements allocated)",
1733 zone_heap_name(z), zone_name(z),
1734 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1735 zone_count_allocated(z));
1736 }
1737
1738 for (;;) {
1739 kern_return_t kr = KERN_SUCCESS;
1740
1741 if (0 == pmap_find_phys(kernel_pmap, addr)) {
1742 kr = kernel_memory_populate(addr, size,
1743 KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1744 VM_KERN_MEMORY_OSFMK);
1745 }
1746
1747 if (kr == KERN_SUCCESS) {
1748 return;
1749 }
1750
1751 zba_unlock();
1752 VM_PAGE_WAIT();
1753 zba_lock();
1754 }
1755 }
1756 #endif
1757
1758 __pure2
1759 static struct zone_bits_allocator_meta *
1760 zba_meta(void)
1761 {
1762 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1763 }
1764
1765 __pure2
1766 static uint64_t *
1767 zba_slot_base(void)
1768 {
1769 return (uint64_t *)zba_base_header();
1770 }
1771
1772 __pure2
1773 static vm_address_t
1774 zba_page_addr(uint32_t n)
1775 {
1776 return (vm_address_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1777 }
1778
1779 __pure2
1780 static struct zone_bits_head *
1781 zba_head(uint32_t order)
1782 {
1783 return &zba_meta()->zbam_lists[order];
1784 }
1785
1786 __pure2
1787 static uint32_t
1788 zba_head_index(uint32_t order)
1789 {
1790 uint32_t hdr_size = sizeof(struct zone_bits_allocator_header) +
1791 offsetof(struct zone_bits_allocator_meta, zbam_lists);
1792 return (hdr_size / ZBA_GRANULE) + order;
1793 }
1794
1795 __pure2
1796 static struct zone_bits_chain *
1797 zba_chain_for_index(uint32_t index)
1798 {
1799 return (struct zone_bits_chain *)(zba_slot_base() + index);
1800 }
1801
1802 __pure2
1803 static uint32_t
1804 zba_chain_to_index(const struct zone_bits_chain *zbc)
1805 {
1806 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1807 }
1808
1809 __abortlike
1810 static void
1811 zba_head_corruption_panic(uint32_t order)
1812 {
1813 panic("zone bits allocator head[%d:%p] is corrupt", order,
1814 zba_head(order));
1815 }
1816
1817 __abortlike
1818 static void
1819 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1820 {
1821 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1822 }
1823
1824 static void
1825 zba_push_block(struct zone_bits_chain *zbc, uint32_t order)
1826 {
1827 struct zone_bits_head *hd = zba_head(order);
1828 uint32_t hd_index = zba_head_index(order);
1829 uint32_t index = zba_chain_to_index(zbc);
1830 struct zone_bits_chain *next;
1831
1832 if (hd->zbh_next) {
1833 next = zba_chain_for_index(hd->zbh_next);
1834 if (next->zbc_prev != hd_index) {
1835 zba_head_corruption_panic(order);
1836 }
1837 next->zbc_prev = index;
1838 }
1839 zbc->zbc_next = hd->zbh_next;
1840 zbc->zbc_prev = hd_index;
1841 hd->zbh_next = index;
1842 }
1843
1844 static void
1845 zba_remove_block(struct zone_bits_chain *zbc)
1846 {
1847 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1848 uint32_t index = zba_chain_to_index(zbc);
1849
1850 if (prev->zbc_next != index) {
1851 zba_chain_corruption_panic(prev, zbc);
1852 }
1853 if ((prev->zbc_next = zbc->zbc_next)) {
1854 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1855 if (next->zbc_prev != index) {
1856 zba_chain_corruption_panic(zbc, next);
1857 }
1858 next->zbc_prev = zbc->zbc_prev;
1859 }
1860 }
1861
1862 static vm_address_t
1863 zba_try_pop_block(uint32_t order)
1864 {
1865 struct zone_bits_head *hd = zba_head(order);
1866 struct zone_bits_chain *zbc;
1867
1868 if (hd->zbh_next == 0) {
1869 return 0;
1870 }
1871
1872 zbc = zba_chain_for_index(hd->zbh_next);
1873 zba_remove_block(zbc);
1874 return (vm_address_t)zbc;
1875 }
1876
1877 static struct zone_bits_allocator_header *
1878 zba_header(vm_offset_t addr)
1879 {
1880 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1881 return (struct zone_bits_allocator_header *)addr;
1882 }
1883
1884 static size_t
1885 zba_node_parent(size_t node)
1886 {
1887 return (node - 1) / 2;
1888 }
1889
1890 static size_t
1891 zba_node_left_child(size_t node)
1892 {
1893 return node * 2 + 1;
1894 }
1895
1896 static size_t
1897 zba_node_buddy(size_t node)
1898 {
1899 return ((node - 1) ^ 1) + 1;
1900 }
1901
1902 static size_t
1903 zba_node(vm_offset_t addr, uint32_t order)
1904 {
1905 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1906 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1907 }
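/*
 * Editorial worked example, assuming 16k chunks (ZBA_MAX_ORDER == 10,
 * 2048 granules per chunk): the buddy tree is laid out like a binary
 * heap, with node 0 acting as the whole-chunk root that is never handed
 * out on its own.
 *
 *   order 10 (1024-granule) blocks: node = (offs >> 10) + 1, i.e. 1 or 2
 *   order 0  (1-granule)    blocks: node = offs + 2047
 *
 * The zba_node_parent(), zba_node_left_child() and zba_node_buddy()
 * helpers above navigate that layout with the usual (node - 1) / 2,
 * 2 * node + 1 and sibling rules.
 */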
1908
1909 static struct zone_bits_chain *
1910 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1911 {
1912 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1913 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1914 }
1915
1916 static void
1917 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1918 {
1919 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1920 }
1921
1922 static bool
1923 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1924 {
1925 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1926 }
1927
1928 static void
1929 zba_free(vm_offset_t addr, uint32_t order)
1930 {
1931 struct zone_bits_allocator_header *zbah = zba_header(addr);
1932 struct zone_bits_chain *zbc;
1933 size_t node = zba_node(addr, order);
1934
1935 while (node) {
1936 size_t parent = zba_node_parent(node);
1937
1938 zba_node_flip_split(zbah, parent);
1939 if (zba_node_is_split(zbah, parent)) {
1940 break;
1941 }
1942
1943 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1944 zba_remove_block(zbc);
1945 order++;
1946 node = parent;
1947 }
1948
1949 zba_push_block(zba_chain_for_node(zbah, node, order), order);
1950 }
1951
1952 static vm_size_t
1953 zba_chunk_header_size(uint32_t n)
1954 {
1955 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1956 if (n == 0) {
1957 hdr_size += sizeof(struct zone_bits_allocator_meta);
1958 }
1959 return hdr_size;
1960 }
1961
1962 static void
1963 zba_init_chunk(uint32_t n)
1964 {
1965 vm_size_t hdr_size = zba_chunk_header_size(n);
1966 vm_offset_t page = zba_page_addr(n);
1967 struct zone_bits_allocator_header *zbah = zba_header(page);
1968 vm_size_t size = ZBA_CHUNK_SIZE;
1969 size_t node;
1970
1971 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1972 if (size < hdr_size + (ZBA_GRANULE << o)) {
1973 continue;
1974 }
1975 size -= ZBA_GRANULE << o;
1976 node = zba_node(page + size, o);
1977 zba_node_flip_split(zbah, zba_node_parent(node));
1978 zba_push_block(zba_chain_for_node(zbah, node, o), o);
1979 }
1980
1981 zba_meta()->zbam_chunks = n + 1;
1982 }
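/*
 * Editorial note: the loop above carves a fresh chunk from its top
 * downward, releasing at most one free block of each order that still
 * fits above the chunk header (which is larger for chunk 0, where the
 * allocator meta lives). The parent split bits are flipped so that
 * zba_free() can later coalesce buddies correctly.
 */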
1983
1984 __attribute__((noinline))
1985 static void
1986 zba_grow(void)
1987 {
1988 uint32_t chunk = zba_meta()->zbam_chunks;
1989
1990 zba_populate(chunk);
1991 if (zba_meta()->zbam_chunks == chunk) {
1992 zba_init_chunk(chunk);
1993 }
1994 }
1995
1996 static vm_offset_t
1997 zba_alloc(uint32_t order)
1998 {
1999 struct zone_bits_allocator_header *zbah;
2000 uint32_t cur = order;
2001 vm_address_t addr;
2002 size_t node;
2003
2004 while ((addr = zba_try_pop_block(cur)) == 0) {
2005 if (cur++ >= ZBA_MAX_ORDER) {
2006 zba_grow();
2007 cur = order;
2008 }
2009 }
2010
2011 zbah = zba_header(addr);
2012 node = zba_node(addr, cur);
2013 zba_node_flip_split(zbah, zba_node_parent(node));
2014 while (cur > order) {
2015 cur--;
2016 zba_node_flip_split(zbah, node);
2017 node = zba_node_left_child(node);
2018 zba_push_block(zba_chain_for_node(zbah, node + 1, cur), cur);
2019 }
2020
2021 return addr;
2022 }
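/*
 * Editorial worked example: a request for order 1 when only an order 3
 * block is free pops that 8-granule block, then splits it twice on the
 * way down. Each iteration flips the split bit, descends into the left
 * child, and pushes the right buddy (node + 1) onto the freelist of the
 * next lower order; the caller ends up with the leftmost order 1 block
 * while one order 2 and one order 1 buddy are returned to the freelists.
 */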
2023
2024 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
2025 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
2026 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
2027 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
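/*
 * Editorial illustration of the mask macros, for n == 5 and uint32_t:
 *
 *   zba_map_bit(uint32_t, 5)     == 0x00000020  (bit 5)
 *   zba_map_mask_lt(uint32_t, 5) == 0x0000001f  (bits 0..4)
 *   zba_map_mask_ge(uint32_t, 5) == 0xffffffe0  (bits 5..31)
 */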
2028
2029 #if !ZALLOC_TEST
2030 static uint32_t
2031 zba_bits_ref_order(uint32_t bref)
2032 {
2033 return bref & 0x7;
2034 }
2035
2036 static bitmap_t *
2037 zba_bits_ref_ptr(uint32_t bref)
2038 {
2039 return zba_slot_base() + (bref >> 3);
2040 }
2041
2042 static vm_offset_t
2043 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
2044 zalloc_flags_t flags, vm_offset_t eidx)
2045 {
2046 size_t i = eidx / 32;
2047 uint32_t map;
2048
2049 if (eidx % 32) {
2050 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2051 if (map) {
2052 eidx = __builtin_ctz(map);
2053 meta[i].zm_bitmap ^= 1u << eidx;
2054 return i * 32 + eidx;
2055 }
2056 i++;
2057 }
2058
2059 uint32_t chunk_len = meta->zm_chunk_len;
2060 if (flags & Z_PCPU) {
2061 chunk_len = zpercpu_count();
2062 }
2063 for (int j = 0; j < chunk_len; j++, i++) {
2064 if (i >= chunk_len) {
2065 i = 0;
2066 }
2067 if (__probable(map = meta[i].zm_bitmap)) {
2068 meta[i].zm_bitmap &= map - 1;
2069 return i * 32 + __builtin_ctz(map);
2070 }
2071 }
2072
2073 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2074 }
2075
2076 static vm_offset_t
2077 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2078 vm_offset_t eidx)
2079 {
2080 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2081 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2082 size_t i = eidx / 64;
2083 uint64_t map;
2084
2085 if (eidx % 64) {
2086 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2087 if (map) {
2088 eidx = __builtin_ctzll(map);
2089 bits[i] ^= 1ull << eidx;
2090 return i * 64 + eidx;
2091 }
2092 i++;
2093 }
2094
2095 for (int j = 0; j < bits_size; i++, j++) {
2096 if (i >= bits_size) {
2097 i = 0;
2098 }
2099 if (__probable(map = bits[i])) {
2100 bits[i] &= map - 1;
2101 return i * 64 + __builtin_ctzll(map);
2102 }
2103 }
2104
2105 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2106 }
2107
2108 /*!
2109 * @function zone_meta_find_and_clear_bit
2110 *
2111 * @brief
2112 * The core of the bitmap allocator: find a bit set in the bitmaps.
2113 *
2114 * @discussion
2115 * This method will round robin through available allocations,
2116 * with a per-core memory of the last element index allocated.
2117 *
2118 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2119 * double-free bugs way too practical.
2120 *
2121 * @param zone The zone we're allocating from.
2122 * @param meta The main metadata for the chunk being allocated from.
2123 * @param flags the alloc flags (for @c Z_PCPU).
2124 */
2125 static vm_offset_t
2126 zone_meta_find_and_clear_bit(zone_t zone, struct zone_page_metadata *meta,
2127 zalloc_flags_t flags)
2128 {
2129 zone_stats_t zs = zpercpu_get(zone->z_stats);
2130 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2131
2132 if (meta->zm_inline_bitmap) {
2133 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2134 } else {
2135 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2136 }
2137 zs->zs_alloc_rr = (uint16_t)eidx;
2138 return eidx;
2139 }
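/*
 * Editorial example of the round-robin behavior: if the per-CPU
 * zs_alloc_rr last recorded element index 13, the next scan starts at
 * bit 14 and wraps around, so an element just freed at index 13 is not
 * immediately handed back out. This is what defeats the fully LIFO
 * reuse pattern mentioned above.
 */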
2140
2141 /*!
2142 * @function zone_meta_bits_init
2143 *
2144 * @brief
2145 * Initializes the zm_bitmap field(s) for a newly assigned chunk.
2146 *
2147 * @param meta The main metadata for the initialized chunk.
2148 * @param count The number of elements the chunk can hold
2149 * (which might be partial for partially populated chunks).
2150 * @param nbits The maximum number of bits that will be used.
2151 */
2152 static void
2153 zone_meta_bits_init(struct zone_page_metadata *meta,
2154 uint32_t count, uint32_t nbits)
2155 {
2156 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2157 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2158
2159 if (meta->zm_inline_bitmap) {
2160 /*
2161 * We're called with the metadata zm_bitmap fields already
2162 * zeroed out.
2163 */
2164 for (size_t i = 0; 32 * i < count; i++) {
2165 if (32 * i + 32 <= count) {
2166 meta[i].zm_bitmap = ~0u;
2167 } else {
2168 meta[i].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2169 }
2170 }
2171 } else {
2172 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2173 uint64_t *bits;
2174
2175 assert(order <= ZBA_MAX_ALLOC_ORDER);
2176 assert(count <= ZBA_GRANULE_BITS << order);
2177
2178 zba_lock();
2179 bits = (uint64_t *)zba_alloc(order);
2180 zba_unlock();
2181
2182 for (size_t i = 0; i < 1u << order; i++) {
2183 if (64 * i + 64 <= count) {
2184 bits[i] = ~0ull;
2185 } else if (64 * i < count) {
2186 bits[i] = zba_map_mask_lt(uint64_t, count);
2187 } else {
2188 bits[i] = 0ull;
2189 }
2190 }
2191
2192 meta->zm_bitmap = (uint32_t)((vm_offset_t)bits -
2193 (vm_offset_t)zba_slot_base()) + order;
2194 }
2195 }
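/*
 * Editorial worked example for the out-of-line case, assuming a fully
 * populated chunk with count == nbits == 200:
 *
 *   order      = flsll((200 - 1) / 64) = flsll(3) = 2 -> 4 granules, 256 bits
 *   bits[0..2] = ~0ull                                  (elements 0..191 free)
 *   bits[3]    = zba_map_mask_lt(uint64_t, 200) = 0xff  (elements 192..199 free)
 */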
2196
2197 /*!
2198 * @function zone_meta_bits_merge
2199 *
2200 * @brief
2201 * Adds elements <code>[start, end)</code> to a chunk being extended.
2202 *
2203 * @param meta The main metadata for the extended chunk.
2204 * @param start The index of the first element to add to the chunk.
2205 * @param end The index of the last (exclusive) element to add.
2206 */
2207 static void
2208 zone_meta_bits_merge(struct zone_page_metadata *meta,
2209 uint32_t start, uint32_t end)
2210 {
2211 if (meta->zm_inline_bitmap) {
2212 while (start < end) {
2213 size_t s_i = start / 32;
2214 size_t s_e = end / 32;
2215
2216 if (s_i == s_e) {
2217 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2218 zba_map_mask_ge(uint32_t, start);
2219 break;
2220 }
2221
2222 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2223 start += 32 - (start % 32);
2224 }
2225 } else {
2226 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2227
2228 while (start < end) {
2229 size_t s_i = start / 64;
2230 size_t s_e = end / 64;
2231
2232 if (s_i == s_e) {
2233 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2234 zba_map_mask_ge(uint64_t, start);
2235 break;
2236 }
2237 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2238 start += 64 - (start % 64);
2239 }
2240 }
2241 }
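/*
 * Editorial worked example (inline case): merging [30, 40) touches two
 * 32-bit words. The first pass sets zba_map_mask_ge(uint32_t, 30)
 * (bits 30..31) in meta[0].zm_bitmap and advances start to 32; the
 * second pass has s_i == s_e and sets bits 0..7 of meta[1].zm_bitmap,
 * covering elements 32..39.
 */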
2242
2243 /*!
2244 * @function zone_bits_free
2245 *
2246 * @brief
2247 * Frees a bitmap to the zone bitmap allocator.
2248 *
2249 * @param bref
2250 * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2251 */
2252 static void
2253 zone_bits_free(uint32_t bref)
2254 {
2255 zba_lock();
2256 zba_free((vm_offset_t)zba_bits_ref_ptr(bref), zba_bits_ref_order(bref));
2257 zba_unlock();
2258 }
2259
2260 /*!
2261 * @function zone_meta_is_free
2262 *
2263 * @brief
2264 * Returns whether a given element appears free.
2265 */
2266 static bool
2267 zone_meta_is_free(struct zone_page_metadata *meta, zone_element_t ze)
2268 {
2269 vm_offset_t eidx = zone_element_idx(ze);
2270 if (meta->zm_inline_bitmap) {
2271 uint32_t bit = zba_map_bit(uint32_t, eidx);
2272 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2273 } else {
2274 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2275 uint64_t bit = zba_map_bit(uint64_t, eidx);
2276 return bits[zba_map_index(uint64_t, eidx)] & bit;
2277 }
2278 }
2279
2280 /*!
2281 * @function zone_meta_mark_free
2282 *
2283 * @brief
2284 * Marks an element as free and returns whether it was marked as used.
2285 */
2286 static bool
2287 zone_meta_mark_free(struct zone_page_metadata *meta, zone_element_t ze)
2288 {
2289 vm_offset_t eidx = zone_element_idx(ze);
2290
2291 if (meta->zm_inline_bitmap) {
2292 uint32_t bit = zba_map_bit(uint32_t, eidx);
2293 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2294 return false;
2295 }
2296 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2297 } else {
2298 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2299 uint64_t bit = zba_map_bit(uint64_t, eidx);
2300 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2301 return false;
2302 }
2303 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2304 }
2305 return true;
2306 }
2307
2308 /*!
2309 * @function zone_meta_mark_used
2310 *
2311 * @brief
2312 * Marks an element as used and returns whether it was marked as free
2313 */
2314 static bool
2315 zone_meta_mark_used(struct zone_page_metadata *meta, zone_element_t ze)
2316 {
2317 vm_offset_t eidx = zone_element_idx(ze);
2318
2319 if (meta->zm_inline_bitmap) {
2320 uint32_t bit = zba_map_bit(uint32_t, eidx);
2321 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2322 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2323 return true;
2324 }
2325 } else {
2326 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2327 uint64_t bit = zba_map_bit(uint64_t, eidx);
2328 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2329 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2330 return true;
2331 }
2332 }
2333 return false;
2334 }
2335
2336 #endif /* !ZALLOC_TEST */
2337 /*! @} */
2338 #pragma mark ZTAGS
2339 #if !ZALLOC_TEST
2340 #if VM_TAG_SIZECLASSES
2341 /*
2342 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
2343 * zones only.
2344 *
2345 * There are 3 kinds of tags that can be used:
2346 * - pre-registered VM_KERN_MEMORY_*
2347 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
2348 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
2349 *
2350 * The VM tracks the statistics in lazily allocated structures.
2351 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
2352 *
2353 * If for some reason the requested tag cannot be accounted for,
2354 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
2355 *
2356 * Each allocated element also remembers the tag it was assigned,
2357 * in its ztSlot() which lets zalloc/zfree update statistics correctly.
2358 */
2359
2360 // for zones with tagging enabled:
2361
2362 // calculate a pointer to the tag base entry,
2363 // holding either a uint32_t with the first tag offset for a page in the zone map,
2364 // or two uint16_t tags if the page can only hold one or two elements
2365
2366 #define ZTAGBASE(zone, element) \
2367 (&((uint32_t *)zone_tagbase_range.min_address)[atop((element) - \
2368 zone_info.zi_map_range.min_address)])
2369
2370 static struct mach_vm_range zone_tagbase_range;
2371 static vm_map_t zone_tagbase_map;
2372 static vm_map_size_t zone_tagbase_map_size;
2373
2374 static struct mach_vm_range zone_tags_range;
2375 static vm_map_t zone_tags_map;
2376 static vm_map_size_t zone_tags_map_size;
2377
2378 // simple heap allocator for allocating the tags for new memory
2379
2380 static LCK_MTX_DECLARE(ztLock, &zone_locks_grp); /* heap lock */
2381
2382 /*
2383 * Array of all sizeclasses used by kalloc variants so that we can
2384 * have accounting per size class for each kalloc callsite
2385 */
2386 uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
2387
2388 enum{
2389 ztFreeIndexCount = 8,
2390 ztFreeIndexMax = (ztFreeIndexCount - 1),
2391 ztTagsPerBlock = 4
2392 };
2393
2394 struct ztBlock {
2395 #if __LITTLE_ENDIAN__
2396 uint64_t free:1,
2397 next:21,
2398 prev:21,
2399 size:21;
2400 #else
2401 // ztBlock needs free bit least significant
2402 #error !__LITTLE_ENDIAN__
2403 #endif
2404 };
2405 typedef struct ztBlock ztBlock;
2406
2407 static ztBlock * ztBlocks;
2408 static uint32_t ztBlocksCount;
2409 static uint32_t ztBlocksFree;
2410
2411 __startup_func
2412 void
2413 __zone_site_register(vm_allocation_site_t *site)
2414 {
2415 if (zone_tagging_on) {
2416 vm_tag_alloc(site);
2417 }
2418 }
2419
2420 static uint32_t
2421 ztLog2up(uint32_t size)
2422 {
2423 if (1 == size) {
2424 size = 0;
2425 } else {
2426 size = 32 - __builtin_clz(size - 1);
2427 }
2428 return size;
2429 }
2430
2431 // pointer to the tag for an element
2432 static vm_tag_t *
2433 ztSlot(zone_t zone, vm_offset_t element)
2434 {
2435 vm_tag_t *result;
2436 if (zone->z_tags_inline) {
2437 result = (vm_tag_t *)ZTAGBASE(zone, element);
2438 if ((PAGE_MASK & element) >= zone_elem_size(zone)) {
2439 result++;
2440 }
2441 } else {
2442 result = &((vm_tag_t *)zone_tags_range.min_address)[ZTAGBASE(zone,
2443 element)[0] + (element & PAGE_MASK) / zone_elem_size(zone)];
2444 }
2445 return result;
2446 }
2447
2448 static uint32_t
2449 ztLog2down(uint32_t size)
2450 {
2451 size = 31 - __builtin_clz(size);
2452 return size;
2453 }
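/*
 * Editorial examples for the two log2 helpers above: ztLog2up() is a
 * ceiling log2 with ztLog2up(1) == 0, while ztLog2down() is a floor log2.
 *
 *   ztLog2up(5) == 3      ztLog2down(5) == 2
 *   ztLog2up(8) == 3      ztLog2down(8) == 3
 */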
2454
2455 static void
2456 ztFault(const void * address, size_t size, uint32_t flags)
2457 {
2458 vm_map_offset_t addr = (vm_map_offset_t) address;
2459 vm_map_offset_t page, end;
2460
2461 page = trunc_page(addr);
2462 end = round_page(addr + size);
2463
2464 for (; page < end; page += page_size) {
2465 if (!pmap_find_phys(kernel_pmap, page)) {
2466 kernel_memory_populate(page, PAGE_SIZE,
2467 KMA_NOFAIL | KMA_KOBJECT | flags,
2468 VM_KERN_MEMORY_DIAG);
2469 }
2470 }
2471 }
2472
2473 static boolean_t
2474 ztPresent(const void * address, size_t size)
2475 {
2476 vm_map_offset_t addr = (vm_map_offset_t) address;
2477 vm_map_offset_t page, end;
2478 boolean_t result;
2479
2480 page = trunc_page(addr);
2481 end = round_page(addr + size);
2482 for (result = TRUE; (page < end); page += page_size) {
2483 result = pmap_find_phys(kernel_pmap, page);
2484 if (!result) {
2485 break;
2486 }
2487 }
2488 return result;
2489 }
2490
2491
2492 void __unused
2493 ztDump(boolean_t sanity);
2494 void __unused
2495 ztDump(boolean_t sanity)
2496 {
2497 uint32_t q, cq, p;
2498
2499 for (q = 0; q <= ztFreeIndexMax; q++) {
2500 p = q;
2501 do{
2502 if (sanity) {
2503 cq = ztLog2down(ztBlocks[p].size);
2504 if (cq > ztFreeIndexMax) {
2505 cq = ztFreeIndexMax;
2506 }
2507 if (!ztBlocks[p].free
2508 || ((p != q) && (q != cq))
2509 || (ztBlocks[ztBlocks[p].next].prev != p)
2510 || (ztBlocks[ztBlocks[p].prev].next != p)) {
2511 kprintf("zterror at %d", p);
2512 ztDump(FALSE);
2513 kprintf("zterror at %d", p);
2514 assert(FALSE);
2515 }
2516 continue;
2517 }
2518 kprintf("zt[%03d]%c %d, %d, %d\n",
2519 p, ztBlocks[p].free ? 'F' : 'A',
2520 ztBlocks[p].next, ztBlocks[p].prev,
2521 ztBlocks[p].size);
2522 p = ztBlocks[p].next;
2523 if (p == q) {
2524 break;
2525 }
2526 }while (p != q);
2527 if (!sanity) {
2528 printf("\n");
2529 }
2530 }
2531 if (!sanity) {
2532 printf("-----------------------\n");
2533 }
2534 }
2535
2536
2537
2538 #define ZTBDEQ(idx) \
2539 ztBlocks[ztBlocks[(idx)].prev].next = ztBlocks[(idx)].next; \
2540 ztBlocks[ztBlocks[(idx)].next].prev = ztBlocks[(idx)].prev;
2541
2542 static void
2543 ztFree(zone_t zone __unused, uint32_t index, uint32_t count)
2544 {
2545 uint32_t q, w, p, size, merge;
2546
2547 assert(count);
2548 ztBlocksFree += count;
2549
2550 // merge with the following block
2551 merge = (index + count);
2552 if ((merge < ztBlocksCount)
2553 && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
2554 && ztBlocks[merge].free) {
2555 ZTBDEQ(merge);
2556 count += ztBlocks[merge].size;
2557 }
2558
2559 // merge with the preceding block
2560 merge = (index - 1);
2561 if ((merge > ztFreeIndexMax)
2562 && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
2563 && ztBlocks[merge].free) {
2564 size = ztBlocks[merge].size;
2565 count += size;
2566 index -= size;
2567 ZTBDEQ(index);
2568 }
2569
2570 q = ztLog2down(count);
2571 if (q > ztFreeIndexMax) {
2572 q = ztFreeIndexMax;
2573 }
2574 w = q;
2575 // queue in order of size
2576 while (TRUE) {
2577 p = ztBlocks[w].next;
2578 if (p == q) {
2579 break;
2580 }
2581 if (ztBlocks[p].size >= count) {
2582 break;
2583 }
2584 w = p;
2585 }
2586 ztBlocks[p].prev = index;
2587 ztBlocks[w].next = index;
2588
2589 // fault in first
2590 ztFault(&ztBlocks[index], sizeof(ztBlocks[index]), 0);
2591
2592 // mark first & last with free flag and size
2593 ztBlocks[index].free = TRUE;
2594 ztBlocks[index].size = count;
2595 ztBlocks[index].prev = w;
2596 ztBlocks[index].next = p;
2597 if (count > 1) {
2598 index += (count - 1);
2599 // fault in last
2600 ztFault(&ztBlocks[index], sizeof(ztBlocks[index]), 0);
2601 ztBlocks[index].free = TRUE;
2602 ztBlocks[index].size = count;
2603 }
2604 }
2605
2606 static uint32_t
2607 ztAlloc(zone_t zone, uint32_t count)
2608 {
2609 uint32_t q, w, p, leftover;
2610
2611 assert(count);
2612
2613 q = ztLog2up(count);
2614 if (q > ztFreeIndexMax) {
2615 q = ztFreeIndexMax;
2616 }
2617 do{
2618 w = q;
2619 while (TRUE) {
2620 p = ztBlocks[w].next;
2621 if (p == q) {
2622 break;
2623 }
2624 if (ztBlocks[p].size >= count) {
2625 // dequeue, mark both ends allocated
2626 ztBlocks[w].next = ztBlocks[p].next;
2627 ztBlocks[ztBlocks[p].next].prev = w;
2628 ztBlocks[p].free = FALSE;
2629 ztBlocksFree -= ztBlocks[p].size;
2630 if (ztBlocks[p].size > 1) {
2631 ztBlocks[p + ztBlocks[p].size - 1].free = FALSE;
2632 }
2633
2634 // fault all the allocation
2635 ztFault(&ztBlocks[p], count * sizeof(ztBlocks[p]), 0);
2636 // mark last as allocated
2637 if (count > 1) {
2638 ztBlocks[p + count - 1].free = FALSE;
2639 }
2640 // free remainder
2641 leftover = ztBlocks[p].size - count;
2642 if (leftover) {
2643 ztFree(zone, p + ztBlocks[p].size - leftover, leftover);
2644 }
2645
2646 return p;
2647 }
2648 w = p;
2649 }
2650 q++;
2651 }while (q <= ztFreeIndexMax);
2652
2653 return -1U;
2654 }
2655
2656 __startup_func
2657 static void
2658 zone_tagging_init(void)
2659 {
2660 // allocate submaps VM_KERN_MEMORY_DIAG
2661 zone_tagbase_range = zone_kmem_suballoc(zone_tagbase_range.min_address,
2662 zone_tagbase_map_size, VM_FLAGS_FIXED_RANGE_SUBALLOC,
2663 VM_KERN_MEMORY_DIAG, &zone_tagbase_map);
2664
2665 zone_tags_range = zone_kmem_suballoc(zone_tags_range.min_address,
2666 zone_tags_map_size, VM_FLAGS_FIXED_RANGE_SUBALLOC, VM_KERN_MEMORY_DIAG,
2667 &zone_tags_map);
2668
2669 ztBlocks = (ztBlock *) zone_tags_range.min_address;
2670 ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock));
2671
2672 // initialize the qheads
2673 lck_mtx_lock(&ztLock);
2674
2675 ztFault(&ztBlocks[0], sizeof(ztBlocks[0]), 0);
2676 for (uint32_t idx = 0; idx < ztFreeIndexCount; idx++) {
2677 ztBlocks[idx].free = TRUE;
2678 ztBlocks[idx].next = idx;
2679 ztBlocks[idx].prev = idx;
2680 ztBlocks[idx].size = 0;
2681 }
2682 // free remaining space
2683 ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount);
2684
2685 lck_mtx_unlock(&ztLock);
2686 }
2687
2688 static void
2689 ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size)
2690 {
2691 uint32_t * tagbase;
2692 uint32_t count, block, blocks, idx;
2693 size_t pages;
2694
2695 pages = atop(size);
2696 tagbase = ZTAGBASE(zone, mem);
2697
2698 lck_mtx_lock(&ztLock);
2699
2700 // fault tagbase
2701 ztFault(tagbase, pages * sizeof(uint32_t), 0);
2702
2703 if (!zone->z_tags_inline) {
2704 // allocate tags
2705 count = (uint32_t)(size / zone_elem_size(zone));
2706 blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
2707 block = ztAlloc(zone, blocks);
2708 if (-1U == block) {
2709 ztDump(false);
2710 }
2711 assert(-1U != block);
2712 }
2713
2714 lck_mtx_unlock(&ztLock);
2715
2716 if (!zone->z_tags_inline) {
2717 // set tag base for each page
2718 block *= ztTagsPerBlock;
2719 for (idx = 0; idx < pages; idx++) {
2720 vm_offset_t esize = zone_elem_size(zone);
2721 tagbase[idx] = block + (uint32_t)((ptoa(idx) + esize - 1) / esize);
2722 }
2723 }
2724 }
2725
2726 static void
2727 ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size)
2728 {
2729 uint32_t * tagbase;
2730 uint32_t count, block, blocks, idx;
2731 size_t pages;
2732
2733 // set tag base for each page
2734 pages = atop(size);
2735 tagbase = ZTAGBASE(zone, mem);
2736 block = tagbase[0];
2737 for (idx = 0; idx < pages; idx++) {
2738 tagbase[idx] = 0xFFFFFFFF;
2739 }
2740
2741 lck_mtx_lock(&ztLock);
2742 if (!zone->z_tags_inline) {
2743 count = (uint32_t)(size / zone_elem_size(zone));
2744 blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
2745 assert(block != 0xFFFFFFFF);
2746 block /= ztTagsPerBlock;
2747 ztFree(NULL /* zone is unlocked */, block, blocks);
2748 }
2749
2750 lck_mtx_unlock(&ztLock);
2751 }
2752
2753 uint16_t
2754 zone_index_from_tag_index(uint32_t sizeclass_idx)
2755 {
2756 return zone_tags_sizeclasses[sizeclass_idx];
2757 }
2758
2759 #endif /* VM_TAG_SIZECLASSES */
2760 #endif /* !ZALLOC_TEST */
2761 #pragma mark zalloc helpers
2762 #if !ZALLOC_TEST
2763
2764 __pure2
2765 static inline uint16_t
2766 zc_mag_size(void)
2767 {
2768 return zc_magazine_size;
2769 }
2770
2771 __attribute__((noinline, cold))
2772 static void
2773 zone_lock_was_contended(zone_t zone, zone_cache_t zc)
2774 {
2775 lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2776
2777 /*
2778 * If zone caching has been disabled due to memory pressure,
2779 * then recording contention is not useful, give the system
2780 * time to recover.
2781 */
2782 if (__improbable(zone_caching_disabled)) {
2783 return;
2784 }
2785
2786 zone->z_contention_cur++;
2787
2788 if (zc == NULL || zc->zc_depot_max >= INT16_MAX) {
2789 return;
2790 }
2791
2792 /*
2793 * Let the depot grow based on how bad the contention is,
2794 * and how populated the zone is.
2795 */
2796 if (zone->z_contention_wma < 2 * Z_CONTENTION_WMA_UNIT) {
2797 if (zc->zc_depot_max * zpercpu_count() * 20u >=
2798 zone->z_elems_avail) {
2799 return;
2800 }
2801 }
2802 if (zone->z_contention_wma < 4 * Z_CONTENTION_WMA_UNIT) {
2803 if (zc->zc_depot_max * zpercpu_count() * 10u >=
2804 zone->z_elems_avail) {
2805 return;
2806 }
2807 }
2808 if (!zc_grow_threshold || zone->z_contention_wma <
2809 zc_grow_threshold * Z_CONTENTION_WMA_UNIT) {
2810 return;
2811 }
2812
2813 zc->zc_depot_max++;
2814 }
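/*
 * Editorial reading of the growth policy above: under mild contention
 * (z_contention_wma below 2 WMA units) the depot stops growing once
 * zc_depot_max * zpercpu_count() reaches 1/20th (5%) of z_elems_avail;
 * under moderate contention (below 4 units) the cap is 1/10th (10%).
 * Beyond that, growth additionally requires the moving average to reach
 * the zc_grow_threshold tunable.
 */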
2815
2816 static inline void
2817 zone_lock_nopreempt_check_contention(zone_t zone, zone_cache_t zc)
2818 {
2819 if (lck_ticket_lock_try_nopreempt(&zone->z_lock, &zone_locks_grp)) {
2820 return;
2821 }
2822
2823 zone_lock_was_contended(zone, zc);
2824 }
2825
2826 static inline void
2827 zone_lock_check_contention(zone_t zone, zone_cache_t zc)
2828 {
2829 disable_preemption();
2830 zone_lock_nopreempt_check_contention(zone, zc);
2831 }
2832
2833 static inline void
2834 zone_unlock_nopreempt(zone_t zone)
2835 {
2836 lck_ticket_unlock_nopreempt(&zone->z_lock);
2837 }
2838
2839 static inline void
2840 zone_depot_lock_nopreempt(zone_cache_t zc)
2841 {
2842 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2843 }
2844
2845 static inline void
2846 zone_depot_unlock_nopreempt(zone_cache_t zc)
2847 {
2848 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2849 }
2850
2851 static inline void
2852 zone_depot_lock(zone_cache_t zc)
2853 {
2854 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2855 }
2856
2857 static inline void
2858 zone_depot_unlock(zone_cache_t zc)
2859 {
2860 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2861 }
2862
2863 const char *
2864 zone_name(zone_t z)
2865 {
2866 return z->z_name;
2867 }
2868
2869 const char *
2870 zone_heap_name(zone_t z)
2871 {
2872 zone_security_flags_t zsflags = zone_security_config(z);
2873 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2874 return kalloc_heap_names[zsflags.z_kheap_id];
2875 }
2876 return "invalid";
2877 }
2878
2879 static uint32_t
2880 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2881 {
2882 vm_size_t elem_count, chunks;
2883
2884 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2885 zone_elem_size(z);
2886 chunks = (max_elems + elem_count - 1) / elem_count;
2887
2888 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2889 }
2890
2891 static inline vm_size_t
2892 zone_submaps_approx_size(void)
2893 {
2894 vm_size_t size = 0;
2895
2896 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2897 if (zone_submaps[idx] != VM_MAP_NULL) {
2898 size += zone_submaps[idx]->size;
2899 }
2900 }
2901
2902 return size;
2903 }
2904
2905 static void
2906 zone_cache_swap_magazines(zone_cache_t cache)
2907 {
2908 uint16_t count_a = cache->zc_alloc_cur;
2909 uint16_t count_f = cache->zc_free_cur;
2910 zone_element_t *elems_a = cache->zc_alloc_elems;
2911 zone_element_t *elems_f = cache->zc_free_elems;
2912
2913 z_debug_assert(count_a <= zc_mag_size());
2914 z_debug_assert(count_f <= zc_mag_size());
2915
2916 cache->zc_alloc_cur = count_f;
2917 cache->zc_free_cur = count_a;
2918 cache->zc_alloc_elems = elems_f;
2919 cache->zc_free_elems = elems_a;
2920 }
2921
2922 /*!
2923 * @function zone_magazine_load
2924 *
2925 * @brief
2926 * Cache the value of @c zm_cur on the cache to avoid a dependent load
2927 * on the allocation fastpath.
2928 */
2929 static void
2930 zone_magazine_load(uint16_t *count, zone_element_t **elems, zone_magazine_t mag)
2931 {
2932 z_debug_assert(mag->zm_cur <= zc_mag_size());
2933 *count = mag->zm_cur;
2934 *elems = mag->zm_elems;
2935 }
2936
2937 /*!
2938 * @function zone_magazine_replace
2939 *
2940 * @brief
2941 * Unload a magazine and load a new one in its place.
2942 */
2943 static zone_magazine_t
2944 zone_magazine_replace(uint16_t *count, zone_element_t **elems,
2945 zone_magazine_t mag)
2946 {
2947 zone_magazine_t old;
2948
2949 old = (zone_magazine_t)((uintptr_t)*elems -
2950 offsetof(struct zone_magazine, zm_elems));
2951 old->zm_cur = *count;
2952 z_debug_assert(old->zm_cur <= zc_mag_size());
2953 zone_magazine_load(count, elems, mag);
2954
2955 return old;
2956 }
2957
2958 static zone_magazine_t
2959 zone_magazine_alloc(zalloc_flags_t flags)
2960 {
2961 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2962 }
2963
2964 static void
2965 zone_magazine_free(zone_magazine_t mag)
2966 {
2967 (zfree)(zc_magazine_zone, mag);
2968 }
2969
2970 static void
2971 zone_magazine_free_list(struct zone_depot *mags)
2972 {
2973 zone_magazine_t mag, tmp;
2974
2975 STAILQ_FOREACH_SAFE(mag, mags, zm_link, tmp) {
2976 zone_magazine_free(mag);
2977 }
2978
2979 STAILQ_INIT(mags);
2980 }
2981
2982 static void
2983 zone_enable_caching(zone_t zone)
2984 {
2985 zone_cache_t caches;
2986
2987 caches = zalloc_percpu_permanent_type(struct zone_cache);
2988 zpercpu_foreach(zc, caches) {
2989 zone_magazine_load(&zc->zc_alloc_cur, &zc->zc_alloc_elems,
2990 zone_magazine_alloc(Z_WAITOK | Z_NOFAIL));
2991 zone_magazine_load(&zc->zc_free_cur, &zc->zc_free_elems,
2992 zone_magazine_alloc(Z_WAITOK | Z_NOFAIL));
2993 STAILQ_INIT(&zc->zc_depot);
2994 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2995 }
2996
2997 if (os_atomic_xchg(&zone->z_pcpu_cache, caches, release)) {
2998 panic("allocating caches for zone %s twice", zone->z_name);
2999 }
3000 }
3001
3002 bool
3003 zone_maps_owned(vm_address_t addr, vm_size_t size)
3004 {
3005 return from_zone_map(addr, size);
3006 }
3007
3008 #if KASAN_LIGHT
3009 bool
3010 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
3011 {
3012 return from_zone_map(addr, size) ||
3013 mach_vm_range_size(&zone_info.zi_map_range) == 0;
3014 }
3015 #endif /* KASAN_LIGHT */
3016
3017 void
3018 zone_map_sizes(
3019 vm_map_size_t *psize,
3020 vm_map_size_t *pfree,
3021 vm_map_size_t *plargest_free)
3022 {
3023 vm_map_size_t size, free, largest;
3024
3025 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
3026
3027 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
3028 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
3029 *psize += size;
3030 *pfree += free;
3031 *plargest_free = MAX(*plargest_free, largest);
3032 }
3033 }
3034
3035 __attribute__((always_inline))
3036 vm_map_t
3037 zone_submap(zone_security_flags_t zsflags)
3038 {
3039 return zone_submaps[zsflags.z_submap_idx];
3040 }
3041
3042 unsigned
3043 zpercpu_count(void)
3044 {
3045 return zpercpu_early_count;
3046 }
3047
3048 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
3049 /*
3050 * Returns a random number of a given bit-width.
3051 *
3052 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
3053 *
3054 * This uses Intel's rdrand because random() uses FP registers
3055 * which causes FP faults and allocations, neither of which zalloc
3056 * can do itself due to reentrancy problems.
3057 *
3058 * For pre-rdrand machines (which we no longer support),
3059 * we use a bad biased random generator that doesn't use FP.
3060 * Such HW is no longer supported, but VM of newer OSes on older
3061 * bare metal is made to limp along (with reduced security) this way.
3062 */
3063 static uint64_t
3064 zalloc_random_mask64(uint32_t bits)
3065 {
3066 uint64_t mask = ~0ull >> (64 - bits);
3067 uint64_t v;
3068
3069 #if __x86_64__
3070 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
3071 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
3072 v &= mask;
3073 } else {
3074 disable_preemption();
3075 int cpu = cpu_number();
3076 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3077 zone_bool_gen[cpu].zbg_entropy,
3078 ZONE_ENTROPY_CNT, bits);
3079 enable_preemption();
3080 }
3081 #else
3082 v = early_random() & mask;
3083 #endif
3084
3085 return v;
3086 }
3087
3088 /*
3089 * Returns a random number within [bound_min, bound_max)
3090 *
3091 * This isn't _exactly_ uniform, but the skew is small enough
3092 * not to matter for the consumers of this interface.
3093 *
3094 * Values within [bound_min, 2^64 % (bound_max - bound_min))
3095 * will be returned (bound_max - bound_min) / 2^64 more often
3096 * than values within [2^64 % (bound_max - bound_min), bound_max).
3097 */
3098 static uint32_t
3099 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
3100 {
3101 uint64_t delta = bound_max - bound_min;
3102
3103 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
3104 }
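/*
 * Editorial illustration of the modulo skew with small numbers: reducing
 * a uniform 4-bit value (16 outcomes) mod delta == 3 gives residue 0 six
 * preimages {0, 3, 6, 9, 12, 15} but residues 1 and 2 only five each.
 * With a 64-bit source the same effect exists, but the excess probability
 * is only on the order of delta / 2^64.
 */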
3105
3106 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
3107 #if ZONE_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
3108 /*
3109 * Track all kalloc zones of specified size for zlog name
3110 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
3111 */
3112 static bool
3113 track_kalloc_zones(zone_t z, const char *logname)
3114 {
3115 const char *prefix;
3116 size_t len;
3117 zone_security_flags_t zsflags = zone_security_config(z);
3118
3119 prefix = "kalloc.type.var.";
3120 len = strlen(prefix);
3121 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
3122 strncmp(logname, prefix, len) == 0) {
3123 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
3124
3125 return zone_elem_size(z) == sizeclass;
3126 }
3127
3128 prefix = "kalloc.type.";
3129 len = strlen(prefix);
3130 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
3131 strncmp(logname, prefix, len) == 0) {
3132 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
3133
3134 return zone_elem_size(z) == sizeclass;
3135 }
3136
3137 prefix = "kalloc.";
3138 len = strlen(prefix);
3139 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
3140 strncmp(logname, prefix, len) == 0) {
3141 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
3142
3143 return zone_elem_size(z) == sizeclass;
3144 }
3145
3146 return false;
3147 }
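/*
 * Editorial usage examples (the sizes are placeholders; any sizeclass
 * parses the same way): "zlog1=kalloc.type.48" matches the fixed-size
 * kalloc.type zone whose elements are 48 bytes, "zlog1=kalloc.type.var.48"
 * the KHEAP_ID_KT_VAR equivalent, and "zlog1=kalloc.1024" any kalloc
 * zone with 1024-byte elements.
 */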
3148 #endif
3149
3150 int
3151 track_this_zone(const char *zonename, const char *logname)
3152 {
3153 unsigned int len;
3154 const char *zc = zonename;
3155 const char *lc = logname;
3156
3157 /*
3158 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
3159 */
3160
3161 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
3162 /*
3163 * If the current characters don't match, check for a space in
3164 * the zone name and a corresponding period in the log name.
3165 * If that's not there, then the strings don't match.
3166 */
3167
3168 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
3169 break;
3170 }
3171
3172 /*
3173 * The strings are equal so far. If we're at the end, then it's a match.
3174 */
3175
3176 if (*zc == '\0') {
3177 return TRUE;
3178 }
3179 }
3180
3181 return FALSE;
3182 }
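/*
 * Editorial example: because a space in the zone name matches a period
 * in the log name, a zone named "vm pages" (hypothetical) could be
 * logged with "zlog1=vm.pages", keeping the boot-arg free of spaces.
 */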
3183
3184 #if DEBUG || DEVELOPMENT
3185
3186 vm_size_t
3187 zone_element_info(void *addr, vm_tag_t * ptag)
3188 {
3189 vm_size_t size = 0;
3190 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3191 struct zone *src_zone;
3192
3193 if (from_zone_map(addr, sizeof(void *))) {
3194 src_zone = zone_by_id(zone_index_from_ptr(addr));
3195 #if VM_TAG_SIZECLASSES
3196 if (__improbable(src_zone->z_uses_tags)) {
3197 tag = *ztSlot(src_zone, (vm_offset_t)addr) >> 1;
3198 }
3199 #endif /* VM_TAG_SIZECLASSES */
3200 size = zone_elem_size(src_zone);
3201 }
3202 *ptag = tag;
3203 return size;
3204 }
3205
3206 #endif /* DEBUG || DEVELOPMENT */
3207 #endif /* !ZALLOC_TEST */
3208
3209 #pragma mark Zone zeroing and early random
3210 #if !ZALLOC_TEST
3211
3212 /*
3213 * Zone zeroing
3214 *
3215 * All allocations from zones are zeroed on free and are additionally
3216 * checked to still be zero on alloc. The check is
3217 * always on for embedded devices. A perf regression was detected
3218 * on Intel, as we can't use the vectorized implementation of
3219 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3220 * initialization and allocation. Therefore we only perform the check
3221 * on 20% of the allocations.
3222 */
3223 #if ZALLOC_ENABLE_ZERO_CHECK
3224 #if defined(__x86_64__)
3225 /*
3226 * Perform zero validation on every 5th allocation
3227 */
3228 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3229 static uint32_t PERCPU_DATA(zzc_decrementer);
3230 #endif /* defined(__x86_64__) */
3231
3232 /*
3233 * Determine if zero validation for allocation should be skipped
3234 */
3235 static bool
3236 zalloc_skip_zero_check(void)
3237 {
3238 #if defined(__x86_64__)
3239 uint32_t *counterp, cnt;
3240
3241 counterp = PERCPU_GET(zzc_decrementer);
3242 cnt = *counterp;
3243 if (__probable(cnt > 0)) {
3244 *counterp = cnt - 1;
3245 return true;
3246 }
3247 *counterp = zzc_rate - 1;
3248 #endif /* defined(__x86_64__) */
3249 return false;
3250 }
3251
3252 __abortlike
3253 static void
3254 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3255 {
3256 uint32_t esize = (uint32_t)zone_elem_size(z);
3257 uint32_t first_offs = ~0u;
3258 uintptr_t first_bits = 0, v;
3259 char buf[1024];
3260 int pos = 0;
3261
3262 #if __LP64__
3263 #define ZPF "0x%016lx"
3264 #else
3265 #define ZPF "0x%08lx"
3266 #endif
3267
3268 buf[0] = '\0';
3269
3270 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3271 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3272 continue;
3273 }
3274 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3275 "%5d: "ZPF, o, v);
3276 if (first_offs > o) {
3277 first_offs = o;
3278 first_bits = v;
3279 }
3280 }
3281
3282 (panic)("[%s%s]: element modified after free "
3283 "(off:%d, val:"ZPF", sz:%d, ptr:%p)%s",
3284 zone_heap_name(z), zone_name(z),
3285 first_offs, first_bits, esize, (void *)elem, buf);
3286
3287 #undef ZPF
3288 }
3289
3290 static void
3291 zalloc_validate_element(zone_t zone, vm_offset_t elem, vm_size_t size,
3292 zalloc_flags_t flags)
3293 {
3294 if (flags & Z_NOZZC) {
3295 return;
3296 }
3297 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3298 zalloc_uaf_panic(zone, elem, size);
3299 }
3300 if (flags & Z_PCPU) {
3301 for (size_t i = zpercpu_count(); --i > 0;) {
3302 elem += PAGE_SIZE;
3303 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3304 zalloc_uaf_panic(zone, elem, size);
3305 }
3306 }
3307 }
3308 }
3309
3310 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3311
3312 static void
3313 zone_early_scramble_rr(zone_t zone, zone_stats_t zstats)
3314 {
3315 int cpu = cpu_number();
3316 zone_stats_t zs = zpercpu_get_cpu(zstats, cpu);
3317 uint32_t bits;
3318
3319 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3320 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3321
3322 zs->zs_alloc_rr += bits;
3323 zs->zs_alloc_rr %= zone->z_chunk_elems;
3324 }
3325
3326 #endif /* !ZALLOC_TEST */
3327 #pragma mark Zone Leak Detection
3328 #if !ZALLOC_TEST
3329 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
3330
3331 /*
3332 * Zone leak debugging code
3333 *
3334 * When enabled, this code keeps a log to track allocations to a particular
3335 * zone that have not yet been freed.
3336 *
3337 * Examining this log will reveal the source of a zone leak.
3338 *
3339 * The log is allocated only when logging is enabled (it is off by default),
3340 * so there is no effect on the system when it's turned off.
3341 *
3342 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3343 * zone name to log, with n starting at 1.
3344 *
3345 * Leaks debugging utilizes 2 tunables:
3346 * - zlsize (in kB) which describes how much allocated memory the records cover
3347 * (zones with smaller elements get more records, default is 4M).
3348 *
3349 * - zlfreq (in kB) which describes a sample rate in cumulative allocation
3350 * size at which automatic leak detection will sample allocations.
3351 * (default is 16k)
3352 *
3353 *
3354 * Zone corruption logging
3355 *
3356 * Logging can also be used to help identify the source of a zone corruption.
3357 *
3358 * First, identify the zone that is being corrupted,
3359 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3360 *
3361 * When -zc is used in conjunction with zlog,
3362 * it changes the logging style to track both allocations and frees to the zone.
3363 *
3364 * When the corruption is detected, examining the log will show you the stack
3365 * traces of the callers who last allocated and freed any particular element in
3366 * the zone.
3367 *
3368 * Corruption debugging logs will have zrecs records
3369 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3370 */
3371
3372 #define ZRECORDS_MAX (256u << 10)
3373 #define ZRECORDS_DEFAULT (16u << 10)
3374 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3375 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3376 static TUNABLE(uint32_t, zlfreq, "zlfreq", 16);
3377
3378 __startup_func
3379 static void
3380 zone_leaks_init_zrecs(void)
3381 {
3382 /*
3383 * Don't allow more than ZRECORDS_MAX records,
3384 * even if the user asked for more.
3385 *
3386 * This prevents accidentally hogging too much kernel memory
3387 * and making the system unusable.
3388 */
3389 if (zrecs == 0) {
3390 zrecs = ZRECORDS_DEFAULT *
3391 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3392 }
3393 if (zrecs > ZRECORDS_MAX) {
3394 zrecs = ZRECORDS_MAX;
3395 }
3396 }
3397 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3398
3399 static uint32_t
3400 zone_leaks_record_count(zone_t z)
3401 {
3402 uint32_t recs = (zlsize << 10) / zone_elem_size(z);
3403
3404 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3405 }
3406
3407 static uint32_t
3408 zone_leaks_sample_rate(zone_t z)
3409 {
3410 return (zlfreq << 10) / zone_elem_size(z);
3411 }
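/*
 * Editorial worked example with the defaults (zlsize = 4096 kB,
 * zlfreq = 16 kB) for a zone with 64-byte elements:
 *
 *   records = (4096 << 10) / 64 = 65536  (within the 16k..256k clamp)
 *   rate    = (16 << 10) / 64   = 256    (sample ~1 in 256 allocations)
 */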
3412
3413 #if KASAN_TBI
3414 static TUNABLE(size_t, kasan_zrecs, "kasan_zrecs", 0);
3415 #endif
3416
3417 #if ZONE_ENABLE_LOGGING
3418 /* Log allocations and frees to help debug a zone element corruption */
3419 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3420
3421 /*
3422 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3423 */
3424 #define MAX_ZONES_LOG_REQUESTS 10
3425
3426 /**
3427 * @function zone_setup_logging
3428 *
3429 * @abstract
3430 * Optionally sets up a zone for logging.
3431 *
3432 * @discussion
3433 * We recognize the following boot-args:
3434 *
3435 * zlog=<zone_to_log>
3436 * zrecs=<num_records_in_log>
3437 * zlsize=<memory to cover for leaks>
3438 *
3439 * The zlog arg is used to specify the zone name that should be logged,
3440 * and zrecs/zlsize is used to control the size of the log.
3441 */
3442 static void
3443 zone_setup_logging(zone_t z)
3444 {
3445 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3446 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3447 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3448 bool logging_on = false;
3449
3450 /*
3451 * Append kalloc heap name to zone name (if zone is used by kalloc)
3452 */
3453 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3454
3455 /* zlog0 isn't allowed. */
3456 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3457 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3458
3459 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3460 if (track_this_zone(zone_name, zlog_val) ||
3461 track_kalloc_zones(z, zlog_val)) {
3462 logging_on = true;
3463 break;
3464 }
3465 }
3466 }
3467
3468 /*
3469 * Backwards compatibility with the old boot-arg used to specify single
3470 * zone logging, i.e. "zlog". This check needs to happen after the newer
3471 * zlog<n> checks, because the "zlog" prefix matches all of the zlog<n>
3472 * boot-args.
3473 */
3474 if (!logging_on &&
3475 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3476 if (track_this_zone(zone_name, zlog_val) ||
3477 track_kalloc_zones(z, zlog_val)) {
3478 logging_on = true;
3479 }
3480 }
3481
3482 /*
3483 * If we want to log a zone, see if we need to allocate buffer space for
3484 * the log.
3485 *
3486 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3487 * we have to defer allocation in that case.
3488 *
3489 * zone_init() will finish the job.
3490 *
3491 * If we want to log one of the VM related zones that's set up early on,
3492 * we will skip allocation of the log until zinit is called again later
3493 * on some other zone.
3494 */
3495 if (logging_on) {
3496 if (corruption_debug_flag) {
3497 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3498 } else {
3499 z->z_btlog = btlog_create(BTLOG_HASH,
3500 zone_leaks_record_count(z), 0);
3501 }
3502 if (z->z_btlog) {
3503 z->z_log_on = true;
3504 printf("zone[%s%s]: logging enabled\n",
3505 zone_heap_name(z), z->z_name);
3506 } else {
3507 printf("zone[%s%s]: failed to enable logging\n",
3508 zone_heap_name(z), z->z_name);
3509 }
3510 }
3511 }
3512
3513 #endif /* ZONE_ENABLE_LOGGING */
3514
3515 #if KASAN_TBI
3516 static void
3517 zone_setup_kasan_logging(zone_t z)
3518 {
3519 if (!z->z_tbi_tag) {
3520 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3521 zone_heap_name(z), z->z_name);
3522 return;
3523 }
3524
3525 z->z_btlog_kasan = btlog_create(BTLOG_LOG, ZRECORDS_DEFAULT, 0);
3526 if (!z->z_btlog_kasan) {
3527 printf("zone[%s%s]: failed to enable kasan logging\n",
3528 zone_heap_name(z), z->z_name);
3529 }
3530 }
3531
3532 static void
3533 kasan_tbi_log_bt(zone_t zone, uint8_t op, vm_offset_t addr, void *fp)
3534 {
3535 assert(op == ZOP_ALLOC || op == ZOP_FREE);
3536
3537 if (__probable(zone->z_tbi_tag && zone->z_btlog_kasan)) {
3538 btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
3539 btlog_record(zone->z_btlog_kasan, (void *)addr, op, ref);
3540 }
3541 }
3542 #endif /* KASAN_TBI */
3543
3544 #if CONFIG_ZLEAKS
3545
3546 static thread_call_data_t zone_leaks_callout;
3547
/*
 * The zone leak detector, abbreviated 'zleak', keeps track
 * of a subset of the currently outstanding allocations
 * made by the zone allocator.
 *
 * It will engage itself automatically if the zone map usage
 * goes above zleak_pages_global_wired_threshold pages.
 *
 * When that threshold is reached, zones that use more than
 * zleak_pages_per_zone_wired_threshold pages will get
 * a BTLOG_HASH btlog with sampling to minimize perf impact,
 * yet receive statistical data about the backtrace that is
 * the most likely to cause the leak.
 *
 * If the zone drops far enough below the threshold, then the log
 * is disabled and backtraces freed. Data can be collected
 * from userspace with the zlog(1) command.
 */

/* whether the zleaks subsystem thinks the map is under pressure */
uint32_t zleak_active;
SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;

/* Size of zone map at which to start collecting data */
static size_t zleak_pages_global_wired_threshold = ~0;
vm_size_t zleak_global_tracking_threshold = ~0;

/* Size a zone will have before we will collect data on it */
static size_t zleak_pages_per_zone_wired_threshold = ~0;
vm_size_t zleak_per_zone_tracking_threshold = ~0;

static inline bool
zleak_should_enable_for_zone(zone_t z)
{
	if (z->z_log_on) {
		return false;
	}
	if (z->z_btlog) {
		return false;
	}
	if (!zleak_active) {
		return false;
	}
	return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
}

static inline bool
zleak_should_disable_for_zone(zone_t z)
{
	if (z->z_log_on) {
		return false;
	}
	if (!z->z_btlog) {
		return false;
	}
	if (!zleak_active) {
		return true;
	}
	return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
}

static inline bool
zleak_should_activate(size_t pages)
{
	return !zleak_active && pages >= zleak_pages_global_wired_threshold;
}

static inline bool
zleak_should_deactivate(size_t pages)
{
	return zleak_active && pages < zleak_pages_global_wired_threshold / 2;
}

static void
zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
{
	size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
	btlog_t log;

	if (zleak_should_activate(pages)) {
		zleak_active = 1;
	} else if (zleak_should_deactivate(pages)) {
		zleak_active = 0;
	}

	zone_foreach(z) {
		if (zleak_should_disable_for_zone(z)) {
			log = z->z_btlog;
			z->z_btlog = NULL;
			assert(z->z_btlog_disabled == NULL);
			btlog_disable(log);
			z->z_btlog_disabled = log;
		}

		if (zleak_should_enable_for_zone(z)) {
			log = z->z_btlog_disabled;
			if (log == NULL) {
				log = btlog_create(BTLOG_HASH,
				    zone_leaks_record_count(z),
				    zone_leaks_sample_rate(z));
			} else if (btlog_enable(log) == KERN_SUCCESS) {
				z->z_btlog_disabled = NULL;
			} else {
				log = NULL;
			}
			os_atomic_store(&z->z_btlog, log, release);
		}
	}
}

__startup_func
static void
zleak_init(void)
{
	zleak_max_zonemap_size = ptoa(zone_pages_wired_max);

	zleak_update_threshold(&zleak_global_tracking_threshold,
	    zleak_max_zonemap_size / 2);
	zleak_update_threshold(&zleak_per_zone_tracking_threshold,
	    zleak_global_tracking_threshold / 8);

	thread_call_setup_with_options(&zone_leaks_callout,
	    zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
	    THREAD_CALL_OPTIONS_ONCE);
}
STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
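
/*
 * Worked example (illustrative numbers, not defaults): on a system
 * whose zone map can hold 4GiB of wired memory, zleak_init() above
 * arms the global tracking threshold at 2GiB (half the map) and the
 * per-zone threshold at 256MiB (an eighth of the global threshold).
 */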

kern_return_t
zleak_update_threshold(vm_size_t *arg, uint64_t value)
{
	if (value >= zleak_max_zonemap_size) {
		return KERN_INVALID_VALUE;
	}

	if (arg == &zleak_global_tracking_threshold) {
		zleak_global_tracking_threshold = (vm_size_t)value;
		zleak_pages_global_wired_threshold = atop(value);
		if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
			thread_call_enter(&zone_leaks_callout);
		}
		return KERN_SUCCESS;
	}

	if (arg == &zleak_per_zone_tracking_threshold) {
		zleak_per_zone_tracking_threshold = (vm_size_t)value;
		zleak_pages_per_zone_wired_threshold = atop(value);
		if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
			thread_call_enter(&zone_leaks_callout);
		}
		return KERN_SUCCESS;
	}

	return KERN_INVALID_ARGUMENT;
}

static void
panic_display_zleaks(bool has_syms)
{
	bool did_header = false;
	vm_address_t bt[BTLOG_MAX_DEPTH];
	uint32_t len, count;

	zone_foreach(z) {
		btlog_t log = z->z_btlog;

		if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
			continue;
		}

		count = btlog_guess_top(log, bt, &len);
		if (count == 0) {
			continue;
		}

		if (!did_header) {
			paniclog_append_noflush("Zone (suspected) leak report:\n");
			did_header = true;
		}

		paniclog_append_noflush(" Zone: %s%s\n",
		    zone_heap_name(z), zone_name(z));
		paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
		    (long)count * zone_scale_for_percpu(z, zone_elem_size(z)));
		paniclog_append_noflush(" Size: %ld\n",
		    (long)zone_size_wired(z));
		paniclog_append_noflush(" Top backtrace:\n");
		for (uint32_t i = 0; i < len; i++) {
			if (has_syms) {
				paniclog_append_noflush(" %p ", (void *)bt[i]);
				panic_print_symbol_name(bt[i]);
				paniclog_append_noflush("\n");
			} else {
				paniclog_append_noflush(" %p\n", (void *)bt[i]);
			}
		}

		kmod_panic_dump(bt, len);
		paniclog_append_noflush("\n");
	}
}
#endif /* CONFIG_ZLEAKS */

static void
zalloc_log(btlog_t log, vm_offset_t addr, zalloc_flags_t flags, void *fp)
{
	btref_t ref;

	if (btlog_sample(log)) {
		ref = btref_get(fp, (flags & Z_NOWAIT) ? BTREF_GET_NOWAIT : 0);
		btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
	}
}

static void
zfree_log(btlog_t log, vm_offset_t addr, void *fp)
{
	/*
	 * Check which style of logging this zone uses.
	 *
	 * There are two styles of logging, depending on
	 * whether we're trying to catch a leak or corruption.
	 */
	if (btlog_get_type(log) == BTLOG_LOG) {
		/*
		 * We're logging to catch a corruption.
		 *
		 * Add a record of this zfree operation to the log.
		 */
		btlog_record(log, (void *)addr, ZOP_FREE,
		    btref_get(fp, BTREF_GET_NOWAIT));
	} else {
		/*
		 * We're logging to catch a leak.
		 *
		 * Remove any record we might have for this element
		 * since it's being freed. Note that we may not find it
		 * if the buffer overflowed and that's OK.
		 *
		 * Since the log is of a limited size, old records get
		 * overwritten if there are more zallocs than zfrees.
		 */
		btlog_erase(log, (void *)addr);
	}
}

#endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
#endif /* !ZALLOC_TEST */
#pragma mark zone (re)fill
#if !ZALLOC_TEST

/*!
 * @defgroup Zone Refill
 * @{
 *
 * @brief
 * Functions handling the zone refill machinery.
 *
 * @discussion
 * Zones are refilled based on two mechanisms: direct expansion and
 * asynchronous expansion.
 *
 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
 *
 * - call @c zone_expand_locked() directly if the caller is allowed to block,
 *
 * - wakeup the asynchronous expansion thread call if the caller is not allowed
 *   to block, or if the reserve becomes depleted.
 *
 *
 * <h2>Synchronous expansion</h2>
 *
 * This mechanism is actually the only one that may refill a zone, and all the
 * other ones funnel through this one eventually.
 *
 * @c zone_expand_locked() implements the core of the expansion mechanism,
 * and will do so while a caller-specified predicate is true.
 *
 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
 * - one VM privileged thread,
 * - one regular thread.
 *
 * Regular threads that refill will put down their identity in @c z_expander,
 * so that priority inversion avoidance can be implemented.
 *
 * However, VM privileged threads are allowed to use VM page reserves,
 * which allows for the system to recover from extreme memory pressure
 * situations, allowing for the few allocations that @c zone_gc() or
 * killing processes require.
 *
 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
 * is set. @c z_expander is not necessarily the identity of this VM privileged
 * thread (it is if the VM privileged thread came in first, but otherwise it
 * may be another thread, or even @c THREAD_NULL).
 *
 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
 * spending a whole pointer on priority inheritance for VM privileged threads
 * (and other issues related to having two owners), we use the rwlock boost as
 * a stop gap to avoid priority inversions.
 *
 *
 * <h2>Chunk wiring policies</h2>
 *
 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
 * to try to minimize fragmentation relative to element sizes not aligning with
 * a chunk size well. However, this can grow large and be hard to fulfill on
 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
 * 4k page systems).
 *
 * This is why, when under memory pressure the system allows chunks to be
 * partially populated. The metadata of the first page in the chunk maintains
 * the count of actually populated pages.
 *
 * The metadata for addresses assigned to a zone are found on 4 queues:
 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
 *   elements (those can be targeted by @c zone_gc()),
 * - @c z_pageq_partial has chunk heads with populated pages that are partially
 *   used,
 * - @c z_pageq_full has chunk heads with populated pages with no free elements
 *   left,
 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
 *   the zone forever (if @c z_va_sequester is enabled), or the first secondary
 *   metadata for a chunk whose corresponding page is not populated in the
 *   chunk.
 *
 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
 * queues are preferred.
 *
 *
 * <h2>Asynchronous expansion</h2>
 *
 * This mechanism allows for refilling zones used mostly with non blocking
 * callers. It relies on a thread call (@c zone_expand_callout) which will
 * iterate all zones and refill the ones marked with @c z_async_refilling.
 *
 * NOTE: If the calling thread for zalloc_noblock is lower priority than
 * the thread_call, then zalloc_noblock to an empty zone may succeed.
 *
 *
 * <h2>Dealing with zone allocations from the mach VM code</h2>
 *
 * The implementation of the mach VM itself uses the zone allocator
 * for things like the vm_map_entry data structure. In order to prevent
 * a recursion problem when adding more pages to a zone, the VM zones
 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
 * or any VM map functions to allocate.
 *
 * Instead, a really simple coalescing first-fit allocator is used
 * for this submap, and no one other than zalloc can allocate from it.
 *
 * Memory is directly populated which doesn't require allocation of
 * VM map entries, and avoids recursion. The cost of this scheme however,
 * is that `vm_map_lookup_entry` will not function on those addresses
 * (nor any API relying on it).
 */
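
/*
 * Illustrative sketch (not the actual code; `caller_can_block` stands
 * in for the real flag checks): the decision made by @c zalloc_ext()
 * described above roughly looks like:
 *
 *	if (z->z_elems_free < z->z_elems_rsv / 2) {
 *		if (caller_can_block) {
 *			zone_expand_locked(z, flags, pred);
 *		} else {
 *			zone_expand_async_schedule_if_needed(z);
 *		}
 *	}
 */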

static thread_call_data_t zone_expand_callout;

static inline kma_flags_t
zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
{
	kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;

	if (zsflags.z_noencrypt) {
		kmaflags |= KMA_NOENCRYPT;
	}
	if (flags & Z_NOPAGEWAIT) {
		kmaflags |= KMA_NOPAGEWAIT;
	}
	if (z->z_permanent || (!z->z_destructible && zsflags.z_va_sequester)) {
		kmaflags |= KMA_PERMANENT;
	}
	if (zsflags.z_submap_from_end) {
		kmaflags |= KMA_LAST_FREE;
	}

	return kmaflags;
}
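
/*
 * Example (illustrative): a permanent zone living in a submap that
 * allocates from its end would, absent Z_NOPAGEWAIT and z_noencrypt,
 * get KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT | KMA_LAST_FREE: its VA
 * is never returned to the submap and must be carved from the top.
 */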

static inline void
zone_add_wired_pages(uint32_t pages)
{
	size_t count = os_atomic_add(&zone_pages_wired, pages, relaxed);

#if CONFIG_ZLEAKS
	if (__improbable(zleak_should_activate(count) &&
	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
		thread_call_enter(&zone_leaks_callout);
	}
#else
	(void)count;
#endif
}

static inline void
zone_remove_wired_pages(uint32_t pages)
{
	size_t count = os_atomic_sub(&zone_pages_wired, pages, relaxed);

#if CONFIG_ZLEAKS
	if (__improbable(zleak_should_deactivate(count) &&
	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
		thread_call_enter(&zone_leaks_callout);
	}
#else
	(void)count;
#endif
}

/*!
 * @function zcram_and_lock()
 *
 * @brief
 * Prepare some memory to be usable for allocation purposes.
 *
 * @discussion
 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
 * to be usable in the zone.
 *
 * This function assumes the metadata is already populated for the range.
 *
 * Calling this function with @c pg_start being 0 means that the memory
 * is either a partial chunk, or a full chunk, that isn't published anywhere
 * and the initialization can happen without locks held.
 *
 * Calling this function with a non-zero @c pg_start means that we are
 * extending an existing chunk: the memory in
 * <code>[addr, addr + ptoa(pg_start))</code> is already usable and published
 * in the zone, so extending it requires holding the zone lock.
 *
 * @param zone The zone to cram new populated pages into
 * @param addr The base address for the chunk(s)
 * @param pg_va_new The number of virtual pages newly assigned to the zone
 * @param pg_start The first newly populated page relative to @a addr.
 * @param pg_end The after-last newly populated page relative to @a addr.
 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
 */
static void
zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
    uint32_t pg_start, uint32_t pg_end, uint16_t lock)
{
	zone_id_t zindex = zone_index(zone);
	vm_offset_t elem_size = zone_elem_size(zone);
	uint32_t free_start = 0, free_end = 0;
	uint32_t oob_offs = zone_elem_offs(zone);

	struct zone_page_metadata *meta = zone_meta_from_addr(addr);
	uint32_t chunk_pages = zone->z_chunk_pages;
	bool guarded = meta->zm_guarded;

	assert(pg_start < pg_end && pg_end <= chunk_pages);

	if (pg_start == 0) {
		uint16_t chunk_len = (uint16_t)pg_end;
		uint16_t secondary_len = ZM_SECONDARY_PAGE;
		bool inline_bitmap = false;

		if (zone->z_percpu) {
			chunk_len = 1;
			secondary_len = ZM_SECONDARY_PCPU_PAGE;
			assert(pg_end == zpercpu_count());
		}
		if (!zone->z_permanent) {
			inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
		}

		meta[0] = (struct zone_page_metadata){
			.zm_index = zindex,
			.zm_guarded = guarded,
			.zm_inline_bitmap = inline_bitmap,
			.zm_chunk_len = chunk_len,
			.zm_alloc_size = lock,
		};

		for (uint16_t i = 1; i < chunk_pages; i++) {
			meta[i] = (struct zone_page_metadata){
				.zm_index = zindex,
				.zm_guarded = guarded,
				.zm_inline_bitmap = inline_bitmap,
				.zm_chunk_len = secondary_len,
				.zm_page_index = (uint8_t)i,
				.zm_subchunk_len = (uint8_t)(chunk_pages - i),
			};
		}

		free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
		if (!zone->z_permanent) {
			zone_meta_bits_init(meta, free_end, zone->z_chunk_elems);
		}
	} else {
		assert(!zone->z_percpu && !zone->z_permanent);

		free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
		free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
	}

#if VM_TAG_SIZECLASSES
	if (__improbable(zone->z_uses_tags)) {
		assert(!zone->z_percpu);
		ztMemoryAdd(zone, addr + ptoa(pg_start),
		    ptoa(pg_end - pg_start));
	}
#endif /* VM_TAG_SIZECLASSES */

	/*
	 * Insert the initialized pages / metadatas into the right lists.
	 */

	zone_lock(zone);
	assert(zone->z_self == zone);

	if (pg_start != 0) {
		assert(meta->zm_chunk_len == pg_start);

		zone_meta_bits_merge(meta, free_start, free_end);
		meta->zm_chunk_len = (uint16_t)pg_end;

		/*
		 * consume the zone_meta_lock_in_partial()
		 * done in zone_expand_locked()
		 */
		zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
		zone_meta_remqueue(zone, meta);
	}

	if (zone->z_permanent || meta->zm_alloc_size) {
		zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
	} else {
		zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
		zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
	}
	if (pg_end < chunk_pages) {
		/* push any non populated residual VA on z_pageq_va */
		zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
	}

	zone_elems_free_add(zone, free_end - free_start);
	zone->z_elems_avail += free_end - free_start;
	zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
	if (pg_va_new) {
		zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
	}
	if (zone->z_wired_hwm < zone->z_wired_cur) {
		zone->z_wired_hwm = zone->z_wired_cur;
	}

#if CONFIG_ZLEAKS
	if (__improbable(zleak_should_enable_for_zone(zone) &&
	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
		thread_call_enter(&zone_leaks_callout);
	}
#endif /* CONFIG_ZLEAKS */

	zone_add_wired_pages(pg_end - pg_start);
}

static void
zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
{
	uint32_t chunk_pages = zone->z_chunk_pages;

	assert(pages % chunk_pages == 0);
	for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
		zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
		zone_unlock(zone);
	}
}

__startup_func
void
zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
{
	uint32_t pages = (uint32_t)atop(size);

	assert(from_zone_map(newmem, size));
	assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
	assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);

	/*
	 * The early pages we move at the pmap layer can't be "depopulated"
	 * because there's no vm_page_t for them.
	 *
	 * "Lock" them so that they never hit z_pageq_empty.
	 */
	bzero((void *)newmem, size);
	zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
}

__attribute__((overloadable))
static inline bool
zone_submap_is_sequestered(zone_submap_idx_t idx)
{
	switch (idx) {
	case Z_SUBMAP_IDX_READ_ONLY:
	case Z_SUBMAP_IDX_VM:
		return true;
	case Z_SUBMAP_IDX_DATA:
		return false;
	default:
		return ZSECURITY_CONFIG(SEQUESTER);
	}
}

__attribute__((overloadable))
static inline bool
zone_submap_is_sequestered(zone_security_flags_t zsflags)
{
	return zone_submap_is_sequestered(zsflags.z_submap_idx);
}

/*!
 * @function zone_submap_alloc_sequestered_va
 *
 * @brief
 * Allocates VA without using vm_find_space().
 *
 * @discussion
 * Allocate VA quickly without using the slower vm_find_space() for cases
 * when the submaps are fully sequestered.
 *
 * The VM submap is used to implement the VM itself so it is always
 * sequestered, as it can't use kmem_alloc(), which always needs to allocate
 * VM entries. However, it can use vm_map_enter() which tries to coalesce
 * entries, which always works, so the VM map only ever needs 2 entries
 * (one for each end).
 *
 * The RO submap is similarly always sequestered if it exists (as a non
 * sequestered RO submap makes very little sense).
 *
 * The allocator is a very simple bump-allocator
 * that allocates from either end.
 */
static kern_return_t
zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
    vm_offset_t *addrp)
{
	vm_size_t size = ptoa(pages);
	vm_map_t map = zone_submap(zsflags);
	vm_map_entry_t first, last;
	vm_map_offset_t addr;

	vm_map_lock(map);

	first = vm_map_first_entry(map);
	last = vm_map_last_entry(map);

	if (first->vme_end + size > last->vme_start) {
		vm_map_unlock(map);
		return KERN_NO_SPACE;
	}

	if (zsflags.z_submap_from_end) {
		last->vme_start -= size;
		addr = last->vme_start;
		VME_OFFSET_SET(last, addr);
	} else {
		addr = first->vme_end;
		first->vme_end += size;
	}
	map->size += size;

	vm_map_unlock(map);

	*addrp = addr;
	return KERN_SUCCESS;
}
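
/*
 * Sketch of the resulting submap layout (illustrative):
 *
 *	[ first entry grows up -> ]   free VA   [ <- last entry grows down ]
 *
 * z_submap_from_end decides which end of the submap the new run is
 * carved from, and the gap between the two entries is what remains
 * available for future sequestered VA allocations.
 */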

void
zone_fill_initially(zone_t zone, vm_size_t nelems)
{
	kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
	kern_return_t kr;
	vm_offset_t addr;
	uint32_t pages;
	zone_security_flags_t zsflags = zone_security_config(zone);

	assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
	assert(zone->z_elems_avail == 0);

	kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
	pages = zone_alloc_pages_for_nelems(zone, nelems);
	if (zone_submap_is_sequestered(zsflags)) {
		kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
		if (kr != KERN_SUCCESS) {
			panic("zone_submap_alloc_sequestered_va() "
			    "of %u pages failed", pages);
		}
		kernel_memory_populate(addr, ptoa(pages),
		    kmaflags, VM_KERN_MEMORY_ZONE);
	} else {
		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
		kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
		    kmaflags, VM_KERN_MEMORY_ZONE);
	}

	zone_meta_populate(addr, ptoa(pages));
	zcram(zone, addr, pages, 0);
}

#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
__attribute__((noinline))
static void
zone_scramble_va_and_unlock(
	zone_t z,
	struct zone_page_metadata *meta,
	uint32_t runs,
	uint32_t pages,
	uint32_t chunk_pages,
	uint64_t guard_mask)
{
	struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];

	for (uint32_t run = 0, n = 0; run < runs; run++) {
		arr[run] = meta + n;
		n += chunk_pages + ((guard_mask >> run) & 1);
	}

	/*
	 * Fisher–Yates shuffle, for an array with indices [0, n)
	 *
	 *     for i from n−1 downto 1 do
	 *         j ← random integer such that 0 ≤ j ≤ i
	 *         exchange a[j] and a[i]
	 *
	 * The point here is that early allocations aren't at a fixed
	 * distance from each other.
	 */
	for (uint32_t i = runs - 1; i > 0; i--) {
		uint32_t j = zalloc_random_uniform32(0, i + 1);

		meta = arr[j];
		arr[j] = arr[i];
		arr[i] = meta;
	}

	zone_lock(z);

	for (uint32_t i = 0; i < runs; i++) {
		zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
	}
	z->z_va_cur += z->z_percpu ? runs : pages;
}

static inline uint32_t
dist_u32(uint32_t a, uint32_t b)
{
	return a < b ? b - a : a - b;
}

static uint64_t
zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
{
	for (; n-- > 0; pop--) {
		uint32_t bit = zalloc_random_uniform32(0, pop);
		uint64_t m = mask;

		for (; bit; bit--) {
			m &= m - 1;
		}

		mask ^= 1ull << __builtin_ctzll(m);
	}

	return mask;
}

/**
 * @function zalloc_random_bits
 *
 * @brief
 * Compute a random number with a specified number of bits set in a given
 * width.
 *
 * @discussion
 * This function generates a "uniform" distribution of sets of bits set in
 * a given width, with typically less than width/4 calls to random.
 *
 * @param pop the target number of bits set.
 * @param width the number of bits in the random integer to generate.
 */
static uint64_t
zalloc_random_bits(uint32_t pop, uint32_t width)
{
	uint64_t w_mask = (1ull << width) - 1;
	uint64_t mask;
	uint32_t cur;

	if (3 * width / 4 <= pop) {
		mask = w_mask;
		cur = width;
	} else if (pop <= width / 4) {
		mask = 0;
		cur = 0;
	} else {
		/*
		 * Choosing a random number this way will overwhelmingly
		 * yield a popcount close to width/2, +/- a few bits.
		 */
		mask = zalloc_random_mask64(width);
		cur = __builtin_popcountll(mask);

		if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
			/*
			 * If the opposite mask has a closer popcount,
			 * then start with that one as the seed.
			 */
			cur = width - cur;
			mask ^= w_mask;
		}
	}

	if (cur < pop) {
		/*
		 * Setting `pop - cur` bits is really clearing that many from
		 * the opposite mask.
		 */
		mask ^= w_mask;
		mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
		mask ^= w_mask;
	} else if (pop < cur) {
		mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
	}

	return mask;
}
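
/*
 * Example (illustrative): zalloc_random_bits(3, 8) produces an 8-bit
 * mask with exactly 3 bits set, e.g. 0b01100100, with every such mask
 * being approximately equally likely.
 */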
#endif

static void
zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
{
	zone_security_flags_t zsflags = zone_security_config(z);
	struct zone_page_metadata *meta;
	kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
	uint32_t chunk_pages = z->z_chunk_pages;
	uint32_t runs, pages, guards, rnum;
	uint64_t guard_mask = 0;
	bool lead_guard = false;
	kern_return_t kr;
	vm_offset_t addr;

	zone_unlock(z);

	/*
	 * A lot of OOB exploitation techniques rely on precise placement
	 * and interleaving of zone pages. The layout that is sought
	 * by attackers will be C/P/T types, where:
	 * - (C)ompromised is the type for which attackers have a bug,
	 * - (P)adding is used to pad memory,
	 * - (T)arget is the type that the attacker will attempt to corrupt
	 *   by exploiting (C).
	 *
	 * Note that in some cases C==T and P isn't needed.
	 *
	 * In order to make those placement games much harder,
	 * we grow zones by random runs of memory, up to 256k.
	 * This makes predicting the precise layout of the heap
	 * significantly more complicated.
	 *
	 * Note: this function makes very heavy use of random,
	 * however, it is mostly limited to sequestered zones,
	 * and eventually the layout will be fixed,
	 * and the usage of random vastly reduced.
	 *
	 * For non-sequestered zones, there's a single call
	 * to random in order to decide whether we want
	 * a guard page or not.
	 */
	pages = chunk_pages;
	guards = 0;
	runs = 1;
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
		pages = atop(ZONE_CHUNK_ALLOC_SIZE);
		runs = (pages + chunk_pages - 1) / chunk_pages;
		runs = zalloc_random_uniform32(1, runs + 1);
		pages = runs * chunk_pages;
	}
	static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
	    "make sure that `runs` will never be larger than 64");
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */

	/*
	 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
	 * guards might be added after each chunk.
	 *
	 * Those guard pages are marked with the ZM_PGZ_GUARD
	 * magical chunk len, and their zm_oob_offs field
	 * is used to remember an optional shift applied
	 * to returned elements, in order to right-align them
	 * as much as possible.
	 *
	 * In an adversarial context, while guard pages
	 * are extremely effective against linear overflow,
	 * using a predictable density of guard pages feels like
	 * a missed opportunity. Which is why we chose to insert
	 * one guard page for about 32k of memory, and place it
	 * randomly.
	 */
#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
	if (z->z_percpu) {
		/*
		 * For per-cpu runs, have a 75% chance to have a guard.
		 */
		rnum = zalloc_random_uniform32(0, 4 * 128);
		guards = rnum >= 128;
	} else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
		vm_offset_t rest;

		/*
		 * For types that are less susceptible to OOBs,
		 * have a density of 1 guard every 64k, with a uniform
		 * distribution.
		 */
		rnum = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
		guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
		rest = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
		guards += rnum < rest;
	} else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
		/*
		 * For chunks >= 32k, have a 75% chance of guard pages
		 * between chunks.
		 */
		rnum = zalloc_random_uniform32(65, 129);
		guards = runs * rnum / 128;
	} else {
		vm_offset_t rest;

		/*
		 * Otherwise, aim at 1 guard every 32k,
		 * with a uniform distribution.
		 */
		rnum = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
		guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
		rest = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
		guards += rnum < rest;
	}
	assert3u(guards, <=, runs);

	guard_mask = 0;

	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
		uint32_t g = 0;

		/*
		 * Several exploitation strategies rely on a C/T (compromised
		 * then target types) ordering of pages with a sub-page reach
		 * from C into T.
		 *
		 * We want to reliably thwart such exploitations
		 * and hence force a guard page between alternating
		 * memory types.
		 */
		guard_mask |= 1ull << (runs - 1);
		g++;

		/*
		 * While we randomize the chunks lengths, an attacker with
		 * precise timing control can guess when overflows happen,
		 * and "measure" the runs, which gives them an indication
		 * of where the next run start offset is.
		 *
		 * In order to make this knowledge unusable, add a guard page
		 * _before_ the new run with a 25% probability, regardless
		 * of whether we had enough guard pages.
		 */
		if ((rnum & 3) == 0) {
			lead_guard = true;
			g++;
		}
		if (guards > g) {
			guard_mask |= zalloc_random_bits(guards - g, runs - 1);
		} else {
			guards = g;
		}
	} else {
		assert3u(runs, ==, 1);
		assert3u(guards, <=, 1);
		guard_mask = guards << (runs - 1);
	}
#else
	(void)rnum;
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */

	if (zone_submap_is_sequestered(zsflags)) {
		kr = zone_submap_alloc_sequestered_va(zsflags,
		    pages + guards, &addr);
	} else {
		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
		kr = kmem_alloc(zone_submap(zsflags), &addr,
		    ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
	}

	if (kr != KERN_SUCCESS) {
		uint64_t zone_size = 0;
		zone_t zone_largest = zone_find_largest(&zone_size);
		panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
		    "likely due to memory leak in zone [%s%s] "
		    "(%u%c, %d elements allocated)",
		    kr, zone_heap_name(z), zone_name(z),
		    zone_heap_name(zone_largest), zone_name(zone_largest),
		    mach_vm_size_pretty(zone_size),
		    mach_vm_size_unit(zone_size),
		    zone_count_allocated(zone_largest));
	}

	meta = zone_meta_from_addr(addr);
	zone_meta_populate(addr, ptoa(pages + guards));

	/*
	 * Handle the leading guard page if any
	 */
	if (lead_guard) {
		meta[0].zm_index = zone_index(z);
		meta[0].zm_chunk_len = ZM_PGZ_GUARD;
		meta[0].zm_guarded = true;
		meta++;
	}

	for (uint32_t run = 0, n = 0; run < runs; run++) {
		bool guarded = (guard_mask >> run) & 1;

		for (uint32_t i = 0; i < chunk_pages; i++, n++) {
			meta[n].zm_index = zone_index(z);
			meta[n].zm_guarded = guarded;
		}
		if (guarded) {
			meta[n].zm_index = zone_index(z);
			meta[n].zm_chunk_len = ZM_PGZ_GUARD;
			n++;
		}
	}
	if (guards) {
		os_atomic_add(&zone_guard_pages, guards, relaxed);
	}

#if ZSECURITY_CONFIG(SAD_FENG_SHUI)
	if (__improbable(zone_caching_disabled < 0)) {
		return zone_scramble_va_and_unlock(z, meta, runs, pages,
		           chunk_pages, guard_mask);
	}
#endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */

	zone_lock(z);

	for (uint32_t run = 0, n = 0; run < runs; run++) {
		zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
		n += chunk_pages + ((guard_mask >> run) & 1);
	}
	z->z_va_cur += z->z_percpu ? runs : pages;
}

static bool
zone_expand_pred_nope(__unused zone_t z)
{
	return false;
}

static inline void
ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
{
#if DEBUG || DEVELOPMENT
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
	    size, 0, 0, 0);
#else
	(void)size;
#endif
}

static inline void
ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
{
#if DEBUG || DEVELOPMENT
	task_t task = current_task_early();
	if (pages && task) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
	}
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
	    pages, 0, 0, 0);
#else
	(void)pages;
#endif
}

static inline bool
zone_supports_vm(zone_t z)
{
	/*
	 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
	 * to overcommit because they're used to reclaim memory
	 * (VM support).
	 */
	return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
	       z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
}

__attribute__((noinline))
static void
__ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
{
	uint64_t wait_start = 0;
	long mapped;

	thread_wakeup(VM_PAGEOUT_GC_EVENT);

	if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
		return;
	}

	mapped = os_atomic_load(&zone_pages_wired, relaxed);

	/*
	 * If the zone map is really exhausted, wait on the GC thread,
	 * donating our priority (which is important because the GC
	 * thread is at a rather low priority).
	 */
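	/*
	 * Note (descriptive, added for clarity): wait_ms below grows
	 * triangularly (1, 3, 6, 10, ... ms), so successive waits get
	 * progressively longer until the exhaustion timeout panic fires.
	 */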
	for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
		uint32_t wait_ms = n * (n + 1) / 2;
		uint64_t interval;

		if (n == 1) {
			wait_start = mach_absolute_time();
		} else {
			thread_wakeup(VM_PAGEOUT_GC_EVENT);
		}
		if (zone_exhausted_timeout > 0 &&
		    wait_ms > zone_exhausted_timeout) {
			panic("zone map exhaustion: waited for %dms "
			    "(pages: %ld, max: %ld, wanted: %d)",
			    wait_ms, mapped, zone_pages_wired_max, pgs);
		}

		clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
		    &interval);

		lck_spin_lock(&zone_exhausted_lock);
		lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
		    LCK_SLEEP_UNLOCK, &zone_pages_wired,
		    vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);

		mapped = os_atomic_load(&zone_pages_wired, relaxed);
	}
}

static bool
zone_expand_wait_for_pages(bool waited)
{
	if (waited) {
		return false;
	}
#if DEBUG || DEVELOPMENT
	if (zalloc_simulate_vm_pressure) {
		return false;
	}
#endif /* DEBUG || DEVELOPMENT */
	return !vm_pool_low();
}

static void
zone_expand_locked(zone_t z, zalloc_flags_t flags, bool (*pred)(zone_t))
{
	zone_security_flags_t zsflags = zone_security_config(z);
	struct zone_expand ze = {
		.ze_thread = current_thread(),
	};

	if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
		ze.ze_thread->options |= TH_OPT_VMPRIV;
		ze.ze_clear_priv = true;
	}

	if (ze.ze_thread->options & TH_OPT_VMPRIV) {
		/*
		 * When the thread is VM privileged,
		 * vm_page_grab() will call VM_PAGE_WAIT()
		 * without our knowledge, so we must unfortunately
		 * assume that it will be called.
		 *
		 * In practice it's not a big deal because
		 * Z_NOPAGEWAIT is not really used on zones
		 * that VM privileged threads are going to expand.
		 */
		ze.ze_pg_wait = true;
		ze.ze_vm_priv = true;
	}

	for (;;) {
		if (!pred) {
			/* NULL pred means "try just once" */
			pred = zone_expand_pred_nope;
		} else if (!pred(z)) {
			goto out;
		}

		if (z->z_expander == NULL) {
			z->z_expander = &ze;
			break;
		}

		if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
			change_sleep_inheritor(&z->z_expander, ze.ze_thread);
			ze.ze_next = z->z_expander;
			z->z_expander = &ze;
			break;
		}

		if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
			goto out;
		}

		z->z_expanding_wait = true;
		lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
		    LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
		    TH_UNINT, TIMEOUT_WAIT_FOREVER);
	}

	do {
		struct zone_page_metadata *meta = NULL;
		uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
		vm_page_t page_list = NULL;
		vm_offset_t addr = 0;
		int waited = 0;

		/*
		 * While we hold the zone lock, see whether there's VA we can:
		 * - complete from partial pages,
		 * - reuse from the sequester list.
		 *
		 * When the page is being populated we pretend we allocated
		 * an extra element so that zone_gc() can't attempt to free
		 * the chunk (as it could become empty while we wait for pages).
		 */
		if (zone_pva_is_null(z->z_pageq_va)) {
			zone_allocate_va_locked(z, flags);
		}

		meta = zone_meta_queue_pop(z, &z->z_pageq_va);
		addr = zone_meta_to_addr(meta);
		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
			cur_pages = meta->zm_page_index;
			meta -= cur_pages;
			addr -= ptoa(cur_pages);
			zone_meta_lock_in_partial(z, meta, cur_pages);
		}
		zone_unlock(z);

		/*
		 * And now allocate pages to populate our VA.
		 */
		if (z->z_percpu) {
			min_pages = z->z_chunk_pages;
		} else {
			min_pages = (uint32_t)atop(round_page(zone_elem_offs(z) +
			    zone_elem_size(z)));
		}

		/*
		 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
		 * if we're running out of zone memory
		 */
		if (__improbable(zone_map_nearing_exhaustion())) {
			__ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
		}

		ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));

		while (pages < z->z_chunk_pages - cur_pages) {
			vm_page_t m = vm_page_grab();

			if (m) {
				pages++;
				m->vmp_snext = page_list;
				page_list = m;
				vm_page_zero_fill(m);
				continue;
			}

			if (pages >= min_pages &&
			    !zone_expand_wait_for_pages(waited)) {
				break;
			}

			if ((flags & Z_NOPAGEWAIT) == 0) {
				/*
				 * The first time we're about to wait for pages,
				 * mention that to waiters and wake them all.
				 *
				 * Set `ze_pg_wait` in our zone_expand context
				 * so that waiters who care do not wait again.
				 */
				if (!ze.ze_pg_wait) {
					zone_lock(z);
					if (z->z_expanding_wait) {
						z->z_expanding_wait = false;
						wakeup_all_with_inheritor(&z->z_expander,
						    THREAD_AWAKENED);
					}
					ze.ze_pg_wait = true;
					zone_unlock(z);
				}

				waited++;
				VM_PAGE_WAIT();
				continue;
			}

			/*
			 * Undo everything and bail out:
			 *
			 * - free pages
			 * - undo the fake allocation if any
			 * - put the VA back on the VA page queue.
			 */
			vm_page_free_list(page_list, FALSE);
			ZONE_TRACE_VM_KERN_REQUEST_END(pages);

			zone_lock(z);

			if (cur_pages) {
				zone_meta_unlock_from_partial(z, meta, cur_pages);
			}
			if (meta) {
				zone_meta_queue_push(z, &z->z_pageq_va,
				    meta + cur_pages);
			}
			goto page_shortage;
		}

		vm_object_lock(kernel_object);
		kernel_memory_populate_object_and_unlock(kernel_object,
		    addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
		    zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
		    (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY)
		    ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);

		ZONE_TRACE_VM_KERN_REQUEST_END(pages);

		zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
	} while (pred(z));

page_shortage:
	if (z->z_expander == &ze) {
		z->z_expander = ze.ze_next;
	} else {
		assert(z->z_expander->ze_next == &ze);
		z->z_expander->ze_next = NULL;
	}
	if (z->z_expanding_wait) {
		z->z_expanding_wait = false;
		wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
	}
out:
	if (ze.ze_clear_priv) {
		ze.ze_thread->options &= ~TH_OPT_VMPRIV;
	}
}

static bool
zalloc_needs_refill(zone_t zone)
{
	if (zone->z_elems_free > zone->z_elems_rsv) {
		return false;
	}
	if (zone->z_wired_cur < zone->z_wired_max) {
		return true;
	}
	if (zone->exhaustible) {
		return false;
	}
	if (zone->expandable) {
		/*
		 * If we're expandable, just don't go through this again.
		 */
		zone->z_wired_max = ~0u;
		return true;
	}
	zone_unlock(zone);

	panic("zone '%s%s' exhausted", zone_heap_name(zone), zone_name(zone));
}

static void
zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
{
	zone_foreach(z) {
		if (z->no_callout) {
			/* z_async_refilling will never be set */
			continue;
		}

		zone_lock(z);
		if (z->z_self && z->z_async_refilling) {
			z->z_async_refilling = false;
			zone_expand_locked(z, Z_WAITOK, zalloc_needs_refill);
		}
		zone_unlock(z);
	}
}

static inline void
zone_expand_async_schedule_if_needed(zone_t zone)
{
	if (__improbable(startup_phase < STARTUP_SUB_THREAD_CALL)) {
		return;
	}

	if (zone->z_elems_free > zone->z_elems_rsv || zone->z_async_refilling ||
	    zone->no_callout) {
		return;
	}

	if (!zone->expandable && zone->z_wired_cur >= zone->z_wired_max) {
		return;
	}

	if (startup_phase < STARTUP_SUB_EARLY_BOOT) {
		return;
	}

	if (!vm_pool_low() || zone_supports_vm(zone)) {
		zone->z_async_refilling = true;
		thread_call_enter(&zone_expand_callout);
	}
}

#endif /* !ZALLOC_TEST */
#pragma mark zone jetsam integration
#if !ZALLOC_TEST

/*
 * We're being very conservative here and picking a value of 95%. We might
 * need to lower this if we find that we're not catching the problem and
 * are still hitting zone map exhaustion panics.
 */
#define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95

/*
 * Threshold above which largest zones should be included in the panic log
 */
#define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80

/*
 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
 * where X=zone_map_jetsam_limit.
 *
 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
 */
TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
    ZONE_MAP_JETSAM_LIMIT_DEFAULT);

kern_return_t
zone_map_jetsam_set_limit(uint32_t value)
{
	if (value <= 0 || value > 100) {
		return KERN_INVALID_VALUE;
	}

	zone_map_jetsam_limit = value;
	os_atomic_store(&zone_pages_jetsam_threshold,
	    zone_pages_wired_max * value / 100, relaxed);
	return KERN_SUCCESS;
}

void
get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
{
	vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
	*current_size = ptoa_64(phys_pages);
	*capacity = ptoa_64(zone_pages_wired_max);
}

void
get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
{
	zone_t largest_zone = zone_find_largest(zone_size);

	/*
	 * Append kalloc heap name to zone name (if zone is used by kalloc)
	 */
	snprintf(zone_name, zone_name_len, "%s%s",
	    zone_heap_name(largest_zone), largest_zone->z_name);
}

static bool
zone_map_nearing_threshold(unsigned int threshold)
{
	uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
	return phys_pages * 100 > zone_pages_wired_max * threshold;
}

bool
zone_map_nearing_exhaustion(void)
{
	vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);

	return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
}


#define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98

/*
 * Tries to kill a single process if it can attribute one to the largest
 * zone. If not, wakes up the memorystatus thread to walk through the
 * jetsam priority bands and kill processes.
 */
static zone_t
kill_process_in_largest_zone(void)
{
	pid_t pid = -1;
	uint64_t zone_size = 0;
	zone_t largest_zone = zone_find_largest(&zone_size);

	printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
	    ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
	    ptoa_64(zone_pages_wired_max),
	    (uint64_t)zone_submaps_approx_size(),
	    (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
	    zone_map_jetsam_limit);
	printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
	    largest_zone->z_name, (uintptr_t)zone_size);

	/*
	 * We want to make sure we don't call this function from userspace.
	 * Or we could end up trying to synchronously kill the process
	 * whose context we're in, causing the system to hang.
	 */
	assert(current_task() == kernel_task);

	/*
	 * If vm_object_zone is the largest, check to see if the number of
	 * elements in vm_map_entry_zone is comparable.
	 *
	 * If so, consider vm_map_entry_zone as the largest. This lets us target
	 * a specific process to jetsam to quickly recover from the zone map
	 * bloat.
	 */
	if (largest_zone == vm_object_zone) {
		unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
		unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
		/* Is the VM map entries zone count >= 98% of the VM objects zone count? */
		if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
			largest_zone = vm_map_entry_zone;
			printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
			    (uintptr_t)zone_size_wired(largest_zone));
		}
	}

	/* TODO: Extend this to check for the largest process in other zones as well. */
	if (largest_zone == vm_map_entry_zone) {
		pid = find_largest_process_vm_map_entries();
	} else {
		printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
		    "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
		    largest_zone->z_name);
	}
	if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
		printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
	}

	return largest_zone;
}

#endif /* !ZALLOC_TEST */
#pragma mark probabilistic gzalloc
#if !ZALLOC_TEST
#if CONFIG_PROB_GZALLOC

extern uint32_t random(void);
struct pgz_backtrace {
	uint32_t pgz_depth;
	int32_t pgz_bt[MAX_ZTRACE_DEPTH];
};

static int32_t PERCPU_DATA(pgz_sample_counter);
static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
static uint32_t pgz_uses; /* number of zones using PGZ */
static int32_t pgz_slot_avail;
#if OS_ATOMIC_HAS_LLSC
struct zone_page_metadata *pgz_slot_head;
#else
static struct pgz_slot_head {
	uint32_t psh_count;
	uint32_t psh_slot;
} pgz_slot_head;
#endif
struct zone_page_metadata *pgz_slot_tail;
static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;

static struct zone_page_metadata *
pgz_meta(uint32_t index)
{
	return &zone_info.zi_pgz_meta[2 * index + 1];
}

static struct pgz_backtrace *
pgz_bt(uint32_t slot, bool free)
{
	return &pgz_backtraces[2 * slot + free];
}

static void
pgz_backtrace(struct pgz_backtrace *bt, void *fp)
{
	struct backtrace_control ctl = {
		.btc_frame_addr = (uintptr_t)fp,
	};

	bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
	    (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
}

static uint32_t
pgz_slot(vm_offset_t addr)
{
	return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
}

static vm_offset_t
pgz_addr(uint32_t slot)
{
	return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
}
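
/*
 * PGZ layout note (illustrative, derived from the helpers above): the
 * pgz range alternates guard and data pages, so slot `s` lives at page
 * (2 * s + 1) of the range and is bracketed by unmapped guard pages.
 * pgz_slot()/pgz_addr() convert between slot indices and addresses,
 * and pgz_meta() accordingly picks the metadata of the data page, at
 * index (2 * s + 1).
 */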

static bool
pgz_sample(zalloc_flags_t flags)
{
	int32_t *counterp, cnt;

	counterp = PERCPU_GET(pgz_sample_counter);
	cnt = *counterp;
	if (__probable(cnt > 0)) {
		*counterp = cnt - 1;
		return false;
	}

	if (pgz_slot_avail <= 0) {
		return false;
	}

	/*
	 * zalloc_random_uniform() might block, so when the sampled allocation
	 * requested Z_NOWAIT, set the counter to `-1` which will cause
	 * the next allocation that can block to generate a new random value.
	 * No allocation on this CPU will sample until then.
	 */
	if (flags & Z_NOWAIT) {
		*counterp = -1;
	} else {
		enable_preemption();
		*counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
		disable_preemption();
	}

	return cnt == 0;
}
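
/*
 * Note on the sampling frequency: the counter is re-armed with a value
 * uniform in [0, 2 * pgz_sample_rate), whose mean is pgz_sample_rate,
 * so on average one in every pgz_sample_rate eligible allocations on
 * a given CPU gets protected.
 */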

static inline bool
pgz_slot_alloc(uint32_t *slot)
{
	struct zone_page_metadata *m;
	uint32_t tries = 100;

	disable_preemption();

#if OS_ATOMIC_USE_LLSC
	int32_t ov, nv;
	os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
		if (__improbable(ov <= 0)) {
			os_atomic_rmw_loop_give_up({
				enable_preemption();
				return false;
			});
		}
		nv = ov - 1;
	});
#else
	if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
		os_atomic_inc(&pgz_slot_avail, relaxed);
		enable_preemption();
		return false;
	}
#endif

again:
	if (__improbable(tries-- == 0)) {
		/*
		 * Too much contention,
		 * extremely unlikely but do not stay stuck.
		 */
		os_atomic_inc(&pgz_slot_avail, relaxed);
		enable_preemption();
		return false;
	}

#if OS_ATOMIC_HAS_LLSC
	do {
		m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
		if (__improbable(m->zm_pgz_slot_next == NULL)) {
			/*
			 * Either we are waiting for an enqueuer (unlikely)
			 * or we are competing with another core and
			 * are looking at a popped element.
			 */
			os_atomic_clear_exclusive();
			goto again;
		}
	} while (!os_atomic_store_exclusive(&pgz_slot_head,
	    m->zm_pgz_slot_next, relaxed));
#else
	struct zone_page_metadata *base = zone_info.zi_pgz_meta;
	struct pgz_slot_head ov, nv;
	os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
		m = &base[ov.psh_slot * 2];
		if (__improbable(m->zm_pgz_slot_next == NULL)) {
			/*
			 * Either we are waiting for an enqueuer (unlikely)
			 * or we are competing with another core and
			 * are looking at a popped element.
			 */
			os_atomic_rmw_loop_give_up(goto again);
		}
		nv.psh_count = ov.psh_count + 1;
		nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
	});
#endif

	enable_preemption();

	m->zm_pgz_slot_next = NULL;
	*slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
	return true;
}

static inline bool
pgz_slot_free(uint32_t slot)
{
	struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
	struct zone_page_metadata *t;

	disable_preemption();
	t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
	os_atomic_store(&t->zm_pgz_slot_next, m, release);
	os_atomic_inc(&pgz_slot_avail, relaxed);
	enable_preemption();

	return true;
}
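
/*
 * Design note (added for clarity): free slots are recycled through an
 * MPSC-style linked queue. pgz_slot_free() publishes at the tail with
 * an xchg followed by a release store of the link, while
 * pgz_slot_alloc() pops from the head and retries when it observes a
 * node whose link hasn't been published yet.
 */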
5290
5291 /*!
5292 * @function pgz_protect()
5293 *
5294 * @brief
5295 * Try to protect an allocation with PGZ.
5296 *
5297 * @param zone The zone the allocation was made against.
5298 * @param addr An allocated element address to protect.
5299 * @param flags The @c zalloc_flags_t passed to @c zalloc.
5300 * @param fp The caller frame pointer (for the backtrace).
5301 * @returns The new address for the element, or @c addr.
5302 */
5303 __attribute__((noinline))
5304 static vm_offset_t
pgz_protect(zone_t zone,vm_offset_t addr,zalloc_flags_t flags,void * fp)5305 pgz_protect(zone_t zone, vm_offset_t addr, zalloc_flags_t flags, void *fp)
5306 {
5307 kern_return_t kr;
5308 uint32_t slot;
5309
5310 if (!pgz_slot_alloc(&slot)) {
5311 return addr;
5312 }
5313
5314 /*
5315 * Try to double-map the page (may fail if Z_NOWAIT).
5316 * We will always find a PA because pgz_init() pre-expanded the pmap.
5317 */
5318 vm_offset_t new_addr = pgz_addr(slot);
5319 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5320
5321 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5322 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5323 (flags & Z_NOWAIT) ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5324
5325 if (__improbable(kr != KERN_SUCCESS)) {
5326 pgz_slot_free(slot);
5327 return addr;
5328 }
5329
5330 struct zone_page_metadata tmp = {
5331 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5332 .zm_index = zone_index(zone),
5333 };
5334 struct zone_page_metadata *meta = pgz_meta(slot);
5335
5336 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5337 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5338 pgz_backtrace(pgz_bt(slot, false), fp);
5339
5340 return new_addr + (addr & PAGE_MASK);
5341 }
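/*
 * Worked example (hypothetical addresses): protecting addr = A + 0x2c0,
 * where A is the page-aligned start of the element's page, returns
 * pgz_addr(slot) + 0x2c0.  Both mappings reference the same physical
 * page, but the PGZ alias is bracketed by unmapped guard pages, so an
 * access running past the end of the page faults immediately:
 * <code>
 * pa       = kvtophys(A);                  // physical page of the element
 * new_addr = pgz_addr(slot);               // double-mapped alias
 * return new_addr + (addr & PAGE_MASK);    // == pgz_addr(slot) + 0x2c0
 * </code>
 */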
5342
5343 /*!
5344 * @function pgz_unprotect()
5345 *
5346 * @brief
5347 * Releases a PGZ slot and returns the original address of a freed element.
5348 *
5349 * @param addr A PGZ protected element address.
5350 * @param fp The caller frame pointer (for the backtrace).
5351 * @returns The non-protected address for the element
5352 * that was passed to @c pgz_protect().
5353 */
5354 __attribute__((noinline))
5355 static vm_offset_t
5356 pgz_unprotect(vm_offset_t addr, void *fp)
5357 {
5358 struct zone_page_metadata *meta;
5359 struct zone_page_metadata tmp;
5360 uint32_t slot;
5361
5362 slot = pgz_slot(addr);
5363 meta = zone_meta_from_addr(addr);
5364 tmp = *meta;
5365 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5366 goto double_free;
5367 }
5368
5369 pmap_remove(kernel_pmap, trunc_page(addr), trunc_page(addr) + PAGE_SIZE);
5370
5371 pgz_backtrace(pgz_bt(slot, true), fp);
5372
5373 tmp.zm_chunk_len = ZM_PGZ_FREE;
5374 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5375 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5376 goto double_free;
5377 }
5378
5379 pgz_slot_free(slot);
5380 return tmp.zm_pgz_orig_addr;
5381
5382 double_free:
5383 panic_fault_address = addr;
5384 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5385 panic("probabilistic gzalloc double free: %p", (void *)addr);
5386 }
5387
5388 bool
5389 pgz_owned(mach_vm_address_t addr)
5390 {
5391 #if CONFIG_KERNEL_TBI
5392 addr = VM_KERNEL_TBI_FILL(addr);
5393 #endif /* CONFIG_KERNEL_TBI */
5394
5395 return mach_vm_range_contains(&zone_info.zi_pgz_range, addr);
5396 }
5397
5398
5399 __attribute__((always_inline))
5400 vm_offset_t
5401 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5402 {
5403 struct zone_page_metadata *meta;
5404
5405 if (__probable(!pgz_owned(addr))) {
5406 return (vm_offset_t)addr;
5407 }
5408
5409 if (zone_addr_size_crosses_page(addr, size)) {
5410 panic("invalid size for PGZ protected address %p:%p",
5411 (void *)addr, (void *)(addr + size));
5412 }
5413
5414 meta = zone_meta_from_addr((vm_offset_t)addr);
5415 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5416 panic_fault_address = (vm_offset_t)addr;
5417 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5418 }
5419
5420 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5421 }
5422
5423 __attribute__((always_inline))
5424 vm_offset_t
5425 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5426 {
5427 struct zone_page_metadata *meta;
5428 struct zone_page_metadata tmp;
5429
5430 if (__probable(!pgz_owned(addr))) {
5431 return addr;
5432 }
5433
5434 meta = zone_meta_from_addr(addr);
5435 tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5436
5437 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5438
5439 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5440 return 0;
5441 }
5442
5443 if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5444 return 0;
5445 }
5446
5447 return addr;
5448 }
5449
5450 static void
5451 pgz_zone_init(zone_t z)
5452 {
5453 char zn[MAX_ZONE_NAME];
5454 char zv[MAX_ZONE_NAME];
5455 char key[30];
5456
5457 if (zone_elem_size(z) > PAGE_SIZE) {
5458 return;
5459 }
5460
5461 if (zone_index(z) == ZONE_ID_SELECT_SET) {
5462 return;
5463 }
5464
5465 if (pgz_all) {
5466 os_atomic_inc(&pgz_uses, relaxed);
5467 z->z_pgz_tracked = true;
5468 return;
5469 }
5470
5471 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5472
5473 for (int i = 1;; i++) {
5474 snprintf(key, sizeof(key), "pgz%d", i);
5475 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5476 break;
5477 }
5478 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5479 os_atomic_inc(&pgz_uses, relaxed);
5480 z->z_pgz_tracked = true;
5481 break;
5482 }
5483 }
5484 }
5485
5486 __startup_func
5487 static vm_size_t
5488 pgz_get_size(void)
5489 {
5490 if (pgz_slots == UINT32_MAX) {
5491 /*
5492 * Scale with RAM size: sane_size >> 22, i.e. roughly 256 slots per GiB
5493 */
5494 pgz_slots = (uint32_t)(sane_size >> 22);
5495 }
5496
5497 /*
5498 * Make sure that the slot allocation scheme works.
5499 * See pgz_slot_alloc() / pgz_slot_free().
5500 */
5501 if (pgz_slots < zpercpu_count() * 4) {
5502 pgz_slots = zpercpu_count() * 4;
5503 }
5504 if (pgz_slots >= UINT16_MAX) {
5505 pgz_slots = UINT16_MAX - 1;
5506 }
5507
5508 /*
5509 * Quarantine is 33% of slots by default, no more than 90%.
5510 */
5511 if (pgz_quarantine == 0) {
5512 pgz_quarantine = pgz_slots / 3;
5513 }
5514 if (pgz_quarantine > pgz_slots * 9 / 10) {
5515 pgz_quarantine = pgz_slots * 9 / 10;
5516 }
5517 pgz_slot_avail = pgz_slots - pgz_quarantine;
5518
5519 return ptoa(2 * pgz_slots + 1);
5520 }
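/*
 * Worked example (assuming 4KiB pages and no overriding boot-args):
 * with 8GiB of RAM, sane_size >> 22 yields 2048 slots; the quarantine
 * defaults to 2048 / 3 = 682 slots, leaving pgz_slot_avail = 1366.
 * The function then reserves ptoa(2 * 2048 + 1) = 4097 pages (~16MiB
 * of VA) so that slot pages can be interleaved with guard pages.
 * <code>
 * pgz_slots      = (8ULL << 30) >> 22;     // 2048
 * pgz_quarantine = 2048 / 3;               // 682
 * pgz_slot_avail = 2048 - 682;             // 1366
 * return ptoa(2 * 2048 + 1);               // 4097 pages
 * </code>
 */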
5521
5522 __startup_func
5523 static void
5524 pgz_init(void)
5525 {
5526 if (!pgz_uses) {
5527 return;
5528 }
5529
5530 if (pgz_sample_rate == 0) {
5531 /*
5532 * If no rate was provided, pick a random one that scales
5533 * with the number of protected zones.
5534 *
5535 * Average two uniform draws (a triangular distribution)
5536 * to avoid having too many really fast sample rates.
5537 */
5538 uint32_t factor = MIN(pgz_uses, 10);
5539 uint32_t max_rate = 1000 * factor;
5540 uint32_t min_rate = 100 * factor;
5541
5542 pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5543 zalloc_random_uniform32(min_rate, max_rate)) / 2;
5544 }
5545
5546 struct mach_vm_range *r = &zone_info.zi_pgz_range;
5547 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5548 zone_meta_populate(r->min_address, mach_vm_range_size(r));
5549
5550 for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5551 zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5552 }
5553
5554 for (size_t i = 1; i < pgz_slots; i++) {
5555 zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5556 &zone_info.zi_pgz_meta[2 * i + 1];
5557 }
5558 #if OS_ATOMIC_HAS_LLSC
5559 pgz_slot_head = &zone_info.zi_pgz_meta[1];
5560 #endif
5561 pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5562
5563 pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5564 2 * pgz_slots, ZALIGN_PTR);
5565
5566 /*
5567 * Expand the pmap so that pmap_enter_options_addr()
5568 * in pgz_protect() never needs to call pmap_expand().
5569 */
5570 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5571 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5572 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5573 PMAP_OPTIONS_NOENTER, NULL);
5574 }
5575
5576 /* do this last as this will enable pgz */
5577 percpu_foreach(counter, pgz_sample_counter) {
5578 *counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5579 }
5580 }
5581 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
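/*
 * Resulting layout (derived from the loops above): even-indexed metadata
 * entries describe guard pages and odd-indexed entries describe usable
 * slots, so slot i corresponds to metadata entry 2 * i + 1 and every
 * slot page is bracketed by two guard pages:
 * <code>
 * [G][S0][G][S1][G][S2] ... [S(pgz_slots-1)][G]
 * </code>
 */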
5582
5583 static void
5584 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5585 {
5586 struct pgz_backtrace *bt = pgz_bt(slot, free);
5587 const char *what = free ? "Free" : "Allocation";
5588 uintptr_t buf[MAX_ZTRACE_DEPTH];
5589
5590 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5591 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5592 return;
5593 }
5594
5595 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5596 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5597
5598 paniclog_append_noflush(" %s Backtrace:\n", what);
5599 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5600 if (has_syms) {
5601 paniclog_append_noflush(" %p ", (void *)buf[i]);
5602 panic_print_symbol_name(buf[i]);
5603 paniclog_append_noflush("\n");
5604 } else {
5605 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5606 }
5607 }
5608 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5609 }
5610
5611 static void
5612 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5613 {
5614 struct zone_page_metadata *meta;
5615 vm_offset_t elem, esize;
5616 const char *type;
5617 const char *prob;
5618 uint32_t slot;
5619 zone_t z;
5620
5621 slot = pgz_slot(addr);
5622 meta = pgz_meta(slot);
5623 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5624
5625 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5626
5627 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5628 meta->zm_index &&
5629 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5630 z = &zone_array[meta->zm_index];
5631 } else {
5632 paniclog_append_noflush(" Zone : <unknown>\n");
5633 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5634 paniclog_append_noflush("\n");
5635 return;
5636 }
5637
5638 esize = zone_elem_size(z);
5639 paniclog_append_noflush(" Zone : %s%s\n",
5640 zone_heap_name(z), zone_name(z));
5641 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5642 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5643 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5644
5645 if (addr < elem) {
5646 type = "out-of-bounds(underflow) + use-after-free";
5647 prob = "low";
5648 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5649 type = "double-free";
5650 prob = "high";
5651 } else if (addr < elem + esize) {
5652 type = "use-after-free";
5653 prob = "high";
5654 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5655 type = "out-of-bounds + use-after-free";
5656 prob = "low";
5657 } else {
5658 type = "out-of-bounds";
5659 prob = "high";
5660 }
5661 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5662 type, prob);
5663 if (addr < elem) {
5664 paniclog_append_noflush(" Access : %d byte(s) before\n",
5665 (uint32_t)(elem - addr) + 1);
5666 } else if (addr < elem + esize) {
5667 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5668 (uint32_t)(addr - elem) + 1);
5669 } else {
5670 paniclog_append_noflush(" Access : %d byte(s) past\n",
5671 (uint32_t)(addr - (elem + esize)) + 1);
5672 }
5673
5674 panic_display_pgz_bt(has_syms, slot, false);
5675 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5676 panic_display_pgz_bt(has_syms, slot, true);
5677 }
5678
5679 paniclog_append_noflush("\n");
5680 }
5681
5682 #endif /* CONFIG_PROB_GZALLOC */
5683 #endif /* !ZALLOC_TEST */
5684 #pragma mark zfree
5685 #if !ZALLOC_TEST
5686
5687 /*!
5688 * @defgroup zfree
5689 * @{
5690 *
5691 * @brief
5692 * The codepath for zone frees.
5693 *
5694 * @discussion
5695 * There are 4 major ways to free memory that end up in the zone allocator:
5696 * - @c zfree()
5697 * - @c zfree_percpu()
5698 * - @c kfree*()
5699 * - @c zfree_permanent()
5700 *
5701 * While permanent zones have their own allocation scheme, all other codepaths
5702 * will eventually go through the @c zfree_ext() choking point.
5703 *
5704 * Ignoring the @c gzalloc_free() codepath, the decision tree looks like this:
5705 * <code>
5706 * zfree_ext()
5707 * ├────[ cached fast path ]────╮
5708 * │ │
5709 * │ │
5710 * ├───> zfree_cached_slow() ───┤
5711 * │ │ │
5712 * │ v │
5713 * ╰───> zfree_item() ──────────┴───>
5714 * </code>
5715 *
5716 * @c zfree_ext() takes care of all the generic work to perform on an element
5717 * before it is freed (zeroing, logging, tagging, ...) then will hand it off to:
5718 * - @c zfree_item() if zone caching is off
5719 * - @c zfree_cached() if zone caching is on.
5720 *
5721 * @c zfree_cached() can make a number of decisions:
5722 * - a fast path if the (f) or (a) magazines have space (preemption disabled),
5723 * - using the cpu local or recirculation depot calling @c zfree_cached_slow(),
5724 * - falling back to @c zfree_item() when CPU caching has been disabled.
5725 */
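/*
 * A minimal caller-side sketch (the widget names are hypothetical, and
 * assume the ZONE_DEFINE_TYPE / zalloc_flags interfaces from zalloc.h):
 * <code>
 * struct widget { uint64_t w_id; };
 * ZONE_DEFINE_TYPE(widget_zone, "example.widget", struct widget, ZC_NONE);
 *
 * struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 * ...
 * zfree(widget_zone, w);    // zeroes the element, then enters zfree_ext()
 * </code>
 */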
5726
5727 #if KASAN_ZALLOC
5728 /*
5729 * Called from zfree() to add the element being freed to the KASan quarantine.
5730 *
5731 * Returns true if the newly-freed element made it into the quarantine without
5732 * displacing another, false otherwise. In the latter case, addrp points to the
5733 * address of the displaced element, which will be freed by the zone.
5734 */
5735 static bool
5736 kasan_quarantine_freed_element(
5737 zone_t *zonep, /* the zone the element is being freed to */
5738 void **addrp) /* address of the element being freed */
5739 {
5740 zone_t zone = *zonep;
5741 void *addr = *addrp;
5742
5743 /*
5744 * Resize back to the real allocation size and hand off to the KASan
5745 * quarantine. `addr` may then point to a different allocation, if the
5746 * current element replaced another in the quarantine. The zone then
5747 * takes ownership of the swapped out free element.
5748 */
5749 vm_size_t usersz = zone_elem_size(zone) - 2 * zone->z_kasan_redzone;
5750 vm_size_t sz = usersz;
5751
5752 if (addr && zone->z_kasan_redzone) {
5753 kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC);
5754 addr = (void *)kasan_dealloc((vm_address_t)addr, &sz);
5755 assert(sz == zone_elem_size(zone));
5756 }
5757 if (addr && !zone->kasan_noquarantine) {
5758 kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, zonep, usersz);
5759 if (!addr) {
5760 return TRUE;
5761 }
5762 }
5763 if (addr && zone->kasan_noquarantine) {
5764 kasan_unpoison(addr, zone_elem_size(zone));
5765 }
5766 *addrp = addr;
5767 return FALSE;
5768 }
5769 #endif /* KASAN_ZALLOC */
5770
5771 __header_always_inline void
5772 zfree_drop(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze,
5773 bool recirc)
5774 {
5775 vm_offset_t esize = zone_elem_size(zone);
5776
5777 if (zone_meta_mark_free(meta, ze) == recirc) {
5778 zone_meta_double_free_panic(zone, ze, __func__);
5779 }
5780
5781 vm_offset_t old_size = meta->zm_alloc_size;
5782 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5783 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5784
5785 if (new_size == 0) {
5786 /* whether the page was on the intermediate or all_used queue, move it to free */
5787 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5788 zone->z_wired_empty += meta->zm_chunk_len;
5789 } else if (old_size + esize > max_size) {
5790 /* first free element on page, move from all_used */
5791 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5792 }
5793 }
5794
5795 static void
5796 zfree_item(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze)
5797 {
5798 /* transfer preemption count to lock */
5799 zone_lock_nopreempt_check_contention(zone, NULL);
5800
5801 zfree_drop(zone, meta, ze, false);
5802 zone_elems_free_add(zone, 1);
5803
5804 zone_unlock(zone);
5805 }
5806
5807 __attribute__((noinline))
5808 static void
5809 zfree_cached_slow(zone_t zone, struct zone_page_metadata *meta,
5810 zone_element_t ze, zone_cache_t cache)
5811 {
5812 struct zone_depot mags;
5813 zone_magazine_t mag = NULL;
5814 uint32_t depot_max;
5815 uint16_t n_mags = 0;
5816
5817 if (zone == zc_magazine_zone) {
5818 mag = (zone_magazine_t)zone_element_addr(zone, ze,
5819 zone_elem_size(zone));
5820 #if KASAN_ZALLOC
5821 kasan_poison_range((vm_offset_t)mag, zone_elem_size(zone),
5822 ASAN_VALID);
5823 #endif
5824 } else {
5825 mag = zone_magazine_alloc(Z_NOWAIT);
5826 if (__improbable(mag == NULL)) {
5827 return zfree_item(zone, meta, ze);
5828 }
5829 mag->zm_cur = 1;
5830 mag->zm_elems[0] = ze;
5831 }
5832
5833 mag = zone_magazine_replace(&cache->zc_free_cur,
5834 &cache->zc_free_elems, mag);
5835
5836 z_debug_assert(cache->zc_free_cur <= 1);
5837 z_debug_assert(mag->zm_cur == zc_mag_size());
5838
5839 /*
5840 * Depot growth policy:
5841 *
5842 * The zc_alloc and zc_free magazines are on average half empty/full,
5843 * hence count as "1" unit of zc_mag_size().
5844 *
5845 * The local depot holds each additional zc_mag_size() worth of
5846 * elements we are allowed, up to `zc_depot_max`.
5847 *
5848 * If pushing this magazine puts us in excess of `zc_depot_max`, we trim
5849 * (zc_recirc_batch) magazines out to the recirculation depot, to
5850 * amortize taking the zone lock (a worked example follows this function).
5851 *
5852 * Note that `zc_depot_max` can be mutated by the GC concurrently,
5853 * so take a copy that we use throughout.
5854 */
5855 depot_max = os_atomic_load(&cache->zc_depot_max, relaxed);
5856 if (2 * zc_mag_size() <= depot_max) {
5857 zone_depot_lock_nopreempt(cache);
5858
5859 STAILQ_INSERT_TAIL(&cache->zc_depot, mag, zm_link);
5860 cache->zc_depot_cur++;
5861
5862 if (__probable((cache->zc_depot_cur + 1) * zc_mag_size() <=
5863 depot_max)) {
5864 return zone_depot_unlock(cache);
5865 }
5866
5867 /*
5868 * Never free more than half of the magazines.
5869 */
5870 n_mags = MIN(zc_recirc_batch, cache->zc_depot_cur / 2);
5871 assert(n_mags && n_mags < cache->zc_depot_cur);
5872
5873 STAILQ_FIRST(&mags) = mag = STAILQ_FIRST(&cache->zc_depot);
5874 for (uint16_t i = n_mags; i-- > 1;) {
5875 mag = STAILQ_NEXT(mag, zm_link);
5876 }
5877
5878 cache->zc_depot_cur -= n_mags;
5879 STAILQ_FIRST(&cache->zc_depot) = STAILQ_NEXT(mag, zm_link);
5880 STAILQ_NEXT(mag, zm_link) = NULL;
5881
5882 zone_depot_unlock(cache);
5883
5884 mags.stqh_last = &STAILQ_NEXT(mag, zm_link);
5885 } else {
5886 enable_preemption();
5887
5888 n_mags = 1;
5889 STAILQ_FIRST(&mags) = mag;
5890 mags.stqh_last = &STAILQ_NEXT(mag, zm_link);
5891 STAILQ_NEXT(mag, zm_link) = NULL;
5892 }
5893
5894 /*
5895 * Preflight validity of all the elements before we touch the zone
5896 * metadata, and then insert them into the recirculation depot.
5897 */
5898 STAILQ_FOREACH(mag, &mags, zm_link) {
5899 for (uint16_t i = 0; i < zc_mag_size(); i++) {
5900 zone_element_validate(zone, mag->zm_elems[i]);
5901 }
5902 }
5903
5904 zone_lock_check_contention(zone, cache);
5905
5906 STAILQ_FOREACH(mag, &mags, zm_link) {
5907 for (uint16_t i = 0; i < zc_mag_size(); i++) {
5908 zone_element_t e = mag->zm_elems[i];
5909
5910 if (!zone_meta_mark_free(zone_meta_from_element(e), e)) {
5911 zone_meta_double_free_panic(zone, e, __func__);
5912 }
5913 }
5914 }
5915 STAILQ_CONCAT(&zone->z_recirc, &mags);
5916 zone->z_recirc_cur += n_mags;
5917
5918 zone_elems_free_add(zone, n_mags * zc_mag_size());
5919
5920 zone_unlock(zone);
5921 }
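/*
 * Worked example of the depot policy above (zc_mag_size() == 8,
 * depot_max == 64 and zc_recirc_batch == 8 are assumed values):
 * 2 * 8 <= 64, so full free magazines are pushed onto the local depot.
 * Once zc_depot_cur reaches 8, the keep-check (8 + 1) * 8 = 72 > 64
 * fails, and MIN(8, 8 / 2) = 4 magazines are detached and recirculated
 * under a single acquisition of the zone lock.
 */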
5922
5923 /*
5924 * This function is noinline when zlog can be used so that backtracing
5925 * can reliably skip the boring zfree_ext() and zfree_log()
5926 * frames.
5927 */
5928 #if ZONE_ENABLE_LOGGING
5929 __attribute__((noinline))
5930 #endif /* ZONE_ENABLE_LOGGING */
5931 void
5932 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, vm_size_t elem_size)
5933 {
5934 struct zone_page_metadata *page_meta;
5935 vm_offset_t elem = (vm_offset_t)addr;
5936 zone_element_t ze;
5937 int cpu;
5938
5939 DTRACE_VM2(zfree, zone_t, zone, void*, addr);
5940
5941 #if CONFIG_KERNEL_TBI && KASAN_TBI
5942 if (zone->z_tbi_tag) {
5943 elem = kasan_tbi_tag_zfree(elem, elem_size, zone->z_percpu);
5944 /* addr is still consumed in the function: gzalloc_free */
5945 addr = (void *)elem;
5946 }
5947 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
5948 #if CONFIG_PROB_GZALLOC
5949 if (__improbable(pgz_owned(elem))) {
5950 elem = pgz_unprotect(elem, __builtin_frame_address(0));
5951 addr = (void *)elem;
5952 }
5953 #endif /* CONFIG_PROB_GZALLOC */
5954 #if VM_TAG_SIZECLASSES
5955 if (__improbable(zone->z_uses_tags)) {
5956 vm_tag_t tag = *ztSlot(zone, elem) >> 1;
5957 // set the tag with b0 clear so the block remains inuse
5958 *ztSlot(zone, elem) = 0xFFFE;
5959 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
5960 -(long)elem_size);
5961 }
5962 #endif /* VM_TAG_SIZECLASSES */
5963
5964 #if KASAN_ZALLOC
5965 /*
5966 * Call zone_element_resolve() and throw away the results in
5967 * order to validate the element and its zone membership.
5968 * Any validation panics need to happen now, while we're
5969 * still close to the caller.
5970 *
5971 * Note that elem has not been adjusted, so we have to remove the
5972 * redzone first.
5973 */
5974 zone_element_t ze_discard;
5975 vm_offset_t elem_actual = elem - zone->z_kasan_redzone;
5976 (void)zone_element_resolve(zone, elem_actual, &ze_discard);
5977
5978 if (kasan_quarantine_freed_element(&zone, &addr)) {
5979 return;
5980 }
5981 /*
5982 * kasan_quarantine_freed_element() might return a different
5983 * {zone, addr} than the one being freed for kalloc heaps.
5984 *
5985 * Make sure we reload everything.
5986 */
5987 elem = (vm_offset_t)addr;
5988 elem_size = zone_elem_size(zone);
5989 #endif
5990 #if KASAN_TBI
5991 kasan_tbi_log_bt(zone, ZOP_FREE, elem, __builtin_frame_address(0));
5992 #endif /* KASAN_TBI */
5993 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
5994 if (__improbable(zone->z_btlog)) {
5995 zfree_log(zone->z_btlog, elem, __builtin_frame_address(0));
5996 }
5997 #endif /* ZONE_ENABLE_LOGGING */
5998
5999 #if KASAN_ZALLOC
6000 if (zone->z_percpu) {
6001 zpercpu_foreach_cpu(i) {
6002 kasan_poison_range(elem + ptoa(i), elem_size,
6003 ASAN_HEAP_FREED);
6004 }
6005 } else {
6006 kasan_poison_range(elem, elem_size, ASAN_HEAP_FREED);
6007 }
6008 #endif
6009
6010 page_meta = zone_element_resolve(zone, elem, &ze);
6011 if (zone_meta_is_free(page_meta, ze)) {
6012 zone_meta_double_free_panic(zone, ze, __func__);
6013 }
6014
6015 disable_preemption();
6016 cpu = cpu_number();
6017 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += elem_size;
6018
6019 if (zone->z_pcpu_cache) {
6020 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6021
6022 if (cache->zc_free_cur >= zc_mag_size()) {
6023 if (cache->zc_alloc_cur >= zc_mag_size()) {
6024 return zfree_cached_slow(zone, page_meta, ze, cache);
6025 }
6026
6027 zone_cache_swap_magazines(cache);
6028 }
6029
6030 if (__probable(cache->zc_alloc_elems)) {
6031 uint16_t idx = cache->zc_free_cur++;
6032 if (idx >= zc_mag_size()) {
6033 zone_accounting_panic(zone, "zc_free_cur overflow");
6034 }
6035 cache->zc_free_elems[idx] = ze;
6036
6037 return enable_preemption();
6038 }
6039 }
6040
6041 return zfree_item(zone, page_meta, ze);
6042 }
6043
6044 void
6045 (zfree)(union zone_or_view zov, void *addr)
6046 {
6047 zone_t zone = zov.zov_view->zv_zone;
6048 zone_stats_t zstats = zov.zov_view->zv_stats;
6049 vm_offset_t esize = zone_elem_size(zone);
6050
6051 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6052 assert(!zone->z_percpu);
6053 #if !KASAN_KALLOC
6054 bzero(addr, esize);
6055 #endif /* !KASAN_KALLOC */
6056 zfree_ext(zone, zstats, addr, esize);
6057 }
6058
6059 __attribute__((noinline))
6060 void
6061 zfree_percpu(union zone_or_view zov, void *addr)
6062 {
6063 zone_t zone = zov.zov_view->zv_zone;
6064 zone_stats_t zstats = zov.zov_view->zv_stats;
6065 vm_offset_t esize = zone_elem_size(zone);
6066
6067 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6068 assert(zone->z_percpu);
6069 addr = (void *)__zpcpu_demangle(addr);
6070 #if !KASAN_KALLOC
6071 zpercpu_foreach_cpu(i) {
6072 bzero((char *)addr + ptoa(i), esize);
6073 }
6074 #endif /* !KASAN_KALLOC */
6075 zfree_ext(zone, zstats, addr, esize);
6076 }
6077
6078 void
6079 (zfree_id)(zone_id_t zid, void *addr)
6080 {
6081 (zfree)(&zone_array[zid], addr);
6082 }
6083
6084 void
6085 (zfree_ro)(zone_id_t zid, void *addr)
6086 {
6087 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6088 zone_t zone = zone_by_id(zid);
6089 zone_stats_t zstats = zone->z_stats;
6090 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6091
6092 #if ZSECURITY_CONFIG(READ_ONLY)
6093 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6094 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6095 #elif !KASAN_KALLOC
6096 (void)zid;
6097 bzero(addr, esize);
6098 #endif /* !KASAN_KALLOC */
6099 zfree_ext(zone, zstats, addr, esize);
6100 }
6101
6102 /*! @} */
6103 #endif /* !ZALLOC_TEST */
6104 #pragma mark zalloc
6105 #if !ZALLOC_TEST
6106
6107 /*!
6108 * @defgroup zalloc
6109 * @{
6110 *
6111 * @brief
6112 * The codepath for zone allocations.
6113 *
6114 * @discussion
6115 * There are 4 major ways to allocate memory that end up in the zone allocator:
6116 * - @c zalloc(), @c zalloc_flags(), ...
6117 * - @c zalloc_percpu()
6118 * - @c kalloc*()
6119 * - @c zalloc_permanent()
6120 *
6121 * While permanent zones have their own allocation scheme, all other codepaths
6122 * will eventually go through the @c zalloc_ext() choking point.
6123 *
6124 * Ignoring the @c zalloc_gz() codepath, the decision tree looks like this:
6125 * <code>
6126 * zalloc_ext()
6127 * │
6128 * ├───> zalloc_cached() ──────> zalloc_cached_fast() ───╮
6129 * │ │ ^ │
6130 * │ │ │ │
6131 * │ ╰───> zalloc_cached_slow() ───╯ │
6132 * │ │ │
6133 * │<─────────────────╮ ├─────────────╮ │
6134 * │ │ │ │ │
6135 * │ │ v │ │
6136 * │<───────╮ ╭──> zalloc_item_slow() ────┤ │
6137 * │ │ │ │ │
6138 * │ │ │ v │
6139 * ╰───> zalloc_item() ──────────> zalloc_item_fast() ───┤
6140 * │
6141 * v
6142 * zalloc_return()
6143 * </code>
6144 *
6145 *
6146 * The @c zalloc_item() track is used when zone caching is off:
6147 * - @c zalloc_item_fast() is used when there are enough elements available,
6148 * - @c zalloc_item_slow() is used when a refill is needed, which can cause
6149 * the zone to grow. This is the only codepath that refills.
6150 *
6151 * This track uses the zone lock for serialization:
6152 * - taken in @c zalloc_item(),
6153 * - maintained during @c zalloc_item_slow() (possibly dropped and re-taken),
6154 * - dropped in @c zalloc_item_fast().
6155 *
6156 *
6157 * The @c zalloc_cached() track is used when zone caching is on:
6158 * - @c zalloc_cached_fast() is taken when the cache has elements,
6159 * - @c zalloc_cached_slow() is taken if a cache refill is needed.
6160 * It can choose among several strategies:
6161 * ~ @c zalloc_cached_from_depot() to try to reuse cpu stashed magazines,
6162 * ~ @c zalloc_cached_from_recirc() using the global recirculation depot
6163 * @c z_recirc,
6164 * ~ using zalloc_import() if the zone has enough elements,
6165 * ~ falling back to the @c zalloc_item() track if zone caching is disabled
6166 * due to VM pressure or when the zone has no available elements.
6167 *
6168 * This track disables preemption for serialization:
6169 * - preemption is disabled in @c zalloc_ext(),
6170 * - kept disabled during @c zalloc_cached_slow(), converted into a zone lock
6171 * if switching to @c zalloc_item_slow(),
6172 * - preemption is reenabled in @c zalloc_cached_fast().
6173 *
6174 * @c zalloc_cached_from_depot() also takes depot locks (taken by the caller,
6175 * released by @c zalloc_cached_from_depot()).
6176 *
6177 * In general the @c zalloc_*_slow() codepaths deal with refilling and will
6178 * tail call into the @c zalloc_*_fast() code to perform the actual allocation.
6179 *
6180 * @c zalloc_return() is the final function everyone tail calls into,
6181 * which prepares the element for consumption by the caller and deals with
6182 * common treatment (zone logging, tags, kasan, validation, ...).
6183 */
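/*
 * A minimal caller-side sketch (widget_zone is hypothetical; the flags
 * are the ones declared in zalloc.h):
 * <code>
 * struct widget *w;
 *
 * w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);    // may block
 * w = zalloc_flags(widget_zone, Z_NOWAIT);             // may return NULL
 * w = zalloc_flags(widget_zone, Z_WAITOK | Z_NOFAIL);  // never NULL
 * </code>
 * Whichever entry point is used, the call funnels into zalloc_ext()
 * below and returns through zalloc_return().
 */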
6184
6185 /*!
6186 * @function zalloc_import
6187 *
6188 * @brief
6189 * Import @c n elements in the specified array, opposite of @c zfree_drop().
6190 *
6191 * @param zone The zone to import elements from
6192 * @param elems The array to import into
6193 * @param n The number of elements to import. Must be non-zero,
6194 * and smaller than @c zone->z_elems_free.
6195 */
6196 __header_always_inline vm_size_t
6197 zalloc_import(
6198 zone_t zone,
6199 zone_element_t *elems,
6200 zalloc_flags_t flags,
6201 uint32_t n)
6202 {
6203 vm_offset_t esize = zone_elem_size(zone);
6204 uint32_t i = 0;
6205
6206 assertf(STAILQ_EMPTY(&zone->z_recirc),
6207 "Trying to import from zone %p [%s%s] with non empty recirc",
6208 zone, zone_heap_name(zone), zone_name(zone));
6209
6210 do {
6211 vm_offset_t page, eidx, size = 0;
6212 struct zone_page_metadata *meta;
6213
6214 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6215 meta = zone_pva_to_meta(zone->z_pageq_partial);
6216 page = zone_pva_to_addr(zone->z_pageq_partial);
6217 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6218 meta = zone_pva_to_meta(zone->z_pageq_empty);
6219 page = zone_pva_to_addr(zone->z_pageq_empty);
6220 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6221 } else {
6222 zone_accounting_panic(zone, "z_elems_free corruption");
6223 }
6224
6225 zone_meta_validate(zone, meta, page);
6226
6227 vm_offset_t old_size = meta->zm_alloc_size;
6228 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6229
6230 do {
6231 eidx = zone_meta_find_and_clear_bit(zone, meta, flags);
6232 elems[i++] = zone_element_encode(page, eidx);
6233 size += esize;
6234 } while (i < n && old_size + size + esize <= max_size);
6235
6236 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6237
6238 if (new_size + esize > max_size) {
6239 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6240 } else if (old_size == 0) {
6241 /* remove from free, move to intermediate */
6242 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6243 }
6244 } while (i < n);
6245
6246 return esize;
6247 }
6248
6249 /*!
6250 * @function zalloc_return
6251 *
6252 * @brief
6253 * Performs the tail-end of the work required on allocations before the caller
6254 * uses them.
6255 *
6256 * @discussion
6257 * This function is called without any zone lock held,
6258 * and with preemption restored to the state it had when @c zalloc_ext() was called.
6259 *
6260 * @param zone The zone we're allocating from.
6261 * @param ze The encoded element we just allocated.
6262 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6263 * @param elem_size The element size for this zone.
6264 */
6265 __attribute__((always_inline))
6266 static struct kalloc_result
6267 zalloc_return(
6268 zone_t zone,
6269 zone_element_t ze,
6270 zalloc_flags_t flags __unused,
6271 vm_offset_t elem_size)
6272 {
6273 vm_offset_t addr = zone_element_addr(zone, ze, elem_size);
6274
6275 #if CONFIG_KERNEL_TBI && KASAN_TBI
6276 addr = kasan_tbi_fix_address_tag(addr);
6277 kasan_tbi_log_bt(zone, ZOP_ALLOC, addr, __builtin_frame_address(0));
6278 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
6279 #if ZALLOC_ENABLE_ZERO_CHECK
6280 zalloc_validate_element(zone, addr, elem_size, flags);
6281 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6282 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
6283 if (__improbable(zone->z_btlog)) {
6284 zalloc_log(zone->z_btlog, addr, flags,
6285 __builtin_frame_address(0));
6286 }
6287 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
6288 #if VM_TAG_SIZECLASSES
6289 if (__improbable(zone->z_uses_tags)) {
6290 vm_tag_t tag = zalloc_flags_get_tag(flags);
6291 if (tag == VM_KERN_MEMORY_NONE) {
6292 zone_security_flags_t zsflags = zone_security_config(zone);
6293 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6294 tag = VM_KERN_MEMORY_KALLOC_DATA;
6295 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6296 zsflags.z_kalloc_type) {
6297 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6298 } else {
6299 tag = VM_KERN_MEMORY_KALLOC;
6300 }
6301 }
6302 // set the tag with b0 clear so the block remains inuse
6303 *ztSlot(zone, addr) = (vm_tag_t)(tag << 1);
6304 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6305 (long)elem_size);
6306 }
6307 #endif /* VM_TAG_SIZECLASSES */
6308 #if CONFIG_PROB_GZALLOC
6309 if ((flags & Z_PGZ) && !zone_addr_size_crosses_page(addr, elem_size)) {
6310 addr = pgz_protect(zone, addr, flags,
6311 __builtin_frame_address(0));
6312 }
6313 #endif
6314
6315 /*
6316 * KASan integration of kalloc heaps is handled by kalloc_ext().
6317 */
6318 if ((flags & Z_SKIP_KASAN) == 0) {
6319 #if KASAN_ZALLOC
6320 if (zone->z_kasan_redzone) {
6321 addr = kasan_alloc(addr, elem_size,
6322 elem_size - 2 * zone->z_kasan_redzone,
6323 zone->z_kasan_redzone);
6324 elem_size -= 2 * zone->z_kasan_redzone;
6325 __nosan_bzero((char *)addr, elem_size);
6326 } else if (flags & Z_PCPU) {
6327 zpercpu_foreach_cpu(i) {
6328 kasan_poison_range(addr + ptoa(i), elem_size, ASAN_VALID);
6329 __nosan_bzero((char *)addr + ptoa(i), elem_size);
6330 }
6331 } else {
6332 kasan_poison_range(addr, elem_size, ASAN_VALID);
6333 __nosan_bzero((char *)addr, elem_size);
6334 }
6335 #endif /* KASAN_ZALLOC */
6336 #if CONFIG_KERNEL_TBI && KASAN_TBI
6337 if (__probable(zone->z_tbi_tag)) {
6338 addr = kasan_tbi_tag_zalloc(addr, elem_size,
6339 elem_size, (flags & Z_PCPU));
6340 } else {
6341 addr = kasan_tbi_tag_zalloc_default(addr,
6342 elem_size, (flags & Z_PCPU));
6343 }
6344 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
6345 }
6346
6347 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6348 return (struct kalloc_result){ (void *)addr, elem_size };
6349 }
6350
6351 __attribute__((noinline))
6352 static struct kalloc_result
6353 zalloc_item_fast(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6354 {
6355 zone_element_t ze;
6356 vm_offset_t esize;
6357
6358 esize = zalloc_import(zone, &ze, flags, 1);
6359 zone_elems_free_sub(zone, 1);
6360 zpercpu_get(zstats)->zs_mem_allocated += esize;
6361 zone_unlock(zone);
6362
6363 return zalloc_return(zone, ze, flags, esize);
6364 }
6365
6366 static inline bool
6367 zalloc_item_slow_should_schedule_async(zone_t zone, zalloc_flags_t flags)
6368 {
6369 /*
6370 * If we can't wait, then async it is.
6371 */
6372 if (flags & Z_NOWAIT) {
6373 return true;
6374 }
6375
6376 if (zone->z_elems_free == 0) {
6377 return false;
6378 }
6379
6380 /*
6381 * Early boot gets to tap in bootstrap reserves
6382 */
6383 if (startup_phase < STARTUP_SUB_EARLY_BOOT) {
6384 return true;
6385 }
6386
6387 /*
6388 * Allow threads to tap up to 3/4 of the reserve while only doing asyncs
6389 * (worked example below). Reserve-less zones will always say "true" here.
6390 */
6391 if (zone->z_elems_free >= zone->z_elems_rsv / 4) {
6392 return true;
6393 }
6394
6395 if (zone_supports_vm(zone)) {
6396 return true;
6397 }
6398
6399 /*
6400 * After this, only VM and GC threads get to tap in the reserve.
6401 */
6402 return current_thread()->options & (TH_OPT_ZONE_PRIV | TH_OPT_VMPRIV);
6403 }
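/*
 * Worked example (z_elems_rsv == 400 is an assumed value): a regular
 * Z_WAITOK thread keeps scheduling async refills while z_elems_free is
 * at least 100 (rsv / 4), i.e. it may consume up to 3/4 of the reserve.
 * Below that, only VM-supporting zones and TH_OPT_ZONE_PRIV /
 * TH_OPT_VMPRIV threads stay on the async path; everyone else expands
 * the zone synchronously in zalloc_item_slow().
 */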
6404
6405 /*!
6406 * @function zalloc_item_slow
6407 *
6408 * @brief
6409 * Performs allocations when the zone is out of elements.
6410 *
6411 * @discussion
6412 * This function might drop the lock and reenable preemption,
6413 * which means the per-CPU caching layer or recirculation depot
6414 * might have received elements.
6415 */
6416 __attribute__((noinline))
6417 static struct kalloc_result
6418 zalloc_item_slow(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6419 {
6420 if (zalloc_item_slow_should_schedule_async(zone, flags)) {
6421 zone_expand_async_schedule_if_needed(zone);
6422 } else {
6423 zone_expand_locked(zone, flags, zalloc_needs_refill);
6424 }
6425 if (__improbable(zone->z_elems_free == 0)) {
6426 zone_unlock(zone);
6427 if (__improbable(flags & Z_NOFAIL)) {
6428 zone_nofail_panic(zone);
6429 }
6430 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6431 return (struct kalloc_result){ };
6432 }
6433
6434 /*
6435 * We might have changed core or got preempted/blocked while expanding
6436 * the zone. Allocating from the zone when the recirculation depot
6437 * is not empty is not allowed.
6438 *
6439 * It will be rare but possible for the depot to refill while we were
6440 * waiting for pages. If that happens we need to start over.
6441 */
6442 if (!STAILQ_EMPTY(&zone->z_recirc)) {
6443 zone_unlock(zone);
6444 return zalloc_ext(zone, zstats, flags);
6445 }
6446
6447 return zalloc_item_fast(zone, zstats, flags);
6448 }
6449
6450 /*!
6451 * @function zalloc_item
6452 *
6453 * @brief
6454 * Performs allocations when zone caching is off.
6455 *
6456 * @discussion
6457 * This function calls @c zalloc_item_slow() when refilling the zone
6458 * is needed, or @c zalloc_item_fast() if the zone has enough free elements.
6459 */
6460 static struct kalloc_result
6461 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6462 {
6463 zone_lock_nopreempt_check_contention(zone, NULL);
6464
6465 /*
6466 * When we committed to the zalloc_item() path,
6467 * zone caching might have been flipped/enabled.
6468 *
6469 * If we got preempted for long enough, the recirculation layer
6470 * can have been populated, and allocating from the zone would be
6471 * incorrect.
6472 *
6473 * So double check for this extremely rare race here.
6474 */
6475 if (__improbable(!STAILQ_EMPTY(&zone->z_recirc))) {
6476 zone_unlock(zone);
6477 return zalloc_ext(zone, zstats, flags);
6478 }
6479
6480 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6481 return zalloc_item_slow(zone, zstats, flags);
6482 }
6483
6484 return zalloc_item_fast(zone, zstats, flags);
6485 }
6486
6487 __attribute__((always_inline))
6488 static struct kalloc_result
6489 zalloc_cached_fast(
6490 zone_t zone,
6491 zone_stats_t zstats,
6492 zalloc_flags_t flags,
6493 zone_cache_t cache,
6494 zone_magazine_t freemag)
6495 {
6496 vm_offset_t esize = zone_elem_size(zone);
6497 zone_element_t ze;
6498 uint32_t index;
6499
6500 index = --cache->zc_alloc_cur;
6501 if (index >= zc_mag_size()) {
6502 zone_accounting_panic(zone, "zc_alloc_cur wrap around");
6503 }
6504 ze = cache->zc_alloc_elems[index];
6505 cache->zc_alloc_elems[index].ze_value = 0;
6506
6507 zpercpu_get(zstats)->zs_mem_allocated += esize;
6508 enable_preemption();
6509
6510 if (zone_meta_is_free(zone_meta_from_element(ze), ze)) {
6511 zone_meta_double_free_panic(zone, ze, __func__);
6512 }
6513
6514 if (freemag) {
6515 zone_magazine_free(freemag);
6516 }
6517 return zalloc_return(zone, ze, flags, esize);
6518 }
6519
6520 __attribute__((noinline))
6521 static struct kalloc_result
6522 zalloc_cached_from_depot(
6523 zone_t zone,
6524 zone_stats_t zstats,
6525 zalloc_flags_t flags,
6526 zone_cache_t cache)
6527 {
6528 zone_magazine_t mag = STAILQ_FIRST(&cache->zc_depot);
6529
6530 STAILQ_REMOVE_HEAD(&cache->zc_depot, zm_link);
6531 STAILQ_NEXT(mag, zm_link) = NULL;
6532
6533 if (cache->zc_depot_cur-- == 0) {
6534 zone_accounting_panic(zone, "zc_depot_cur wrap-around");
6535 }
6536 zone_depot_unlock_nopreempt(cache);
6537
6538 mag = zone_magazine_replace(&cache->zc_alloc_cur,
6539 &cache->zc_alloc_elems, mag);
6540
6541 z_debug_assert(cache->zc_alloc_cur == zc_mag_size());
6542 z_debug_assert(mag->zm_cur == 0);
6543
6544 if (zone == zc_magazine_zone) {
6545 enable_preemption();
6546 bzero(mag, zone_elem_size(zone));
6547 return (struct kalloc_result){ mag, zone_elem_size(zone) };
6548 }
6549
6550 return zalloc_cached_fast(zone, zstats, flags, cache, mag);
6551 }
6552
6553 __attribute__((noinline))
6554 static struct kalloc_result
6555 zalloc_cached_import(
6556 zone_t zone,
6557 zone_stats_t zstats,
6558 zalloc_flags_t flags,
6559 zone_cache_t cache)
6560 {
6561 uint16_t n_elems = zc_mag_size();
6562
6563 if (zone->z_elems_free < n_elems + zone->z_elems_rsv / 2 &&
6564 os_sub_overflow(zone->z_elems_free,
6565 zone->z_elems_rsv / 2, &n_elems)) {
6566 n_elems = 0;
6567 }
6568
6569 z_debug_assert(n_elems <= zc_mag_size());
6570
6571 if (__improbable(n_elems == 0)) {
6572 /*
6573 * If importing elements would deplete the zone,
6574 * call zalloc_item_slow()
6575 */
6576 return zalloc_item_slow(zone, zstats, flags);
6577 }
6578
6579 if (__improbable(zone_caching_disabled)) {
6580 if (__improbable(zone_caching_disabled < 0)) {
6581 /*
6582 * In the first 10s after boot, mess with
6583 * the scan position in order to make early
6584 * allocation patterns less predictable.
6585 */
6586 zone_early_scramble_rr(zone, zstats);
6587 }
6588 return zalloc_item_fast(zone, zstats, flags);
6589 }
6590
6591 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6592
6593 cache->zc_alloc_cur = n_elems;
6594 zone_elems_free_sub(zone, n_elems);
6595
6596 zone_unlock_nopreempt(zone);
6597
6598 return zalloc_cached_fast(zone, zstats, flags, cache, NULL);
6599 }
6600
6601 static struct kalloc_result
6602 zalloc_cached_from_recirc(
6603 zone_t zone,
6604 zone_stats_t zstats,
6605 zalloc_flags_t flags,
6606 zone_cache_t cache)
6607 {
6608 struct zone_depot mags;
6609 zone_magazine_t mag;
6610 uint16_t n_mags = 1;
6611
6612 STAILQ_FIRST(&mags) = mag = STAILQ_FIRST(&zone->z_recirc);
6613
6614 for (;;) {
6615 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6616 zone_element_t e = mag->zm_elems[i];
6617
6618 if (!zone_meta_mark_used(zone_meta_from_element(e), e)) {
6619 zone_meta_double_free_panic(zone, e, __func__);
6620 }
6621 }
6622
6623 if (n_mags >= zone->z_recirc_cur) {
6624 STAILQ_INIT(&zone->z_recirc);
6625 assert(STAILQ_NEXT(mag, zm_link) == NULL);
6626 break;
6627 }
6628
6629 if (n_mags >= zc_recirc_batch || n_mags * zc_mag_size() >=
6630 cache->zc_depot_max) {
6631 STAILQ_FIRST(&zone->z_recirc) = STAILQ_NEXT(mag, zm_link);
6632 STAILQ_NEXT(mag, zm_link) = NULL;
6633 break;
6634 }
6635
6636 n_mags++;
6637 mag = STAILQ_NEXT(mag, zm_link);
6638 }
6639
6640 zone_elems_free_sub(zone, n_mags * zc_mag_size());
6641 zone_counter_sub(zone, z_recirc_cur, n_mags);
6642
6643 zone_unlock_nopreempt(zone);
6644
6645 mags.stqh_last = &STAILQ_NEXT(mag, zm_link);
6646
6647 /*
6648 * And then incorporate everything into our per-cpu layer.
6649 */
6650
6651 mag = STAILQ_FIRST(&mags);
6652
6653 if (n_mags > 1) {
6654 STAILQ_FIRST(&mags) = STAILQ_NEXT(mag, zm_link);
6655 STAILQ_NEXT(mag, zm_link) = NULL;
6656
6657 zone_depot_lock_nopreempt(cache);
6658
6659 cache->zc_depot_cur += n_mags - 1;
6660 STAILQ_CONCAT(&cache->zc_depot, &mags);
6661
6662 zone_depot_unlock_nopreempt(cache);
6663 }
6664
6665 mag = zone_magazine_replace(&cache->zc_alloc_cur,
6666 &cache->zc_alloc_elems, mag);
6667 z_debug_assert(cache->zc_alloc_cur == zc_mag_size());
6668 z_debug_assert(mag->zm_cur == 0);
6669
6670 return zalloc_cached_fast(zone, zstats, flags, cache, mag);
6671 }
6672
6673 __attribute__((noinline))
6674 static struct kalloc_result
6675 zalloc_cached_slow(
6676 zone_t zone,
6677 zone_stats_t zstats,
6678 zalloc_flags_t flags,
6679 zone_cache_t cache)
6680 {
6681 /*
6682 * Try to allocate from our local depot, if there's one.
6683 */
6684 if (STAILQ_FIRST(&cache->zc_depot)) {
6685 zone_depot_lock_nopreempt(cache);
6686
6687 if (STAILQ_FIRST(&cache->zc_depot)) {
6688 return zalloc_cached_from_depot(zone, zstats, flags,
6689 cache);
6690 }
6691
6692 zone_depot_unlock_nopreempt(cache);
6693 }
6694
6695 zone_lock_nopreempt_check_contention(zone, cache);
6696
6697 /*
6698 * If the recirculation depot is empty, we'll need to import.
6699 * The system is tuned for this to be extremely rare.
6700 */
6701 if (__improbable(STAILQ_EMPTY(&zone->z_recirc))) {
6702 return zalloc_cached_import(zone, zstats, flags, cache);
6703 }
6704
6705 /*
6706 * If the recirculation depot has elements, then try to fill from it.
6707 */
6708 return zalloc_cached_from_recirc(zone, zstats, flags, cache);
6709 }
6710
6711 /*!
6712 * @function zalloc_cached
6713 *
6714 * @brief
6715 * Performs allocations when zone caching is on.
6716 *
6717 * @discussion
6718 * This function calls @c zalloc_cached_fast() when the caches have elements
6719 * ready.
6720 *
6721 * Else it will call @c zalloc_cached_slow() so that the cache is refilled,
6722 * which might switch to the @c zalloc_item_slow() track when the backing zone
6723 * needs to be refilled.
6724 */
6725 static struct kalloc_result
6726 zalloc_cached(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6727 {
6728 zone_cache_t cache;
6729
6730 cache = zpercpu_get(zone->z_pcpu_cache);
6731
6732 if (cache->zc_alloc_cur == 0) {
6733 if (__improbable(cache->zc_free_cur == 0)) {
6734 return zalloc_cached_slow(zone, zstats, flags, cache);
6735 }
6736 zone_cache_swap_magazines(cache);
6737 }
6738
6739 return zalloc_cached_fast(zone, zstats, flags, cache, NULL);
6740 }
6741
6742 /*!
6743 * @function zalloc_ext
6744 *
6745 * @brief
6746 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6747 */
6748 struct kalloc_result
6749 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6750 {
6751 /*
6752 * KASan uses zalloc() for fakestack, which can be called anywhere.
6753 * However, we make sure these calls can never block.
6754 */
6755 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6756 #if KASAN_ZALLOC
6757 zone->kasan_fakestacks ||
6758 #endif /* KASAN_ZALLOC */
6759 ml_get_interrupts_enabled() ||
6760 ml_is_quiescing() ||
6761 debug_mode_active(),
6762 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6763
6764 /*
6765 * Make sure Z_NOFAIL was not obviously misused
6766 */
6767 if (flags & Z_NOFAIL) {
6768 assert(!zone->exhaustible &&
6769 (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6770 }
6771 #if VM_TAG_SIZECLASSES
6772 if (__improbable(zone->z_uses_tags)) {
6773 vm_tag_t tag = zalloc_flags_get_tag(flags);
6774 if (flags & Z_VM_TAG_BT_BIT) {
6775 tag = vm_tag_bt() ?: tag;
6776 }
6777 if (tag != VM_KERN_MEMORY_NONE) {
6778 tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6779 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6780 }
6781 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6782 }
6783 #endif /* VM_TAG_SIZECLASSES */
6784
6785 disable_preemption();
6786
6787 #if ZALLOC_ENABLE_ZERO_CHECK
6788 if (zalloc_skip_zero_check()) {
6789 flags |= Z_NOZZC;
6790 }
6791 #endif
6792 #if CONFIG_PROB_GZALLOC
6793 if (zone->z_pgz_tracked && pgz_sample(flags)) {
6794 flags |= Z_PGZ;
6795 }
6796 #endif /* CONFIG_PROB_GZALLOC */
6797
6798 if (zone->z_pcpu_cache) {
6799 return zalloc_cached(zone, zstats, flags);
6800 }
6801
6802 return zalloc_item(zone, zstats, flags);
6803 }
6804
6805 __attribute__((always_inline))
6806 void *
6807 zalloc(union zone_or_view zov)
6808 {
6809 return zalloc_flags(zov, Z_WAITOK);
6810 }
6811
6812 __attribute__((always_inline))
6813 void *
6814 zalloc_noblock(union zone_or_view zov)
6815 {
6816 return zalloc_flags(zov, Z_NOWAIT);
6817 }
6818
6819 void *
6820 (zalloc_flags)(union zone_or_view zov, zalloc_flags_t flags)
6821 {
6822 zone_t zone = zov.zov_view->zv_zone;
6823 zone_stats_t zstats = zov.zov_view->zv_stats;
6824
6825 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6826 assert(!zone->z_percpu);
6827 return zalloc_ext(zone, zstats, flags).addr;
6828 }
6829
6830 __attribute__((always_inline))
6831 void *
6832 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6833 {
6834 return zalloc_flags(zone_by_id(zid), flags);
6835 }
6836
6837 void *
6838 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6839 {
6840 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6841 zone_t zone = zone_by_id(zid);
6842 zone_stats_t zstats = zone->z_stats;
6843 struct kalloc_result kr;
6844
6845 kr = zalloc_ext(zone, zstats, flags);
6846 #if ZSECURITY_CONFIG(READ_ONLY)
6847 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6848 if (kr.addr) {
6849 zone_require_ro(zid, kr.size, kr.addr);
6850 }
6851 #endif
6852 return kr.addr;
6853 }
6854
6855 #if ZSECURITY_CONFIG(READ_ONLY)
6856
6857 __attribute__((always_inline))
6858 static bool
6859 from_current_stack(vm_offset_t addr, vm_size_t size)
6860 {
6861 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6862 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6863
6864 #if CONFIG_KERNEL_TBI
6865 addr = VM_KERNEL_TBI_FILL(addr);
6866 #endif /* CONFIG_KERNEL_TBI */
6867
6868 return (addr >= start) && (addr + size < end);
6869 }
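/*
 * Worked example (assuming 16KiB, size-aligned kernel stacks, with a
 * hypothetical frame pointer): `end` rounds up to the next stack-size
 * boundary, which is the top of the current stack:
 * <code>
 * start = 0xffffffe00001d3c0;              // __builtin_frame_address(0)
 * end   = (start + 0x4000 - 1) & -0x4000;  // 0xffffffe000020000
 * </code>
 */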
6870
6871 /*
6872 * Check if an address is from const memory, i.e. the TEXT or DATA_CONST segments,
6873 * or the SECURITY_READ_ONLY_LATE section.
6874 */
6875 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6876 __attribute__((always_inline))
6877 static bool
6878 from_const_memory(const vm_offset_t addr, vm_size_t size)
6879 {
6880 return rorgn_contains(addr, size);
6881 }
6882 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6883 __attribute__((always_inline))
6884 static bool
6885 from_const_memory(const vm_offset_t addr, vm_size_t size)
6886 {
6887 #pragma unused(addr, size)
6888 return true;
6889 }
6890 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6891
6892 __abortlike
6893 static void
6894 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6895 const vm_offset_t src, vm_size_t src_size)
6896 {
6897 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
6898 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
6899 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6900 const struct mach_vm_range rorgn_range = rorgn_get_range();
6901 #else
6902 const struct mach_vm_range rorgn_range = {};
6903 #endif
6904
6905 if (from_ro_map(src, src_size)) {
6906 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6907 zone_t dst_zone = &zone_array[zid];
6908 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
6909 " (expected: %s, actual: %s", (void *)src, elem, src_zone->z_name,
6910 dst_zone->z_name);
6911 }
6912
6913 panic("zalloc_ro_mut failed: source (%p) not from RO zone map (%p - %p), "
6914 "current stack (%p - %p) or const memory (%p - %p)", (void *)src,
6915 (void *)zone_info.zi_ro_range.min_address,
6916 (void *)zone_info.zi_ro_range.max_address,
6917 (void *)stack_start, (void *)stack_end,
6918 (void *)rorgn_range.min_address, (void *)rorgn_range.max_address);
6919 }
6920
6921 __attribute__((always_inline))
6922 static void
6923 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6924 const vm_offset_t src, vm_size_t src_size)
6925 {
6926 if (from_current_stack(src, src_size) ||
6927 (from_ro_map(src, src_size) &&
6928 zid == zone_index_from_ptr((void *)src)) ||
6929 from_const_memory(src, src_size)) {
6930 return;
6931 }
6932 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6933 }
6934
6935 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6936
6937 __attribute__((noinline))
6938 void
6939 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
6940 const void *new_data, vm_size_t new_data_size)
6941 {
6942 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6943
6944 #if ZSECURITY_CONFIG(READ_ONLY)
6945 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
6946 new_data_size);
6947 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
6948 (vm_offset_t) new_data, new_data_size);
6949 #else
6950 (void)zid;
6951 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
6952 #endif
6953 }
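/*
 * A minimal usage sketch of the read-only zone API (the zone id, struct
 * and field below are hypothetical):
 * <code>
 * struct ro_blob { uint32_t b_flags; };
 *
 * struct ro_blob *b = zalloc_ro(ZONE_ID_RO_BLOB, Z_WAITOK | Z_ZERO);
 * uint32_t flags = 0x1;
 * zalloc_ro_mut(ZONE_ID_RO_BLOB, b,
 *     offsetof(struct ro_blob, b_flags), &flags, sizeof(flags));
 * zfree_ro(ZONE_ID_RO_BLOB, b);
 * </code>
 * The element is never writable through its own mapping: stores go
 * through the PPL, or through plain memcpy() when the read-only
 * security feature is compiled out.
 */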
6954
6955 __attribute__((noinline))
6956 uint64_t
6957 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
6958 zro_atomic_op_t op, uint64_t value)
6959 {
6960 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6961
6962 #if ZSECURITY_CONFIG(READ_ONLY)
6963 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
6964 #else
6965 (void)zid;
6966 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
6967 #endif
6968 return value;
6969 }
6970
6971 void
6972 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
6973 {
6974 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6975 #if ZSECURITY_CONFIG(READ_ONLY)
6976 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
6977 #else
6978 (void)zid;
6979 bzero((void *)((uintptr_t)elem + offset), size);
6980 #endif
6981 }
6982
6983 /*
6984 * This function will run in the PPL and needs to be robust
6985 * against an attacker with arbitrary kernel write.
6986 */
6987
6988 #if ZSECURITY_CONFIG(READ_ONLY)
6989
6990 __abortlike
6991 static void
6992 zone_id_require_ro_panic(zone_id_t zid, void *addr)
6993 {
6994 struct zone_size_params p = zone_ro_size_params[zid];
6995 vm_offset_t elem = (vm_offset_t)addr;
6996 uint32_t zindex;
6997 zone_t other;
6998 zone_t zone = &zone_array[zid];
6999
7000 if (!from_ro_map(addr, 1)) {
7001 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7002 }
7003
7004 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7005 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7006 }
7007
7008 zindex = zone_index_from_ptr(addr);
7009 other = &zone_array[zindex];
7010 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7011 panic("zone_require_ro failed: invalid zone index %d "
7012 "(addr: %p, expected: %s%s)", zindex,
7013 addr, zone_heap_name(zone), zone->z_name);
7014 } else {
7015 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7016 "(addr: %p, expected: %s%s)",
7017 zindex, zone_heap_name(other), other->z_name,
7018 addr, zone_heap_name(zone), zone->z_name);
7019 }
7020 }
7021
7022 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7023
7024 __attribute__((always_inline))
7025 void
7026 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7027 {
7028 #if ZSECURITY_CONFIG(READ_ONLY)
7029 struct zone_size_params p = zone_ro_size_params[zid];
7030 vm_offset_t elem = (vm_offset_t)addr;
7031
7032 if (!from_ro_map(addr, 1) ||
7033 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7034 zid != zone_meta_from_addr(elem)->zm_index) {
7035 zone_id_require_ro_panic(zid, addr);
7036 }
7037 #else
7038 #pragma unused(zid, addr)
7039 #endif
7040 }
7041
7042 void *
7043 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7044 {
7045 zone_t zone = zov.zov_view->zv_zone;
7046 zone_stats_t zstats = zov.zov_view->zv_stats;
7047
7048 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7049 assert(zone->z_percpu);
7050 flags |= Z_PCPU;
7051 return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7052 }
7053
7054 static void *
7055 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7056 {
7057 struct zone_page_metadata *page_meta;
7058 vm_offset_t offs, addr;
7059 zone_pva_t pva;
7060
7061 assert(ml_get_interrupts_enabled() ||
7062 ml_is_quiescing() ||
7063 debug_mode_active() ||
7064 startup_phase < STARTUP_SUB_EARLY_BOOT);
7065
7066 size = (size + mask) & ~mask;
7067 assert(size <= PAGE_SIZE);
7068
7069 zone_lock(zone);
7070 assert(zone->z_self == zone);
7071
7072 for (;;) {
7073 pva = zone->z_pageq_partial;
7074 while (!zone_pva_is_null(pva)) {
7075 page_meta = zone_pva_to_meta(pva);
7076 if (page_meta->zm_bump + size <= PAGE_SIZE) {
7077 goto found;
7078 }
7079 pva = page_meta->zm_page_next;
7080 }
7081
7082 zone_expand_locked(zone, Z_WAITOK, NULL);
7083 }
7084
7085 found:
7086 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7087 page_meta->zm_bump = (uint16_t)(offs + size);
7088 page_meta->zm_alloc_size += size;
7089 zone->z_elems_free -= size;
7090 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7091
7092 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7093 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7094 }
7095
7096 zone_unlock(zone);
7097
7098 addr = offs + zone_pva_to_addr(pva);
7099
7100 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7101 return (void *)addr;
7102 }
7103
7104 static void *
7105 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7106 {
7107 vm_offset_t addr;
7108
7109 kernel_memory_allocate(kernel_map, &addr, size, mask,
7110 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7111
7112 return (void *)addr;
7113 }
7114
7115 void *
7116 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7117 {
7118 if (size <= PAGE_SIZE) {
7119 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7120 return _zalloc_permanent(zone, size, mask);
7121 }
7122 return _zalloc_permanent_large(size, mask, tag);
7123 }
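/*
 * Illustrative usage sketch (hypothetical caller): permanent allocations
 * are never freed, which suits boot-time tables that live for the
 * kernel's lifetime. The mask argument is an alignment mask
 * (alignment - 1); requests at or below PAGE_SIZE are bump-allocated out
 * of the permanent zone, while larger ones get a dedicated wired,
 * zero-filled KMA_PERMANENT allocation:
 *
 *	struct boot_entry *tbl = zalloc_permanent_tag(
 *	    nentries * sizeof(struct boot_entry),
 *	    sizeof(uint64_t) - 1, VM_KERN_MEMORY_KALLOC);
 */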
7124
7125 void *
7126 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7127 {
7128 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7129 return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7130 }
7131
7132 /*! @} */
7133 #endif /* !ZALLOC_TEST */
7134 #pragma mark zone GC / trimming
7135 #if !ZALLOC_TEST
7136
7137 static thread_call_data_t zone_defrag_callout;
7138
7139 static void
7140 zone_reclaim_chunk(zone_t z, struct zone_page_metadata *meta,
7141 uint32_t free_count, struct zone_depot *mags)
7142 {
7143 vm_address_t page_addr;
7144 vm_size_t size_to_free;
7145 uint32_t bitmap_ref;
7146 uint32_t page_count;
7147 zone_security_flags_t zsflags = zone_security_config(z);
7148 bool sequester = zsflags.z_va_sequester && !z->z_destroyed;
7149 bool oob_guard = false;
7150
7151 if (zone_submap_is_sequestered(zsflags)) {
7152 /*
7153 * If the entire map is sequestered, we can't return the VA.
7154 * It stays pinned to the zone forever.
7155 */
7156 sequester = true;
7157 }
7158
7159 zone_meta_queue_pop(z, &z->z_pageq_empty);
7160
7161 page_addr = zone_meta_to_addr(meta);
7162 page_count = meta->zm_chunk_len;
7163 oob_guard = meta->zm_guarded;
7164
7165 if (meta->zm_alloc_size) {
7166 zone_metadata_corruption(z, meta, "alloc_size");
7167 }
7168 if (z->z_percpu) {
7169 if (page_count != 1) {
7170 zone_metadata_corruption(z, meta, "page_count");
7171 }
7172 size_to_free = ptoa(z->z_chunk_pages);
7173 zone_remove_wired_pages(z->z_chunk_pages);
7174 } else {
7175 if (page_count > z->z_chunk_pages) {
7176 zone_metadata_corruption(z, meta, "page_count");
7177 }
7178 if (page_count < z->z_chunk_pages) {
7179 /* Dequeue non-populated VA from z_pageq_va */
7180 zone_meta_remqueue(z, meta + page_count);
7181 }
7182 size_to_free = ptoa(page_count);
7183 zone_remove_wired_pages(page_count);
7184 }
7185
7186 zone_counter_sub(z, z_elems_free, free_count);
7187 zone_counter_sub(z, z_elems_avail, free_count);
7188 zone_counter_sub(z, z_wired_empty, page_count);
7189 zone_counter_sub(z, z_wired_cur, page_count);
7190 if (z->z_elems_free_min < free_count) {
7191 z->z_elems_free_min = 0;
7192 } else {
7193 z->z_elems_free_min -= free_count;
7194 }
7195 if (z->z_elems_free_max < free_count) {
7196 z->z_elems_free_max = 0;
7197 } else {
7198 z->z_elems_free_max -= free_count;
7199 }
7200
7201 bitmap_ref = 0;
7202 if (sequester) {
7203 if (meta->zm_inline_bitmap) {
7204 for (int i = 0; i < meta->zm_chunk_len; i++) {
7205 meta[i].zm_bitmap = 0;
7206 }
7207 } else {
7208 bitmap_ref = meta->zm_bitmap;
7209 meta->zm_bitmap = 0;
7210 }
7211 meta->zm_chunk_len = 0;
7212 } else {
7213 if (!meta->zm_inline_bitmap) {
7214 bitmap_ref = meta->zm_bitmap;
7215 }
7216 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7217 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7218 }
7219
7220 #if CONFIG_ZLEAKS
7221 if (__improbable(zleak_should_disable_for_zone(z) &&
7222 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7223 thread_call_enter(&zone_leaks_callout);
7224 }
7225 #endif /* CONFIG_ZLEAKS */
7226
7227 zone_unlock(z);
7228
7229 if (bitmap_ref) {
7230 zone_bits_free(bitmap_ref);
7231 }
7232
7233 /* Free the pages for metadata and account for them */
7234 #if KASAN_ZALLOC
7235 kasan_poison_range(page_addr, size_to_free, ASAN_VALID);
7236 #endif
7237 #if VM_TAG_SIZECLASSES
7238 if (z->z_uses_tags) {
7239 ztMemoryRemove(z, page_addr, size_to_free);
7240 }
7241 #endif /* VM_TAG_SIZECLASSES */
7242
7243 if (sequester) {
7244 kernel_memory_depopulate(page_addr, size_to_free,
7245 KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7246 } else {
7247 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7248 kmem_free(zone_submap(zsflags), page_addr,
7249 ptoa(z->z_chunk_pages + oob_guard));
7250 if (oob_guard) {
7251 os_atomic_dec(&zone_guard_pages, relaxed);
7252 }
7253 }
7254
7255 zone_magazine_free_list(mags);
7256 thread_yield_to_preemption();
7257
7258 zone_lock(z);
7259
7260 if (sequester) {
7261 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7262 }
7263 }
7264
7265 static uint16_t
7266 zone_reclaim_elements(zone_t z, uint16_t *count, zone_element_t *elems)
7267 {
7268 uint16_t n = *count;
7269
7270 z_debug_assert(n <= zc_mag_size());
7271
7272 for (uint16_t i = 0; i < n; i++) {
7273 zone_element_t ze = elems[i];
7274 elems[i].ze_value = 0;
7275 zfree_drop(z, zone_element_validate(z, ze), ze, false);
7276 }
7277
7278 *count = 0;
7279 return n;
7280 }
7281
7282 static uint16_t
7283 zone_reclaim_recirc_magazine(zone_t z, struct zone_depot *mags)
7284 {
7285 zone_magazine_t mag = STAILQ_FIRST(&z->z_recirc);
7286
7287 STAILQ_REMOVE_HEAD(&z->z_recirc, zm_link);
7288 STAILQ_INSERT_TAIL(mags, mag, zm_link);
7289 zone_counter_sub(z, z_recirc_cur, 1);
7290
7291 z_debug_assert(mag->zm_cur == zc_mag_size());
7292
7293 for (uint16_t i = 0; i < zc_mag_size(); i++) {
7294 zone_element_t ze = mag->zm_elems[i];
7295 mag->zm_elems[i].ze_value = 0;
7296 zfree_drop(z, zone_element_validate(z, ze), ze, true);
7297 }
7298
7299 mag->zm_cur = 0;
7300
7301 return zc_mag_size();
7302 }
7303
7304 static void
7305 zone_depot_trim(zone_cache_t zc, struct zone_depot *head)
7306 {
7307 zone_magazine_t mag;
7308
7309 if (zc->zc_depot_cur == 0 ||
7310 2 * (zc->zc_depot_cur + 1) * zc_mag_size() <= zc->zc_depot_max) {
7311 return;
7312 }
7313
7314 zone_depot_lock(zc);
7315
7316 while (zc->zc_depot_cur &&
7317 2 * (zc->zc_depot_cur + 1) * zc_mag_size() > zc->zc_depot_max) {
7318 mag = STAILQ_FIRST(&zc->zc_depot);
7319 STAILQ_REMOVE_HEAD(&zc->zc_depot, zm_link);
7320 STAILQ_INSERT_TAIL(head, mag, zm_link);
7321 zc->zc_depot_cur--;
7322 }
7323
7324 zone_depot_unlock(zc);
7325 }
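/*
 * Worked example for the trim loop above (values hypothetical): with
 * zc_mag_size() == 16 and zc_depot_max == 128 elements, magazines move
 * to the reclaim list while 2 * (cur + 1) * 16 > 128, i.e. while more
 * than 3 full magazines sit in the per-CPU depot, so at most about half
 * of the allowed element count stays cached locally.
 */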
7326
7327 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7328 ZONE_RECLAIM_TRIM,
7329 ZONE_RECLAIM_DRAIN,
7330 ZONE_RECLAIM_DESTROY,
7331 });
7332
7333 /*!
7334 * @function zone_reclaim
7335 *
7336 * @brief
7337 * Drains or trims the zone.
7338 *
7339 * @discussion
7340 * Draining the zone frees all of its elements.
7341 *
7342 * Trimming the zone tries to respect the working set size, and avoids draining
7343 * the depot when it's not necessary.
7344 *
7345 * @param z The zone to reclaim from
7346 * @param mode The purpose of this reclaim.
7347 */
7348 static void
7349 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7350 {
7351 struct zone_depot mags = STAILQ_HEAD_INITIALIZER(mags);
7352 zone_magazine_t mag;
7353
7354 zone_lock(z);
7355
7356 if (mode == ZONE_RECLAIM_DESTROY) {
7357 if (!z->z_destructible || z->z_elems_rsv) {
7358 panic("zdestroy: Zone %s%s isn't destructible",
7359 zone_heap_name(z), z->z_name);
7360 }
7361
7362 if (!z->z_self || z->z_expander ||
7363 z->z_async_refilling || z->z_expanding_wait) {
7364 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7365 zone_heap_name(z), z->z_name);
7366 }
7367
7368 #if !KASAN_ZALLOC
7369 /*
7370 * Unset the valid bit. We'll hit an assert failure on further
7371 * operations on this zone, until zinit() is called again.
7372 *
7373 * Leave the zone valid for KASan as we will see zfree's on
7374 * quarantined free elements even after the zone is destroyed.
7375 */
7376 z->z_self = NULL;
7377 #endif
7378 z->z_destroyed = true;
7379 } else if (z->z_destroyed) {
7380 return zone_unlock(z);
7381 } else if (z->z_elems_free <= z->z_elems_rsv) {
7382 /* If the zone is under its reserve level, leave it alone. */
7383 return zone_unlock(z);
7384 }
7385
7386 if (z->z_pcpu_cache) {
7387 if (mode != ZONE_RECLAIM_TRIM) {
7388 zpercpu_foreach(zc, z->z_pcpu_cache) {
7389 zc->zc_depot_max /= 2;
7390 }
7391 } else {
7392 zpercpu_foreach(zc, z->z_pcpu_cache) {
7393 if (zc->zc_depot_max > 0) {
7394 zc->zc_depot_max--;
7395 }
7396 }
7397 }
7398
7399 zone_unlock(z);
7400
7401 if (mode == ZONE_RECLAIM_TRIM) {
7402 zpercpu_foreach(zc, z->z_pcpu_cache) {
7403 zone_depot_trim(zc, &mags);
7404 }
7405 } else {
7406 zpercpu_foreach(zc, z->z_pcpu_cache) {
7407 zone_depot_lock(zc);
7408 STAILQ_CONCAT(&mags, &zc->zc_depot);
7409 zc->zc_depot_cur = 0;
7410 zone_depot_unlock(zc);
7411 }
7412 }
7413
7414 zone_lock(z);
7415
7416 uint32_t freed = 0;
7417
7418 STAILQ_FOREACH(mag, &mags, zm_link) {
7419 freed += zone_reclaim_elements(z,
7420 &mag->zm_cur, mag->zm_elems);
7421
7422 if (freed >= zc_free_batch_size) {
7423 z->z_elems_free_min += freed;
7424 z->z_elems_free_max += freed;
7425 z->z_elems_free += freed;
7426 zone_unlock(z);
7427 thread_yield_to_preemption();
7428 zone_lock(z);
7429 freed = 0;
7430 }
7431 }
7432
7433 if (mode == ZONE_RECLAIM_DESTROY) {
7434 zpercpu_foreach(zc, z->z_pcpu_cache) {
7435 freed += zone_reclaim_elements(z,
7436 &zc->zc_alloc_cur, zc->zc_alloc_elems);
7437 freed += zone_reclaim_elements(z,
7438 &zc->zc_free_cur, zc->zc_free_elems);
7439 }
7440
7441 z->z_elems_free_wss = 0;
7442 z->z_elems_free_min = 0;
7443 z->z_elems_free_max = 0;
7444 z->z_contention_cur = 0;
7445 z->z_contention_wma = 0;
7446 } else {
7447 z->z_elems_free_min += freed;
7448 z->z_elems_free_max += freed;
7449 }
7450 z->z_elems_free += freed;
7451 }
7452
7453 for (;;) {
7454 struct zone_page_metadata *meta;
7455 uint32_t count, goal, freed = 0;
7456
7457 goal = z->z_elems_rsv;
7458 if (mode == ZONE_RECLAIM_TRIM) {
7459 /*
7460 * When trimming, only free elements in excess
7461 * of the working set estimate.
7462 *
7463 * However, if the working set estimate is clearly
7464 * growing, ignore the estimate: the next working set
7465 * update will grow it anyway, and we want to avoid
7466 * churn.
7467 */
7468 goal = MAX(goal, MAX(z->z_elems_free_wss,
7469 z->z_elems_free - z->z_elems_free_min));
7470
7471 /*
7472 * Add some slop to account for "the last partial chunk in flight"
7473 * so that we do not deplete the recirculation depot too harshly.
7474 */
7475 goal += z->z_chunk_elems / 2;
7476 }
7477
7478 if (z->z_elems_free <= goal) {
7479 break;
7480 }
7481
7482 /*
7483 * If we're above target, but we have no free page, then drain
7484 * the recirculation depot until we get a free chunk or exhaust
7485 * the depot.
7486 *
7487 * This is rather abrupt, but it tends to reduce
7488 * fragmentation, and the zone will import pages
7489 * again over time as needed.
7490 */
7491 while (z->z_recirc_cur && zone_pva_is_null(z->z_pageq_empty)) {
7492 if (freed >= zc_free_batch_size) {
7493 zone_unlock(z);
7494 zone_magazine_free_list(&mags);
7495 thread_yield_to_preemption();
7496 zone_lock(z);
7497 freed = 0;
7498 /* we dropped the lock, need to reassess */
7499 continue;
7500 }
7501 freed += zone_reclaim_recirc_magazine(z, &mags);
7502 }
7503
7504 if (zone_pva_is_null(z->z_pageq_empty)) {
7505 break;
7506 }
7507
7508 meta = zone_pva_to_meta(z->z_pageq_empty);
7509 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_size(z);
7510
7511 if (z->z_elems_free - count < goal) {
7512 break;
7513 }
7514
7515 zone_reclaim_chunk(z, meta, count, &mags);
7516 }
7517
7518 zone_unlock(z);
7519
7520 zone_magazine_free_list(&mags);
7521 }
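/*
 * Worked example for the ZONE_RECLAIM_TRIM goal above (values
 * hypothetical): with z_elems_rsv == 0, z_elems_free_wss == 100,
 * z_elems_free == 1000, z_elems_free_min == 700 and z_chunk_elems == 64,
 * the goal is MAX(0, MAX(100, 1000 - 700)) + 64 / 2 == 332, so at most
 * 668 free elements are eligible for reclaim, and only in whole empty
 * chunks.
 */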
7522
7523 static void
7524 zone_reclaim_all(zone_reclaim_mode_t mode)
7525 {
7526 /*
7527 * Start with zones with VA sequester since depopulating
7528 * pages will not need to allocate vm map entries for holes,
7529 * which will give memory back to the system faster.
7530 */
7531 zone_index_foreach(zid) {
7532 zone_t z = &zone_array[zid];
7533 if (z == zc_magazine_zone) {
7534 continue;
7535 }
7536 if (zone_security_array[zid].z_va_sequester && z->collectable) {
7537 zone_reclaim(z, mode);
7538 }
7539 }
7540
7541 zone_index_foreach(zid) {
7542 zone_t z = &zone_array[zid];
7543 if (z == zc_magazine_zone) {
7544 continue;
7545 }
7546 if (!zone_security_array[zid].z_va_sequester && z->collectable) {
7547 zone_reclaim(z, mode);
7548 }
7549 }
7550
7551 zone_reclaim(zc_magazine_zone, mode);
7552 }
7553
7554 void
7555 zone_userspace_reboot_checks(void)
7556 {
7557 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7558 if (label_zone_size != 0) {
7559 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7560 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7561 }
7562 }
7563
7564 void
7565 zone_gc(zone_gc_level_t level)
7566 {
7567 zone_reclaim_mode_t mode;
7568 zone_t largest_zone = NULL;
7569
7570 switch (level) {
7571 case ZONE_GC_TRIM:
7572 mode = ZONE_RECLAIM_TRIM;
7573 break;
7574 case ZONE_GC_DRAIN:
7575 mode = ZONE_RECLAIM_DRAIN;
7576 break;
7577 case ZONE_GC_JETSAM:
7578 largest_zone = kill_process_in_largest_zone();
7579 mode = ZONE_RECLAIM_TRIM;
7580 break;
7581 }
7582
7583 current_thread()->options |= TH_OPT_ZONE_PRIV;
7584 lck_mtx_lock(&zone_gc_lock);
7585
7586 zone_reclaim_all(mode);
7587
7588 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7589 /*
7590 * If we possibly killed a process, but we're still critical,
7591 * we need to drain harder.
7592 */
7593 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7594 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7595 }
7596
7597 lck_mtx_unlock(&zone_gc_lock);
7598 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7599 }
7600
7601 void
7602 zone_gc_trim(void)
7603 {
7604 zone_gc(ZONE_GC_TRIM);
7605 }
7606
7607 void
7608 zone_gc_drain(void)
7609 {
7610 zone_gc(ZONE_GC_DRAIN);
7611 }
7612
7613 static bool
7614 zone_defrag_needed(zone_t z)
7615 {
7616 uint32_t recirc_size = z->z_recirc_cur * zc_mag_size();
7617
7618 if (recirc_size <= z->z_chunk_elems / 2) {
7619 return false;
7620 }
7621 if (recirc_size * z->z_elem_size <= zc_defrag_threshold) {
7622 return false;
7623 }
7624 return recirc_size * zc_defrag_ratio > z->z_elems_free_wss * 100;
7625 }
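/*
 * Worked example (values hypothetical): with zc_mag_size() == 16 and
 * z_recirc_cur == 64, the recirculation depot holds 1024 elements.
 * With zc_defrag_ratio == 50, a defrag pass is requested once
 * 1024 * 50 > z_elems_free_wss * 100, i.e. once the depot holds more
 * than twice the working set estimate (and both floor checks pass).
 */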
7626
7627 /*!
7628 * @function zone_defrag
7629 *
7630 * @brief
7631 * Resize the recirculation depot to match the working set size.
7632 *
7633 * @discussion
7634 * When zones grow very large due to a spike in usage, and then some of those
7635 * elements get freed, the elements in magazines in the recirculation depot
7636 * are in no particular order.
7637 *
7638 * In order to control fragmentation, we need to detect "empty" pages so that
7639 * they get onto the @c z_pageq_empty freelist, so that allocations re-pack
7640 * naturally.
7641 *
7642 * This is done very gently, never in excess of the working set and some slop.
7643 */
7644 static bool
7645 zone_autogc_needed(zone_t z)
7646 {
7647 uint32_t free_min = z->z_elems_free_min;
7648
7649 if (free_min * z->z_elem_size <= zc_autogc_threshold) {
7650 return false;
7651 }
7652
7653 return free_min * zc_autogc_ratio > z->z_elems_free_wss * 100;
7654 }
7655
7656 static void
7657 zone_defrag(zone_t z)
7658 {
7659 struct zone_depot mags = STAILQ_HEAD_INITIALIZER(mags);
7660 zone_magazine_t mag, tmp;
7661 uint32_t freed = 0, goal = 0;
7662
7663 zone_lock(z);
7664
7665 goal = z->z_elems_free_wss + z->z_chunk_elems / 2 +
7666 zc_mag_size() - 1;
7667
7668 while (z->z_recirc_cur * zc_mag_size() > goal) {
7669 if (freed >= zc_free_batch_size) {
7670 zone_unlock(z);
7671 thread_yield_to_preemption();
7672 zone_lock(z);
7673 freed = 0;
7674 /* we dropped the lock, need to reassess */
7675 continue;
7676 }
7677 freed += zone_reclaim_recirc_magazine(z, &mags);
7678 }
7679
7680 zone_unlock(z);
7681
7682 STAILQ_FOREACH_SAFE(mag, &mags, zm_link, tmp) {
7683 zone_magazine_free(mag);
7684 }
7685 }
7686
7687 static void
7688 zone_defrag_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7689 {
7690 zone_foreach(z) {
7691 if (!z->collectable || z == zc_magazine_zone) {
7692 continue;
7693 }
7694
7695 if (zone_autogc_needed(z)) {
7696 current_thread()->options |= TH_OPT_ZONE_PRIV;
7697 lck_mtx_lock(&zone_gc_lock);
7698 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7699 lck_mtx_unlock(&zone_gc_lock);
7700 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7701 } else if (zone_defrag_needed(z)) {
7702 zone_defrag(z);
7703 }
7704 }
7705
7706 if (zone_autogc_needed(zc_magazine_zone)) {
7707 current_thread()->options |= TH_OPT_ZONE_PRIV;
7708 lck_mtx_lock(&zone_gc_lock);
7709 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7710 lck_mtx_unlock(&zone_gc_lock);
7711 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7712 } else if (zone_defrag_needed(zc_magazine_zone)) {
7713 zone_defrag(zc_magazine_zone);
7714 }
7715 }
7716
7717 void
7718 compute_zone_working_set_size(__unused void *param)
7719 {
7720 uint32_t zc_auto = zc_auto_threshold;
7721 bool kick_defrag = false;
7722
7723 /*
7724 * Keep zone caching disabled until the first proc is made.
7725 */
7726 if (__improbable(zone_caching_disabled < 0)) {
7727 return;
7728 }
7729
7730 zone_caching_disabled = vm_pool_low();
7731
7732 if (os_mul_overflow(zc_auto, Z_CONTENTION_WMA_UNIT, &zc_auto)) {
7733 zc_auto = 0;
7734 }
7735
7736 zone_foreach(z) {
7737 uint32_t wma;
7738 bool needs_caching = false;
7739
7740 if (z->z_self != z) {
7741 continue;
7742 }
7743
7744 zone_lock(z);
7745
7746 wma = z->z_elems_free_max - z->z_elems_free_min;
7747 wma = (3 * wma + z->z_elems_free_wss) / 4;
7748 z->z_elems_free_max = z->z_elems_free_min = z->z_elems_free;
7749 z->z_elems_free_wss = wma;
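		/*
		 * Worked example (values hypothetical): if the free count
		 * swung by 600 this period (max - min) and the previous
		 * estimate was 200, the new estimate is (3 * 600 + 200) / 4
		 * == 500: an EWMA weighted 3/4 towards the newest sample.
		 */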
7750
7751 if (!kick_defrag &&
7752 (zone_defrag_needed(z) || zone_autogc_needed(z))) {
7753 kick_defrag = true;
7754 }
7755
7756 /* fixed point decimal of contentions per second */
7757 wma = z->z_contention_cur * Z_CONTENTION_WMA_UNIT /
7758 ZONE_WSS_UPDATE_PERIOD;
7759 z->z_contention_cur = 0;
7760 z->z_contention_wma = (3 * wma + z->z_contention_wma) / 4;
7761
7762 /*
7763 * If the zone seems to be very quiet,
7764 * gently lower its cpu-local depot size.
7765 */
7766 if (z->z_pcpu_cache && wma < Z_CONTENTION_WMA_UNIT / 2 &&
7767 z->z_contention_wma < Z_CONTENTION_WMA_UNIT / 2) {
7768 zpercpu_foreach(zc, z->z_pcpu_cache) {
7769 if (zc->zc_depot_max > zc_mag_size()) {
7770 zc->zc_depot_max--;
7771 }
7772 }
7773 }
7774
7775 /*
7776 * If the zone has been contending like crazy for two periods,
7777 * and is eligible, maybe it's time to enable caching.
7778 */
7779 if (!z->z_nocaching && !z->z_pcpu_cache && !z->exhaustible &&
7780 zc_auto && z->z_contention_wma >= zc_auto && wma >= zc_auto) {
7781 needs_caching = true;
7782 }
7783
7784 zone_unlock(z);
7785
7786 if (needs_caching) {
7787 zone_enable_caching(z);
7788 }
7789 }
7790
7791 if (kick_defrag) {
7792 thread_call_enter(&zone_defrag_callout);
7793 }
7794 }
7795
7796 #endif /* !ZALLOC_TEST */
7797 #pragma mark vm integration, MIG routines
7798 #if !ZALLOC_TEST
7799
7800 extern unsigned int stack_total;
7801 #if defined (__x86_64__)
7802 extern unsigned int inuse_ptepages_count;
7803 #endif
7804
7805 static const char *
7806 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
7807 bool is_kt_var)
7808 {
7809 if (is_kt_var) {
7810 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
7811 return cur.ktv_var->kt_name;
7812 } else {
7813 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
7814 return cur.ktv_fixed->kt_zv.zv_name;
7815 }
7816 }
7817
7818 static void
7819 panic_print_types_in_zone(zone_t z, const char* debug_str)
7820 {
7821 kalloc_type_views_t kt_cur = {};
7822 const char *prev_type = "";
7823 size_t skip_over_site = sizeof("site.") - 1;
7824 zone_security_flags_t zsflags = zone_security_config(z);
7825 bool is_kt_var = false;
7826
7827 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
7828 uint32_t heap_id = KT_VAR_PTR_HEAP + ((zone_index(z) -
7829 kalloc_type_heap_array[KT_VAR_PTR_HEAP].kh_zstart) / KHEAP_NUM_ZONES);
7830 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
7831 is_kt_var = true;
7832 } else {
7833 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
7834 }
7835
7836 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
7837 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
7838
7839 while (kt_cur.ktv_fixed) {
7840 kalloc_type_views_t kt_next = {};
7841 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
7842 is_kt_var) + skip_over_site;
7843 if (strcmp(typename, prev_type) != 0) {
7844 paniclog_append_noflush("\t%-50s\n", typename);
7845 prev_type = typename;
7846 }
7847 kt_cur = kt_next;
7848 }
7849 paniclog_append_noflush("\n");
7850 }
7851
7852 static void
7853 panic_display_kalloc_types(void)
7854 {
7855 if (kalloc_type_src_zone) {
7856 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7857 }
7858 if (kalloc_type_dst_zone) {
7859 panic_print_types_in_zone(kalloc_type_dst_zone,
7860 "addr is being freed to");
7861 }
7862 }
7863
7864 static void
7865 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7866 uint64_t *zone_size)
7867 {
7868 zone_index_foreach(zid) {
7869 zone_t z = &zone_array[zid];
7870 vm_offset_t size = zone_size_wired(z);
7871
7872 if (zid == ZONE_ID_VM_PAGES) {
7873 continue;
7874 }
7875 for (uint32_t i = 0; i < n; i++) {
7876 if (size > zone_size[i]) {
7877 largest_zones[i] = z;
7878 zone_size[i] = size;
7879 break;
7880 }
7881 }
7882 }
7883 }
7884
7885 #define NUM_LARGEST_ZONES 5
7886 static void
7887 panic_display_largest_zones(void)
7888 {
7889 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
7890 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
7891
7892 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
7893 (uint64_t *) &largest_size);
7894
7895 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
7896 "Zone Name", "Cur Size", "Free Size");
7897 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
7898 zone_t z = largest_zones[i];
7899 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
7900 zone_heap_name(z), z->z_name,
7901 mach_vm_size_pretty(largest_size[i]),
7902 mach_vm_size_unit(largest_size[i]),
7903 mach_vm_size_pretty(zone_size_free(z)),
7904 mach_vm_size_unit(zone_size_free(z)));
7905 }
7906 }
7907
7908 static void
7909 panic_display_zprint(void)
7910 {
7911 panic_display_largest_zones();
7912 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
7913 (uintptr_t)(kernel_stack_size * stack_total));
7914 #if defined (__x86_64__)
7915 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
7916 (uintptr_t)ptoa(inuse_ptepages_count));
7917 #endif
7918 paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large",
7919 (uintptr_t)kalloc_large_total);
7920
7921 if (panic_kext_memory_info) {
7922 mach_memory_info_t *mem_info = panic_kext_memory_info;
7923
7924 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
7925 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
7926 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
7927 continue;
7928 }
7929 if (mem_info[i].size > (1024 * 1024)) {
7930 paniclog_append_noflush("%-5lld %10lld\n",
7931 mem_info[i].site, mem_info[i].size);
7932 }
7933 }
7934 }
7935 }
7936
7937 static void
7938 panic_display_zone_info(void)
7939 {
7940 paniclog_append_noflush("Zone info:\n");
7941 paniclog_append_noflush(" Zone map: %p - %p\n",
7942 (void *) zone_info.zi_map_range.min_address,
7943 (void *) zone_info.zi_map_range.max_address);
7944 #if CONFIG_PROB_GZALLOC
7945 if (pgz_submap) {
7946 paniclog_append_noflush(" . PGZ : %p - %p\n",
7947 (void *) pgz_submap->min_offset,
7948 (void *) pgz_submap->max_offset);
7949 }
7950 #endif /* CONFIG_PROB_GZALLOC */
7951 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7952 vm_map_t map = zone_submaps[i];
7953
7954 if (map == VM_MAP_NULL) {
7955 continue;
7956 }
7957 paniclog_append_noflush(" . %-6s: %p - %p\n",
7958 zone_submaps_names[i],
7959 (void *) map->min_offset,
7960 (void *) map->max_offset);
7961 }
7962 paniclog_append_noflush(" Metadata: %p - %p\n"
7963 " Bitmaps : %p - %p\n"
7964 "\n",
7965 (void *) zone_info.zi_meta_range.min_address,
7966 (void *) zone_info.zi_meta_range.max_address,
7967 (void *) zone_info.zi_bits_range.min_address,
7968 (void *) zone_info.zi_bits_range.max_address);
7969 }
7970
7971 static void
7972 panic_display_zone_fault(vm_offset_t addr)
7973 {
7974 struct zone_page_metadata meta = { };
7975 vm_map_t map = VM_MAP_NULL;
7976 vm_offset_t oob_offs = 0, size = 0;
7977 int map_idx = -1;
7978 zone_t z = NULL;
7979 const char *kind = "whild deref";
7980 bool oob = false;
7981
7982 /*
7983 * First: look if we bumped into guard pages between submaps
7984 */
7985 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7986 map = zone_submaps[i];
7987 if (map == VM_MAP_NULL) {
7988 continue;
7989 }
7990
7991 if (addr >= map->min_offset && addr < map->max_offset) {
7992 map_idx = i;
7993 break;
7994 }
7995 }
7996
7997 if (map_idx == -1) {
7998 /* this really shouldn't happen, submaps are back to back */
7999 return;
8000 }
8001
8002 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8003
8004 /*
8005 * Second: look if there's just no metadata at all
8006 */
8007 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8008 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8009 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8010 zone_array[meta.zm_index].z_self == NULL) {
8011 paniclog_append_noflush(" Zone : <unknown>\n");
8012 kind = "wild deref, missing or invalid metadata";
8013 } else {
8014 z = &zone_array[meta.zm_index];
8015 paniclog_append_noflush(" Zone : %s%s\n",
8016 zone_heap_name(z), zone_name(z));
8017 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8018 kind = "out-of-bounds (high confidence)";
8019 oob = true;
8020 size = zone_element_size((void *)addr,
8021 &z, false, &oob_offs);
8022 } else {
8023 kind = "use-after-free (medium confidence)";
8024 }
8025 }
8026
8027 paniclog_append_noflush(" Address : %p\n", (void *)addr);
8028 if (oob) {
8029 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
8030 (void *)(trunc_page(addr) - (size - oob_offs)),
8031 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8032 }
8033 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
8034 zone_submaps_names[map_idx],
8035 (void *)map->min_offset, (void *)map->max_offset);
8036 paniclog_append_noflush(" Kind : %s\n", kind);
8037 if (oob) {
8038 paniclog_append_noflush(" Access : %d byte(s) past\n",
8039 (uint32_t)(addr & PAGE_MASK) + 1);
8040 }
8041 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
8042 "0x%04x 0x%08x 0x%08x 0x%08x\n",
8043 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8044 meta.zm_alloc_size, meta.zm_bitmap,
8045 meta.zm_page_next.packed_address,
8046 meta.zm_page_prev.packed_address);
8047 paniclog_append_noflush("\n");
8048 }
8049
8050 void
8051 panic_display_zalloc(void)
8052 {
8053 bool keepsyms = false;
8054
8055 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8056
8057 panic_display_zone_info();
8058
8059 if (panic_fault_address) {
8060 #if CONFIG_PROB_GZALLOC
8061 if (pgz_owned(panic_fault_address)) {
8062 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8063 } else
8064 #endif /* CONFIG_PROB_GZALLOC */
8065 if (zone_maps_owned(panic_fault_address, 1)) {
8066 panic_display_zone_fault(panic_fault_address);
8067 }
8068 }
8069
8070 if (panic_include_zprint) {
8071 panic_display_zprint();
8072 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8073 panic_display_largest_zones();
8074 }
8075 #if CONFIG_ZLEAKS
8076 if (zleak_active) {
8077 panic_display_zleaks(keepsyms);
8078 }
8079 #endif
8080 if (panic_include_kalloc_types) {
8081 panic_display_kalloc_types();
8082 }
8083 }
8084
8085 /*
8086 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8087 * requesting zone information.
8088 * Frees unused pages towards the end of the region, and zero'es out unused
8089 * space on the last page.
8090 */
8091 static vm_map_copy_t
8092 create_vm_map_copy(
8093 vm_offset_t start_addr,
8094 vm_size_t total_size,
8095 vm_size_t used_size)
8096 {
8097 kern_return_t kr;
8098 vm_offset_t end_addr;
8099 vm_size_t free_size;
8100 vm_map_copy_t copy;
8101
8102 if (used_size != total_size) {
8103 end_addr = start_addr + used_size;
8104 free_size = total_size - (round_page(end_addr) - start_addr);
8105
8106 if (free_size >= PAGE_SIZE) {
8107 kmem_free(ipc_kernel_map,
8108 round_page(end_addr), free_size);
8109 }
8110 bzero((char *) end_addr, round_page(end_addr) - end_addr);
8111 }
8112
8113 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8114 (vm_map_size_t)used_size, TRUE, &copy);
8115 assert(kr == KERN_SUCCESS);
8116
8117 return copy;
8118 }
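/*
 * Illustrative sketch of the calling pattern used below (not a new API):
 * callers allocate a page-rounded pageable buffer in ipc_kernel_map,
 * fill the first used_size bytes, then let create_vm_map_copy() trim
 * and copy-in the result:
 *
 *	kmem_alloc(ipc_kernel_map, &addr, round_page(len),
 *	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
 *	// ... write len bytes at addr ...
 *	copy = create_vm_map_copy(addr, round_page(len), len);
 */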
8119
8120 static boolean_t
8121 get_zone_info(
8122 zone_t z,
8123 mach_zone_name_t *zn,
8124 mach_zone_info_t *zi)
8125 {
8126 struct zone zcopy;
8127 vm_size_t cached = 0;
8128
8129 assert(z != ZONE_NULL);
8130 zone_lock(z);
8131 if (!z->z_self) {
8132 zone_unlock(z);
8133 return FALSE;
8134 }
8135 zcopy = *z;
8136 if (z->z_pcpu_cache) {
8137 zpercpu_foreach(zc, z->z_pcpu_cache) {
8138 cached += zc->zc_alloc_cur + zc->zc_free_cur;
8139 cached += zc->zc_depot_cur * zc_mag_size();
8140 }
8141 }
8142 zone_unlock(z);
8143
8144 if (zn != NULL) {
8145 /*
8146 * Append kalloc heap name to zone name (if zone is used by kalloc)
8147 */
8148 char temp_zone_name[MAX_ZONE_NAME] = "";
8149 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8150 zone_heap_name(z), z->z_name);
8151
8152 /* assuming here the name data is static */
8153 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8154 strlen(temp_zone_name) + 1);
8155 }
8156
8157 if (zi != NULL) {
8158 *zi = (mach_zone_info_t) {
8159 .mzi_count = zone_count_allocated(&zcopy) - cached,
8160 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8161 // max_size for zprint is now high-watermark of pages used
8162 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8163 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8164 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8165 .mzi_exhaustible = (uint64_t)zcopy.exhaustible,
8166 };
8167 zpercpu_foreach(zs, zcopy.z_stats) {
8168 zi->mzi_sum_size += zs->zs_mem_allocated;
8169 }
8170 if (zcopy.collectable) {
8171 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8172 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8173 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8174 }
8175 }
8176
8177 return TRUE;
8178 }
8179
8180 /* mach_memory_info entitlement */
8181 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8182
8183 /* macro needed to rate-limit mach_memory_info */
8184 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8185
8186 /* declarations necessary to call kauth_cred_issuser() */
8187 struct ucred;
8188 extern int kauth_cred_issuser(struct ucred *);
8189 extern struct ucred *kauth_cred_get(void);
8190
8191 static kern_return_t
8192 mach_memory_info_security_check(void)
8193 {
8194 /* If not root or does not have the memoryinfo entitlement, fail */
8195 if (!kauth_cred_issuser(kauth_cred_get())) {
8196 return KERN_NO_ACCESS;
8197 }
8198
8199 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8200 if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8201 return KERN_DENIED;
8202 }
8203
8204 /*
8205 * On release non-mac arm devices, allow mach_memory_info
8206 * to be called twice per day per boot. memorymaintenanced
8207 * calls it once per day, which leaves room for a sysdiagnose.
8208 */
8209 static uint64_t first_call, second_call = 0;
8210 uint64_t now = 0;
8211 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8212
8213 if (!first_call) {
8214 first_call = now;
8215 } else if (!second_call) {
8216 second_call = now;
8217 } else if (first_call + NSEC_DAY > now) {
8218 return KERN_DENIED;
8219 } else if (first_call + NSEC_DAY < now) {
8220 first_call = now;
8221 second_call = 0;
8222 }
8223 #endif
8224
8225 return KERN_SUCCESS;
8226 }
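/*
 * Worked example of the rate limit above: calls at t0 and t0 + 1h fill
 * first_call and second_call; a third call at t0 + 2h returns KERN_DENIED
 * because first_call + NSEC_DAY is still in the future. A call at
 * t0 + 25h resets the window (first_call = now, second_call = 0) and
 * succeeds.
 */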
8227
8228 kern_return_t
8229 mach_zone_info(
8230 host_priv_t host,
8231 mach_zone_name_array_t *namesp,
8232 mach_msg_type_number_t *namesCntp,
8233 mach_zone_info_array_t *infop,
8234 mach_msg_type_number_t *infoCntp)
8235 {
8236 return mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8237 }
8238
8239
8240 kern_return_t
8241 mach_memory_info(
8242 host_priv_t host,
8243 mach_zone_name_array_t *namesp,
8244 mach_msg_type_number_t *namesCntp,
8245 mach_zone_info_array_t *infop,
8246 mach_msg_type_number_t *infoCntp,
8247 mach_memory_info_array_t *memoryInfop,
8248 mach_msg_type_number_t *memoryInfoCntp)
8249 {
8250 mach_zone_name_t *names;
8251 vm_offset_t names_addr;
8252 vm_size_t names_size;
8253
8254 mach_zone_info_t *info;
8255 vm_offset_t info_addr;
8256 vm_size_t info_size;
8257
8258 mach_memory_info_t *memory_info;
8259 vm_offset_t memory_info_addr;
8260 vm_size_t memory_info_size;
8261 vm_size_t memory_info_vmsize;
8262 unsigned int num_info;
8263
8264 unsigned int max_zones, used_zones, i;
8265 mach_zone_name_t *zn;
8266 mach_zone_info_t *zi;
8267 kern_return_t kr;
8268
8269 uint64_t zones_collectable_bytes = 0;
8270
8271 if (host == HOST_NULL) {
8272 return KERN_INVALID_HOST;
8273 }
8274
8275 kr = mach_memory_info_security_check();
8276 if (kr != KERN_SUCCESS) {
8277 return kr;
8278 }
8279
8280 /*
8281 * We assume that zones aren't freed once allocated.
8282 * We won't pick up any zones that are allocated later.
8283 */
8284
8285 max_zones = os_atomic_load(&num_zones, relaxed);
8286
8287 names_size = round_page(max_zones * sizeof *names);
8288 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8289 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8290 if (kr != KERN_SUCCESS) {
8291 return kr;
8292 }
8293 names = (mach_zone_name_t *) names_addr;
8294
8295 info_size = round_page(max_zones * sizeof *info);
8296 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8297 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8298 if (kr != KERN_SUCCESS) {
8299 kmem_free(ipc_kernel_map,
8300 names_addr, names_size);
8301 return kr;
8302 }
8303 info = (mach_zone_info_t *) info_addr;
8304
8305 zn = &names[0];
8306 zi = &info[0];
8307
8308 used_zones = max_zones;
8309 for (i = 0; i < max_zones; i++) {
8310 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8311 used_zones--;
8312 continue;
8313 }
8314 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8315 zn++;
8316 zi++;
8317 }
8318
8319 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8320 *namesCntp = used_zones;
8321
8322 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8323 *infoCntp = used_zones;
8324
8325 num_info = 0;
8326 memory_info_addr = 0;
8327
8328 if (memoryInfop && memoryInfoCntp) {
8329 vm_map_copy_t copy;
8330 num_info = vm_page_diagnose_estimate();
8331 memory_info_size = num_info * sizeof(*memory_info);
8332 memory_info_vmsize = round_page(memory_info_size);
8333 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8334 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8335 if (kr != KERN_SUCCESS) {
8336 return kr;
8337 }
8338
8339 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8340 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8341 assert(kr == KERN_SUCCESS);
8342
8343 memory_info = (mach_memory_info_t *) memory_info_addr;
8344 vm_page_diagnose(memory_info, num_info, zones_collectable_bytes);
8345
8346 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8347 assert(kr == KERN_SUCCESS);
8348
8349 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8350 (vm_map_size_t)memory_info_size, TRUE, &copy);
8351 assert(kr == KERN_SUCCESS);
8352
8353 *memoryInfop = (mach_memory_info_t *) copy;
8354 *memoryInfoCntp = num_info;
8355 }
8356
8357 return KERN_SUCCESS;
8358 }
8359
8360 kern_return_t
8361 mach_zone_info_for_zone(
8362 host_priv_t host,
8363 mach_zone_name_t name,
8364 mach_zone_info_t *infop)
8365 {
8366 zone_t zone_ptr;
8367
8368 if (host == HOST_NULL) {
8369 return KERN_INVALID_HOST;
8370 }
8371
8372 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8373 if (!PE_i_can_has_debugger(NULL)) {
8374 return KERN_INVALID_HOST;
8375 }
8376 #endif
8377
8378 if (infop == NULL) {
8379 return KERN_INVALID_ARGUMENT;
8380 }
8381
8382 zone_ptr = ZONE_NULL;
8383 zone_foreach(z) {
8384 /*
8385 * Append kalloc heap name to zone name (if zone is used by kalloc)
8386 */
8387 char temp_zone_name[MAX_ZONE_NAME] = "";
8388 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8389 zone_heap_name(z), z->z_name);
8390
8391 /* Find the requested zone by name */
8392 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8393 zone_ptr = z;
8394 break;
8395 }
8396 }
8397
8398 /* No zones found with the requested zone name */
8399 if (zone_ptr == ZONE_NULL) {
8400 return KERN_INVALID_ARGUMENT;
8401 }
8402
8403 if (get_zone_info(zone_ptr, NULL, infop)) {
8404 return KERN_SUCCESS;
8405 }
8406 return KERN_FAILURE;
8407 }
8408
8409 kern_return_t
8410 mach_zone_info_for_largest_zone(
8411 host_priv_t host,
8412 mach_zone_name_t *namep,
8413 mach_zone_info_t *infop)
8414 {
8415 if (host == HOST_NULL) {
8416 return KERN_INVALID_HOST;
8417 }
8418
8419 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8420 if (!PE_i_can_has_debugger(NULL)) {
8421 return KERN_INVALID_HOST;
8422 }
8423 #endif
8424
8425 if (namep == NULL || infop == NULL) {
8426 return KERN_INVALID_ARGUMENT;
8427 }
8428
8429 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8430 return KERN_SUCCESS;
8431 }
8432 return KERN_FAILURE;
8433 }
8434
8435 uint64_t
8436 get_zones_collectable_bytes(void)
8437 {
8438 uint64_t zones_collectable_bytes = 0;
8439 mach_zone_info_t zi;
8440
8441 zone_foreach(z) {
8442 if (get_zone_info(z, NULL, &zi)) {
8443 zones_collectable_bytes +=
8444 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8445 }
8446 }
8447
8448 return zones_collectable_bytes;
8449 }
8450
8451 kern_return_t
8452 mach_zone_get_zlog_zones(
8453 host_priv_t host,
8454 mach_zone_name_array_t *namesp,
8455 mach_msg_type_number_t *namesCntp)
8456 {
8457 #if ZONE_ENABLE_LOGGING
8458 unsigned int max_zones, logged_zones, i;
8459 kern_return_t kr;
8460 zone_t zone_ptr;
8461 mach_zone_name_t *names;
8462 vm_offset_t names_addr;
8463 vm_size_t names_size;
8464
8465 if (host == HOST_NULL) {
8466 return KERN_INVALID_HOST;
8467 }
8468
8469 if (namesp == NULL || namesCntp == NULL) {
8470 return KERN_INVALID_ARGUMENT;
8471 }
8472
8473 max_zones = os_atomic_load(&num_zones, relaxed);
8474
8475 names_size = round_page(max_zones * sizeof *names);
8476 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8477 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8478 if (kr != KERN_SUCCESS) {
8479 return kr;
8480 }
8481 names = (mach_zone_name_t *) names_addr;
8482
8483 zone_ptr = ZONE_NULL;
8484 logged_zones = 0;
8485 for (i = 0; i < max_zones; i++) {
8486 zone_t z = &(zone_array[i]);
8487 assert(z != ZONE_NULL);
8488
8489 /* Copy out the zone name if zone logging is enabled */
8490 if (z->z_btlog) {
8491 get_zone_info(z, &names[logged_zones], NULL);
8492 logged_zones++;
8493 }
8494 }
8495
8496 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8497 *namesCntp = logged_zones;
8498
8499 return KERN_SUCCESS;
8500
8501 #else /* ZONE_ENABLE_LOGGING */
8502 #pragma unused(host, namesp, namesCntp)
8503 return KERN_FAILURE;
8504 #endif /* ZONE_ENABLE_LOGGING */
8505 }
8506
8507 kern_return_t
8508 mach_zone_get_btlog_records(
8509 host_priv_t host,
8510 mach_zone_name_t name,
8511 zone_btrecord_array_t *recsp,
8512 mach_msg_type_number_t *numrecs)
8513 {
8514 #if ZONE_ENABLE_LOGGING
8515 zone_btrecord_t *recs;
8516 kern_return_t kr;
8517 vm_address_t addr;
8518 vm_size_t size;
8519 zone_t zone_ptr;
8520 vm_map_copy_t copy;
8521
8522 if (host == HOST_NULL) {
8523 return KERN_INVALID_HOST;
8524 }
8525
8526 if (recsp == NULL || numrecs == NULL) {
8527 return KERN_INVALID_ARGUMENT;
8528 }
8529
8530 zone_ptr = ZONE_NULL;
8531 zone_foreach(z) {
8532 /*
8533 * Append kalloc heap name to zone name (if zone is used by kalloc)
8534 */
8535 char temp_zone_name[MAX_ZONE_NAME] = "";
8536 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8537 zone_heap_name(z), z->z_name);
8538
8539 /* Find the requested zone by name */
8540 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8541 zone_ptr = z;
8542 break;
8543 }
8544 }
8545
8546 /* No zones found with the requested zone name */
8547 if (zone_ptr == ZONE_NULL) {
8548 return KERN_INVALID_ARGUMENT;
8549 }
8550
8551 /* Logging not turned on for the requested zone */
8552 if (!zone_ptr->z_btlog) {
8553 return KERN_FAILURE;
8554 }
8555
8556 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8557 if (kr != KERN_SUCCESS) {
8558 return kr;
8559 }
8560
8561 addr = (vm_address_t)recs;
8562 size = sizeof(zone_btrecord_t) * *numrecs;
8563
8564 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8565 assert(kr == KERN_SUCCESS);
8566
8567 *recsp = (zone_btrecord_t *)copy;
8568 return KERN_SUCCESS;
8569
8570 #else /* !ZONE_ENABLE_LOGGING */
8571 #pragma unused(host, name, recsp, numrecs)
8572 return KERN_FAILURE;
8573 #endif /* !ZONE_ENABLE_LOGGING */
8574 }
8575
8576
8577 kern_return_t
8578 mach_zone_force_gc(
8579 host_t host)
8580 {
8581 if (host == HOST_NULL) {
8582 return KERN_INVALID_HOST;
8583 }
8584
8585 #if DEBUG || DEVELOPMENT
8586 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8587 /* Callout to buffer cache GC to drop elements in the apfs zones */
8588 if (consider_buffer_cache_collect != NULL) {
8589 (void)(*consider_buffer_cache_collect)(0);
8590 }
8591 zone_gc(ZONE_GC_DRAIN);
8592 #endif /* DEBUG || DEVELOPMENT */
8593 return KERN_SUCCESS;
8594 }
8595
8596 zone_t
8597 zone_find_largest(uint64_t *zone_size)
8598 {
8599 zone_t largest_zone = 0;
8600 uint64_t largest_zone_size = 0;
8601 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8602 if (zone_size) {
8603 *zone_size = largest_zone_size;
8604 }
8605 return largest_zone;
8606 }
8607
8608 #endif /* !ZALLOC_TEST */
8609 #pragma mark zone creation, configuration, destruction
8610 #if !ZALLOC_TEST
8611
8612 static zone_t
8613 zone_init_defaults(zone_id_t zid)
8614 {
8615 zone_t z = &zone_array[zid];
8616
8617 z->z_wired_max = ~0u;
8618 z->collectable = true;
8619 z->expandable = true;
8620
8621 lck_ticket_init(&z->z_lock, &zone_locks_grp);
8622 STAILQ_INIT(&z->z_recirc);
8623 return z;
8624 }
8625
8626 static bool
8627 zone_is_initializing(zone_t z)
8628 {
8629 return !z->z_self && !z->z_destroyed;
8630 }
8631
8632 void
8633 zone_set_noexpand(zone_t zone, vm_size_t nelems)
8634 {
8635 if (!zone_is_initializing(zone)) {
8636 panic("%s: called after zone_create()", __func__);
8637 }
8638 zone->expandable = false;
8639 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8640 }
8641
8642 void
8643 zone_set_exhaustible(zone_t zone, vm_size_t nelems)
8644 {
8645 if (!zone_is_initializing(zone)) {
8646 panic("%s: called after zone_create()", __func__);
8647 }
8648 zone->expandable = false;
8649 zone->exhaustible = true;
8650 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8651 }
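/*
 * Illustrative usage sketch (hypothetical zone and limit): both knobs
 * above must be set before zone_create() returns, typically from the
 * extra_setup block of zone_create_ext():
 *
 *	zone_create_ext("example.objects", sizeof(struct example),
 *	    ZC_NONE, ZONE_ID_ANY, ^(zone_t z) {
 *		zone_set_exhaustible(z, EXAMPLE_MAX_ELEMS);
 *	});
 */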
8652
8653 void
8654 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
8655 {
8656 zone_t zone = zov.zov_zone;
8657
8658 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
8659 zone = zov.zov_view->zv_zone;
8660 } else {
8661 zone = zov.zov_zone;
8662 }
8663
8664 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
8665 }
8666
8667 /**
8668 * @function zone_create_find
8669 *
8670 * @abstract
8671 * Finds an unused zone for the given name and element size.
8672 *
8673 * @param name the zone name
8674 * @param size the element size (including redzones, ...)
8675 * @param flags the flags passed to @c zone_create*
8676 * @param zid_inout the desired zone ID or ZONE_ID_ANY
8677 *
8678 * @returns a zone to initialize further.
8679 */
8680 static zone_t
8681 zone_create_find(
8682 const char *name,
8683 vm_size_t size,
8684 zone_create_flags_t flags,
8685 zone_id_t *zid_inout)
8686 {
8687 zone_id_t nzones, zid = *zid_inout;
8688 zone_t z;
8689
8690 simple_lock(&all_zones_lock, &zone_locks_grp);
8691
8692 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8693 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8694
8695 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8696 /*
8697 * The first time around, make sure the reserved zone IDs
8698 * have an initialized lock as zone_index_foreach() will
8699 * enumerate them.
8700 */
8701 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8702 zone_init_defaults(nzones++);
8703 }
8704
8705 os_atomic_store(&num_zones, nzones, release);
8706 }
8707
8708 if (zid != ZONE_ID_ANY) {
8709 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8710 panic("zone_create: invalid desired zone ID %d for %s",
8711 zid, name);
8712 }
8713 if (flags & ZC_DESTRUCTIBLE) {
8714 panic("zone_create: ID %d (%s) must be permanent", zid, name);
8715 }
8716 if (zone_array[zid].z_self) {
8717 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
8718 }
8719 z = &zone_array[zid];
8720 } else {
8721 if (flags & ZC_DESTRUCTIBLE) {
8722 /*
8723 * If possible, find a previously zdestroy'ed zone in the
8724 * zone_array that we can reuse.
8725 */
8726 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
8727 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
8728 z = &zone_array[i];
8729
8730 /*
8731 * If the zone name and the element size are the
8732 * same, we can just reuse the old zone struct.
8733 */
8734 if (strcmp(z->z_name, name) || zone_elem_size(z) != size) {
8735 continue;
8736 }
8737 bitmap_clear(zone_destroyed_bitmap, i);
8738 z->z_destroyed = false;
8739 z->z_self = z;
8740 zid = (zone_id_t)i;
8741 goto out;
8742 }
8743 }
8744
8745 zid = nzones++;
8746 z = zone_init_defaults(zid);
8747
8748 /*
8749 * The release barrier pairs with the acquire in
8750 * zone_index_foreach() and makes sure that enumeration loops
8751 * always see an initialized zone lock.
8752 */
8753 os_atomic_store(&num_zones, nzones, release);
8754 }
8755
8756 out:
8757 num_zones_in_use++;
8758 simple_unlock(&all_zones_lock);
8759
8760 *zid_inout = zid;
8761 return z;
8762 }
8763
8764 __abortlike
8765 static void
8766 zone_create_panic(const char *name, const char *f1, const char *f2)
8767 {
8768 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
8769 name, f1, f2);
8770 }
8771 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
8772 if ((flags) & forbidden_flag) { \
8773 zone_create_panic(name, #current_flag, #forbidden_flag); \
8774 }
8775
8776 /*
8777 * Adjusts the size of the element based on minimum size, alignment
8778 * and kasan redzones
8779 */
8780 static vm_size_t
8781 zone_elem_adjust_size(
8782 const char *name __unused,
8783 vm_size_t elem_size,
8784 zone_create_flags_t flags __unused,
8785 uint32_t *redzone __unused)
8786 {
8787 vm_size_t size;
8788 /*
8789 * Adjust element size for minimum size and pointer alignment
8790 */
8791 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
8792 if (size < ZONE_MIN_ELEM_SIZE) {
8793 size = ZONE_MIN_ELEM_SIZE;
8794 }
8795
8796 #if KASAN_ZALLOC
8797 /*
8798 * Expand the zone allocation size to include the redzones.
8799 *
8800 * For page-multiple zones add a full guard page because they
8801 * likely require alignment.
8802 */
8803 uint32_t redzone_tmp;
8804 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU)) {
8805 redzone_tmp = 0;
8806 } else if ((size & PAGE_MASK) == 0) {
8807 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
8808 panic("zone_create: zone %s can't provide more than PAGE_SIZE"
8809 "alignment", name);
8810 }
8811 redzone_tmp = PAGE_SIZE;
8812 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
8813 redzone_tmp = 0;
8814 } else {
8815 redzone_tmp = KASAN_GUARD_SIZE;
8816 }
8817 size += redzone_tmp * 2;
8818 if (redzone) {
8819 *redzone = redzone_tmp;
8820 }
8821 #endif
8822 return size;
8823 }
8824
8825 /*
8826 * Returns the allocation chunk size with the least fragmentation
8827 */
8828 static vm_size_t
8829 zone_get_min_alloc_granule(
8830 vm_size_t elem_size,
8831 zone_create_flags_t flags)
8832 {
8833 vm_size_t alloc_granule = PAGE_SIZE;
8834 if (flags & ZC_PERCPU) {
8835 alloc_granule = PAGE_SIZE * zpercpu_count();
8836 if (PAGE_SIZE % elem_size > 256) {
8837 panic("zone_create: per-cpu zone has too much fragmentation");
8838 }
8839 } else if (flags & ZC_READONLY) {
8840 alloc_granule = PAGE_SIZE;
8841 } else if ((elem_size & PAGE_MASK) == 0) {
8842 /* zero fragmentation by definition */
8843 alloc_granule = elem_size;
8844 } else if (alloc_granule % elem_size == 0) {
8845 /* zero fragmentation by definition */
8846 } else {
8847 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
8848 vm_size_t alloc_tmp = PAGE_SIZE;
8849 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
8850
8851 #if __arm64__
8852 /*
8853 * Increase chunk size to 48K for sizes larger than 4K on 16k
8854 * machines, so as to reduce internal fragmentation for kalloc
8855 * zones with sizes 12K and 24K.
8856 */
8857 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
8858 max_chunk_size = 48 * 1024;
8859 }
8860 #endif
8861 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
8862 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
8863 if (frag_tmp < frag) {
8864 frag = frag_tmp;
8865 alloc_granule = alloc_tmp;
8866 }
8867 }
8868 }
8869 return alloc_granule;
8870 }
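/*
 * Worked example: for a 12KiB element on 16KiB pages, a single page
 * wastes 4KiB and a 32KiB chunk wastes 8KiB (25% fragmentation either
 * way), while the 48KiB chunk allowed on arm64 fits four elements
 * exactly (48KiB % 12KiB == 0), so the search above settles on the
 * 48KiB granule.
 */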
8871
8872 vm_size_t
8873 zone_get_early_alloc_size(
8874 const char *name __unused,
8875 vm_size_t elem_size,
8876 zone_create_flags_t flags,
8877 vm_size_t min_elems)
8878 {
8879 vm_size_t adjusted_size, alloc_granule, chunk_elems;
8880
8881 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
8882 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
8883 chunk_elems = alloc_granule / adjusted_size;
8884
8885 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
8886 }
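/*
 * Worked example (sizes hypothetical): with an adjusted element size of
 * 512 bytes on 4KiB pages, the granule is one page holding 8 elements;
 * a request for min_elems == 20 rounds up to 3 whole chunks, i.e. 12KiB.
 */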
8887
8888 zone_t
8889 zone_create_ext(
8890 const char *name,
8891 vm_size_t size,
8892 zone_create_flags_t flags,
8893 zone_id_t zid,
8894 void (^extra_setup)(zone_t))
8895 {
8896 vm_size_t alloc;
8897 uint32_t redzone;
8898 zone_t z;
8899 zone_security_flags_t *zsflags;
8900
8901 if (size > ZONE_MAX_ALLOC_SIZE) {
8902 panic("zone_create: element size too large: %zd", (size_t)size);
8903 }
8904
8905 if (size < 2 * sizeof(vm_size_t)) {
8906 /* Elements are too small for kasan. */
8907 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
8908 }
8909
8910 size = zone_elem_adjust_size(name, size, flags, &redzone);
8911 /*
8912 * Allocate the zone slot, return early if we found an older match.
8913 */
8914 z = zone_create_find(name, size, flags, &zid);
8915 if (__improbable(z->z_self)) {
8916 /* We found a zone to reuse */
8917 return z;
8918 }
8919
8920 /*
8921 * Initialize the zone properly.
8922 */
8923
8924 /*
8925 * If the kernel is post lockdown, copy the zone name passed in.
8926 * Else simply maintain a pointer to the name string as it can only
8927 * be a core XNU zone (no unloadable kext exists before lockdown).
8928 */
8929 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
8930 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
8931 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
8932 strlcpy(buf, name, nsz);
8933 z->z_name = buf;
8934 } else {
8935 z->z_name = name;
8936 }
8937 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
8938 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
8939 } else {
8940 /*
8941 * zone_init() hasn't run yet, use the storage provided by
8942 * zone_stats_startup(), and zone_init() will replace it
8943 * with the final value once the PERCPU zone exists.
8944 */
8945 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
8946 }
8947
8948 alloc = zone_get_min_alloc_granule(size, flags);
8949
8950 z->z_elem_size = (uint16_t)size;
8951 z->z_chunk_pages = (uint16_t)atop(alloc);
8952 z->z_quo_magic = Z_MAGIC_QUO(size);
8953 z->z_align_magic = Z_MAGIC_ALIGNED(size);
8954 if (flags & ZC_PERCPU) {
8955 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / z->z_elem_size);
8956 z->z_elem_offs = (uint16_t)(PAGE_SIZE % z->z_elem_size);
8957 } else {
8958 z->z_chunk_elems = (uint16_t)(alloc / z->z_elem_size);
8959 z->z_elem_offs = (uint16_t)(alloc % z->z_elem_size);
8960 }
8961 if (zone_element_idx(zone_element_encode(0,
8962 z->z_chunk_elems - 1)) != z->z_chunk_elems - 1) {
8963 panic("zone_element_encode doesn't work for zone [%s]", name);
8964 }
8965
8966 #if KASAN_ZALLOC
8967 z->z_kasan_redzone = redzone;
8968 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
8969 z->kasan_fakestacks = true;
8970 }
8971 #endif
8972
8973 /*
8974 * Handle KPI flags
8975 */
8976 zsflags = &zone_security_array[zid];
8977 /*
8978 * Some zones like ipc ports and procs rely on sequestering for
8979 * correctness, so explicitly turn on sequestering despite the
8980 * configuration.
8981 */
8982 if (flags & ZC_SEQUESTER) {
8983 zsflags->z_va_sequester = true;
8984 }
8985
8986 /* ZC_CACHING applied after all configuration is done */
8987 if (flags & ZC_NOCACHING) {
8988 z->z_nocaching = true;
8989 }
8990
8991 if (flags & ZC_READONLY) {
8992 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
8993 assert(zid <= ZONE_ID__LAST_RO);
8994 #if ZSECURITY_CONFIG(READ_ONLY)
8995 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
8996 zsflags->z_va_sequester = true;
8997 #endif
8998 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
8999 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9000 assert(size <= PAGE_SIZE);
9001 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9002 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9003 (uint32_t)size, zone_heap_name(z), z->z_name);
9004 }
9005 }
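/*
 * Illustration of the 10% fragmentation check above, with hypothetical
 * numbers: on a 16 KiB page, a 6000-byte element wastes
 * 16384 % 6000 = 4384 bytes per page, and 4384 * 10 >= 16384, so such
 * a read-only zone would panic; a 4096-byte element wastes nothing.
 */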
9006
9007 if (flags & ZC_PERCPU) {
9008 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9009 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9010 z->z_percpu = true;
9011 }
9012 if (flags & ZC_NOGC) {
9013 z->collectable = false;
9014 }
9015 /*
9016 * Handle ZC_NOENCRYPT from xnu only
9017 */
9018 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9019 zsflags->z_noencrypt = true;
9020 }
9021 if (flags & ZC_ALIGNMENT_REQUIRED) {
9022 z->alignment_required = true;
9023 }
9024 if (flags & ZC_NOCALLOUT) {
9025 z->no_callout = true;
9026 }
9027 if (flags & ZC_DESTRUCTIBLE) {
9028 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9029 z->z_destructible = true;
9030 }
9031 /*
9032 * Handle Internal flags
9033 */
9034 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9035 if (flags & ZC_PGZ_USE_GUARDS) {
9036 /*
9037 * Try to turn on guard pages only for zones
9038 * with a chance of OOB.
9039 */
9040 if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9041 zsflags->z_pgz_use_guards = true;
9042 }
9043 z->z_pgz_use_guards = true;
9044 }
9045 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9046 if (!(flags & ZC_NOTBITAG)) {
9047 z->z_tbi_tag = true;
9048 }
9049 if (flags & ZC_KALLOC_TYPE) {
9050 zsflags->z_kalloc_type = true;
9051 }
9052 if (flags & ZC_VM) {
9053 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9054 zsflags->z_va_sequester = true;
9055 }
9056 if (flags & ZC_KASAN_NOQUARANTINE) {
9057 z->kasan_noquarantine = true;
9058 }
9059 /* ZC_KASAN_NOREDZONE already handled */
9060
9061 /*
9062 * Then if there's extra tuning, do it
9063 */
9064 if (extra_setup) {
9065 extra_setup(z);
9066 }
9067
9068 /*
9069 * Configure debugging features
9070 */
9071 #if CONFIG_PROB_GZALLOC
9072 if ((flags & (ZC_READONLY | ZC_PERCPU)) == 0) {
9073 pgz_zone_init(z);
9074 }
9075 #endif
9076 #if ZONE_ENABLE_LOGGING
9077 if (startup_phase >= STARTUP_SUB_ZALLOC) {
9078 /*
9079 * Check for and set up zone leak detection
9080 * if requested via boot-args.
9081 */
9082 zone_setup_logging(z);
9083 }
9084 #endif /* ZONE_ENABLE_LOGGING */
9085
9086 #if KASAN_TBI
9087 if (startup_phase >= STARTUP_SUB_ZALLOC) {
9088 zone_setup_kasan_logging(z);
9089 }
9090 #endif /* KASAN_TBI */
9091
9092 #if VM_TAG_SIZECLASSES
9093 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9094 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9095 static uint16_t sizeclass_idx;
9096 z->z_uses_tags = true;
9097 z->z_tags_inline = (((page_size + size - 1) / size) <=
9098 (sizeof(uint32_t) / sizeof(uint16_t)));
9099 if (zsflags->z_kheap_id == KHEAP_ID_DEFAULT) {
9100 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9101 z->z_tags_sizeclass = sizeclass_idx++;
9102 } else {
9103 uint16_t i = 0;
9104 for (; i < sizeclass_idx; i++) {
9105 if (size == zone_tags_sizeclasses[i]) {
9106 z->z_tags_sizeclass = i;
9107 break;
9108 }
9109 }
9110 /*
9111 * Size class wasn't found, add it to zone_tags_sizeclasses
9112 */
9113 if (i == sizeclass_idx) {
9114 assert(i < VM_TAG_SIZECLASSES);
9115 zone_tags_sizeclasses[i] = (uint16_t)size;
9116 z->z_tags_sizeclass = sizeclass_idx++;
9117 }
9118 }
9119 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9120 }
9121 #endif
9122
9123 /*
9124 * Finally, fixup properties based on security policies, boot-args, ...
9125 */
9126 #if ZSECURITY_CONFIG(SUBMAP_USER_DATA)
9127 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9128 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9129 zsflags->z_va_sequester = false;
9130 }
9131 #endif
9132
9133 if ((flags & ZC_CACHING) && !z->z_nocaching) {
9134 /*
9135 * If zcache hasn't been initialized yet, remember our decision.
9136 *
9137 * zone_enable_caching() will be called again by
9138 * zone_cache_bootstrap(), while the system is still single
9139 * threaded, to build the missing caches.
9140 */
9141 if (__probable(zc_magazine_zone)) {
9142 zone_enable_caching(z);
9143 } else {
9144 z->z_pcpu_cache =
9145 __zpcpu_mangle_for_boot(&zone_cache_startup[zid]);
9146 }
9147 }
9148
9149 zone_lock(z);
9150 z->z_self = z;
9151 zone_unlock(z);
9152
9153 return z;
9154 }
9155
9156 __startup_func
9157 void
9158 zone_create_startup(struct zone_create_startup_spec *spec)
9159 {
9160 zone_t z;
9161
9162 z = zone_create_ext(spec->z_name, spec->z_size,
9163 spec->z_flags, spec->z_zid, spec->z_setup);
9164 if (spec->z_var) {
9165 *spec->z_var = z;
9166 }
9167 }
9168
9169 /*
9170 * The first 4 fields of a zone view alias those of a zone, so that the
9171 * zone_or_view_t union works. Trust, but verify.
9172 */
9173 #define zalloc_check_zov_alias(f1, f2) \
9174 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9175 zalloc_check_zov_alias(z_self, zv_zone);
9176 zalloc_check_zov_alias(z_stats, zv_stats);
9177 zalloc_check_zov_alias(z_name, zv_name);
9178 zalloc_check_zov_alias(z_views, zv_next);
9179 #undef zalloc_check_zov_alias
9180
9181 __startup_func
9182 void
9183 zone_view_startup_init(struct zone_view_startup_spec *spec)
9184 {
9185 struct kalloc_heap *heap = NULL;
9186 zone_view_t zv = spec->zv_view;
9187 zone_t z;
9188 zone_security_flags_t zsflags;
9189
9190 switch (spec->zv_heapid) {
9191 case KHEAP_ID_DEFAULT:
9192 panic("%s: Use KALLOC_TYPE_DEFINE for zone view %s instead"
9193 "of ZONE_VIEW_DEFINE as it is from default kalloc heap",
9194 __func__, zv->zv_name);
9195 __builtin_unreachable();
9196 case KHEAP_ID_DATA_BUFFERS:
9197 heap = KHEAP_DATA_BUFFERS;
9198 break;
9199 default:
9200 heap = NULL;
9201 }
9202
9203 if (heap) {
9204 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9205 } else {
9206 z = *spec->zv_zone;
9207 assert(spec->zv_size <= zone_elem_size(z));
9208 }
9209
9210 assert(z);
9211
9212 zv->zv_zone = z;
9213 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9214 zv->zv_next = z->z_views;
9215 zsflags = zone_security_config(z);
9216 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9217 /*
9218 * count the raw view for zones not in a heap,
9219 * kalloc_heap_init() already counts it for its members.
9220 */
9221 zone_view_count += 2;
9222 } else {
9223 zone_view_count += 1;
9224 }
9225 z->z_views = zv;
9226 }
9227
9228 zone_t
9229 zone_create(
9230 const char *name,
9231 vm_size_t size,
9232 zone_create_flags_t flags)
9233 {
9234 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9235 }
9236
9237 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9238
9239 zone_id_t
9240 zone_create_ro(
9241 const char *name,
9242 vm_size_t size,
9243 zone_create_flags_t flags,
9244 zone_create_ro_id_t zc_ro_id)
9245 {
9246 assert(zc_ro_id <= ZC_RO_ID__LAST);
9247 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9248 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9249 return reserved_zid;
9250 }
9251
9252 zone_t
9253 zinit(
9254 vm_size_t size, /* the size of an element */
9255 vm_size_t max, /* maximum memory to use */
9256 vm_size_t alloc __unused, /* allocation size */
9257 const char *name) /* a name for the zone */
9258 {
9259 zone_t z = zone_create(name, size, ZC_DESTRUCTIBLE);
9260 z->z_wired_max = zone_alloc_pages_for_nelems(z, max / size);
9261 return z;
9262 }
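/*
 * A note on the legacy interface above: zinit()'s "max" argument is
 * converted into a cap on wired pages (z_wired_max), and "alloc" is
 * ignored, since the modern allocator picks its own chunk sizes.
 */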
9263
9264 void
9265 zdestroy(zone_t z)
9266 {
9267 unsigned int zindex = zone_index(z);
9268 zone_security_flags_t zsflags = zone_security_array[zindex];
9269
9270 current_thread()->options |= TH_OPT_ZONE_PRIV;
9271 lck_mtx_lock(&zone_gc_lock);
9272
9273 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9274
9275 lck_mtx_unlock(&zone_gc_lock);
9276 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9277
9278 zone_lock(z);
9279
9280 if (!zone_submap_is_sequestered(zsflags)) {
9281 while (!zone_pva_is_null(z->z_pageq_va)) {
9282 struct zone_page_metadata *meta;
9283
9284 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9285 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9286 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9287 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9288 zone_unlock(z);
9289 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9290 ptoa(z->z_chunk_pages));
9291 zone_lock(z);
9292 }
9293 }
9294
9295 #if !KASAN_ZALLOC
9296 /* Assert that all counts are zero */
9297 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9298 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9299 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9300 zone_heap_name(z), z->z_name);
9301 }
9302
9303 /* consistency check: make sure everything is indeed empty */
9304 assert(zone_pva_is_null(z->z_pageq_empty));
9305 assert(zone_pva_is_null(z->z_pageq_partial));
9306 assert(zone_pva_is_null(z->z_pageq_full));
9307 if (!zone_submap_is_sequestered(zsflags)) {
9308 assert(zone_pva_is_null(z->z_pageq_va));
9309 }
9310 #endif
9311
9312 zone_unlock(z);
9313
9314 simple_lock(&all_zones_lock, &zone_locks_grp);
9315
9316 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9317 /* Mark the zone as empty in the bitmap */
9318 bitmap_set(zone_destroyed_bitmap, zindex);
9319 num_zones_in_use--;
9320 assert(num_zones_in_use > 0);
9321
9322 simple_unlock(&all_zones_lock);
9323 }
9324
9325 #endif /* !ZALLOC_TEST */
9326 #pragma mark zalloc module init
9327 #if !ZALLOC_TEST
9328
9329 /*
9330 * Initialize the "zone of zones" which uses fixed memory allocated
9331 * earlier in memory initialization. zone_bootstrap is called
9332 * before zone_init.
9333 */
9334 __startup_func
9335 void
9336 zone_bootstrap(void)
9337 {
9338 #if DEBUG || DEVELOPMENT
9339 #if __x86_64__
9340 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9341 /*
9342 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
9343 * the fp faulting code gets triggered and deadlocks.
9344 */
9345 zone_caching_disabled = 1;
9346 }
9347 #endif /* __x86_64__ */
9348 #endif /* DEBUG || DEVELOPMENT */
9349
9350 /* Validate struct zone_packed_virtual_address expectations */
9351 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9352 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9353 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9354 }
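/*
 * Example of the constraint above: with 16 KiB pages (PAGE_SHIFT 14),
 * kernel pointers may carry up to 14 + 31 = 45 significant bits and
 * their page numbers still fit the 31 bits a zone_pva_t can pack.
 */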
9355
9356 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9357
9358 /*
9359 * Initialize the randomness used to scramble early allocations
9360 */
9361 zpercpu_foreach_cpu(cpu) {
9362 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9363 }
9364
9365 #if CONFIG_PROB_GZALLOC
9366 /*
9367 * Set pgz_sample_counter on the boot CPU so that we do not sample
9368 * any allocation until PGZ has been properly setup (in pgz_init()).
9369 */
9370 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9371 #endif /* CONFIG_PROB_GZALLOC */
9372
9373 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9374 /*
9375 * Randomly assign zones to one of the 4 general submaps,
9376 * and pick whether they allocate from the beginning
9377 * or the end of it.
9378 *
9379 * A lot of OOB exploitation relies on precise interleaving
9380 * of specific types in the heap.
9381 *
9382 * Woops, you can't guarantee that anymore.
9383 */
9384 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9385 uint32_t r = zalloc_random_uniform32(0,
9386 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9387
9388 zone_security_array[i].z_submap_from_end = (r & 1);
9389 zone_security_array[i].z_submap_idx += (r >> 1);
9390 }
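/*
 * Decoding the roll above: r is uniform in [0, 8) for 4 general
 * submaps; bit 0 selects the allocation direction and the remaining
 * bits select the submap. E.g. r = 5 (0b101) means "allocate from
 * the end" of general submap base index + 2.
 */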
9391 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9392
9393 thread_call_setup_with_options(&zone_expand_callout,
9394 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9395 THREAD_CALL_OPTIONS_ONCE);
9396
9397 thread_call_setup_with_options(&zone_defrag_callout,
9398 zone_defrag_async, NULL, THREAD_CALL_PRIORITY_USER,
9399 THREAD_CALL_OPTIONS_ONCE);
9400 }
9401
9402 #define ZONE_GUARD_SIZE (64UL << 10)
9403
9404 #if __LP64__
9405 static inline vm_offset_t
9406 zone_restricted_va_max(void)
9407 {
9408 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
9409 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
9410
9411 return trunc_page(MIN(compressor_max, vm_page_max));
9412 }
9413 #else
9414 static inline vm_offset_t
9415 zone_restricted_va_max(void)
9416 {
9417 return 0;
9418 }
9419 #endif
9420
9421 __startup_func
9422 static void
9423 zone_tunables_fixup(void)
9424 {
9425 int wdt = 0;
9426
9427 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9428 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9429 PE_parse_boot_argn("pgz1", NULL, 0)) {
9430 /*
9431 * if pgz1= was used, but pgz= was not,
9432 * then the more specific pgz1 takes precedence.
9433 */
9434 pgz_all = false;
9435 }
9436 #endif
9437
9438 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9439 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9440 }
9441 if (zc_magazine_size > PAGE_SIZE / ZONE_MIN_ELEM_SIZE) {
9442 zc_magazine_size = (uint16_t)(PAGE_SIZE / ZONE_MIN_ELEM_SIZE);
9443 }
9444 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9445 !PE_parse_boot_argn("zet", NULL, 0)) {
9446 zone_exhausted_timeout = -1;
9447 }
9448 }
9449 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9450
9451 __startup_func
9452 static vm_size_t
9453 zone_phys_size_max(void)
9454 {
9455 vm_size_t zsize;
9456 vm_size_t zsizearg;
9457
9458 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
9459 zsize = zsizearg * (1024ULL * 1024);
9460 } else {
9461 /* Set target zone size as 1/4 of physical memory */
9462 zsize = (vm_size_t)(sane_size >> 2);
9463 #if defined(__LP64__)
9464 zsize += zsize >> 1;
9465 #endif /* __LP64__ */
9466 }
9467
9468 if (zsize < CONFIG_ZONE_MAP_MIN) {
9469 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
9470 }
9471 if (zsize > sane_size >> 1) {
9472 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
9473 }
9474 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
9475 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
9476 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
9477 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
9478 zsize = ZONE_MAP_MAX;
9479 }
9480
9481 return (vm_size_t)trunc_page(zsize);
9482 }
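/*
 * Example, assuming no zsize= boot-arg: on an 8 GiB device,
 * sane_size >> 2 is 2 GiB and the LP64 adjustment raises the target
 * to 3 GiB (3/8 of physical memory), subject to the clamps above.
 */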
9483
9484 __startup_func
9485 static struct mach_vm_range
9486 zone_init_allocate_va(vm_map_address_t addr, vm_size_t size, bool random)
9487 {
9488 int vm_alloc_flags = VM_FLAGS_ANYWHERE;
9489 struct mach_vm_range r;
9490 kern_return_t kr;
9491 vm_map_entry_t entry;
9492
9493 if (random) {
9494 vm_alloc_flags |= VM_FLAGS_RANDOM_ADDR;
9495 }
9496
9497 vm_object_reference(kernel_object);
9498
9499 kr = vm_map_enter(kernel_map, &addr, size, 0,
9500 vm_alloc_flags, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_ZONE,
9501 kernel_object, addr, FALSE, VM_PROT_NONE, VM_PROT_NONE,
9502 VM_INHERIT_NONE);
9503
9504 if (KERN_SUCCESS != kr) {
9505 panic("vm_map_enter(0x%zx) failed: %d", (size_t)size, kr);
9506 }
9507
9508 vm_map_lookup_entry(kernel_map, addr, &entry);
9509 VME_OFFSET_SET(entry, addr);
9510
9511 r.min_address = (vm_offset_t)addr;
9512 r.max_address = (vm_offset_t)addr + size;
9513 return r;
9514 }
9515
9516 __startup_func
9517 static void
9518 zone_submap_init(
9519 mach_vm_offset_t *submap_min,
9520 zone_submap_idx_t idx,
9521 uint64_t zone_sub_map_numer,
9522 uint64_t *remaining_denom,
9523 vm_offset_t *remaining_size)
9524 {
9525 vm_map_create_options_t vmco;
9526 vm_map_address_t addr;
9527 vm_offset_t submap_start, submap_end;
9528 vm_size_t submap_size;
9529 vm_map_t submap;
9530 vm_prot_t prot = VM_PROT_DEFAULT;
9531 vm_prot_t prot_max = VM_PROT_ALL;
9532 kern_return_t kr;
9533
9534 submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9535 *remaining_denom);
9536 submap_start = *submap_min;
9537 submap_end = submap_start + submap_size;
9538
9539 #if defined(__LP64__)
9540 if (idx == Z_SUBMAP_IDX_VM) {
9541 vm_packing_verify_range("vm_compressor",
9542 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9543 vm_packing_verify_range("vm_page",
9544 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9545 }
9546 #endif /* defined(__LP64__) */
9547
9548 vmco = VM_MAP_CREATE_NEVER_FAULTS;
9549 if (!zone_submap_is_sequestered(idx)) {
9550 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9551 }
9552
9553 vm_map_will_allocate_early_map(&zone_submaps[idx]);
9554 submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
9555 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
9556 VM_KERN_MEMORY_ZONE).kmr_submap;
9557
9558 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9559 zone_info.zi_ro_range.min_address = submap_start;
9560 zone_info.zi_ro_range.max_address = submap_end;
9561 prot_max = prot = VM_PROT_NONE;
9562 }
9563
9564 addr = submap_start;
9565 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9566 VM_FLAGS_FIXED | VM_FLAGS_PERMANENT,
9567 VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_ZONE,
9568 kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9569 if (kr != KERN_SUCCESS) {
9570 panic("ksubmap[%s]: failed to make first entry (%d)",
9571 zone_submaps_names[idx], kr);
9572 }
9573
9574 addr = submap_end - ZONE_GUARD_SIZE / 2;
9575 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9576 VM_FLAGS_FIXED | VM_FLAGS_PERMANENT,
9577 VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_ZONE,
9578 kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9579 if (kr != KERN_SUCCESS) {
9580 panic("ksubmap[%s]: failed to make last entry (%d)",
9581 zone_submaps_names[idx], kr);
9582 }
9583
9584 #if DEBUG || DEVELOPMENT
9585 printf("zone_init: map %-5s %p:%p (%u%c)\n",
9586 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9587 mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
9588 #endif /* DEBUG || DEVELOPMENT */
9589
9590 zone_submaps[idx] = submap;
9591 *submap_min = submap_end;
9592 *remaining_size -= submap_size;
9593 *remaining_denom -= zone_sub_map_numer;
9594 }
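/*
 * Note on the proration above: each submap takes
 * zone_sub_map_numer / remaining_denom of the *remaining* VA rather
 * than a fixed share of the total, so page-truncation losses roll
 * forward and the shares always sum exactly to the original size.
 * E.g. with ratios 15/5/... out of 100, the first call takes 15/100
 * of everything and the next takes 5/85 of what is left.
 */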
9595
9596 static inline void
9597 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9598 {
9599 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9600 pva->packed_address += delta;
9601 }
9602 }
9603
9604 /*
9605 * Allocate metadata array and migrate bootstrap initial metadata and memory.
9606 */
9607 __startup_func
9608 static void
9609 zone_metadata_init(void)
9610 {
9611 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9612 vm_map_entry_t first;
9613
9614 struct mach_vm_range meta_r, bits_r, early_r;
9615 vm_size_t early_sz;
9616 vm_offset_t reloc_base;
9617
9618 /*
9619 * Step 1: Allocate the metadata + bitmaps range
9620 *
9621 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
9622 * of physical memory (16M per 1G).
9623 *
9624 * Let's preallocate for the worst case to avoid weird panics.
9625 */
9626 vm_map_will_allocate_early_map(&zone_meta_map);
9627 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9628 zone_meta_size + zone_bits_size, VM_FLAGS_FIXED_RANGE_SUBALLOC,
9629 VM_KERN_MEMORY_ZONE, &zone_meta_map);
9630 meta_r.min_address += ZONE_GUARD_SIZE;
9631 meta_r.max_address -= ZONE_GUARD_SIZE;
9632 bits_r.max_address = meta_r.max_address;
9633 meta_r.max_address -= zone_bits_size;
9634 bits_r.min_address = meta_r.max_address;
9635
9636 #if DEBUG || DEVELOPMENT
9637 printf("zone_init: metadata %p:%p (%u%c)\n",
9638 (void *)meta_r.min_address, (void *)meta_r.max_address,
9639 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
9640 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
9641 printf("zone_init: metabits %p:%p (%u%c)\n",
9642 (void *)bits_r.min_address, (void *)bits_r.max_address,
9643 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
9644 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
9645 #endif /* DEBUG || DEVELOPMENT */
9646
9647 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9648 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
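/*
 * The two lines above are the usual power-of-two alignment idioms:
 * (x + N - 1) & -N rounds x up to a multiple of N and x & -N rounds
 * down, shrinking the bits range to whole ZBA_CHUNK_SIZE chunks.
 */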
9649
9650 /*
9651 * Step 2: Install new ranges.
9652 * Relocate metadata and bits.
9653 */
9654 early_r = zone_info.zi_map_range;
9655 early_sz = mach_vm_range_size(&early_r);
9656
9657 zone_info.zi_map_range = zone_map_range;
9658 zone_info.zi_meta_range = meta_r;
9659 zone_info.zi_bits_range = bits_r;
9660 zone_info.zi_meta_base = (struct zone_page_metadata *)meta_r.min_address -
9661 zone_pva_from_addr(zone_map_range.min_address).packed_address;
9662
9663 vm_map_lock(vm_map);
9664 first = vm_map_first_entry(vm_map);
9665 reloc_base = first->vme_end;
9666 first->vme_end += early_sz;
9667 vm_map->size += early_sz;
9668 vm_map_unlock(vm_map);
9669
9670 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
9671 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
9672 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
9673 /* this needs to sign extend */
9674 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
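/*
 * Example of why the sign extension matters: had the relocation moved
 * memory down by one page, (intptr_t)reloc_delta >> PAGE_SHIFT would
 * be -1, i.e. pva_delta == 0xFFFFFFFF, which decrements packed page
 * numbers correctly in 32-bit modular arithmetic.
 */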
9675
9676 zone_meta_populate(reloc_base, early_sz);
9677 memcpy(new_meta, early_meta,
9678 atop(early_sz) * sizeof(struct zone_page_metadata));
9679 for (uint32_t i = 0; i < atop(early_sz); i++) {
9680 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
9681 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
9682 }
9683
9684 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
9685 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
9686
9687 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
9688 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
9689 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
9690 }
9691
9692 zba_populate(0);
9693 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
9694
9695 /*
9696 * Step 3: Relocate the bootstrap VM structs
9697 * (including rewriting their content).
9698 */
9699
9700 #if __x86_64__
9701 kernel_memory_populate(reloc_base, early_sz,
9702 KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL,
9703 VM_KERN_MEMORY_OSFMK);
9704 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
9705 #else
9706 for (vm_address_t addr = early_r.min_address;
9707 addr < early_r.max_address; addr += PAGE_SIZE) {
9708 pmap_paddr_t pa = kvtophys(trunc_page(addr));
9709 __assert_only kern_return_t kr;
9710
9711 kr = pmap_enter_options_addr(kernel_pmap, addr + reloc_delta,
9712 pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
9713 0, NULL);
9714 assert(kr == KERN_SUCCESS);
9715 }
9716 #endif
9717
9718 #if KASAN
9719 kasan_notify_address(reloc_base, early_sz);
9720 #if CONFIG_KERNEL_TBI && KASAN_TBI
9721 kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
9722 #endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
9723 #endif /* KASAN */
9724
9725 vm_map_relocate_early_maps(reloc_delta);
9726
9727 for (uint32_t i = 0; i < atop(early_sz); i++) {
9728 zone_id_t zid = new_meta[i].zm_index;
9729 zone_t z = &zone_array[zid];
9730 vm_size_t esize = zone_elem_size(z);
9731 vm_address_t base = reloc_base + ptoa(i);
9732 vm_address_t addr;
9733 zone_element_t ze;
9734
9735 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
9736 continue;
9737 }
9738
9739 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
9740 ze = zone_element_encode(base, eidx);
9741 if (zone_meta_is_free(&new_meta[i], ze)) {
9742 continue;
9743 }
9744
9745 addr = zone_element_addr(z, ze, esize);
9746 #if KASAN_ZALLOC
9747 if (z->z_kasan_redzone) {
9748 addr = kasan_alloc(addr, esize,
9749 esize - 2 * z->z_kasan_redzone,
9750 z->z_kasan_redzone);
9751 } else {
9752 kasan_poison_range(addr, esize, ASAN_VALID);
9753 }
9754 #endif
9755 vm_map_relocate_early_elem(zid, addr, reloc_delta);
9756 }
9757 }
9758
9759 #if !__x86_64__
9760 pmap_remove(kernel_pmap, early_r.min_address, early_r.max_address);
9761 #endif
9762 }
9763
9764 uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
9765 #if ZSECURITY_CONFIG(READ_ONLY)
9766 [Z_SUBMAP_IDX_VM] = 15,
9767 [Z_SUBMAP_IDX_READ_ONLY] = 5,
9768 #else
9769 [Z_SUBMAP_IDX_VM] = 20,
9770 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
9771 #if ZSECURITY_CONFIG(SUBMAP_USER_DATA) && ZSECURITY_CONFIG(SAD_FENG_SHUI)
9772 [Z_SUBMAP_IDX_GENERAL_0] = 15,
9773 [Z_SUBMAP_IDX_GENERAL_1] = 15,
9774 [Z_SUBMAP_IDX_GENERAL_2] = 15,
9775 [Z_SUBMAP_IDX_GENERAL_3] = 15,
9776 [Z_SUBMAP_IDX_DATA] = 20,
9777 #elif ZSECURITY_CONFIG(SUBMAP_USER_DATA)
9778 [Z_SUBMAP_IDX_GENERAL_0] = 40,
9779 [Z_SUBMAP_IDX_DATA] = 40,
9780 #elif ZSECURITY_CONFIG(SAD_FENG_SHUI)
9781 #error invalid configuration: SAD_FENG_SHUI requires SUBMAP_USER_DATA
9782 #else
9783 [Z_SUBMAP_IDX_GENERAL_0] = 80,
9784 #endif /* ZSECURITY_CONFIG(SUBMAP_USER_DATA) && ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9785 };
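/*
 * Whichever configuration is selected, the ratios must sum to 100;
 * zone_set_map_sizes() asserts this. E.g. for READ_ONLY +
 * SUBMAP_USER_DATA + SAD_FENG_SHUI: 15 + 5 + 4 * 15 + 20 = 100.
 */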
9786
9787 __startup_func
9788 static void
9789 zone_set_map_sizes(void)
9790 {
9791 uint64_t denom = 0;
9792 zone_pages_wired_max = (uint32_t)atop(zone_phys_size_max());
9793 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9794 denom += submap_ratios[idx];
9795 }
9796 assert(denom == 100);
9797
9798 #if __LP64__
9799 zone_map_size = ZONE_MAP_VA_SIZE_LP64;
9800 #else
9801 zone_map_size = ptoa(zone_pages_wired_max *
9802 (denom + submap_ratios[Z_SUBMAP_IDX_VM]) / denom);
9803 #endif
9804
9805 /*
9806 * Declare restrictions on zone max
9807 */
9808 vm_offset_t restricted_va_max = zone_restricted_va_max();
9809 vm_offset_t vm_submap_size = round_page(
9810 (submap_ratios[Z_SUBMAP_IDX_VM] * zone_map_size) / denom);
9811
9812 #if CONFIG_PROB_GZALLOC
9813 vm_submap_size += pgz_get_size();
9814 #endif /* CONFIG_PROB_GZALLOC */
9815 if (os_sub_overflow(restricted_va_max, vm_submap_size,
9816 &zone_map_range.min_address)) {
9817 zone_map_range.min_address = 0;
9818 }
9819
9820 zone_meta_size = round_page(atop(zone_map_size) *
9821 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
9822 zone_bits_size = round_page(16 * (ptoa(zone_pages_wired_max) >> 10));
9823
9824 #if VM_TAG_SIZECLASSES
9825 if (zone_tagging_on) {
9826 zone_tagbase_map_size = round_page(atop(zone_map_size) * sizeof(uint32_t));
9827 zone_tags_map_size = 2048 * 1024 * sizeof(vm_tag_t);
9828 }
9829 #endif /* VM_TAG_SIZECLASSES */
9830 }
9831 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
9832
9833 /*
9834 * Can't use zone_info.zi_map_range at this point as it is being used to
9835 * store the range of early pmap memory that was stolen to bootstrap the
9836 * necessary VM zones.
9837 */
9838 KMEM_RANGE_REGISTER_DYNAMIC(zones, &zone_map_range, ^() {
9839 return zone_map_size;
9840 });
9841 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^() {
9842 return zone_meta_size + zone_bits_size;
9843 });
9844
9845 #if VM_TAG_SIZECLASSES
9846 KMEM_RANGE_REGISTER_DYNAMIC(zone_tagbase, &zone_tagbase_range, ^() {
9847 return zone_tagbase_map_size;
9848 });
9849 KMEM_RANGE_REGISTER_DYNAMIC(zone_tags, &zone_tags_range, ^() {
9850 return zone_tags_map_size;
9851 });
9852 #endif /* VM_TAG_SIZECLASSES */
9853
9854
9855 /*
9856 * Global initialization of Zone Allocator.
9857 * Runs after zone_bootstrap.
9858 */
9859 __startup_func
9860 static void
9861 zone_init(void)
9862 {
9863 vm_size_t remaining_size;
9864 mach_vm_offset_t submap_min = 0;
9865 uint64_t denom = 100;
9866 /*
9867 * And now allocate the various pieces of VA and submaps.
9868 */
9869
9870 #if !ZSECURITY_CONFIG(KERNEL_DATA_SPLIT)
9871 /*
9872 * Make a first allocation of contiguous VA that we'll deallocate,
9873 * then carve memory out of that range again linearly.
9874 * The kernel is still single threaded at this stage. This doesn't need
9875 * to be done on platforms that declare and process kmem_claims, as that
9876 * process will create a temporary mapping for the required range.
9877 */
9878 zone_map_range = zone_init_allocate_va(0, zone_map_size, false);
9879 #endif /* !ZSECURITY_CONFIG(KERNEL_DATA_SPLIT) */
9880
9881 submap_min = zone_map_range.min_address;
9882 remaining_size = zone_map_size;
9883
9884 #if CONFIG_PROB_GZALLOC
9885 vm_size_t pgz_size = pgz_get_size();
9886
9887 vm_map_will_allocate_early_map(&pgz_submap);
9888 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
9889 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9890 VM_KERN_MEMORY_ZONE, &pgz_submap);
9891
9892 submap_min += pgz_size;
9893 remaining_size -= pgz_size;
9894 #if DEBUG || DEVELOPMENT
9895 printf("zone_init: pgzalloc %p:%p (%u%c) [%d slots]\n",
9896 (void *)zone_info.zi_pgz_range.min_address,
9897 (void *)zone_info.zi_pgz_range.max_address,
9898 mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
9899 pgz_slots);
9900 #endif /* DEBUG || DEVELOPMENT */
9901 #endif /* CONFIG_PROB_GZALLOC */
9902
9903 /*
9904 * Allocate the submaps
9905 */
9906 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9907 if (submap_ratios[idx] == 0) {
9908 zone_submaps[idx] = VM_MAP_NULL;
9909 } else {
9910 zone_submap_init(&submap_min, idx, submap_ratios[idx],
9911 &denom, &remaining_size);
9912 }
9913 }
9914
9915 zone_metadata_init();
9916
9917 #if VM_TAG_SIZECLASSES
9918 if (zone_tagging_on) {
9919 zone_tagging_init();
9920 }
9921 #endif
9922
9923 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
9924 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM_LP64;
9925
9926 (void)zone_create_ext("vm.permanent", 1, kma_flags,
9927 ZONE_ID_PERMANENT, ^(zone_t z) {
9928 z->z_permanent = true;
9929 z->z_elem_size = 1;
9930 });
9931 (void)zone_create_ext("vm.permanent.percpu", 1,
9932 kma_flags | ZC_PERCPU, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
9933 z->z_permanent = true;
9934 z->z_elem_size = 1;
9935 });
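/*
 * Both permanent zones use an element size of 1: zalloc_permanent()
 * carves variable-sized allocations out of these zones' pages, so
 * byte-granular element accounting is (presumably) the natural fit.
 */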
9936
9937 /*
9938 * Now migrate the startup statistics into their final storage.
9939 */
9940 int cpu = cpu_number();
9941 zone_index_foreach(idx) {
9942 zone_t tz = &zone_array[idx];
9943
9944 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
9945 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
9946
9947 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
9948 tz->z_stats = zs;
9949 }
9950 }
9951
9952 #if VM_TAG_SIZECLASSES
9953 if (zone_tagging_on) {
9954 vm_allocation_zones_init();
9955 }
9956 #endif
9957 }
9958 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
9959
9960 __startup_func
9961 static void
9962 zone_cache_bootstrap(void)
9963 {
9964 zone_t magzone;
9965
9966 magzone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
9967 zc_mag_size() * sizeof(zone_element_t),
9968 ZC_VM_LP64 | ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE |
9969 ZC_SEQUESTER | ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
9970 magzone->z_elems_rsv = (uint16_t)(2 * zpercpu_count());
9971
9972 os_atomic_store(&zc_magazine_zone, magzone, compiler_acq_rel);
9973
9974 /*
9975 * Now that we are initialized, we can enable zone caching for zones that
9976 * were made before zcache_bootstrap() was called.
9977 *
9978 * The system is still single threaded so we don't need to take the lock.
9979 */
9980 zone_index_foreach(i) {
9981 zone_t z = &zone_array[i];
9982 if (z->z_pcpu_cache) {
9983 z->z_pcpu_cache = NULL;
9984 zone_enable_caching(z);
9985 }
9986 #if ZONE_ENABLE_LOGGING
9987 if (z->z_self == z) {
9988 zone_setup_logging(z);
9989 }
9990 #endif /* ZONE_ENABLE_LOGGING */
9991 #if KASAN_TBI
9992 zone_setup_kasan_logging(z);
9993 #endif /* KASAN_TBI */
9994 }
9995 }
9996 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, zone_cache_bootstrap);
9997
9998 void
9999 zalloc_first_proc_made(void)
10000 {
10001 zone_caching_disabled = 0;
10002 }
10003
10004 __startup_func
10005 vm_offset_t
10006 zone_early_mem_init(vm_size_t size)
10007 {
10008 vm_offset_t mem;
10009
10010 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10011
10012 /*
10013 * The memory for the zone that is used early to bring up the VM is stolen here.
10014 *
10015 * When the zone subsystem is actually initialized,
10016 * zone_metadata_init() will be called, and those pages
10017 * and the elements they contain, will be relocated into
10018 * the VM submap (even on architectures where those zones
10019 * do not live there).
10020 */
10021 #if __x86_64__
10022 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10023 mem = (vm_offset_t)zone_early_pages_to_cram;
10024 #else
10025 mem = (vm_offset_t)pmap_steal_memory(size);
10026 #endif
10027
10028 zone_info.zi_meta_base = zone_early_meta_array_startup -
10029 zone_pva_from_addr(mem).packed_address;
10030 zone_info.zi_map_range.min_address = mem;
10031 zone_info.zi_map_range.max_address = mem + size;
10032
10033 zone_info.zi_bits_range = (struct mach_vm_range){
10034 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10035 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10036 sizeof(zba_chunk_startup),
10037 };
10038 zba_init_chunk(0);
10039
10040 return mem;
10041 }
10042
10043 #endif /* !ZALLOC_TEST */
10044 #pragma mark - tests
10045 #if DEBUG || DEVELOPMENT
10046
10047 /*
10048 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10049 * thread goes through at a time.
10050 *
10051 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10052 * before zdestroy()), which could lead us to run out of zones.
10053 */
10054 static bool any_zone_test_running = false;
10055
10056 static uintptr_t *
10057 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10058 {
10059 vm_offset_t elem_size = zone_elem_size(z);
10060 vm_offset_t base;
10061 struct zone_page_metadata *meta;
10062
10063 while (!zone_pva_is_null(page_index)) {
10064 base = zone_pva_to_addr(page_index) + zone_elem_offs(z);
10065 meta = zone_pva_to_meta(page_index);
10066
10067 if (meta->zm_inline_bitmap) {
10068 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10069 uint32_t map = meta[i].zm_bitmap;
10070
10071 for (; map; map &= map - 1) {
10072 *elems++ = INSTANCE_PUT(base +
10073 elem_size * __builtin_clz(map));
10074 }
10075 base += elem_size * 32;
10076 }
10077 } else {
10078 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10079 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10080 for (size_t i = 0; i < (1u << order); i++) {
10081 uint64_t map = bits[i];
10082
10083 for (; map; map &= map - 1) {
10084 *elems++ = INSTANCE_PUT(base +
10085 elem_size * __builtin_clzll(map));
10086 }
10087 base += elem_size * 64;
10088 }
10089 }
10090
10091 page_index = meta->zm_page_next;
10092 }
10093 return elems;
10094 }
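/*
 * The inner loops above use Kernighan's trick: map &= map - 1 clears
 * the lowest set bit, so the body runs exactly once per set bit of
 * each bitmap word.
 */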
10095
10096 kern_return_t
10097 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10098 {
10099 zone_t zone = NULL;
10100 uintptr_t * array;
10101 uintptr_t * next;
10102 uintptr_t element;
10103 uint32_t idx, count, found;
10104 uint32_t nobtcount;
10105 uint32_t elemSize;
10106 size_t maxElems;
10107
10108 zone_foreach(z) {
10109 if (!strncmp(zoneName, z->z_name, nameLen)) {
10110 zone = z;
10111 break;
10112 }
10113 }
10114 if (zone == NULL) {
10115 return KERN_INVALID_NAME;
10116 }
10117
10118 elemSize = (uint32_t)zone_elem_size(zone);
10119 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10120
10121 array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10122 if (array == NULL) {
10123 return KERN_RESOURCE_SHORTAGE;
10124 }
10125
10126 zone_lock(zone);
10127
10128 next = array;
10129 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10130 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10131 count = (uint32_t)(next - array);
10132
10133 zone_unlock(zone);
10134
10135 zone_leaks_scan(array, count, (uint32_t)zone_elem_size(zone), &found);
10136 assert(found <= count);
10137
10138 for (idx = 0; idx < count; idx++) {
10139 element = array[idx];
10140 if (kInstanceFlagReferenced & element) {
10141 continue;
10142 }
10143 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10144 }
10145
10146 #if ZONE_ENABLE_LOGGING
10147 if (zone->z_btlog && !corruption_debug_flag) {
10148 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it finds
10149 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10150 btlog_copy_backtraces_for_elements(zone->z_btlog,
10151 (vm_address_t *)array, &count, elemSize, proc);
10152 }
10153 #endif /* ZONE_ENABLE_LOGGING */
10154
10155 for (nobtcount = idx = 0; idx < count; idx++) {
10156 element = array[idx];
10157 if (!element) {
10158 continue;
10159 }
10160 if (kInstanceFlagReferenced & element) {
10161 continue;
10162 }
10163 nobtcount++;
10164 }
10165 if (nobtcount) {
10166 proc(nobtcount, elemSize, BTREF_NULL);
10167 }
10168
10169 kfree_type(vm_offset_t, maxElems, array);
10170 return KERN_SUCCESS;
10171 }
10172
10173 static int
10174 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10175 {
10176 zone_security_flags_t zsflags;
10177 uint32_t x = 4;
10178 uint32_t *test_ptr;
10179
10180 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10181 printf("zone_ro_basic_test: Test already running.\n");
10182 return EALREADY;
10183 }
10184
10185 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10186
10187 for (int i = 0; i < 3; i++) {
10188 #if ZSECURITY_CONFIG(READ_ONLY)
10189 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10190 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10191 printf("zone_ro_basic_test: create a sub-page size zone\n");
10192
10193 printf("zone_ro_basic_test: verify flags were set\n");
10194 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10195
10196 printf("zone_ro_basic_test: zalloc an element\n");
10197 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10198 assert(test_ptr);
10199
10200 printf("zone_ro_basic_test: verify we can't write to it\n");
10201 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10202
10203 x = 4;
10204 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10205 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10206 assert(test_ptr);
10207 assert(*(uint32_t*)test_ptr == x);
10208
10209 x = 5;
10210 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10211 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10212 assert(test_ptr);
10213 assert(*(uint32_t*)test_ptr == x);
10214
10215 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10216 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10217
10218 printf("zone_ro_basic_test: free elem\n");
10219 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10220 assert(!test_ptr);
10221 #else
10222 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10223
10224 printf("zone_ro_basic_test: verify flags were set\n");
10225 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10226
10227 printf("zone_ro_basic_test: zalloc an element\n");
10228 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10229 assert(test_ptr);
10230
10231 x = 4;
10232 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10233 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10234 assert(test_ptr);
10235 assert(*(uint32_t*)test_ptr == x);
10236
10237 x = 5;
10238 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10239 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10240 assert(test_ptr);
10241 assert(*(uint32_t*)test_ptr == x);
10242
10243 printf("zone_ro_basic_test: free elem\n");
10244 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10245 assert(!test_ptr);
10246 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10247 }
10248
10249 printf("zone_ro_basic_test: garbage collection\n");
10250 zone_gc(ZONE_GC_DRAIN);
10251
10252 printf("zone_ro_basic_test: Test passed\n");
10253
10254 *out = 1;
10255 os_atomic_store(&any_zone_test_running, false, relaxed);
10256 return 0;
10257 }
10258 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10259
10260 static int
10261 zone_basic_test_run(__unused int64_t in, int64_t *out)
10262 {
10263 static zone_t test_zone_ptr = NULL;
10264
10265 unsigned int i = 0, max_iter = 5;
10266 void * test_ptr;
10267 zone_t test_zone;
10268 int rc = 0;
10269
10270 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10271 printf("zone_basic_test: Test already running.\n");
10272 return EALREADY;
10273 }
10274
10275 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10276
10277 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10278 do {
10279 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10280 assert(test_zone);
10281
10282 #if KASAN_ZALLOC
10283 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10284 #else
10285 if (test_zone->z_elems_free != 0)
10286 #endif
10287 {
10288 printf("zone_basic_test: free count is not zero\n");
10289 rc = EIO;
10290 goto out;
10291 }
10292
10293 if (test_zone_ptr == NULL) {
10294 /* Stash the zone pointer returned on the first zinit */
10295 printf("zone_basic_test: zone created for the first time\n");
10296 test_zone_ptr = test_zone;
10297 } else if (test_zone != test_zone_ptr) {
10298 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10299 rc = EIO;
10300 goto out;
10301 }
10302
10303 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10304 zfree(test_zone, test_ptr);
10305
10306 zdestroy(test_zone);
10307 i++;
10308
10309 printf("zone_basic_test: Iteration %d successful\n", i);
10310 } while (i < max_iter);
10311
10312 /* test Z_VA_SEQUESTER */
10313 #if ZSECURITY_CONFIG(SEQUESTER)
10314 {
10315 zone_t test_pcpu_zone;
10316 kern_return_t kr;
10317 int idx, num_allocs = 8;
10318 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10319 void *allocs[num_allocs];
10320 void **allocs_pcpu;
10321 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10322
10323 test_zone = zone_create("test_zone_sysctl", elem_size,
10324 ZC_DESTRUCTIBLE);
10325 assert(test_zone);
10326 assert(zone_security_config(test_zone).z_va_sequester);
10327
10328 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10329 ZC_DESTRUCTIBLE | ZC_PERCPU);
10330 assert(test_pcpu_zone);
10331 assert(zone_security_config(test_pcpu_zone).z_va_sequester);
10332
10333 for (idx = 0; idx < num_allocs; idx++) {
10334 allocs[idx] = zalloc(test_zone);
10335 assert(NULL != allocs[idx]);
10336 printf("alloc[%d] %p\n", idx, allocs[idx]);
10337 }
10338 for (idx = 0; idx < num_allocs; idx++) {
10339 zfree(test_zone, allocs[idx]);
10340 }
10341 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10342
10343 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10344 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10345 assert(kr == KERN_SUCCESS);
10346
10347 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10348 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10349 Z_WAITOK | Z_ZERO);
10350 assert(NULL != allocs_pcpu[idx]);
10351 }
10352 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10353 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10354 }
10355 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10356
10357 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10358 vm_page_wire_count, vm_page_free_count,
10359 100L * phys_pages / zone_pages_wired_max);
10360 zone_gc(ZONE_GC_DRAIN);
10361 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10362 vm_page_wire_count, vm_page_free_count,
10363 100L * phys_pages / zone_pages_wired_max);
10364
10365 unsigned int allva = 0;
10366
10367 zone_foreach(z) {
10368 zone_lock(z);
10369 allva += z->z_wired_cur;
10370 if (zone_pva_is_null(z->z_pageq_va)) {
10371 zone_unlock(z);
10372 continue;
10373 }
10374 unsigned count = 0;
10375 uint64_t size;
10376 zone_pva_t pg = z->z_pageq_va;
10377 struct zone_page_metadata *page_meta;
10378 while (pg.packed_address) {
10379 page_meta = zone_pva_to_meta(pg);
10380 count += z->z_percpu ? 1 : z->z_chunk_pages;
10381 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10382 count -= page_meta->zm_page_index;
10383 }
10384 pg = page_meta->zm_page_next;
10385 }
10386 size = zone_size_wired(z);
10387 if (!size) {
10388 size = 1;
10389 }
10390 printf("%s%s: seq %d, res %d, %qd %%\n",
10391 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10392 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10393 zone_unlock(z);
10394 }
10395
10396 printf("total va: %d\n", allva);
10397
10398 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10399 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10400 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10401 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10402 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10403 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10404
10405 for (idx = 0; idx < num_allocs; idx++) {
10406 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10407 }
10408
10409 /* make sure the zone is still usable after a GC */
10410
10411 for (idx = 0; idx < num_allocs; idx++) {
10412 allocs[idx] = zalloc(test_zone);
10413 assert(allocs[idx]);
10414 printf("alloc[%d] %p\n", idx, allocs[idx]);
10415 }
10416 for (idx = 0; idx < num_allocs; idx++) {
10417 zfree(test_zone, allocs[idx]);
10418 }
10419
10420 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10421 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10422 Z_WAITOK | Z_ZERO);
10423 assert(NULL != allocs_pcpu[idx]);
10424 }
10425 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10426 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10427 }
10428
10429 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10430
10431 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10432
10433 zdestroy(test_zone);
10434 zdestroy(test_pcpu_zone);
10435 }
10436 #else
10437 printf("zone_basic_test: skipping sequester test (not enabled)\n");
10438 #endif /* ZSECURITY_CONFIG(SEQUESTER) */
10439
10440 printf("zone_basic_test: Test passed\n");
10441
10442
10443 *out = 1;
10444 out:
10445 os_atomic_store(&any_zone_test_running, false, relaxed);
10446 return rc;
10447 }
10448 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10449
10450 struct zone_stress_obj {
10451 TAILQ_ENTRY(zone_stress_obj) zso_link;
10452 };
10453
10454 struct zone_stress_ctx {
10455 thread_t zsc_leader;
10456 lck_mtx_t zsc_lock;
10457 zone_t zsc_zone;
10458 uint64_t zsc_end;
10459 uint32_t zsc_workers;
10460 };
10461
10462 static void
10463 zone_stress_worker(void *arg, wait_result_t __unused wr)
10464 {
10465 struct zone_stress_ctx *ctx = arg;
10466 bool leader = ctx->zsc_leader == current_thread();
10467 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10468 struct zone_bool_gen bg = { };
10469 struct zone_stress_obj *obj;
10470 uint32_t allocs = 0;
10471
10472 random_bool_init(&bg.zbg_bg);
10473
10474 do {
10475 for (int i = 0; i < 2000; i++) {
10476 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10477 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10478 switch (what) {
10479 case 0:
10480 case 1:
10481 if (allocs < 10000) {
10482 obj = zalloc(ctx->zsc_zone);
10483 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10484 allocs++;
10485 }
10486 break;
10487 case 2:
10488 case 3:
10489 if (allocs < 10000) {
10490 obj = zalloc(ctx->zsc_zone);
10491 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10492 allocs++;
10493 }
10494 break;
10495 case 4:
10496 if (leader) {
10497 zone_gc(ZONE_GC_DRAIN);
10498 }
10499 break;
10500 case 5:
10501 case 6:
10502 if (!TAILQ_EMPTY(&head)) {
10503 obj = TAILQ_FIRST(&head);
10504 TAILQ_REMOVE(&head, obj, zso_link);
10505 zfree(ctx->zsc_zone, obj);
10506 allocs--;
10507 }
10508 break;
10509 case 7:
10510 if (!TAILQ_EMPTY(&head)) {
10511 obj = TAILQ_LAST(&head, zone_stress_head);
10512 TAILQ_REMOVE(&head, obj, zso_link);
10513 zfree(ctx->zsc_zone, obj);
10514 allocs--;
10515 }
10516 break;
10517 }
10518 }
10519 } while (mach_absolute_time() < ctx->zsc_end);
10520
10521 while (!TAILQ_EMPTY(&head)) {
10522 obj = TAILQ_FIRST(&head);
10523 TAILQ_REMOVE(&head, obj, zso_link);
10524 zfree(ctx->zsc_zone, obj);
10525 }
10526
10527 lck_mtx_lock(&ctx->zsc_lock);
10528 if (--ctx->zsc_workers == 0) {
10529 thread_wakeup(ctx);
10530 } else if (leader) {
10531 while (ctx->zsc_workers) {
10532 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10533 THREAD_UNINT);
10534 }
10535 }
10536 lck_mtx_unlock(&ctx->zsc_lock);
10537
10538 if (!leader) {
10539 thread_terminate_self();
10540 __builtin_unreachable();
10541 }
10542 }
10543
10544 static int
10545 zone_stress_test_run(__unused int64_t in, int64_t *out)
10546 {
10547 struct zone_stress_ctx ctx = {
10548 .zsc_leader = current_thread(),
10549 .zsc_workers = 3,
10550 };
10551 kern_return_t kr;
10552 thread_t th;
10553
10554 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10555 printf("zone_stress_test: Test already running.\n");
10556 return EALREADY;
10557 }
10558
10559 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10560 ctx.zsc_zone = zone_create("test_zone_344", 344,
10561 ZC_DESTRUCTIBLE | ZC_NOCACHING);
10562 assert(ctx.zsc_zone->z_chunk_pages > 1);
10563
10564 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10565
10566 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10567
10568 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10569
10570 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10571 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10572 BASEPRI_DEFAULT, &th);
10573 if (kr == KERN_SUCCESS) {
10574 printf("zone_stress_test: thread %d: %p\n", i, th);
10575 thread_deallocate(th);
10576 } else {
10577 ctx.zsc_workers--;
10578 }
10579 }
10580
10581 zone_stress_worker(&ctx, 0);
10582
10583 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10584
10585 zdestroy(ctx.zsc_zone);
10586
10587 printf("zone_stress_test: Done\n");
10588
10589 *out = 1;
10590 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10591 os_atomic_store(&any_zone_test_running, false, relaxed);
10592 return 0;
10593 }
10594 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10595
10596 /*
10597 * Routines to test that zone garbage collection and zone replenish threads
10598 * running at the same time don't cause problems.
10599 */
10600
10601 static int
10602 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
10603 {
10604 zone_gc(ZONE_GC_DRAIN);
10605 *out = 1;
10606 return 0;
10607 }
10608 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
10609
10610 static int
10611 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
10612 {
10613 zone_t z = vm_map_entry_zone;
10614 struct data { struct data *next; } *node, *list = NULL;
10615
10616 if (z == NULL) {
10617 printf("Couldn't find a replenish zone\n");
10618 return EIO;
10619 }
10620
10621 /* big enough to go past replenishment */
10622 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
10623 node = zalloc(z);
10624 node->next = list;
10625 list = node;
10626 }
10627
10628 /*
10629 * release the memory we allocated
10630 */
10631 while (list != NULL) {
10632 node = list;
10633 list = list->next;
10634 zfree(z, node);
10635 }
10636
10637 *out = 1;
10638 return 0;
10639 }
10640 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
10641
10642 #endif /* DEBUG || DEVELOPMENT */
10643