1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97
98 #include <prng/random.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_memtag.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_page.h>
105 #include <vm/vm_pageout.h>
106 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
107
108 #include <pexpert/pexpert.h>
109
110 #include <machine/machparam.h>
111 #include <machine/machine_routines.h> /* ml_cpu_get_info */
112
113 #include <os/atomic.h>
114
115 #include <libkern/OSDebug.h>
116 #include <libkern/OSAtomic.h>
117 #include <libkern/section_keywords.h>
118 #include <sys/kdebug.h>
119 #include <sys/code_signing.h>
120
121 #include <san/kasan.h>
122 #include <libsa/stdlib.h>
123 #include <sys/errno.h>
124
125 #include <IOKit/IOBSD.h>
126 #include <arm64/amcc_rorgn.h>
127
128 #if DEBUG
129 #define z_debug_assert(expr) assert(expr)
130 #else
131 #define z_debug_assert(expr) (void)(expr)
132 #endif
133
134
135 /* Returns pid of the task with the largest number of VM map entries. */
136 extern pid_t find_largest_process_vm_map_entries(void);
137
138 /*
139 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
140 * For any other pid we try to kill that process synchronously.
141 */
142 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
143
144 extern zone_t vm_object_zone;
145 extern zone_t ipc_service_port_label_zone;
146
147 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
148 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
149
150 #if CONFIG_KERNEL_TAGGING
151 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
152 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
153 #else /* CONFIG_KERNEL_TAGGING */
154 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
155 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
156 #endif /* CONFIG_KERNEL_TAGGING */
157
158 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
159 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
160 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
161 #define ZONE_GUARD_DENSE (32 * 1024)
162 #define ZONE_GUARD_SPARSE (64 * 1024)
163 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
164
165 #if XNU_PLATFORM_MacOSX
166 #define ZONE_MAP_MAX (32ULL << 30)
167 #define ZONE_MAP_VA_SIZE (128ULL << 30)
168 #else /* XNU_PLATFORM_MacOSX */
169 #define ZONE_MAP_MAX (8ULL << 30)
170 #define ZONE_MAP_VA_SIZE (24ULL << 30)
171 #endif /* !XNU_PLATFORM_MacOSX */
172
173 __enum_closed_decl(zm_len_t, uint16_t, {
174 ZM_CHUNK_FREE = 0x0,
175 /* 1 through 8 are valid lengths */
176 ZM_CHUNK_LEN_MAX = 0x8,
177
178 /* PGZ magical values */
179 ZM_PGZ_FREE = 0x0,
180 ZM_PGZ_ALLOCATED = 0xa, /* [a]llocated */
181 ZM_PGZ_GUARD = 0xb, /* oo[b] */
182 ZM_PGZ_DOUBLE_FREE = 0xd, /* [d]ouble_free */
183
184 /* secondary page markers */
185 ZM_SECONDARY_PAGE = 0xe,
186 ZM_SECONDARY_PCPU_PAGE = 0xf,
187 });
188
189 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
190
191 struct zone_page_metadata {
192 union {
193 struct {
194 /* The index of the zone this metadata page belongs to */
195 zone_id_t zm_index : 10;
196
197 /*
198 * This chunk ends with a guard page.
199 */
200 uint16_t zm_guarded : 1;
201
202 /*
203 * Whether `zm_bitmap` is an inline bitmap
204 * or a packed bitmap reference
205 */
206 uint16_t zm_inline_bitmap : 1;
207
208 /*
209 * Zones allocate in "chunks" of zone_t::z_chunk_pages
210 * consecutive pages, or zpercpu_count() pages if the
211 * zone is percpu.
212 *
213 * The first page of it has its metadata set with:
214 * - 0 if none of the pages are currently wired
215 * - the number of wired pages in the chunk
216 * (not scaled for percpu).
217 *
218 * Other pages in the chunk have their zm_chunk_len set
219 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
220 * depending on whether the zone is percpu or not.
221 * For those, zm_page_index holds the index of that page
222 * in the run, and zm_subchunk_len the remaining length
223 * within the chunk.
224 *
225 * Metadata used for PGZ pages can have 3 values:
226 * - ZM_PGZ_FREE: slot is free
227 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
228 * at offset (zm_pgz_orig_addr & PAGE_MASK)
229 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
230 * (will panic).
231 */
232 zm_len_t zm_chunk_len : 4;
233 };
234 uint16_t zm_bits;
235 };
236
237 union {
238 #define ZM_ALLOC_SIZE_LOCK 1u
239 uint16_t zm_alloc_size; /* first page only */
240 struct {
241 uint8_t zm_page_index; /* secondary pages only */
242 uint8_t zm_subchunk_len; /* secondary pages only */
243 };
244 uint16_t zm_oob_offs; /* in guard pages */
245 };
246 union {
247 uint32_t zm_bitmap; /* most zones */
248 uint32_t zm_bump; /* permanent zones */
249 };
250
251 union {
252 struct {
253 zone_pva_t zm_page_next;
254 zone_pva_t zm_page_prev;
255 };
256 vm_offset_t zm_pgz_orig_addr;
257 struct zone_page_metadata *zm_pgz_slot_next;
258 };
259 };
260 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
261
262 /*!
263 * @typedef zone_magazine_t
264 *
265 * @brief
266 * Magazine of cached allocations.
267 *
268 * @field zm_next linkage used by magazine depots.
269 * @field zm_elems an array of @c zc_mag_size() elements.
270 */
271 struct zone_magazine {
272 zone_magazine_t zm_next;
273 smr_seq_t zm_seq;
274 vm_offset_t zm_elems[0];
275 };
276
277 /*!
278 * @typedef zone_cache_t
279 *
280 * @brief
281 * Magazine of cached allocations.
282 *
283 * @discussion
284 * Below is a diagram of the caching system. This design is inspired by the
285 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
286 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
287 * zone allocator (itself derived from this seminal work).
288 *
289 * It is divided into 3 layers:
290 * - the per-cpu layer,
291 * - the recirculation depot layer,
292 * - the Zone Allocator.
293 *
294 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
295 * which are stacks of up to @c zc_mag_size() elements.
296 *
297 * <h2>CPU layer</h2>
298 *
299 * The CPU layer (@c zone_cache_t) looks like this:
300 *
301 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
302 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
303 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
304 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
305 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
306 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
307 * ╰─────────┴─────────────────────────────╯
308 *
309 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
310 * or free to. Serialization is achieved through disabling preemption, and only
311 * the current CPU can acces those allocations. This is represented on the left
312 * hand side of the diagram above.
313 *
314 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
315 * full magazines, and is protected by the @c zm_depot_lock for access.
316 * The lock is expected to absolutely never be contended, as only the local CPU
317 * tends to access the local per-cpu depot in regular operation mode.
318 *
319 * However unlike UMA, our implementation allows for the zone GC to reclaim
320 * per-CPU magazines aggresively, which is serialized with the @c zm_depot_lock.
321 *
322 *
323 * <h2>Recirculation Depot</h2>
324 *
325 * The recirculation depot layer is a list similar to the per-cpu depot,
326 * however it is different in two fundamental ways:
327 *
328 * - it is protected by the regular zone lock,
329 * - elements referenced by the magazines in that layer appear free
330 * to the zone layer.
331 *
332 *
333 * <h2>Magazine circulation and sizing</h2>
334 *
335 * The caching system sizes itself dynamically. Operations that allocate/free
336 * a single element call @c zone_lock_nopreempt_check_contention() which records
337 * contention on the lock by doing a trylock and recording its success.
338 *
339 * This information is stored in the @c z_recirc_cont_cur field of the zone,
340 * and a windowed moving average is maintained in @c z_contention_wma.
341 * The periodically run function @c compute_zone_working_set_size() will then
342 * take this into account to decide to grow the number of buckets allowed
343 * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
344 * thresholds.
345 *
346 * The per-cpu layer will attempt to work with its depot, finding both full and
347 * empty magazines cached there. If it can't get what it needs, then it will
348 * mediate with the zone recirculation layer. Such recirculation is done in
349 * batches in order to amortize lock holds.
350 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
351 *
352 * The recirculation layer keeps a track of what the minimum amount of magazines
353 * it had over time was for each of the full and empty queues. This allows for
354 * @c compute_zone_working_set_size() to return memory to the system when a zone
355 * stops being used as much.
356 *
357 * <h2>Security considerations</h2>
358 *
359 * The zone caching layer has been designed to avoid returning elements in
360 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
361 * and @c zfree() free to the (f) magazine, and only swap them when the
362 * requested operation cannot be fulfilled.
363 *
364 * The per-cpu overflow depot or the recirculation depots are similarly used
365 * in FIFO order.
366 *
367 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
368 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
369 * @field zc_free_cur denormalized number of elements in the (f) magazine
370 * @field zc_alloc_elems a pointer to the array of elements in (a)
371 * @field zc_free_elems a pointer to the array of elements in (f)
372 *
373 * @field zc_depot a list of @c zc_depot_cur full magazines
374 */
375 typedef struct zone_cache {
376 hw_lck_ticket_t zc_depot_lock;
377 uint16_t zc_alloc_cur;
378 uint16_t zc_free_cur;
379 vm_offset_t *zc_alloc_elems;
380 vm_offset_t *zc_free_elems;
381 struct zone_depot zc_depot;
382 smr_t zc_smr;
383 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
384 } __attribute__((aligned(64))) * zone_cache_t;
385
386 #if !__x86_64__
387 static
388 #endif
389 __security_const_late struct {
390 struct mach_vm_range zi_map_range; /* all zone submaps */
391 struct mach_vm_range zi_ro_range; /* read-only range */
392 struct mach_vm_range zi_meta_range; /* debugging only */
393 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
394 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
395 struct mach_vm_range zi_pgz_range;
396 struct zone_page_metadata *zi_pgz_meta;
397
398 /*
399 * The metadata lives within the zi_meta_range address range.
400 *
401 * The correct formula to find a metadata index is:
402 * absolute_page_index - page_index(zi_map_range.min_address)
403 *
404 * And then this index is used to dereference zi_meta_range.min_address
405 * as a `struct zone_page_metadata` array.
406 *
407 * To avoid doing that substraction all the time in the various fast-paths,
408 * zi_meta_base are pre-offset with that minimum page index to avoid redoing
409 * that math all the time.
410 */
411 struct zone_page_metadata *zi_meta_base;
412 } zone_info;
413
414 __startup_data static struct mach_vm_range zone_map_range;
415 __startup_data static vm_map_size_t zone_meta_size;
416 __startup_data static vm_map_size_t zone_bits_size;
417 __startup_data static vm_map_size_t zone_xtra_size;
418
419 /*
420 * Initial array of metadata for stolen memory.
421 *
422 * The numbers here have to be kept in sync with vm_map_steal_memory()
423 * so that we have reserved enough metadata.
424 *
425 * After zone_init() has run (which happens while the kernel is still single
426 * threaded), the metadata is moved to its final dynamic location, and
427 * this array is unmapped with the rest of __startup_data at lockdown.
428 */
429 #define ZONE_EARLY_META_INLINE_COUNT 64
430 __startup_data
431 static struct zone_page_metadata
432 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
433
434
435 __startup_data __attribute__((aligned(PAGE_MAX_SIZE)))
436 static uint8_t zone_early_pages_to_cram[PAGE_MAX_SIZE * 16];
437
438 /*
439 * The zone_locks_grp allows for collecting lock statistics.
440 * All locks are associated to this group in zinit.
441 * Look at tools/lockstat for debugging lock contention.
442 */
443 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
444 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
445
446 /*
447 * The zone metadata lock protects:
448 * - metadata faulting,
449 * - VM submap VA allocations,
450 * - early gap page queue list
451 */
452 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
453 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
454
455 /*
456 * Exclude more than one concurrent garbage collection
457 */
458 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
459 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
460 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
461
462 /*
463 * Panic logging metadata
464 */
465 bool panic_include_zprint = false;
466 bool panic_include_kalloc_types = false;
467 zone_t kalloc_type_src_zone = ZONE_NULL;
468 zone_t kalloc_type_dst_zone = ZONE_NULL;
469 mach_memory_info_t *panic_kext_memory_info = NULL;
470 vm_size_t panic_kext_memory_size = 0;
471 vm_offset_t panic_fault_address = 0;
472
473 /*
474 * Protects zone_array, num_zones, num_zones_in_use, and
475 * zone_destroyed_bitmap
476 */
477 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
478 static zone_id_t num_zones_in_use;
479 zone_id_t _Atomic num_zones;
480 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
481
482 /*
483 * Initial globals for zone stats until we can allocate the real ones.
484 * Those get migrated inside the per-CPU ones during zone_init() and
485 * this array is unmapped with the rest of __startup_data at lockdown.
486 */
487
488 /* zone to allocate zone_magazine structs from */
489 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
490 /*
491 * Until pid1 is made, zone caching is off,
492 * until compute_zone_working_set_size() runs for the firt time.
493 *
494 * -1 represents the "never enabled yet" value.
495 */
496 static int8_t zone_caching_disabled = -1;
497
498 __startup_data
499 static struct zone_stats zone_stats_startup[MAX_ZONES];
500 struct zone zone_array[MAX_ZONES];
501 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
502 [0 ... MAX_ZONES - 1] = {
503 .z_kheap_id = KHEAP_ID_NONE,
504 .z_noencrypt = false,
505 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
506 .z_kalloc_type = false,
507 .z_sig_eq = 0
508 },
509 };
510 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
511 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
512
513 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
514 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
515
516 /* Used to keep track of destroyed slots in the zone_array */
517 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
518
519 /* number of zone mapped pages used by all zones */
520 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
521 size_t zone_pages_wired;
522 size_t zone_guard_pages;
523
524 /* Time in (ms) after which we panic for zone exhaustions */
525 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
526 static bool zone_share_always = true;
527 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
528
529 #if VM_TAG_SIZECLASSES
530 /*
531 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
532 * zones only.
533 *
534 * There are 3 kinds of tags that can be used:
535 * - pre-registered VM_KERN_MEMORY_*
536 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
537 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
538 *
539 * The VM tracks the statistics in lazily allocated structures.
540 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
541 *
542 * If for some reason the requested tag cannot be accounted for,
543 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
544 *
545 * Each allocated element also remembers the tag it was assigned,
546 * which lets zalloc/zfree update statistics correctly.
547 */
548
549 /* enable tags for zones that ask for it */
550 static TUNABLE(bool, zone_tagging_on, "-zt", false);
551
552 /*
553 * Array of all sizeclasses used by kalloc variants so that we can
554 * have accounting per size class for each kalloc callsite
555 */
556 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
557 #endif /* VM_TAG_SIZECLASSES */
558
559 #if DEBUG || DEVELOPMENT
560 static int zalloc_simulate_vm_pressure;
561 #endif /* DEBUG || DEVELOPMENT */
562
563 #define Z_TUNABLE(t, n, d) \
564 TUNABLE(t, _##n, #n, d); \
565 __pure2 static inline t n(void) { return _##n; }
566
567 /*
568 * Zone caching tunables
569 *
570 * zc_mag_size():
571 * size of magazines, larger to reduce contention at the expense of memory
572 *
573 * zc_enable_level
574 * number of contentions per second after which zone caching engages
575 * automatically.
576 *
577 * 0 to disable.
578 *
579 * zc_grow_level
580 * number of contentions per second x cpu after which the number of magazines
581 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
582 *
583 * zc_shrink_level
584 * number of contentions per second x cpu below which the number of magazines
585 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
586 *
587 * zc_pcpu_max
588 * maximum memory size in bytes that can hang from a CPU,
589 * which will affect how many magazines are allowed in the depot.
590 *
591 * The alloc/free magazines are assumed to be on average half-empty
592 * and to count for "1" unit of magazines.
593 *
594 * zc_autotrim_size
595 * Size allowed to hang extra from the recirculation depot before
596 * auto-trim kicks in.
597 *
598 * zc_autotrim_buckets
599 *
600 * How many buckets in excess of the working-set are allowed
601 * before auto-trim kicks in for empty buckets.
602 *
603 * zc_free_batch_size
604 * The size of batches of frees/reclaim that can be done keeping
605 * the zone lock held (and preemption disabled).
606 */
607 Z_TUNABLE(uint16_t, zc_mag_size, 8);
608 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
609 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
610 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
611 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
612 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
613 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
614 static Z_TUNABLE(uint32_t, zc_free_batch_size, 256);
615
616 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
617 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
618 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
619 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
620 [Z_SUBMAP_IDX_VM] = "VM",
621 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
622 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
623 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
624 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
625 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
626 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
627 #else
628 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
629 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
630 [Z_SUBMAP_IDX_DATA] = "DATA",
631 };
632
633 #if __x86_64__
634 #define ZONE_ENTROPY_CNT 8
635 #else
636 #define ZONE_ENTROPY_CNT 2
637 #endif
638 static struct zone_bool_gen {
639 struct bool_gen zbg_bg;
640 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
641 } zone_bool_gen[MAX_CPUS];
642
643 #if CONFIG_PROB_GZALLOC
644 /*
645 * Probabilistic gzalloc
646 * =====================
647 *
648 *
649 * Probabilistic guard zalloc samples allocations and will protect them by
650 * double-mapping the page holding them and returning the secondary virtual
651 * address to its callers.
652 *
653 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
654 * are set.
655 *
656 *
657 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
658 * most zalloc/kalloc features:
659 * - zone_require is functional
660 * - zone caching or zone tagging is compatible
661 * - non-blocking allocation work (they will always return NULL with gzalloc).
662 *
663 * PGZ limitations:
664 * - VA sequestering isn't respected, as the slots (which are in limited
665 * quantity) will be reused for any type, however the PGZ quarantine
666 * somewhat mitigates the impact.
667 * - zones with elements larger than a page cannot be protected.
668 *
669 *
670 * Tunables:
671 * --------
672 *
673 * pgz=1:
674 * Turn on probabilistic guard malloc for all zones
675 *
676 * (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
677 *
678 * pgz_sample_rate=0 to 2^31
679 * average sample rate between two guarded allocations.
680 * 0 means every allocation.
681 *
682 * The default is a random number between 1000 and 10,000
683 *
684 * pgz_slots
685 * how many allocations to protect.
686 *
687 * Each costs:
688 * - a PTE in the pmap (when allocated)
689 * - 2 zone page meta's (every other page is a "guard" one, 32B total)
690 * - 64 bytes per backtraces.
691 * On LP64 this is <16K per 100 slots.
692 *
693 * The default is ~200 slots per G of physical ram (32k / G)
694 *
695 * TODO:
696 * - try harder to allocate elements at the "end" to catch OOB more reliably.
697 *
698 * pgz_quarantine
699 * how many slots should be free at any given time.
700 *
701 * PGZ will round robin through free slots to be reused, but free slots are
702 * important to detect use-after-free by acting as a quarantine.
703 *
704 * By default, PGZ will keep 33% of the slots around at all time.
705 *
706 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
707 * Specific zones for which to enable probabilistic guard malloc.
708 * There must be no numbering gap (names after the gap will be ignored).
709 */
710 #if DEBUG || DEVELOPMENT
711 static TUNABLE(bool, pgz_all, "pgz", true);
712 #else
713 static TUNABLE(bool, pgz_all, "pgz", false);
714 #endif
715 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
716 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
717 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
718 #endif /* CONFIG_PROB_GZALLOC */
719
720 static zone_t zone_find_largest(uint64_t *zone_size);
721
722 #endif /* !ZALLOC_TEST */
723 #pragma mark Zone metadata
724 #if !ZALLOC_TEST
725
726 static inline bool
zone_has_index(zone_t z,zone_id_t zid)727 zone_has_index(zone_t z, zone_id_t zid)
728 {
729 return zone_array + zid == z;
730 }
731
732 __abortlike
733 void
zone_invalid_panic(zone_t zone)734 zone_invalid_panic(zone_t zone)
735 {
736 panic("zone %p isn't in the zone_array", zone);
737 }
738
739 __abortlike
740 static void
zone_metadata_corruption(zone_t zone,struct zone_page_metadata * meta,const char * kind)741 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
742 const char *kind)
743 {
744 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
745 kind, meta, zone_heap_name(zone), zone->z_name);
746 }
747
748 __abortlike
749 static void
zone_invalid_element_addr_panic(zone_t zone,vm_offset_t addr)750 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
751 {
752 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
753 (void *)addr, zone_heap_name(zone), zone->z_name);
754 }
755
756 __abortlike
757 static void
zone_page_metadata_index_confusion_panic(zone_t zone,vm_offset_t addr,struct zone_page_metadata * meta)758 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
759 struct zone_page_metadata *meta)
760 {
761 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
762 zone_id_t zidx;
763 zone_t src_zone;
764
765 if (zsflags.z_kalloc_type) {
766 panic_include_kalloc_types = true;
767 kalloc_type_dst_zone = zone;
768 }
769
770 zidx = meta->zm_index;
771 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
772 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
773 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
774 zidx);
775 }
776
777 src_zone = &zone_array[zidx];
778 src_zsflags = zone_security_array[zidx];
779 if (src_zsflags.z_kalloc_type) {
780 panic_include_kalloc_types = true;
781 kalloc_type_src_zone = src_zone;
782 }
783
784 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
785 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
786 zone_heap_name(src_zone), src_zone->z_name, zidx);
787 }
788
789 __abortlike
790 static void
zone_page_metadata_list_corruption(zone_t zone,struct zone_page_metadata * meta)791 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
792 {
793 panic("metadata list corruption through element %p detected in zone %s%s",
794 meta, zone_heap_name(zone), zone->z_name);
795 }
796
797 __abortlike
798 static void
zone_page_meta_accounting_panic(zone_t zone,struct zone_page_metadata * meta,const char * kind)799 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
800 const char *kind)
801 {
802 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
803 zone_heap_name(zone), zone->z_name, meta);
804 }
805
806 __abortlike
807 static void
zone_meta_double_free_panic(zone_t zone,vm_offset_t addr,const char * caller)808 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
809 {
810 panic("%s: double free of %p to zone %s%s", caller,
811 (void *)addr, zone_heap_name(zone), zone->z_name);
812 }
813
814 __abortlike
815 static void
zone_accounting_panic(zone_t zone,const char * kind)816 zone_accounting_panic(zone_t zone, const char *kind)
817 {
818 panic("accounting mismatch (%s) for zone %s%s", kind,
819 zone_heap_name(zone), zone->z_name);
820 }
821
822 #define zone_counter_sub(z, stat, value) ({ \
823 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
824 zone_accounting_panic(z, #stat " wrap-around"); \
825 } \
826 (z)->stat; \
827 })
828
829 static inline uint16_t
zone_meta_alloc_size_add(zone_t z,struct zone_page_metadata * m,vm_offset_t esize)830 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
831 vm_offset_t esize)
832 {
833 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
834 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
835 }
836 return m->zm_alloc_size;
837 }
838
839 static inline uint16_t
zone_meta_alloc_size_sub(zone_t z,struct zone_page_metadata * m,vm_offset_t esize)840 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
841 vm_offset_t esize)
842 {
843 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
844 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
845 }
846 return m->zm_alloc_size;
847 }
848
849 __abortlike
850 static void
zone_nofail_panic(zone_t zone)851 zone_nofail_panic(zone_t zone)
852 {
853 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
854 zone_heap_name(zone), zone->z_name);
855 }
856
857 __header_always_inline bool
zone_spans_ro_va(vm_offset_t addr_start,vm_offset_t addr_end)858 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
859 {
860 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
861 struct mach_vm_range r = { addr_start, addr_end };
862
863 return mach_vm_range_intersects(ro_r, &r);
864 }
865
866 #define from_range(r, addr, size) \
867 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
868 mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
869 mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
870
871 #define from_ro_map(addr, size) \
872 from_range(&zone_info.zi_ro_range, addr, size)
873
874 #define from_zone_map(addr, size) \
875 from_range(&zone_info.zi_map_range, addr, size)
876
877 __header_always_inline bool
zone_pva_is_null(zone_pva_t page)878 zone_pva_is_null(zone_pva_t page)
879 {
880 return page.packed_address == 0;
881 }
882
883 __header_always_inline bool
zone_pva_is_queue(zone_pva_t page)884 zone_pva_is_queue(zone_pva_t page)
885 {
886 // actual kernel pages have the top bit set
887 return (int32_t)page.packed_address > 0;
888 }
889
890 __header_always_inline bool
zone_pva_is_equal(zone_pva_t pva1,zone_pva_t pva2)891 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
892 {
893 return pva1.packed_address == pva2.packed_address;
894 }
895
896 __header_always_inline zone_pva_t *
zone_pageq_base(void)897 zone_pageq_base(void)
898 {
899 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
900
901 /*
902 * `-1` so that if the first __DATA variable is a page queue,
903 * it gets a non 0 index
904 */
905 return data_seg_start - 1;
906 }
907
908 __header_always_inline void
zone_queue_set_head(zone_t z,zone_pva_t queue,zone_pva_t oldv,struct zone_page_metadata * meta)909 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
910 struct zone_page_metadata *meta)
911 {
912 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
913
914 if (!zone_pva_is_equal(*queue_head, oldv)) {
915 zone_page_metadata_list_corruption(z, meta);
916 }
917 *queue_head = meta->zm_page_next;
918 }
919
920 __header_always_inline zone_pva_t
zone_queue_encode(zone_pva_t * headp)921 zone_queue_encode(zone_pva_t *headp)
922 {
923 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
924 }
925
926 __header_always_inline zone_pva_t
zone_pva_from_addr(vm_address_t addr)927 zone_pva_from_addr(vm_address_t addr)
928 {
929 // cannot use atop() because we want to maintain the sign bit
930 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
931 }
932
933 __header_always_inline vm_address_t
zone_pva_to_addr(zone_pva_t page)934 zone_pva_to_addr(zone_pva_t page)
935 {
936 // cause sign extension so that we end up with the right address
937 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
938 }
939
940 __header_always_inline struct zone_page_metadata *
zone_pva_to_meta(zone_pva_t page)941 zone_pva_to_meta(zone_pva_t page)
942 {
943 return &zone_info.zi_meta_base[page.packed_address];
944 }
945
946 __header_always_inline zone_pva_t
zone_pva_from_meta(struct zone_page_metadata * meta)947 zone_pva_from_meta(struct zone_page_metadata *meta)
948 {
949 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
950 }
951
952 __header_always_inline struct zone_page_metadata *
zone_meta_from_addr(vm_offset_t addr)953 zone_meta_from_addr(vm_offset_t addr)
954 {
955 return zone_pva_to_meta(zone_pva_from_addr(addr));
956 }
957
958 __header_always_inline zone_id_t
zone_index_from_ptr(const void * ptr)959 zone_index_from_ptr(const void *ptr)
960 {
961 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
962 }
963
964 __header_always_inline vm_offset_t
zone_meta_to_addr(struct zone_page_metadata * meta)965 zone_meta_to_addr(struct zone_page_metadata *meta)
966 {
967 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
968 }
969
970 __attribute__((overloadable))
971 __header_always_inline void
zone_meta_validate(zone_t z,struct zone_page_metadata * meta,vm_address_t addr)972 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
973 {
974 if (!zone_has_index(z, meta->zm_index)) {
975 zone_page_metadata_index_confusion_panic(z, addr, meta);
976 }
977 }
978
979 __attribute__((overloadable))
980 __header_always_inline void
zone_meta_validate(zone_t z,struct zone_page_metadata * meta)981 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
982 {
983 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
984 }
985
986 __header_always_inline void
zone_meta_queue_push(zone_t z,zone_pva_t * headp,struct zone_page_metadata * meta)987 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
988 struct zone_page_metadata *meta)
989 {
990 zone_pva_t head = *headp;
991 zone_pva_t queue_pva = zone_queue_encode(headp);
992 struct zone_page_metadata *tmp;
993
994 meta->zm_page_next = head;
995 if (!zone_pva_is_null(head)) {
996 tmp = zone_pva_to_meta(head);
997 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
998 zone_page_metadata_list_corruption(z, meta);
999 }
1000 tmp->zm_page_prev = zone_pva_from_meta(meta);
1001 }
1002 meta->zm_page_prev = queue_pva;
1003 *headp = zone_pva_from_meta(meta);
1004 }
1005
1006 __header_always_inline struct zone_page_metadata *
zone_meta_queue_pop(zone_t z,zone_pva_t * headp)1007 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1008 {
1009 zone_pva_t head = *headp;
1010 struct zone_page_metadata *meta = zone_pva_to_meta(head);
1011 struct zone_page_metadata *tmp;
1012
1013 zone_meta_validate(z, meta);
1014
1015 if (!zone_pva_is_null(meta->zm_page_next)) {
1016 tmp = zone_pva_to_meta(meta->zm_page_next);
1017 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1018 zone_page_metadata_list_corruption(z, meta);
1019 }
1020 tmp->zm_page_prev = meta->zm_page_prev;
1021 }
1022 *headp = meta->zm_page_next;
1023
1024 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1025
1026 return meta;
1027 }
1028
1029 __header_always_inline void
zone_meta_remqueue(zone_t z,struct zone_page_metadata * meta)1030 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1031 {
1032 zone_pva_t meta_pva = zone_pva_from_meta(meta);
1033 struct zone_page_metadata *tmp;
1034
1035 if (!zone_pva_is_null(meta->zm_page_next)) {
1036 tmp = zone_pva_to_meta(meta->zm_page_next);
1037 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1038 zone_page_metadata_list_corruption(z, meta);
1039 }
1040 tmp->zm_page_prev = meta->zm_page_prev;
1041 }
1042 if (zone_pva_is_queue(meta->zm_page_prev)) {
1043 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1044 } else {
1045 tmp = zone_pva_to_meta(meta->zm_page_prev);
1046 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1047 zone_page_metadata_list_corruption(z, meta);
1048 }
1049 tmp->zm_page_next = meta->zm_page_next;
1050 }
1051
1052 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1053 }
1054
1055 __header_always_inline void
zone_meta_requeue(zone_t z,zone_pva_t * headp,struct zone_page_metadata * meta)1056 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1057 struct zone_page_metadata *meta)
1058 {
1059 zone_meta_remqueue(z, meta);
1060 zone_meta_queue_push(z, headp, meta);
1061 }
1062
1063 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1064 static inline void
zone_meta_lock_in_partial(zone_t z,struct zone_page_metadata * m,uint32_t len)1065 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1066 {
1067 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1068
1069 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1070 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1071 zone_meta_requeue(z, &z->z_pageq_partial, m);
1072 zone_counter_sub(z, z_wired_empty, len);
1073 }
1074 }
1075
1076 /* allows a given metadata to reach the z_pageq_empty queue again */
1077 static inline void
zone_meta_unlock_from_partial(zone_t z,struct zone_page_metadata * m,uint32_t len)1078 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1079 {
1080 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1081
1082 assert(new_size % sizeof(vm_offset_t) == 0);
1083 if (new_size == 0) {
1084 zone_meta_requeue(z, &z->z_pageq_empty, m);
1085 z->z_wired_empty += len;
1086 }
1087 }
1088
1089 /*
1090 * Routine to populate a page backing metadata in the zone_metadata_region.
1091 * Must be called without the zone lock held as it might potentially block.
1092 */
1093 static void
zone_meta_populate(vm_offset_t base,vm_size_t size)1094 zone_meta_populate(vm_offset_t base, vm_size_t size)
1095 {
1096 struct zone_page_metadata *from = zone_meta_from_addr(base);
1097 struct zone_page_metadata *to = from + atop(size);
1098 vm_offset_t page_addr = trunc_page(from);
1099
1100 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1101 #if !KASAN
1102 /*
1103 * This can race with another thread doing a populate on the same metadata
1104 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1105 * fault in the shadow when we first access the metadata page. Avoid this
1106 * by always synchronizing on the zone_metadata_region lock with KASan.
1107 */
1108 if (pmap_find_phys(kernel_pmap, page_addr)) {
1109 continue;
1110 }
1111 #endif
1112
1113 for (;;) {
1114 kern_return_t ret = KERN_SUCCESS;
1115
1116 /*
1117 * All updates to the zone_metadata_region are done
1118 * under the zone_metadata_region_lck
1119 */
1120 zone_meta_lock();
1121 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1122 ret = kernel_memory_populate(page_addr,
1123 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1124 VM_KERN_MEMORY_OSFMK);
1125 }
1126 zone_meta_unlock();
1127
1128 if (ret == KERN_SUCCESS) {
1129 break;
1130 }
1131
1132 /*
1133 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1134 * to bad system deadlocks, so if the allocation failed,
1135 * we need to do the VM_PAGE_WAIT() outside of the lock.
1136 */
1137 VM_PAGE_WAIT();
1138 }
1139 }
1140 }
1141
1142 __abortlike
1143 static void
zone_invalid_element_panic(zone_t zone,vm_offset_t addr)1144 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1145 {
1146 struct zone_page_metadata *meta;
1147 const char *from_cache = "";
1148 vm_offset_t page;
1149
1150 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1151 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1152 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1153 }
1154 page = trunc_page(addr);
1155 meta = zone_meta_from_addr(addr);
1156
1157 if (!zone_has_index(zone, meta->zm_index)) {
1158 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1159 }
1160
1161 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1162 panic("metadata %p corresponding to addr %p being freed to "
1163 "zone %s%s%s, is marked as secondary per cpu page",
1164 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1165 from_cache);
1166 }
1167 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1168 page -= ptoa(meta->zm_page_index);
1169 meta -= meta->zm_page_index;
1170 }
1171
1172 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1173 panic("metadata %p corresponding to addr %p being freed to "
1174 "zone %s%s%s, has chunk len greater than max",
1175 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1176 from_cache);
1177 }
1178
1179 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1180 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1181 "zone element size", (void *)addr, zone_heap_name(zone),
1182 zone->z_name, from_cache);
1183 }
1184
1185 zone_invalid_element_addr_panic(zone, addr);
1186 }
1187
1188 __attribute__((always_inline))
1189 static struct zone_page_metadata *
zone_element_resolve(zone_t zone,vm_offset_t addr,vm_offset_t * idx)1190 zone_element_resolve(
1191 zone_t zone,
1192 vm_offset_t addr,
1193 vm_offset_t *idx)
1194 {
1195 struct zone_page_metadata *meta;
1196 vm_offset_t offs, eidx;
1197
1198 meta = zone_meta_from_addr(addr);
1199 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1200 zone_invalid_element_panic(zone, addr);
1201 }
1202
1203 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1204 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1205 offs += ptoa(meta->zm_page_index);
1206 meta -= meta->zm_page_index;
1207 }
1208
1209 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1210 if (eidx * zone_elem_outer_size(zone) != offs) {
1211 zone_invalid_element_panic(zone, addr);
1212 }
1213
1214 *idx = eidx;
1215 return meta;
1216 }
1217
1218 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1219 void *
zone_element_pgz_oob_adjust(void * ptr,vm_size_t req_size,vm_size_t elem_size)1220 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1221 {
1222 vm_offset_t addr = (vm_offset_t)ptr;
1223 vm_offset_t end = addr + elem_size;
1224 vm_offset_t offs;
1225
1226 /*
1227 * 0-sized allocations in a KALLOC_MINSIZE bucket
1228 * would be offset to the next allocation which is incorrect.
1229 */
1230 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1231
1232 /*
1233 * Given how chunks work, for a zone with PGZ guards on,
1234 * there's a single element which ends precisely
1235 * at the page boundary: the last one.
1236 */
1237 if (req_size == elem_size ||
1238 (end & PAGE_MASK) ||
1239 !zone_meta_from_addr(addr)->zm_guarded) {
1240 return ptr;
1241 }
1242
1243 offs = elem_size - req_size;
1244 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1245
1246 return (char *)addr + offs;
1247 }
1248 #endif /* !ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1249
1250 __abortlike
1251 static void
zone_element_bounds_check_panic(vm_address_t addr,vm_size_t len)1252 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1253 {
1254 struct zone_page_metadata *meta;
1255 vm_offset_t offs, size, page;
1256 zone_t zone;
1257
1258 page = trunc_page(addr);
1259 meta = zone_meta_from_addr(addr);
1260 zone = &zone_array[meta->zm_index];
1261
1262 if (zone->z_percpu) {
1263 panic("zone bound checks: address %p is a per-cpu allocation",
1264 (void *)addr);
1265 }
1266
1267 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1268 page -= ptoa(meta->zm_page_index);
1269 meta -= meta->zm_page_index;
1270 }
1271
1272 size = zone_elem_outer_size(zone);
1273 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1274 zone->z_quo_magic, size);
1275 panic("zone bound checks: buffer %p of length %zd overflows "
1276 "object %p of size %zd in zone %p[%s%s]",
1277 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1278 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1279 }
1280
1281 void
zone_element_bounds_check(vm_address_t addr,vm_size_t len)1282 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1283 {
1284 struct zone_page_metadata *meta;
1285 vm_offset_t offs, size;
1286 zone_t zone;
1287
1288 if (!from_zone_map(addr, 1)) {
1289 return;
1290 }
1291
1292 #if CONFIG_PROB_GZALLOC
1293 if (__improbable(pgz_owned(addr))) {
1294 meta = zone_meta_from_addr(addr);
1295 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1296 }
1297 #endif /* CONFIG_PROB_GZALLOC */
1298 meta = zone_meta_from_addr(addr);
1299 zone = zone_by_id(meta->zm_index);
1300
1301 if (zone->z_percpu) {
1302 zone_element_bounds_check_panic(addr, len);
1303 }
1304
1305 if (zone->z_permanent) {
1306 /* We don't know bounds for those */
1307 return;
1308 }
1309
1310 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1311 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1312 offs += ptoa(meta->zm_page_index);
1313 }
1314 size = zone_elem_outer_size(zone);
1315 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1316 if (len + zone_elem_redzone(zone) > size - offs) {
1317 zone_element_bounds_check_panic(addr, len);
1318 }
1319 }
1320
1321 /*
1322 * Routine to get the size of a zone allocated address.
1323 * If the address doesnt belong to the zone maps, returns 0.
1324 */
1325 vm_size_t
zone_element_size(void * elem,zone_t * z,bool clear_oob,vm_offset_t * oob_offs)1326 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1327 {
1328 vm_address_t addr = (vm_address_t)elem;
1329 struct zone_page_metadata *meta;
1330 vm_size_t esize, offs, end;
1331 zone_t zone;
1332
1333 if (from_zone_map(addr, sizeof(void *))) {
1334 meta = zone_meta_from_addr(addr);
1335 zone = zone_by_id(meta->zm_index);
1336 esize = zone_elem_inner_size(zone);
1337 end = vm_memtag_canonicalize_address(addr + esize);
1338 offs = 0;
1339
1340 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1341 /*
1342 * If the chunk uses guards, and that (addr + esize)
1343 * either crosses a page boundary or is at the boundary,
1344 * we need to look harder.
1345 */
1346 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1347 /*
1348 * Because in the vast majority of cases the element
1349 * size is sub-page, and that meta[1] must be faulted,
1350 * we can quickly peek at whether it's a guard.
1351 *
1352 * For elements larger than a page, finding the guard
1353 * page requires a little more effort.
1354 */
1355 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1356 offs = meta[1].zm_oob_offs;
1357 if (clear_oob) {
1358 meta[1].zm_oob_offs = 0;
1359 }
1360 } else if (esize > PAGE_SIZE) {
1361 struct zone_page_metadata *gmeta;
1362
1363 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1364 gmeta = meta + meta->zm_subchunk_len;
1365 } else {
1366 gmeta = meta + zone->z_chunk_pages;
1367 }
1368 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1369
1370 if (end >= zone_meta_to_addr(gmeta)) {
1371 offs = gmeta->zm_oob_offs;
1372 if (clear_oob) {
1373 gmeta->zm_oob_offs = 0;
1374 }
1375 }
1376 }
1377 }
1378 #else
1379 #pragma unused(end, clear_oob)
1380 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1381
1382 if (oob_offs) {
1383 *oob_offs = offs;
1384 }
1385 if (z) {
1386 *z = zone;
1387 }
1388 return esize;
1389 }
1390
1391 if (oob_offs) {
1392 *oob_offs = 0;
1393 }
1394
1395 return 0;
1396 }
1397
1398 zone_id_t
zone_id_for_element(void * addr,vm_size_t esize)1399 zone_id_for_element(void *addr, vm_size_t esize)
1400 {
1401 zone_id_t zid = ZONE_ID_INVALID;
1402 if (from_zone_map(addr, esize)) {
1403 zid = zone_index_from_ptr(addr);
1404 __builtin_assume(zid != ZONE_ID_INVALID);
1405 }
1406 return zid;
1407 }
1408
1409 /* This function just formats the reason for the panics by redoing the checks */
1410 __abortlike
1411 static void
zone_require_panic(zone_t zone,void * addr)1412 zone_require_panic(zone_t zone, void *addr)
1413 {
1414 uint32_t zindex;
1415 zone_t other;
1416
1417 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1418 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1419 }
1420
1421 zindex = zone_index_from_ptr(addr);
1422 other = &zone_array[zindex];
1423 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1424 panic("zone_require failed: invalid zone index %d "
1425 "(addr: %p, expected: %s%s)", zindex,
1426 addr, zone_heap_name(zone), zone->z_name);
1427 } else {
1428 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1429 "(addr: %p, expected: %s%s)",
1430 zindex, zone_heap_name(other), other->z_name,
1431 addr, zone_heap_name(zone), zone->z_name);
1432 }
1433 }
1434
1435 __abortlike
1436 static void
zone_id_require_panic(zone_id_t zid,void * addr)1437 zone_id_require_panic(zone_id_t zid, void *addr)
1438 {
1439 zone_require_panic(&zone_array[zid], addr);
1440 }
1441
1442 /*
1443 * Routines to panic if a pointer is not mapped to an expected zone.
1444 * This can be used as a means of pinning an object to the zone it is expected
1445 * to be a part of. Causes a panic if the address does not belong to any
1446 * specified zone, does not belong to any zone, has been freed and therefore
1447 * unmapped from the zone, or the pointer contains an uninitialized value that
1448 * does not belong to any zone.
1449 */
1450 void
zone_require(zone_t zone,void * addr)1451 zone_require(zone_t zone, void *addr)
1452 {
1453 vm_size_t esize = zone_elem_inner_size(zone);
1454
1455 if (from_zone_map(addr, esize) &&
1456 zone_has_index(zone, zone_index_from_ptr(addr))) {
1457 return;
1458 }
1459 zone_require_panic(zone, addr);
1460 }
1461
1462 void
zone_id_require(zone_id_t zid,vm_size_t esize,void * addr)1463 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1464 {
1465 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1466 return;
1467 }
1468 zone_id_require_panic(zid, addr);
1469 }
1470
1471 bool
zone_owns(zone_t zone,void * addr)1472 zone_owns(zone_t zone, void *addr)
1473 {
1474 vm_size_t esize = zone_elem_inner_size(zone);
1475
1476 if (from_zone_map(addr, esize)) {
1477 return zone_has_index(zone, zone_index_from_ptr(addr));
1478 }
1479 return false;
1480 }
1481
1482 static inline struct mach_vm_range
zone_kmem_suballoc(mach_vm_offset_t addr,vm_size_t size,int flags,vm_tag_t tag,vm_map_t * new_map)1483 zone_kmem_suballoc(
1484 mach_vm_offset_t addr,
1485 vm_size_t size,
1486 int flags,
1487 vm_tag_t tag,
1488 vm_map_t *new_map)
1489 {
1490 struct mach_vm_range r;
1491
1492 *new_map = kmem_suballoc(kernel_map, &addr, size,
1493 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1494 flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;
1495
1496 r.min_address = addr;
1497 r.max_address = addr + size;
1498 return r;
1499 }
1500
1501 #endif /* !ZALLOC_TEST */
1502 #pragma mark Zone bits allocator
1503
1504 /*!
1505 * @defgroup Zone Bitmap allocator
1506 * @{
1507 *
1508 * @brief
1509 * Functions implementing the zone bitmap allocator
1510 *
1511 * @discussion
1512 * The zone allocator maintains which elements are allocated or free in bitmaps.
1513 *
1514 * When the number of elements per page is smaller than 32, it is stored inline
1515 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1516 * and @c zm_bitmap used for storage).
1517 *
1518 * When the number of elements is larger, then a bitmap is allocated from
1519 * a buddy allocator (impelemented under the @c zba_* namespace). Pointers
1520 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1521 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1522 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1523 * cannot be larger than 1024 bytes (8192 bits).
1524 *
1525 * This buddy allocator can actually accomodate allocations as large
1526 * as 8k on 16k systems and 2k on 4k systems.
1527 *
1528 * Note: @c zba_* functions are implementation details not meant to be used
1529 * outside of the allocation of the allocator itself. Interfaces to the rest of
1530 * the zone allocator are documented and not @c zba_* prefixed.
1531 */
1532
1533 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1534 #define ZBA_GRANULE sizeof(uint64_t)
1535 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1536 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1537 #define ZBA_MAX_ALLOC_ORDER 7
1538 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1539 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1540 #define ZBA_PTR_MASK 0x0fffffff
1541 #define ZBA_ORDER_SHIFT 29
1542 #define ZBA_HAS_EXTRA_BIT 0x10000000
1543
1544 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1545 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
1546
1547 struct zone_bits_chain {
1548 uint32_t zbc_next;
1549 uint32_t zbc_prev;
1550 } __attribute__((aligned(ZBA_GRANULE)));
1551
1552 struct zone_bits_head {
1553 uint32_t zbh_next;
1554 uint32_t zbh_unused;
1555 } __attribute__((aligned(ZBA_GRANULE)));
1556
1557 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1558 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1559
1560 struct zone_bits_allocator_meta {
1561 uint32_t zbam_left;
1562 uint32_t zbam_right;
1563 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1564 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1565 };
1566
1567 struct zone_bits_allocator_header {
1568 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1569 };
1570
1571 #if ZALLOC_TEST
1572 static struct zalloc_bits_allocator_test_setup {
1573 vm_offset_t zbats_base;
1574 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1575 } zba_test_info;
1576
1577 static struct zone_bits_allocator_header *
zba_base_header(void)1578 zba_base_header(void)
1579 {
1580 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1581 }
1582
1583 static kern_return_t
zba_populate(uint32_t n,bool with_extra __unused)1584 zba_populate(uint32_t n, bool with_extra __unused)
1585 {
1586 vm_address_t base = zba_test_info.zbats_base;
1587 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1588
1589 return KERN_SUCCESS;
1590 }
1591 #else
1592 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1593 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1594
1595 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1596 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1597
1598 static struct zone_bits_allocator_header *
zba_base_header(void)1599 zba_base_header(void)
1600 {
1601 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1602 }
1603
1604 static void
zba_lock(void)1605 zba_lock(void)
1606 {
1607 lck_mtx_lock(&zba_mtx);
1608 }
1609
1610 static void
zba_unlock(void)1611 zba_unlock(void)
1612 {
1613 lck_mtx_unlock(&zba_mtx);
1614 }
1615
1616 __abortlike
1617 static void
zba_memory_exhausted(void)1618 zba_memory_exhausted(void)
1619 {
1620 uint64_t zsize = 0;
1621 zone_t z = zone_find_largest(&zsize);
1622 panic("zba_populate: out of bitmap space, "
1623 "likely due to memory leak in zone [%s%s] "
1624 "(%u%c, %d elements allocated)",
1625 zone_heap_name(z), zone_name(z),
1626 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1627 zone_count_allocated(z));
1628 }
1629
1630
1631 static kern_return_t
zba_populate(uint32_t n,bool with_extra)1632 zba_populate(uint32_t n, bool with_extra)
1633 {
1634 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1635 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1636 vm_address_t bits_addr;
1637 vm_address_t xtra_addr;
1638 kern_return_t kr;
1639
1640 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1641 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1642
1643 kr = kernel_memory_populate(bits_addr, bits_size,
1644 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1645 VM_KERN_MEMORY_OSFMK);
1646 if (kr != KERN_SUCCESS) {
1647 return kr;
1648 }
1649
1650
1651 if (with_extra) {
1652 kr = kernel_memory_populate(xtra_addr, xtra_size,
1653 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1654 VM_KERN_MEMORY_OSFMK);
1655 if (kr != KERN_SUCCESS) {
1656 kernel_memory_depopulate(bits_addr, bits_size,
1657 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1658 VM_KERN_MEMORY_OSFMK);
1659 }
1660 }
1661
1662 return kr;
1663 }
1664 #endif
1665
1666 __pure2
1667 static struct zone_bits_allocator_meta *
1668 zba_meta(void)
1669 {
1670 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1671 }
1672
1673 __pure2
1674 static uint64_t *
1675 zba_slot_base(void)
1676 {
1677 return (uint64_t *)zba_base_header();
1678 }
1679
1680 __pure2
1681 static struct zone_bits_head *
1682 zba_head(uint32_t order, bool with_extra)
1683 {
1684 if (with_extra) {
1685 return &zba_meta()->zbam_lists_with_extra[order];
1686 } else {
1687 return &zba_meta()->zbam_lists[order];
1688 }
1689 }
1690
1691 __pure2
1692 static uint32_t
1693 zba_head_index(struct zone_bits_head *hd)
1694 {
1695 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1696 }
1697
1698 __pure2
1699 static struct zone_bits_chain *
1700 zba_chain_for_index(uint32_t index)
1701 {
1702 return (struct zone_bits_chain *)(zba_slot_base() + index);
1703 }
1704
1705 __pure2
1706 static uint32_t
1707 zba_chain_to_index(const struct zone_bits_chain *zbc)
1708 {
1709 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1710 }
1711
1712 __abortlike
1713 static void
1714 zba_head_corruption_panic(uint32_t order, bool with_extra)
1715 {
1716 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1717 order, with_extra, zba_head(order, with_extra));
1718 }
1719
1720 __abortlike
1721 static void
1722 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1723 {
1724 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1725 }
1726
1727 static void
1728 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1729 {
1730 struct zone_bits_head *hd = zba_head(order, with_extra);
1731 uint32_t hd_index = zba_head_index(hd);
1732 uint32_t index = zba_chain_to_index(zbc);
1733 struct zone_bits_chain *next;
1734
1735 if (hd->zbh_next) {
1736 next = zba_chain_for_index(hd->zbh_next);
1737 if (next->zbc_prev != hd_index) {
1738 zba_head_corruption_panic(order, with_extra);
1739 }
1740 next->zbc_prev = index;
1741 }
1742 zbc->zbc_next = hd->zbh_next;
1743 zbc->zbc_prev = hd_index;
1744 hd->zbh_next = index;
1745 }
1746
1747 static void
1748 zba_remove_block(struct zone_bits_chain *zbc)
1749 {
1750 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1751 uint32_t index = zba_chain_to_index(zbc);
1752
1753 if (prev->zbc_next != index) {
1754 zba_chain_corruption_panic(prev, zbc);
1755 }
1756 if ((prev->zbc_next = zbc->zbc_next)) {
1757 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1758 if (next->zbc_prev != index) {
1759 zba_chain_corruption_panic(zbc, next);
1760 }
1761 next->zbc_prev = zbc->zbc_prev;
1762 }
1763 }
1764
1765 static vm_address_t
1766 zba_try_pop_block(uint32_t order, bool with_extra)
1767 {
1768 struct zone_bits_head *hd = zba_head(order, with_extra);
1769 struct zone_bits_chain *zbc;
1770
1771 if (hd->zbh_next == 0) {
1772 return 0;
1773 }
1774
1775 zbc = zba_chain_for_index(hd->zbh_next);
1776 zba_remove_block(zbc);
1777 return (vm_address_t)zbc;
1778 }
1779
1780 static struct zone_bits_allocator_header *
1781 zba_header(vm_offset_t addr)
1782 {
1783 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1784 return (struct zone_bits_allocator_header *)addr;
1785 }
1786
1787 static size_t
1788 zba_node_parent(size_t node)
1789 {
1790 return (node - 1) / 2;
1791 }
1792
1793 static size_t
1794 zba_node_left_child(size_t node)
1795 {
1796 return node * 2 + 1;
1797 }
1798
1799 static size_t
1800 zba_node_buddy(size_t node)
1801 {
1802 return ((node - 1) ^ 1) + 1;
1803 }
1804
1805 static size_t
1806 zba_node(vm_offset_t addr, uint32_t order)
1807 {
1808 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1809 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1810 }
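
/*
 * Worked example (illustrative only, assuming ZBA_MAX_ORDER == 10 as on a
 * 16K-page configuration): the buddy tree is stored heap-style, with the
 * children of node n at 2n+1 and 2n+2 (see zba_node_left_child() above).
 *
 *	zba_node(addr, ZBA_MAX_ORDER) for the first half of a chunk:
 *	    (offs >> 10) == 0, base == (1 << 1) - 1 == 1      -> node 1
 *	zba_node(addr, 0) for the very first granule:
 *	    (offs >> 0) == 0, base == (1 << 11) - 1 == 2047   -> node 2047
 *	zba_node_parent(2047) == 1023, zba_node_buddy(2047) == 2048
 */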
1811
1812 static struct zone_bits_chain *
1813 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1814 {
1815 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1816 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1817 }
1818
1819 static void
1820 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1821 {
1822 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1823 }
1824
1825 static bool
1826 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1827 {
1828 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1829 }
1830
1831 static void
1832 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1833 {
1834 struct zone_bits_allocator_header *zbah = zba_header(addr);
1835 struct zone_bits_chain *zbc;
1836 size_t node = zba_node(addr, order);
1837
1838 while (node) {
1839 size_t parent = zba_node_parent(node);
1840
1841 zba_node_flip_split(zbah, parent);
1842 if (zba_node_is_split(zbah, parent)) {
1843 break;
1844 }
1845
1846 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1847 zba_remove_block(zbc);
1848 order++;
1849 node = parent;
1850 }
1851
1852 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1853 }
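
/*
 * Descriptive sketch of the merge loop above: each parent's bit encodes
 * "exactly one of my two children is free" (an XOR). Freeing a block flips
 * its parent's bit; if the bit ends up set, the buddy is still allocated and
 * coalescing stops there. If it ends up clear, the buddy is free too, so it
 * is removed from its freelist and the merged block bubbles up one order,
 * where the same test repeats.
 */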
1854
1855 static vm_size_t
1856 zba_chunk_header_size(uint32_t n)
1857 {
1858 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1859 if (n == 0) {
1860 hdr_size += sizeof(struct zone_bits_allocator_meta);
1861 }
1862 return hdr_size;
1863 }
1864
1865 static void
1866 zba_init_chunk(uint32_t n, bool with_extra)
1867 {
1868 vm_size_t hdr_size = zba_chunk_header_size(n);
1869 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1870 struct zone_bits_allocator_header *zbah = zba_header(page);
1871 vm_size_t size = ZBA_CHUNK_SIZE;
1872 size_t node;
1873
1874 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1875 if (size < hdr_size + (ZBA_GRANULE << o)) {
1876 continue;
1877 }
1878 size -= ZBA_GRANULE << o;
1879 node = zba_node(page + size, o);
1880 zba_node_flip_split(zbah, zba_node_parent(node));
1881 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1882 }
1883 }
1884
1885 __attribute__((noinline))
1886 static void
1887 zba_grow(bool with_extra)
1888 {
1889 struct zone_bits_allocator_meta *meta = zba_meta();
1890 kern_return_t kr = KERN_SUCCESS;
1891 uint32_t chunk;
1892
1893 #if !ZALLOC_TEST
1894 if (meta->zbam_left >= meta->zbam_right) {
1895 zba_memory_exhausted();
1896 }
1897 #endif
1898
1899 if (with_extra) {
1900 chunk = meta->zbam_right - 1;
1901 } else {
1902 chunk = meta->zbam_left;
1903 }
1904
1905 kr = zba_populate(chunk, with_extra);
1906 if (kr == KERN_SUCCESS) {
1907 if (with_extra) {
1908 meta->zbam_right -= 1;
1909 } else {
1910 meta->zbam_left += 1;
1911 }
1912
1913 zba_init_chunk(chunk, with_extra);
1914 #if !ZALLOC_TEST
1915 } else {
1916 /*
1917 * zba_populate() has to be allowed to fail to populate:
1918 * since we are under a global lock, the VM_PAGE_WAIT()
1919 * must be done outside of that lock.
1920 */
1921 assert(kr == KERN_RESOURCE_SHORTAGE);
1922 zba_unlock();
1923 VM_PAGE_WAIT();
1924 zba_lock();
1925 #endif
1926 }
1927 }
1928
1929 static vm_offset_t
1930 zba_alloc(uint32_t order, bool with_extra)
1931 {
1932 struct zone_bits_allocator_header *zbah;
1933 uint32_t cur = order;
1934 vm_address_t addr;
1935 size_t node;
1936
1937 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1938 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1939 zba_grow(with_extra);
1940 cur = order;
1941 }
1942 }
1943
1944 zbah = zba_header(addr);
1945 node = zba_node(addr, cur);
1946 zba_node_flip_split(zbah, zba_node_parent(node));
1947 while (cur > order) {
1948 cur--;
1949 zba_node_flip_split(zbah, node);
1950 node = zba_node_left_child(node);
1951 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1952 cur, with_extra);
1953 }
1954
1955 return addr;
1956 }
1957
1958 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
1959 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
1960 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
1961 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
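
/*
 * Example values (illustrative only), for n == 5 and a uint32_t map:
 *
 *	zba_map_index(uint32_t, 5)   == 0
 *	zba_map_bit(uint32_t, 5)     == 0x00000020
 *	zba_map_mask_lt(uint32_t, 5) == 0x0000001f   (bits strictly below 5)
 *	zba_map_mask_ge(uint32_t, 5) == 0xffffffe0   (bits 5 and above)
 */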
1962
1963 #if !ZALLOC_TEST
1964 #if VM_TAG_SIZECLASSES
1965
1966 static void *
1967 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1968 {
1969 vm_offset_t base = zone_info.zi_xtra_range.min_address;
1970 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1971
1972 return (void *)(base + ((offs + idx) << zba_xtra_shift));
1973 }
1974
1975 #endif /* VM_TAG_SIZECLASSES */
1976
1977 static uint32_t
1978 zba_bits_ref_order(uint32_t bref)
1979 {
1980 return bref >> ZBA_ORDER_SHIFT;
1981 }
1982
1983 static bitmap_t *
1984 zba_bits_ref_ptr(uint32_t bref)
1985 {
1986 return zba_slot_base() + (bref & ZBA_PTR_MASK);
1987 }
1988
1989 static vm_offset_t
1990 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1991 zalloc_flags_t flags, vm_offset_t eidx)
1992 {
1993 size_t i = eidx / 32;
1994 uint32_t map;
1995
1996 if (eidx % 32) {
1997 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
1998 if (map) {
1999 eidx = __builtin_ctz(map);
2000 meta[i].zm_bitmap ^= 1u << eidx;
2001 return i * 32 + eidx;
2002 }
2003 i++;
2004 }
2005
2006 uint32_t chunk_len = meta->zm_chunk_len;
2007 if (flags & Z_PCPU) {
2008 chunk_len = zpercpu_count();
2009 }
2010 for (int j = 0; j < chunk_len; j++, i++) {
2011 if (i >= chunk_len) {
2012 i = 0;
2013 }
2014 if (__probable(map = meta[i].zm_bitmap)) {
2015 meta[i].zm_bitmap &= map - 1;
2016 return i * 32 + __builtin_ctz(map);
2017 }
2018 }
2019
2020 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2021 }
2022
2023 static vm_offset_t
2024 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2025 vm_offset_t eidx)
2026 {
2027 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2028 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2029 size_t i = eidx / 64;
2030 uint64_t map;
2031
2032 if (eidx % 64) {
2033 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2034 if (map) {
2035 eidx = __builtin_ctzll(map);
2036 bits[i] ^= 1ull << eidx;
2037 return i * 64 + eidx;
2038 }
2039 i++;
2040 }
2041
2042 for (int j = 0; j < bits_size; i++, j++) {
2043 if (i >= bits_size) {
2044 i = 0;
2045 }
2046 if (__probable(map = bits[i])) {
2047 bits[i] &= map - 1;
2048 return i * 64 + __builtin_ctzll(map);
2049 }
2050 }
2051
2052 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2053 }
2054
2055 /*!
2056 * @function zone_meta_find_and_clear_bit
2057 *
2058 * @brief
2059 * The core of the bitmap allocator: find a bit set in the bitmaps.
2060 *
2061 * @discussion
2062 * This method round-robins through the available free slots,
2063 * with a per-core memory of the last element index allocated.
2064 *
2065 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2066 * double-free bugs way too practical.
2067 *
2068 * @param zone The zone we're allocating from.
2069 * @param meta The main metadata for the chunk being allocated from.
2070 * @param flags the alloc flags (for @c Z_PCPU).
2071 */
2072 static vm_offset_t
2073 zone_meta_find_and_clear_bit(
2074 zone_t zone,
2075 zone_stats_t zs,
2076 struct zone_page_metadata *meta,
2077 zalloc_flags_t flags)
2078 {
2079 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2080
2081 if (meta->zm_inline_bitmap) {
2082 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2083 } else {
2084 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2085 }
2086 zs->zs_alloc_rr = (uint16_t)eidx;
2087 return eidx;
2088 }
2089
2090 /*!
2091 * @function zone_meta_bits_init_inline
2092 *
2093 * @brief
2094 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2095 *
2096 * @param meta The main metadata for the initialized chunk.
2097 * @param count The number of elements the chunk can hold
2098 * (which might be partial for partially populated chunks).
2099 */
2100 static void
2101 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2102 {
2103 /*
2104 * We're called with the metadata zm_bitmap fields already zeroed out.
2105 */
2106 for (size_t i = 0; i < count / 32; i++) {
2107 meta[i].zm_bitmap = ~0u;
2108 }
2109 if (count % 32) {
2110 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2111 }
2112 }
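
/*
 * Worked example (illustrative, hypothetical count): for count == 40,
 * the loop sets meta[0].zm_bitmap = 0xffffffff (elements 0..31 free) and
 * the tail sets meta[1].zm_bitmap = zba_map_mask_lt(uint32_t, 40) = 0xff
 * (elements 32..39 free), leaving all other bits clear.
 */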
2113
2114 /*!
2115 * @function zone_meta_bits_alloc_init
2116 *
2117 * @brief
2118 * Allocates a zm_bitmap field for a newly assigned chunk.
2119 *
2120 * @param count The number of elements the chunk can hold
2121 * (which might be partial for partially populated chunks).
2122 * @param nbits The maximum number of bits that will be used.
2123 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2124 */
2125 static uint32_t
2126 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2127 {
2128 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2129 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2130
2131 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2132 uint64_t *bits;
2133 size_t i = 0;
2134
2135 assert(order <= ZBA_MAX_ALLOC_ORDER);
2136 assert(count <= ZBA_GRANULE_BITS << order);
2137
2138 zba_lock();
2139 bits = (uint64_t *)zba_alloc(order, with_extra);
2140 zba_unlock();
2141
2142 while (i < count / 64) {
2143 bits[i++] = ~0ull;
2144 }
2145 if (count % 64) {
2146 bits[i++] = zba_map_mask_lt(uint64_t, count);
2147 }
2148 while (i < 1u << order) {
2149 bits[i++] = 0;
2150 }
2151
2152 return (uint32_t)(bits - zba_slot_base()) +
2153 (order << ZBA_ORDER_SHIFT) +
2154 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2155 }
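
/*
 * Sketch of the returned bitmap reference layout, as decoded by
 * zba_bits_ref_ptr() and zba_bits_ref_order() above (shown only as an
 * illustration of the constants involved):
 *
 *	bits  0..27  index of the bitmap's first granule relative to
 *	             zba_slot_base()                (ZBA_PTR_MASK)
 *	bit   28     set when "VM tracking" space was reserved alongside
 *	             the bitmap                     (ZBA_HAS_EXTRA_BIT)
 *	bits 29..31  buddy order of the allocation  (ZBA_ORDER_SHIFT)
 */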
2156
2157 /*!
2158 * @function zone_meta_bits_merge
2159 *
2160 * @brief
2161 * Adds elements <code>[start, end)</code> to a chunk being extended.
2162 *
2163 * @param meta The main metadata for the extended chunk.
2164 * @param start The index of the first element to add to the chunk.
2165 * @param end One past the index of the last element to add (exclusive).
2166 */
2167 static void
2168 zone_meta_bits_merge(struct zone_page_metadata *meta,
2169 uint32_t start, uint32_t end)
2170 {
2171 if (meta->zm_inline_bitmap) {
2172 while (start < end) {
2173 size_t s_i = start / 32;
2174 size_t s_e = end / 32;
2175
2176 if (s_i == s_e) {
2177 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2178 zba_map_mask_ge(uint32_t, start);
2179 break;
2180 }
2181
2182 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2183 start += 32 - (start % 32);
2184 }
2185 } else {
2186 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2187
2188 while (start < end) {
2189 size_t s_i = start / 64;
2190 size_t s_e = end / 64;
2191
2192 if (s_i == s_e) {
2193 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2194 zba_map_mask_ge(uint64_t, start);
2195 break;
2196 }
2197 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2198 start += 64 - (start % 64);
2199 }
2200 }
2201 }
2202
2203 /*!
2204 * @function zone_bits_free
2205 *
2206 * @brief
2207 * Frees a bitmap to the zone bitmap allocator.
2208 *
2209 * @param bref
2210 * A bitmap reference set by @c zone_meta_bits_alloc_init() in a @c zm_bitmap field.
2211 */
2212 static void
2213 zone_bits_free(uint32_t bref)
2214 {
2215 zba_lock();
2216 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2217 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2218 zba_unlock();
2219 }
2220
2221 /*!
2222 * @function zone_meta_is_free
2223 *
2224 * @brief
2225 * Returns whether a given element appears free.
2226 */
2227 static bool
2228 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2229 {
2230 if (meta->zm_inline_bitmap) {
2231 uint32_t bit = zba_map_bit(uint32_t, eidx);
2232 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2233 } else {
2234 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2235 uint64_t bit = zba_map_bit(uint64_t, eidx);
2236 return bits[zba_map_index(uint64_t, eidx)] & bit;
2237 }
2238 }
2239
2240 /*!
2241 * @function zone_meta_mark_free
2242 *
2243 * @brief
2244 * Marks an element as free and returns whether it was marked as used.
2245 */
2246 static bool
2247 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2248 {
2249 if (meta->zm_inline_bitmap) {
2250 uint32_t bit = zba_map_bit(uint32_t, eidx);
2251 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2252 return false;
2253 }
2254 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2255 } else {
2256 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2257 uint64_t bit = zba_map_bit(uint64_t, eidx);
2258 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2259 return false;
2260 }
2261 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2262 }
2263 return true;
2264 }
2265
2266 #if VM_TAG_SIZECLASSES
2267
2268 __startup_func
2269 void
2270 __zone_site_register(vm_allocation_site_t *site)
2271 {
2272 if (zone_tagging_on) {
2273 vm_tag_alloc(site);
2274 }
2275 }
2276
2277 uint16_t
2278 zone_index_from_tag_index(uint32_t sizeclass_idx)
2279 {
2280 return zone_tags_sizeclasses[sizeclass_idx];
2281 }
2282
2283 #endif /* VM_TAG_SIZECLASSES */
2284 #endif /* !ZALLOC_TEST */
2285 /*! @} */
2286 #pragma mark zalloc helpers
2287 #if !ZALLOC_TEST
2288
2289 static inline void *
2290 zstack_tbi_fix(vm_offset_t elem)
2291 {
2292 #if CONFIG_KERNEL_TAGGING
2293 elem = vm_memtag_fixup_ptr(elem);
2294 #endif /* CONFIG_KERNEL_TAGGING */
2295 return (void *)elem;
2296 }
2297
2298 static inline vm_offset_t
2299 zstack_tbi_fill(void *addr)
2300 {
2301 vm_offset_t elem = (vm_offset_t)addr;
2302
2303 return vm_memtag_canonicalize_address(elem);
2304 }
2305
2306 __attribute__((always_inline))
2307 static inline void
2308 zstack_push_no_delta(zstack_t *stack, void *addr)
2309 {
2310 vm_offset_t elem = zstack_tbi_fill(addr);
2311
2312 *(vm_offset_t *)addr = stack->z_head - elem;
2313 stack->z_head = elem;
2314 }
2315
2316 __attribute__((always_inline))
2317 void
2318 zstack_push(zstack_t *stack, void *addr)
2319 {
2320 zstack_push_no_delta(stack, addr);
2321 stack->z_count++;
2322 }
2323
2324 __attribute__((always_inline))
2325 static inline void *
2326 zstack_pop_no_delta(zstack_t *stack)
2327 {
2328 void *addr = zstack_tbi_fix(stack->z_head);
2329
2330 stack->z_head += *(vm_offset_t *)addr;
2331 *(vm_offset_t *)addr = 0;
2332
2333 return addr;
2334 }
2335
2336 __attribute__((always_inline))
2337 void *
2338 zstack_pop(zstack_t *stack)
2339 {
2340 stack->z_count--;
2341 return zstack_pop_no_delta(stack);
2342 }
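
/*
 * Usage sketch (illustrative only; `a' and `b' stand for any two elements
 * whose first word may be scribbled on while they sit on the stack):
 *
 *	zstack_t stack = { 0 };
 *
 *	zstack_push(&stack, a);
 *	zstack_push(&stack, b);          // stack.z_count == 2
 *	void *e1 = zstack_pop(&stack);   // returns b
 *	void *e2 = zstack_pop(&stack);   // returns a
 *
 * Elements are chained through their first word, which stores the delta to
 * the next element rather than a raw pointer.
 */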
2343
2344 static inline void
2345 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2346 {
2347 uint32_t ticket;
2348
2349 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2350 &ticket, &zone_locks_grp))) {
2351 return;
2352 }
2353
2354 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2355
2356 /*
2357 * If zone caching has been disabled due to memory pressure,
2358 * then recording contention is not useful, give the system
2359 * time to recover.
2360 */
2361 if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2362 zone->z_recirc_cont_cur++;
2363 }
2364 }
2365
2366 static inline void
2367 zone_recirc_lock_nopreempt(zone_t zone)
2368 {
2369 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2370 }
2371
2372 static inline void
2373 zone_recirc_unlock_nopreempt(zone_t zone)
2374 {
2375 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2376 }
2377
2378 static inline void
2379 zone_lock_nopreempt_check_contention(zone_t zone)
2380 {
2381 uint32_t ticket;
2382 #if KASAN_FAKESTACK
2383 spl_t s = 0;
2384 if (zone->z_kasan_fakestacks) {
2385 s = splsched();
2386 }
2387 #endif /* KASAN_FAKESTACK */
2388
2389 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2390 &zone_locks_grp))) {
2391 #if KASAN_FAKESTACK
2392 zone->z_kasan_spl = s;
2393 #endif /* KASAN_FAKESTACK */
2394 return;
2395 }
2396
2397 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2398 #if KASAN_FAKESTACK
2399 zone->z_kasan_spl = s;
2400 #endif /* KASAN_FAKESTACK */
2401
2402 /*
2403 * If zone caching has been disabled due to memory pressure,
2404 * then recording contention is not useful, give the system
2405 * time to recover.
2406 */
2407 if (__probable(!zone_caching_disabled &&
2408 !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2409 zone->z_recirc_cont_cur++;
2410 }
2411 }
2412
2413 static inline void
2414 zone_lock_nopreempt(zone_t zone)
2415 {
2416 #if KASAN_FAKESTACK
2417 spl_t s = 0;
2418 if (zone->z_kasan_fakestacks) {
2419 s = splsched();
2420 }
2421 #endif /* KASAN_FAKESTACK */
2422 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2423 #if KASAN_FAKESTACK
2424 zone->z_kasan_spl = s;
2425 #endif /* KASAN_FAKESTACK */
2426 }
2427
2428 static inline void
2429 zone_unlock_nopreempt(zone_t zone)
2430 {
2431 #if KASAN_FAKESTACK
2432 spl_t s = zone->z_kasan_spl;
2433 zone->z_kasan_spl = 0;
2434 #endif /* KASAN_FAKESTACK */
2435 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2436 #if KASAN_FAKESTACK
2437 if (zone->z_kasan_fakestacks) {
2438 splx(s);
2439 }
2440 #endif /* KASAN_FAKESTACK */
2441 }
2442
2443 static inline void
2444 zone_depot_lock_nopreempt(zone_cache_t zc)
2445 {
2446 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2447 }
2448
2449 static inline void
2450 zone_depot_unlock_nopreempt(zone_cache_t zc)
2451 {
2452 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2453 }
2454
2455 static inline void
2456 zone_depot_lock(zone_cache_t zc)
2457 {
2458 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2459 }
2460
2461 static inline void
2462 zone_depot_unlock(zone_cache_t zc)
2463 {
2464 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2465 }
2466
2467 zone_t
2468 zone_by_id(size_t zid)
2469 {
2470 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2471 }
2472
2473 static inline bool
2474 zone_supports_vm(zone_t z)
2475 {
2476 /*
2477 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2478 * to overcommit because they're used to reclaim memory
2479 * (VM support).
2480 */
2481 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2482 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2483 }
2484
2485 const char *
2486 zone_name(zone_t z)
2487 {
2488 return z->z_name;
2489 }
2490
2491 const char *
2492 zone_heap_name(zone_t z)
2493 {
2494 zone_security_flags_t zsflags = zone_security_config(z);
2495 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2496 return kalloc_heap_names[zsflags.z_kheap_id];
2497 }
2498 return "invalid";
2499 }
2500
2501 static uint32_t
2502 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2503 {
2504 vm_size_t elem_count, chunks;
2505
2506 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2507 zone_elem_outer_size(z);
2508 chunks = (max_elems + elem_count - 1) / elem_count;
2509
2510 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2511 }
2512
2513 static inline vm_size_t
2514 zone_submaps_approx_size(void)
2515 {
2516 vm_size_t size = 0;
2517
2518 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2519 if (zone_submaps[idx] != VM_MAP_NULL) {
2520 size += zone_submaps[idx]->size;
2521 }
2522 }
2523
2524 return size;
2525 }
2526
2527 static inline void
2528 zone_depot_init(struct zone_depot *zd)
2529 {
2530 *zd = (struct zone_depot){
2531 .zd_tail = &zd->zd_head,
2532 };
2533 }
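
/*
 * Layout note (descriptive sketch, inferred from the helpers below): a depot
 * is a singly linked list of magazines in which the zd_full full magazines
 * come first, followed by the zd_empty empty ones. zd_tail points at the
 * zm_next link of the last full magazine (or at zd_head when there is none),
 * so *zd_tail is the first empty magazine:
 *
 *	zd_head -> [full] -> [full] -> [empty] -> [empty] -> NULL
 *	                                ^ *zd_tail
 */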
2534
2535 static inline void
2536 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2537 {
2538 if (zd->zd_full++ == 0) {
2539 zd->zd_tail = &mag->zm_next;
2540 }
2541 mag->zm_next = zd->zd_head;
2542 zd->zd_head = mag;
2543 }
2544
2545 static inline void
2546 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2547 {
2548 zd->zd_full++;
2549 mag->zm_next = *zd->zd_tail;
2550 *zd->zd_tail = mag;
2551 zd->zd_tail = &mag->zm_next;
2552 }
2553
2554 static inline void
2555 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2556 {
2557 zd->zd_empty++;
2558 mag->zm_next = *zd->zd_tail;
2559 *zd->zd_tail = mag;
2560 }
2561
2562 static inline zone_magazine_t
2563 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2564 {
2565 zone_magazine_t mag = zd->zd_head;
2566
2567 assert(zd->zd_full);
2568
2569 zd->zd_full--;
2570 if (z && z->z_recirc_full_min > zd->zd_full) {
2571 z->z_recirc_full_min = zd->zd_full;
2572 }
2573 zd->zd_head = mag->zm_next;
2574 if (zd->zd_full == 0) {
2575 zd->zd_tail = &zd->zd_head;
2576 }
2577
2578 mag->zm_next = NULL;
2579 return mag;
2580 }
2581
2582 static inline zone_magazine_t
2583 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2584 {
2585 zone_magazine_t mag = *zd->zd_tail;
2586
2587 assert(zd->zd_empty);
2588
2589 zd->zd_empty--;
2590 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2591 z->z_recirc_empty_min = zd->zd_empty;
2592 }
2593 *zd->zd_tail = mag->zm_next;
2594
2595 mag->zm_next = NULL;
2596 return mag;
2597 }
2598
2599 static inline smr_seq_t
2600 zone_depot_move_full(
2601 struct zone_depot *dst,
2602 struct zone_depot *src,
2603 uint32_t n,
2604 zone_t z)
2605 {
2606 zone_magazine_t head, last;
2607
2608 assert(n);
2609 assert(src->zd_full >= n);
2610
2611 src->zd_full -= n;
2612 if (z && z->z_recirc_full_min > src->zd_full) {
2613 z->z_recirc_full_min = src->zd_full;
2614 }
2615 head = last = src->zd_head;
2616 for (uint32_t i = n; i-- > 1;) {
2617 last = last->zm_next;
2618 }
2619
2620 src->zd_head = last->zm_next;
2621 if (src->zd_full == 0) {
2622 src->zd_tail = &src->zd_head;
2623 }
2624
2625 if (z && zone_security_array[zone_index(z)].z_lifo) {
2626 if (dst->zd_full == 0) {
2627 dst->zd_tail = &last->zm_next;
2628 }
2629 last->zm_next = dst->zd_head;
2630 dst->zd_head = head;
2631 } else {
2632 last->zm_next = *dst->zd_tail;
2633 *dst->zd_tail = head;
2634 dst->zd_tail = &last->zm_next;
2635 }
2636 dst->zd_full += n;
2637
2638 return last->zm_seq;
2639 }
2640
2641 static inline void
2642 zone_depot_move_empty(
2643 struct zone_depot *dst,
2644 struct zone_depot *src,
2645 uint32_t n,
2646 zone_t z)
2647 {
2648 zone_magazine_t head, last;
2649
2650 assert(n);
2651 assert(src->zd_empty >= n);
2652
2653 src->zd_empty -= n;
2654 if (z && z->z_recirc_empty_min > src->zd_empty) {
2655 z->z_recirc_empty_min = src->zd_empty;
2656 }
2657 head = last = *src->zd_tail;
2658 for (uint32_t i = n; i-- > 1;) {
2659 last = last->zm_next;
2660 }
2661
2662 *src->zd_tail = last->zm_next;
2663
2664 dst->zd_empty += n;
2665 last->zm_next = *dst->zd_tail;
2666 *dst->zd_tail = head;
2667 }
2668
2669 static inline bool
2670 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2671 {
2672 if (depot->zd_full == 0) {
2673 return false;
2674 }
2675
2676 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2677 }
2678
2679 static void
2680 zone_cache_swap_magazines(zone_cache_t cache)
2681 {
2682 uint16_t count_a = cache->zc_alloc_cur;
2683 uint16_t count_f = cache->zc_free_cur;
2684 vm_offset_t *elems_a = cache->zc_alloc_elems;
2685 vm_offset_t *elems_f = cache->zc_free_elems;
2686
2687 z_debug_assert(count_a <= zc_mag_size());
2688 z_debug_assert(count_f <= zc_mag_size());
2689
2690 cache->zc_alloc_cur = count_f;
2691 cache->zc_free_cur = count_a;
2692 cache->zc_alloc_elems = elems_f;
2693 cache->zc_free_elems = elems_a;
2694 }
2695
2696 __pure2
2697 static smr_t
2698 zone_cache_smr(zone_cache_t cache)
2699 {
2700 return cache->zc_smr;
2701 }
2702
2703 /*!
2704 * @function zone_magazine_replace
2705 *
2706 * @brief
2707 * Unload a magazine and load a new one in its place.
2708 */
2709 static zone_magazine_t
2710 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2711 {
2712 zone_magazine_t old;
2713 vm_offset_t **elems;
2714
2715 mag->zm_seq = SMR_SEQ_INVALID;
2716
2717 if (empty) {
2718 elems = &zc->zc_free_elems;
2719 zc->zc_free_cur = 0;
2720 } else {
2721 elems = &zc->zc_alloc_elems;
2722 zc->zc_alloc_cur = zc_mag_size();
2723 }
2724 old = (zone_magazine_t)((uintptr_t)*elems -
2725 offsetof(struct zone_magazine, zm_elems));
2726 *elems = mag->zm_elems;
2727
2728 return old;
2729 }
2730
2731 static zone_magazine_t
2732 zone_magazine_alloc(zalloc_flags_t flags)
2733 {
2734 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2735 }
2736
2737 static void
2738 zone_magazine_free(zone_magazine_t mag)
2739 {
2740 (zfree)(zc_magazine_zone, mag);
2741 }
2742
2743 static void
2744 zone_magazine_free_list(struct zone_depot *zd)
2745 {
2746 zone_magazine_t tmp, mag = *zd->zd_tail;
2747
2748 while (mag) {
2749 tmp = mag->zm_next;
2750 zone_magazine_free(mag);
2751 mag = tmp;
2752 }
2753
2754 *zd->zd_tail = NULL;
2755 zd->zd_empty = 0;
2756 }
2757
2758 void
2759 zone_enable_caching(zone_t zone)
2760 {
2761 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2762 zone_cache_t caches;
2763 size_t depot_limit;
2764
2765 depot_limit = zc_pcpu_max() / size_per_mag;
2766 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2767
2768 caches = zalloc_percpu_permanent_type(struct zone_cache);
2769 zpercpu_foreach(zc, caches) {
2770 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2771 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2772 zone_depot_init(&zc->zc_depot);
2773 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2774 }
2775
2776 zone_lock(zone);
2777 assert(zone->z_pcpu_cache == NULL);
2778 zone->z_pcpu_cache = caches;
2779 zone->z_recirc_cont_cur = 0;
2780 zone->z_recirc_cont_wma = 0;
2781 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2782 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2783 zone_unlock(zone);
2784 }
2785
2786 bool
2787 zone_maps_owned(vm_address_t addr, vm_size_t size)
2788 {
2789 return from_zone_map(addr, size);
2790 }
2791
2792 #if KASAN_LIGHT
2793 bool
2794 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2795 {
2796 return from_zone_map(addr, size) ||
2797 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2798 }
2799 #endif /* KASAN_LIGHT */
2800
2801 void
2802 zone_map_sizes(
2803 vm_map_size_t *psize,
2804 vm_map_size_t *pfree,
2805 vm_map_size_t *plargest_free)
2806 {
2807 vm_map_size_t size, free, largest;
2808
2809 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2810
2811 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2812 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2813 *psize += size;
2814 *pfree += free;
2815 *plargest_free = MAX(*plargest_free, largest);
2816 }
2817 }
2818
2819 __attribute__((always_inline))
2820 vm_map_t
2821 zone_submap(zone_security_flags_t zsflags)
2822 {
2823 return zone_submaps[zsflags.z_submap_idx];
2824 }
2825
2826 unsigned
2827 zpercpu_count(void)
2828 {
2829 return zpercpu_early_count;
2830 }
2831
2832 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2833 /*
2834 * Returns a random number of a given bit-width.
2835 *
2836 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2837 *
2838 * This uses Intel's rdrand because random() uses FP registers,
2839 * which causes FP faults and allocations, neither of which zalloc
2840 * itself can tolerate due to reentrancy problems.
2841 *
2842 * For pre-rdrand machines (which we no longer support),
2843 * we fall back to a biased random generator that doesn't use FP.
2844 * Such hardware is no longer supported, but VMs of newer OSes on older
2845 * bare metal are made to limp along (with reduced security) this way.
2846 */
2847 static uint64_t
2848 zalloc_random_mask64(uint32_t bits)
2849 {
2850 uint64_t mask = ~0ull >> (64 - bits);
2851 uint64_t v;
2852
2853 #if __x86_64__
2854 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2855 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2856 v &= mask;
2857 } else {
2858 disable_preemption();
2859 int cpu = cpu_number();
2860 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2861 zone_bool_gen[cpu].zbg_entropy,
2862 ZONE_ENTROPY_CNT, bits);
2863 enable_preemption();
2864 }
2865 #else
2866 v = early_random() & mask;
2867 #endif
2868
2869 return v;
2870 }
2871
2872 /*
2873 * Returns a random number within [bound_min, bound_max)
2874 *
2875 * This isn't _exactly_ uniform, but the skew is small enough
2876 * not to matter for the consumers of this interface.
2877 *
2878 * Values within [bound_min, 2^64 % (bound_max - bound_min))
2879 * will be returned (bound_max - bound_min) / 2^64 more often
2880 * than values within [2^64 % (bound_max - bound_min), bound_max).
2881 */
2882 static uint32_t
2883 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2884 {
2885 uint64_t delta = bound_max - bound_min;
2886
2887 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2888 }
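
/*
 * Usage sketch (illustrative only): picking one of 4 chunks at random,
 * as a caller of this helper might do.
 *
 *	uint32_t idx = zalloc_random_uniform32(0, 4);   // 0, 1, 2 or 3
 *
 * The upper bound is exclusive, matching the [bound_min, bound_max)
 * interval documented above.
 */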
2889
2890 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2891 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2892 /*
2893 * Track all kalloc zones of specified size for zlog name
2894 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2895 *
2896 * Additionally track all shared kalloc zones with shared.kalloc
2897 */
2898 static bool
2899 track_kalloc_zones(zone_t z, const char *logname)
2900 {
2901 const char *prefix;
2902 size_t len;
2903 zone_security_flags_t zsflags = zone_security_config(z);
2904
2905 prefix = "kalloc.type.var.";
2906 len = strlen(prefix);
2907 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2908 strncmp(logname, prefix, len) == 0) {
2909 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2910
2911 return zone_elem_inner_size(z) == sizeclass;
2912 }
2913
2914 prefix = "kalloc.type.";
2915 len = strlen(prefix);
2916 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2917 strncmp(logname, prefix, len) == 0) {
2918 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2919
2920 return zone_elem_inner_size(z) == sizeclass;
2921 }
2922
2923 prefix = "kalloc.";
2924 len = strlen(prefix);
2925 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2926 strncmp(logname, prefix, len) == 0) {
2927 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2928
2929 return zone_elem_inner_size(z) == sizeclass;
2930 }
2931
2932 prefix = "shared.kalloc";
2933 if ((zsflags.z_kheap_id == KHEAP_ID_SHARED) &&
2934 (strcmp(logname, prefix) == 0)) {
2935 return true;
2936 }
2937
2938 return false;
2939 }
2940 #endif
2941
2942 int
2943 track_this_zone(const char *zonename, const char *logname)
2944 {
2945 unsigned int len;
2946 const char *zc = zonename;
2947 const char *lc = logname;
2948
2949 /*
2950 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2951 */
2952
2953 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2954 /*
2955 * If the current characters don't match, check for a space
2956 * in the zone name and a corresponding period in the log name.
2957 * If that's not there, then the strings don't match.
2958 */
2959
2960 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2961 break;
2962 }
2963
2964 /*
2965 * The strings are equal so far. If we're at the end, then it's a match.
2966 */
2967
2968 if (*zc == '\0') {
2969 return TRUE;
2970 }
2971 }
2972
2973 return FALSE;
2974 }
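
/*
 * Example (illustrative, hypothetical names): spaces in the zone name match
 * periods in the boot-arg value, so a zone named "vm objects" would be
 * selected by either of the first two calls below:
 *
 *	track_this_zone("vm objects", "vm objects")   -> TRUE
 *	track_this_zone("vm objects", "vm.objects")   -> TRUE
 *	track_this_zone("vm objects", "vm object")    -> FALSE
 */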
2975
2976 #if DEBUG || DEVELOPMENT
2977
2978 vm_size_t
2979 zone_element_info(void *addr, vm_tag_t * ptag)
2980 {
2981 vm_size_t size = 0;
2982 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2983 struct zone *src_zone;
2984
2985 if (from_zone_map(addr, sizeof(void *))) {
2986 src_zone = zone_by_id(zone_index_from_ptr(addr));
2987 size = zone_elem_inner_size(src_zone);
2988 #if VM_TAG_SIZECLASSES
2989 if (__improbable(src_zone->z_uses_tags)) {
2990 struct zone_page_metadata *meta;
2991 vm_offset_t eidx;
2992 vm_tag_t *slot;
2993
2994 meta = zone_element_resolve(src_zone,
2995 (vm_offset_t)addr, &eidx);
2996 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
2997 tag = *slot;
2998 }
2999 #endif /* VM_TAG_SIZECLASSES */
3000 }
3001
3002 *ptag = tag;
3003 return size;
3004 }
3005
3006 #endif /* DEBUG || DEVELOPMENT */
3007 #if KASAN_CLASSIC
3008
3009 vm_size_t
3010 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3011 {
3012 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3013
3014 *zonep = zone;
3015 return zone_elem_inner_size(zone);
3016 }
3017
3018 #endif /* KASAN_CLASSIC */
3019 #endif /* !ZALLOC_TEST */
3020 #pragma mark Zone zeroing and early random
3021 #if !ZALLOC_TEST
3022
3023 /*
3024 * Zone zeroing
3025 *
3026 * All allocations from zones are zeroed on free and are additionally
3027 * checked to still be zero on alloc. The check is always on
3028 * on embedded devices. A perf regression was detected on Intel
3029 * because we can't use the vectorized implementation of
3030 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3031 * initialization and allocation. Therefore we only perform the check
3032 * on 20% of the allocations there.
3033 */
3034 #if ZALLOC_ENABLE_ZERO_CHECK
3035 #if defined(__x86_64__)
3036 /*
3037 * Perform zero validation on every 5th allocation
3038 */
3039 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3040 static uint32_t PERCPU_DATA(zzc_decrementer);
3041 #endif /* defined(__x86_64__) */
3042
3043 /*
3044 * Determine if zero validation for allocation should be skipped
3045 */
3046 static bool
3047 zalloc_skip_zero_check(void)
3048 {
3049 #if defined(__x86_64__)
3050 uint32_t *counterp, cnt;
3051
3052 counterp = PERCPU_GET(zzc_decrementer);
3053 cnt = *counterp;
3054 if (__probable(cnt > 0)) {
3055 *counterp = cnt - 1;
3056 return true;
3057 }
3058 *counterp = zzc_rate - 1;
3059 #endif /* defined(__x86_64__) */
3060 return false;
3061 }
3062
3063 __abortlike
3064 static void
3065 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3066 {
3067 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3068 uint32_t first_offs = ~0u;
3069 uintptr_t first_bits = 0, v;
3070 char buf[1024];
3071 int pos = 0;
3072
3073 buf[0] = '\0';
3074
3075 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3076 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3077 continue;
3078 }
3079 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3080 "%5d: 0x%016lx", o, v);
3081 if (first_offs > o) {
3082 first_offs = o;
3083 first_bits = v;
3084 }
3085 }
3086
3087 (panic)("[%s%s]: element modified after free "
3088 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3089 zone_heap_name(z), zone_name(z),
3090 first_offs, first_bits, esize, (void *)elem, buf);
3091 }
3092
3093 static void
3094 zalloc_validate_element(
3095 zone_t zone,
3096 vm_offset_t elem,
3097 vm_size_t size,
3098 zalloc_flags_t flags)
3099 {
3100 if (flags & Z_NOZZC) {
3101 return;
3102 }
3103 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3104 zalloc_uaf_panic(zone, elem, size);
3105 }
3106 if (flags & Z_PCPU) {
3107 for (size_t i = zpercpu_count(); --i > 0;) {
3108 elem += PAGE_SIZE;
3109 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3110 zalloc_uaf_panic(zone, elem, size);
3111 }
3112 }
3113 }
3114 }
3115
3116 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3117
3118 __attribute__((noinline))
3119 static void
3120 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3121 {
3122 #if KASAN_FAKESTACK
3123 /*
3124 * This can cause re-entrancy with kasan fakestacks
3125 */
3126 #pragma unused(zone, cpu, zs)
3127 #else
3128 uint32_t bits;
3129
3130 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3131 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3132
3133 zs->zs_alloc_rr += bits;
3134 zs->zs_alloc_rr %= zone->z_chunk_elems;
3135 #endif
3136 }
3137
3138 #endif /* !ZALLOC_TEST */
3139 #pragma mark Zone Leak Detection
3140 #if !ZALLOC_TEST
3141 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3142
3143 /*
3144 * Zone leak debugging code
3145 *
3146 * When enabled, this code keeps a log to track allocations to a particular
3147 * zone that have not yet been freed.
3148 *
3149 * Examining this log will reveal the source of a zone leak.
3150 *
3151 * The log is allocated only when logging is enabled (it is off by default),
3152 * so there is no effect on the system when it's turned off.
3153 *
3154 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3155 * zone name to log, with n starting at 1.
3156 *
3157 * Leaks debugging utilizes 2 tunables:
3158 * - zlsize (in kB) which describes how much allocated memory the log
3159 * should cover (zones with smaller elements get more records, default is 4M).
3160 *
3161 * - zlfreq (in bytes) which describes the sampling period, in cumulative
3162 * allocation size, at which automatic leak detection samples allocations
3163 * (default is 8k).
3164 *
3165 *
3166 * Zone corruption logging
3167 *
3168 * Logging can also be used to help identify the source of a zone corruption.
3169 *
3170 * First, identify the zone that is being corrupted,
3171 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3172 *
3173 * When -zc is used in conjunction with zlog,
3174 * it changes the logging style to track both allocations and frees to the zone.
3175 *
3176 * When the corruption is detected, examining the log will show you the stack
3177 * traces of the callers who last allocated and freed any particular element in
3178 * the zone.
3179 *
3180 * Corruption debugging logs will have zrecs records
3181 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3182 */
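
/*
 * Example boot-args (illustrative only; the zone name is just an example):
 *
 *	zlog1=vm.objects zlsize=8192         (leak logging, ~8M of coverage)
 *	-zc zlog1=vm.objects zrecs=32768     (corruption logging instead)
 */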
3183
3184 #define ZRECORDS_MAX (256u << 10)
3185 #define ZRECORDS_DEFAULT (16u << 10)
3186 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3187 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3188 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3189
3190 __startup_func
3191 static void
3192 zone_leaks_init_zrecs(void)
3193 {
3194 /*
3195 * Don't allow more than ZRECORDS_MAX records,
3196 * even if the user asked for more.
3197 *
3198 * This prevents accidentally hogging too much kernel memory
3199 * and making the system unusable.
3200 */
3201 if (zrecs == 0) {
3202 zrecs = ZRECORDS_DEFAULT *
3203 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3204 }
3205 if (zrecs > ZRECORDS_MAX) {
3206 zrecs = ZRECORDS_MAX;
3207 }
3208 }
3209 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3210
3211 static uint32_t
3212 zone_leaks_record_count(zone_t z)
3213 {
3214 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3215
3216 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3217 }
3218
3219 static uint32_t
3220 zone_leaks_sample_rate(zone_t z)
3221 {
3222 return zlfreq / zone_elem_inner_size(z);
3223 }
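
/*
 * Worked example (illustrative, assuming the default tunables): for a zone
 * with 256-byte elements,
 *
 *	zone_leaks_record_count() = (4096 << 10) / 256 = 16384 records
 *	zone_leaks_sample_rate()  = 8192 / 256         = 32
 *
 * i.e. roughly one in every 32 allocations gets sampled into the leak log.
 */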
3224
3225 #if ZALLOC_ENABLE_LOGGING
3226 /* Log allocations and frees to help debug a zone element corruption */
3227 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3228
3229 /*
3230 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3231 */
3232 #define MAX_ZONES_LOG_REQUESTS 10
3233
3234 /**
3235 * @function zone_setup_logging
3236 *
3237 * @abstract
3238 * Optionally sets up a zone for logging.
3239 *
3240 * @discussion
3241 * We recognize the following boot-args:
3242 *
3243 * zlog=<zone_to_log>
3244 * zrecs=<num_records_in_log>
3245 * zlsize=<memory to cover for leaks>
3246 *
3247 * The zlog arg is used to specify the zone name that should be logged,
3248 * and zrecs/zlsize is used to control the size of the log.
3249 */
3250 static void
3251 zone_setup_logging(zone_t z)
3252 {
3253 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3254 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3255 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3256 bool logging_on = false;
3257
3258 /*
3259 * Append kalloc heap name to zone name (if zone is used by kalloc)
3260 */
3261 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3262
3263 /* zlog0 isn't allowed. */
3264 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3265 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3266
3267 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3268 if (track_this_zone(zone_name, zlog_val) ||
3269 track_kalloc_zones(z, zlog_val)) {
3270 logging_on = true;
3271 break;
3272 }
3273 }
3274 }
3275
3276 /*
3277 * Backwards compatibility with the old boot-arg used to specify single
3278 * zone logging, i.e. zlog. This needs to happen after the newer zlog<n>
3279 * checks because the "zlog" prefix would match all of the zlog<n>
3280 * boot-args.
3281 */
3282 if (!logging_on &&
3283 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3284 if (track_this_zone(zone_name, zlog_val) ||
3285 track_kalloc_zones(z, zlog_val)) {
3286 logging_on = true;
3287 }
3288 }
3289
3290 /*
3291 * If we want to log a zone, see if we need to allocate buffer space for
3292 * the log.
3293 *
3294 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3295 * we have to defer allocation in that case.
3296 *
3297 * zone_init() will finish the job.
3298 *
3299 * If we want to log one of the VM related zones that's set up early on,
3300 * we will skip allocation of the log until zinit is called again later
3301 * on some other zone.
3302 */
3303 if (logging_on) {
3304 if (corruption_debug_flag) {
3305 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3306 } else {
3307 z->z_btlog = btlog_create(BTLOG_HASH,
3308 zone_leaks_record_count(z), 0);
3309 }
3310 if (z->z_btlog) {
3311 z->z_log_on = true;
3312 printf("zone[%s%s]: logging enabled\n",
3313 zone_heap_name(z), z->z_name);
3314 } else {
3315 printf("zone[%s%s]: failed to enable logging\n",
3316 zone_heap_name(z), z->z_name);
3317 }
3318 }
3319 }
3320
3321 #endif /* ZALLOC_ENABLE_LOGGING */
3322 #if KASAN_TBI
3323 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3324
3325 __startup_func
3326 static void
3327 kasan_tbi_init_zrecs(void)
3328 {
3329 /*
3330 * Don't allow more than ZRECORDS_MAX records,
3331 * even if the user asked for more.
3332 *
3333 * This prevents accidentally hogging too much kernel memory
3334 * and making the system unusable.
3335 */
3336 if (kasan_zrecs == 0) {
3337 kasan_zrecs = ZRECORDS_DEFAULT *
3338 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3339 }
3340 if (kasan_zrecs > ZRECORDS_MAX) {
3341 kasan_zrecs = ZRECORDS_MAX;
3342 }
3343 }
3344 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3345
3346 static void
3347 zone_setup_kasan_logging(zone_t z)
3348 {
3349 if (!z->z_tbi_tag) {
3350 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3351 zone_heap_name(z), z->z_name);
3352 return;
3353 }
3354
3355 z->z_log_on = true;
3356 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3357 if (!z->z_btlog) {
3358 printf("zone[%s%s]: failed to enable kasan logging\n",
3359 zone_heap_name(z), z->z_name);
3360 }
3361 }
3362
3363 #endif /* KASAN_TBI */
3364 #if CONFIG_ZLEAKS
3365
3366 static thread_call_data_t zone_leaks_callout;
3367
3368 /*
3369 * The zone leak detector, abbreviated 'zleak', keeps track
3370 * of a subset of the currently outstanding allocations
3371 * made by the zone allocator.
3372 *
3373 * Zones that use more than zleak_pages_per_zone_wired_threshold
3374 * pages will get a BTLOG_HASH btlog with sampling to minimize
3375 * perf impact, yet receive statistical data about the backtrace
3376 * that is the most likely to be causing the leak.
3377 *
3378 * If the zone drops far enough below the threshold, the log
3379 * is disabled and its backtraces freed. Data can be collected
3380 * from userspace with the zlog(1) command.
3381 */
3382
3383 uint32_t zleak_active;
3384 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3385
3386 /* Size a zone will have before we will collect data on it */
3387 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3388 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3389
3390 static inline bool
3391 zleak_should_enable_for_zone(zone_t z)
3392 {
3393 if (z->z_log_on) {
3394 return false;
3395 }
3396 if (z->z_btlog) {
3397 return false;
3398 }
3399 if (z->z_exhausts) {
3400 return false;
3401 }
3402 if (zone_exhaustible(z)) {
3403 return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3404 }
3405 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3406 }
3407
3408 static inline bool
3409 zleak_should_disable_for_zone(zone_t z)
3410 {
3411 if (z->z_log_on) {
3412 return false;
3413 }
3414 if (!z->z_btlog) {
3415 return false;
3416 }
3417 if (zone_exhaustible(z)) {
3418 return z->z_wired_cur * 8 < z->z_wired_max * 7;
3419 }
3420 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3421 }
3422
3423 static void
3424 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3425 {
3426 btlog_t log;
3427
3428 zone_foreach(z) {
3429 if (zleak_should_disable_for_zone(z)) {
3430 log = z->z_btlog;
3431 z->z_btlog = NULL;
3432 assert(z->z_btlog_disabled == NULL);
3433 btlog_disable(log);
3434 z->z_btlog_disabled = log;
3435 os_atomic_dec(&zleak_active, relaxed);
3436 }
3437
3438 if (zleak_should_enable_for_zone(z)) {
3439 log = z->z_btlog_disabled;
3440 if (log == NULL) {
3441 log = btlog_create(BTLOG_HASH,
3442 zone_leaks_record_count(z),
3443 zone_leaks_sample_rate(z));
3444 } else if (btlog_enable(log) == KERN_SUCCESS) {
3445 z->z_btlog_disabled = NULL;
3446 } else {
3447 log = NULL;
3448 }
3449 os_atomic_store(&z->z_btlog, log, release);
3450 os_atomic_inc(&zleak_active, relaxed);
3451 }
3452 }
3453 }
3454
3455 __startup_func
3456 static void
3457 zleak_init(void)
3458 {
3459 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3460
3461 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3462 zleak_max_zonemap_size / 8);
3463
3464 thread_call_setup_with_options(&zone_leaks_callout,
3465 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3466 THREAD_CALL_OPTIONS_ONCE);
3467 }
3468 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3469
3470 kern_return_t
3471 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3472 {
3473 if (value >= zleak_max_zonemap_size) {
3474 return KERN_INVALID_VALUE;
3475 }
3476
3477 if (arg == &zleak_per_zone_tracking_threshold) {
3478 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3479 zleak_pages_per_zone_wired_threshold = atop(value);
3480 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3481 thread_call_enter(&zone_leaks_callout);
3482 }
3483 return KERN_SUCCESS;
3484 }
3485
3486 return KERN_INVALID_ARGUMENT;
3487 }
3488
3489 static void
3490 panic_display_zleaks(bool has_syms)
3491 {
3492 bool did_header = false;
3493 vm_address_t bt[BTLOG_MAX_DEPTH];
3494 uint32_t len, count;
3495
3496 zone_foreach(z) {
3497 btlog_t log = z->z_btlog;
3498
3499 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3500 continue;
3501 }
3502
3503 count = btlog_guess_top(log, bt, &len);
3504 if (count == 0) {
3505 continue;
3506 }
3507
3508 if (!did_header) {
3509 paniclog_append_noflush("Zone (suspected) leak report:\n");
3510 did_header = true;
3511 }
3512
3513 paniclog_append_noflush(" Zone: %s%s\n",
3514 zone_heap_name(z), zone_name(z));
3515 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3516 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3517 paniclog_append_noflush(" Size: %ld\n",
3518 (long)zone_size_wired(z));
3519 paniclog_append_noflush(" Top backtrace:\n");
3520 for (uint32_t i = 0; i < len; i++) {
3521 if (has_syms) {
3522 paniclog_append_noflush(" %p ", (void *)bt[i]);
3523 panic_print_symbol_name(bt[i]);
3524 paniclog_append_noflush("\n");
3525 } else {
3526 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3527 }
3528 }
3529
3530 kmod_panic_dump(bt, len);
3531 paniclog_append_noflush("\n");
3532 }
3533 }
3534 #endif /* CONFIG_ZLEAKS */
3535
3536 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3537 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3538
3539 #if !KASAN_TBI
3540 __cold
3541 #endif
3542 static void
3543 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3544 {
3545 btlog_t log = zone->z_btlog;
3546 btref_get_flags_t flags = 0;
3547 btref_t ref;
3548
3549 #if !KASAN_TBI
3550 if (!log || !btlog_sample(log)) {
3551 return;
3552 }
3553 #endif
3554 if (get_preemption_level() || zone_supports_vm(zone)) {
3555 /*
3556 * VM zones can be used by btlog, avoid reentrancy issues.
3557 */
3558 flags = BTREF_GET_NOWAIT;
3559 }
3560
3561 ref = btref_get(fp, flags);
3562 while (count-- > 0) {
3563 if (count) {
3564 btref_retain(ref);
3565 }
3566 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3567 addr += *(vm_offset_t *)addr;
3568 }
3569 }
3570
3571 #define ZALLOC_LOG(zone, addr, count) ({ \
3572 if ((zone)->z_btlog) { \
3573 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3574 } \
3575 })
3576
3577 #if !KASAN_TBI
3578 __cold
3579 #endif
3580 static void
3581 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3582 {
3583 btlog_t log = zone->z_btlog;
3584 btref_get_flags_t flags = 0;
3585 btref_t ref;
3586
3587 #if !KASAN_TBI
3588 if (!log) {
3589 return;
3590 }
3591 #endif
3592
3593 /*
3594 * See if we're doing logging on this zone.
3595 *
3596 * There are two styles of logging used depending on
3597 * whether we're trying to catch a leak or corruption.
3598 */
3599 #if !KASAN_TBI
3600 if (btlog_get_type(log) == BTLOG_HASH) {
3601 /*
3602 * We're logging to catch a leak.
3603 *
3604 * Remove any record we might have for this element
3605 * since it's being freed. Note that we may not find it
3606 * if the buffer overflowed and that's OK.
3607 *
3608 * Since the log is of a limited size, old records get
3609 * overwritten if there are more zallocs than zfrees.
3610 */
3611 while (count-- > 0) {
3612 btlog_erase(log, (void *)addr);
3613 addr += *(vm_offset_t *)addr;
3614 }
3615 return;
3616 }
3617 #endif /* !KASAN_TBI */
3618
3619 if (get_preemption_level() || zone_supports_vm(zone)) {
3620 /*
3621 * VM zones can be used by btlog, avoid reentrancy issues.
3622 */
3623 flags = BTREF_GET_NOWAIT;
3624 }
3625
3626 ref = btref_get(fp, flags);
3627 while (count-- > 0) {
3628 if (count) {
3629 btref_retain(ref);
3630 }
3631 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3632 addr += *(vm_offset_t *)addr;
3633 }
3634 }
3635
3636 #define ZFREE_LOG(zone, addr, count) ({ \
3637 if ((zone)->z_btlog) { \
3638 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3639 } \
3640 })
3641
3642 #else
3643 #define ZALLOC_LOG(...) ((void)0)
3644 #define ZFREE_LOG(...) ((void)0)
3645 #endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3646 #endif /* !ZALLOC_TEST */
3647 #pragma mark zone (re)fill
3648 #if !ZALLOC_TEST
3649
3650 /*!
3651 * @defgroup Zone Refill
3652 * @{
3653 *
3654 * @brief
3655 * Functions handling the zone refill machinery.
3656 *
3657 * @discussion
3658 * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3659 *
3660 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3661 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3662 *
3663 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3664 *
3665 * - wake up the asynchronous expansion thread call if the caller is not allowed
3666 * to block, or if the reserve becomes depleted.
3667 *
3668 *
3669 * <h2>Synchronous expansion</h2>
3670 *
3671 * This mechanism is actually the only one that may refill a zone, and all the
3672 * other ones funnel through this one eventually.
3673 *
3674 * @c zone_expand_locked() implements the core of the expansion mechanism,
3675 * and will do so while a caller specified predicate is true.
3676 *
3677 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3678 * - one VM privileged thread,
3679 * - one regular thread.
3680 *
3681 * Regular threads that refill will put down their identity in @c z_expander,
3682 * so that priority inversion avoidance can be implemented.
3683 *
3684 * However, VM privileged threads are allowed to use VM page reserves,
3685 * which allows for the system to recover from extreme memory pressure
3686 * situations, allowing for the few allocations that @c zone_gc() or
3687 * killing processes require.
3688 *
3689 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3690 * is set. @c z_expander is not necessarily the identity of this VM privileged
3691 * thread (it is if the VM privileged thread came in first; otherwise it
3692 * wouldn't be, and could even be @c THREAD_NULL).
3693 *
3694 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3695 * spending a whole pointer on priority inheritance for VM privileged threads
3696 * (and other issues related to having two owners), we use the rwlock boost as
3697 * a stop gap to avoid priority inversions.
3698 *
3699 *
3700 * <h2>Chunk wiring policies</h2>
3701 *
3702 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3703 * to try to minimize fragmentation for element sizes that don't align well
3704 * with a chunk size. However, this can grow large and be hard to fulfill on
3705 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3706 * 4k page systems).
3707 *
3708 * This is why, when under memory pressure the system allows chunks to be
3709 * partially populated. The metadata of the first page in the chunk maintains
3710 * the count of actually populated pages.
3711 *
3712 * The metadata for addresses assigned to a zone is found on one of 4 queues:
3713 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3714 * elements (those can be targeted by @c zone_gc()),
3715 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3716 * used,
3717 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3718 * left,
3719 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3720 * the zone forever, or the first secondary metadata for a chunk whose
3721 * corresponding page is not populated in the chunk.
3722 *
3723 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3724 * queues are preferred.
3725 *
3726 *
3727 * <h2>Asynchronous expansion</h2>
3728 *
3729 * This mechanism allows for refilling zones used mostly by non-blocking
3730 * callers. It relies on a thread call (@c zone_expand_callout) which will
3731 * iterate all zones and refill the ones marked with @c z_async_refilling.
3732 *
3733 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3734 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3735 *
3736 *
3737 * <h2>Dealing with zone allocations from the mach VM code</h2>
3738 *
3739 * The implementation of the mach VM itself uses the zone allocator
3740 * for things like the vm_map_entry data structure. In order to prevent
3741 * a recursion problem when adding more pages to a zone, the VM zones
3742 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3743 * or any VM map functions to allocate.
3744 *
3745 * Instead, a really simple coalescing first-fit allocator is used
3746 * for this submap, and no one else than zalloc can allocate from it.
3747 *
3748 * Memory is directly populated which doesn't require allocation of
3749 * VM map entries, and avoids recursion. The cost of this scheme, however,
3750 * is that `vm_map_lookup_entry` will not function on those addresses
3751 * (nor any API relying on it).
3752 */
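/*
 * Illustrative sketch (not part of the implementation): the refill decision
 * described above can be summarized as follows, where the condition is a
 * simplification of the actual heuristics in zalloc_ext():
 *
 *	if (z->z_elems_free <= z->z_elems_rsv / 2) {
 *		if (the caller may block) {
 *			zone_expand_locked(z, flags);             // synchronous
 *		} else {
 *			zone_expand_async_schedule_if_allowed(z); // thread call
 *		}
 *	}
 *
 * zone_expand_locked() and zone_expand_async_schedule_if_allowed() are the
 * real functions defined later in this file.
 */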
3753
3754 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3755 static void zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd);
3756 static thread_call_data_t zone_expand_callout;
3757
3758 __attribute__((overloadable))
3759 static inline bool
3760 zone_submap_is_sequestered(zone_submap_idx_t idx)
3761 {
3762 return idx != Z_SUBMAP_IDX_DATA;
3763 }
3764
3765 __attribute__((overloadable))
3766 static inline bool
3767 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3768 {
3769 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3770 }
3771
3772 static inline kma_flags_t
3773 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3774 {
3775 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3776
3777 if (zsflags.z_noencrypt) {
3778 kmaflags |= KMA_NOENCRYPT;
3779 }
3780 if (zsflags.z_submap_idx == Z_SUBMAP_IDX_DATA) {
3781 kmaflags |= KMA_DATA;
3782 }
3783 if (flags & Z_NOPAGEWAIT) {
3784 kmaflags |= KMA_NOPAGEWAIT;
3785 }
3786 if (z->z_permanent || (!z->z_destructible &&
3787 zone_submap_is_sequestered(zsflags))) {
3788 kmaflags |= KMA_PERMANENT;
3789 }
3790 if (zsflags.z_submap_from_end) {
3791 kmaflags |= KMA_LAST_FREE;
3792 }
3793
3794 if (z->z_tbi_tag) {
3795 kmaflags |= KMA_TAG;
3796 }
3797
3798 return kmaflags;
3799 }
3800
3801 static inline void
3802 zone_add_wired_pages(zone_t z, uint32_t pages)
3803 {
3804 os_atomic_add(&zone_pages_wired, pages, relaxed);
3805
3806 #if CONFIG_ZLEAKS
3807 if (__improbable(zleak_should_enable_for_zone(z) &&
3808 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3809 thread_call_enter(&zone_leaks_callout);
3810 }
3811 #else
3812 (void)z;
3813 #endif
3814 }
3815
3816 static inline void
3817 zone_remove_wired_pages(zone_t z, uint32_t pages)
3818 {
3819 os_atomic_sub(&zone_pages_wired, pages, relaxed);
3820
3821 #if CONFIG_ZLEAKS
3822 if (__improbable(zleak_should_disable_for_zone(z) &&
3823 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3824 thread_call_enter(&zone_leaks_callout);
3825 }
3826 #else
3827 (void)z;
3828 #endif
3829 }
3830
3831 #if CONFIG_KERNEL_TAGGING
3832 static inline vm_address_t
3833 zone_tag_element(zone_t zone, vm_offset_t addr, vm_size_t elem_size)
3834 {
3835 vm_offset_t tagged_address;
3836
3837 tagged_address = vm_memtag_assign_tag(addr, elem_size);
3838
3839 vm_memtag_set_tag(tagged_address, elem_size);
3840
3841 if (zone->z_percpu) {
3842 zpercpu_foreach_cpu(index) {
3843 vm_memtag_set_tag(tagged_address + ptoa(index), elem_size);
3844 }
3845 }
3846
3847 return tagged_address;
3848 }
3849
3850 static inline void
3851 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3852 {
3853 vm_offset_t elem_size = zone_elem_outer_size(zone);
3854 vm_offset_t oob_offs = zone_elem_outer_offs(zone);
3855
3856 for (uint32_t i = start; i < end; i++) {
3857 vm_offset_t elem_addr = base + oob_offs + i * elem_size;
3858
3859 (void)zone_tag_element(zone, elem_addr, elem_size);
3860 }
3861 }
3862 #endif /* CONFIG_KERNEL_TAGGING */
3863
3864 /*!
3865 * @function zcram_and_lock()
3866 *
3867 * @brief
3868 * Prepare some memory for being usable for allocation purposes.
3869 *
3870 * @discussion
3871 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3872 * to be usable in the zone.
3873 *
3874 * This function assumes the metadata is already populated for the range.
3875 *
3876 * Calling this function with @c pg_start being 0 means that the memory
3877 * is either a partial chunk, or a full chunk, that isn't published anywhere
3878 * and the initialization can happen without locks held.
3879 *
3880 * Calling this function with a non zero @c pg_start means that we are extending
3881 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3882 * is already usable and published in the zone, so extending it requires holding
3883 * the zone lock.
3884 *
3885 * @param zone The zone to cram new populated pages into
3886 * @param addr The base address for the chunk(s)
3887 * @param pg_va_new The number of virtual pages newly assigned to the zone
3888 * @param pg_start The first newly populated page relative to @a addr.
3889 * @param pg_end The after-last newly populated page relative to @a addr.
3890 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3891 */
3892 static void
3893 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3894 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3895 {
3896 zone_id_t zindex = zone_index(zone);
3897 vm_offset_t elem_size = zone_elem_outer_size(zone);
3898 uint32_t free_start = 0, free_end = 0;
3899 uint32_t oob_offs = zone_elem_outer_offs(zone);
3900
3901 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3902 uint32_t chunk_pages = zone->z_chunk_pages;
3903 bool guarded = meta->zm_guarded;
3904
3905 assert(pg_start < pg_end && pg_end <= chunk_pages);
3906
3907 if (pg_start == 0) {
3908 uint16_t chunk_len = (uint16_t)pg_end;
3909 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3910 bool inline_bitmap = false;
3911
3912 if (zone->z_percpu) {
3913 chunk_len = 1;
3914 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3915 assert(pg_end == zpercpu_count());
3916 }
3917 if (!zone->z_permanent && !zone->z_uses_tags) {
3918 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3919 }
3920
3921 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3922
3923 meta[0] = (struct zone_page_metadata){
3924 .zm_index = zindex,
3925 .zm_guarded = guarded,
3926 .zm_inline_bitmap = inline_bitmap,
3927 .zm_chunk_len = chunk_len,
3928 .zm_alloc_size = lock,
3929 };
3930
3931 if (!zone->z_permanent && !inline_bitmap) {
3932 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3933 zone->z_chunk_elems, zone->z_uses_tags);
3934 }
3935
3936 for (uint16_t i = 1; i < chunk_pages; i++) {
3937 meta[i] = (struct zone_page_metadata){
3938 .zm_index = zindex,
3939 .zm_guarded = guarded,
3940 .zm_inline_bitmap = inline_bitmap,
3941 .zm_chunk_len = secondary_len,
3942 .zm_page_index = (uint8_t)i,
3943 .zm_bitmap = meta[0].zm_bitmap,
3944 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
3945 };
3946 }
3947
3948 if (inline_bitmap) {
3949 zone_meta_bits_init_inline(meta, free_end);
3950 }
3951 } else {
3952 assert(!zone->z_percpu && !zone->z_permanent);
3953
3954 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3955 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3956 }
3957
3958 #if CONFIG_KERNEL_TAGGING
3959 if (__probable(zone->z_tbi_tag)) {
3960 zcram_memtag_init(zone, addr, free_start, free_end);
3961 }
3962 #endif /* CONFIG_KERNEL_TAGGING */
3963
3964 #if KASAN_CLASSIC
3965 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3966 if (zone->z_permanent) {
3967 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3968 } else if (zone->z_percpu) {
3969 for (uint32_t i = 0; i < pg_end; i++) {
3970 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3971 zone_elem_outer_size(zone),
3972 zone_elem_outer_offs(zone),
3973 zone_elem_redzone(zone));
3974 }
3975 } else {
3976 kasan_zmem_add(addr, ptoa(pg_end),
3977 zone_elem_outer_size(zone),
3978 zone_elem_outer_offs(zone),
3979 zone_elem_redzone(zone));
3980 }
3981 #endif /* KASAN_CLASSIC */
3982
3983 /*
3984 * Insert the initialized pages / metadatas into the right lists.
3985 */
3986
3987 zone_lock(zone);
3988 assert(zone->z_self == zone);
3989
3990 if (pg_start != 0) {
3991 assert(meta->zm_chunk_len == pg_start);
3992
3993 zone_meta_bits_merge(meta, free_start, free_end);
3994 meta->zm_chunk_len = (uint16_t)pg_end;
3995
3996 /*
3997 * consume the zone_meta_lock_in_partial()
3998 * done in zone_expand_locked()
3999 */
4000 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4001 zone_meta_remqueue(zone, meta);
4002 }
4003
4004 if (zone->z_permanent || meta->zm_alloc_size) {
4005 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4006 } else {
4007 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4008 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4009 }
4010 if (pg_end < chunk_pages) {
4011 /* push any non populated residual VA on z_pageq_va */
4012 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4013 }
4014
4015 zone->z_elems_free += free_end - free_start;
4016 zone->z_elems_avail += free_end - free_start;
4017 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
4018 if (pg_va_new) {
4019 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4020 }
4021 if (zone->z_wired_hwm < zone->z_wired_cur) {
4022 zone->z_wired_hwm = zone->z_wired_cur;
4023 }
4024
4025 #if CONFIG_ZLEAKS
4026 if (__improbable(zleak_should_enable_for_zone(zone) &&
4027 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4028 thread_call_enter(&zone_leaks_callout);
4029 }
4030 #endif /* CONFIG_ZLEAKS */
4031
4032 zone_add_wired_pages(zone, pg_end - pg_start);
4033 }
4034
4035 static void
4036 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4037 {
4038 uint32_t chunk_pages = zone->z_chunk_pages;
4039
4040 assert(pages % chunk_pages == 0);
4041 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4042 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4043 zone_unlock(zone);
4044 }
4045 }
4046
4047 __startup_func
4048 void
4049 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4050 {
4051 uint32_t pages = (uint32_t)atop(size);
4052
4053
4054 assert(from_zone_map(newmem, size));
4055 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4056 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4057
4058 /*
4059 * The early pages we move at the pmap layer can't be "depopulated"
4060 * because there's no vm_page_t for them.
4061 *
4062 * "Lock" them so that they never hit z_pageq_empty.
4063 */
4064 vm_memtag_bzero((void *)newmem, size);
4065 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4066 }
4067
4068 /*!
4069 * @function zone_submap_alloc_sequestered_va
4070 *
4071 * @brief
4072 * Allocates VA without using vm_find_space().
4073 *
4074 * @discussion
4075 * Allocate VA quickly without using the slower vm_find_space() for cases
4076 * when the submaps are fully sequestered.
4077 *
4078 * The VM submap is used to implement the VM itself so it is always sequestered,
4079 * as it can't use kmem_alloc(), which always needs to allocate VM map entries.
4080 * However, it can use vm_map_enter() which tries to coalesce entries, which
4081 * always works, so the VM map only ever needs 2 entries (one for each end).
4082 *
4083 * The RO submap is similarly always sequestered if it exists (as a non
4084 * sequestered RO submap makes very little sense).
4085 *
4086 * The allocator is a very simple bump-allocator
4087 * that allocates from either end.
4088 */
4089 static kern_return_t
4090 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4091 vm_offset_t *addrp)
4092 {
4093 vm_size_t size = ptoa(pages);
4094 vm_map_t map = zone_submap(zsflags);
4095 vm_map_entry_t first, last;
4096 vm_map_offset_t addr;
4097
4098 vm_map_lock(map);
4099
4100 first = vm_map_first_entry(map);
4101 last = vm_map_last_entry(map);
4102
4103 if (first->vme_end + size > last->vme_start) {
4104 vm_map_unlock(map);
4105 return KERN_NO_SPACE;
4106 }
4107
4108 if (zsflags.z_submap_from_end) {
4109 last->vme_start -= size;
4110 addr = last->vme_start;
4111 VME_OFFSET_SET(last, addr);
4112 } else {
4113 addr = first->vme_end;
4114 first->vme_end += size;
4115 }
4116 map->size += size;
4117
4118 vm_map_unlock(map);
4119
4120 *addrp = addr;
4121 return KERN_SUCCESS;
4122 }
4123
4124 void
4125 zone_fill_initially(zone_t zone, vm_size_t nelems)
4126 {
4127 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4128 kern_return_t kr;
4129 vm_offset_t addr;
4130 uint32_t pages;
4131 zone_security_flags_t zsflags = zone_security_config(zone);
4132
4133 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4134 assert(zone->z_elems_avail == 0);
4135
4136 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4137 pages = zone_alloc_pages_for_nelems(zone, nelems);
4138 if (zone_submap_is_sequestered(zsflags)) {
4139 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4140 if (kr != KERN_SUCCESS) {
4141 panic("zone_submap_alloc_sequestered_va() "
4142 "of %u pages failed", pages);
4143 }
4144 kernel_memory_populate(addr, ptoa(pages),
4145 kmaflags, VM_KERN_MEMORY_ZONE);
4146 } else {
4147 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4148 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4149 kmaflags, VM_KERN_MEMORY_ZONE);
4150 }
4151
4152 zone_meta_populate(addr, ptoa(pages));
4153 zcram(zone, addr, pages, 0);
4154 }
4155
4156 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4157 __attribute__((noinline))
4158 static void
4159 zone_scramble_va_and_unlock(
4160 zone_t z,
4161 struct zone_page_metadata *meta,
4162 uint32_t runs,
4163 uint32_t pages,
4164 uint32_t chunk_pages,
4165 uint64_t guard_mask)
4166 {
4167 struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4168
4169 for (uint32_t run = 0, n = 0; run < runs; run++) {
4170 arr[run] = meta + n;
4171 n += chunk_pages + ((guard_mask >> run) & 1);
4172 }
4173
4174 /*
4175 * Fisher–Yates shuffle, for an array with indices [0, n)
4176 *
4177 * for i from n−1 downto 1 do
4178 * j ← random integer such that 0 ≤ j ≤ i
4179 * exchange a[j] and a[i]
4180 *
4181 * The point here is that early allocations aren't at a fixed
4182 * distance from each other.
4183 */
4184 for (uint32_t i = runs - 1; i > 0; i--) {
4185 uint32_t j = zalloc_random_uniform32(0, i + 1);
4186
4187 meta = arr[j];
4188 arr[j] = arr[i];
4189 arr[i] = meta;
4190 }
4191
4192 zone_lock(z);
4193
4194 for (uint32_t i = 0; i < runs; i++) {
4195 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4196 }
4197 z->z_va_cur += z->z_percpu ? runs : pages;
4198 }
4199
4200 static inline uint32_t
4201 dist_u32(uint32_t a, uint32_t b)
4202 {
4203 return a < b ? b - a : a - b;
4204 }
4205
4206 static uint64_t
4207 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4208 {
4209 for (; n-- > 0; pop--) {
4210 uint32_t bit = zalloc_random_uniform32(0, pop);
4211 uint64_t m = mask;
4212
4213 for (; bit; bit--) {
4214 m &= m - 1;
4215 }
4216
4217 mask ^= 1ull << __builtin_ctzll(m);
4218 }
4219
4220 return mask;
4221 }
4222
4223 /**
4224 * @function zalloc_random_bits
4225 *
4226 * @brief
4227 * Compute a random number with a specified number of bit set in a given width.
4228 *
4229 * @discussion
4230 * This function generates a "uniform" distribution of sets of bits set in
4231 * a given width, with typically less than width/4 calls to random.
4232 *
4233 * @param pop the target number of bits set.
4234 * @param width the number of bits in the random integer to generate.
4235 */
4236 static uint64_t
4237 zalloc_random_bits(uint32_t pop, uint32_t width)
4238 {
4239 uint64_t w_mask = (1ull << width) - 1;
4240 uint64_t mask;
4241 uint32_t cur;
4242
4243 if (3 * width / 4 <= pop) {
4244 mask = w_mask;
4245 cur = width;
4246 } else if (pop <= width / 4) {
4247 mask = 0;
4248 cur = 0;
4249 } else {
4250 /*
4251 * Choosing a random number this way will overwhelmingly
4252 * contain about `width / 2` set bits, +/- a few.
4253 */
4254 mask = zalloc_random_mask64(width);
4255 cur = __builtin_popcountll(mask);
4256
4257 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4258 /*
4259 * If the opposite mask has a closer popcount,
4260 * then start with that one as the seed.
4261 */
4262 cur = width - cur;
4263 mask ^= w_mask;
4264 }
4265 }
4266
4267 if (cur < pop) {
4268 /*
4269 * Setting `pop - cur` bits is really clearing that many from
4270 * the opposite mask.
4271 */
4272 mask ^= w_mask;
4273 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4274 mask ^= w_mask;
4275 } else if (pop < cur) {
4276 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4277 }
4278
4279 return mask;
4280 }
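/*
 * Worked example (illustrative): zalloc_random_bits(2, 6) returns a 6-bit
 * value with exactly 2 bits set. Since 2 > 6/4 and 2 < 3*6/4 (integer math),
 * the middle path runs: a random 6-bit mask is drawn (popcount typically ~3),
 * possibly complemented to get closer to 2 set bits, then bits are cleared
 * one by one via zalloc_random_clear_n_bits() until exactly 2 remain,
 * e.g. 0b010010.
 */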
4281 #endif
4282
4283 static void
4284 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4285 {
4286 zone_security_flags_t zsflags = zone_security_config(z);
4287 struct zone_page_metadata *meta;
4288 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4289 uint32_t chunk_pages = z->z_chunk_pages;
4290 uint32_t runs, pages, guards, rnum;
4291 uint64_t guard_mask = 0;
4292 bool lead_guard = false;
4293 kern_return_t kr;
4294 vm_offset_t addr;
4295
4296 zone_unlock(z);
4297
4298 /*
4299 * A lot of OOB exploitation techniques rely on precise placement
4300 * and interleaving of zone pages. The layout that is sought
4301 * by attackers will be C/P/T types, where:
4302 * - (C)ompromised is the type for which attackers have a bug,
4303 * - (P)adding is used to pad memory,
4304 * - (T)arget is the type that the attacker will attempt to corrupt
4305 * by exploiting (C).
4306 *
4307 * Note that in some cases C==T and P isn't needed.
4308 *
4309 * In order to make those placement games much harder,
4310 * we grow zones by random runs of memory, up to 256k.
4311 * This makes predicting the precise layout of the heap
4312 * considerably more complicated.
4313 *
4314 * Note: this function makes very heavy use of random,
4315 * however, it is mostly limited to sequestered zones,
4316 * and eventually the layout will be fixed,
4317 * and the usage of random vastly reduced.
4318 *
4319 * For non sequestered zones, there's a single call
4320 * to random in order to decide whether we want
4321 * a guard page or not.
4322 */
4323 pages = chunk_pages;
4324 guards = 0;
4325 runs = 1;
4326 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4327 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4328 pages = atop(ZONE_CHUNK_ALLOC_SIZE);
4329 runs = (pages + chunk_pages - 1) / chunk_pages;
4330 runs = zalloc_random_uniform32(1, runs + 1);
4331 pages = runs * chunk_pages;
4332 }
4333 static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
4334 "make sure that `runs` will never be larger than 64");
4335 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4336
4337 /*
4338 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4339 * guards might be added after each chunk.
4340 *
4341 * Those guard pages are marked with the ZM_PGZ_GUARD
4342 * magical chunk len, and their zm_oob_offs field
4343 * is used to remember the optional shift applied
4344 * to returned elements, in order to right-align them
4345 * as much as possible.
4346 *
4347 * In an adversarial context, while guard pages
4348 * are extremely effective against linear overflow,
4349 * using a predictable density of guard pages feels like
4350 * a missed opportunity, which is why we chose to insert
4351 * one guard page for about every 32k of memory, and place it
4352 * randomly.
4353 */
4354 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4355 if (z->z_percpu) {
4356 /*
4357 * For per-cpu runs, have a 75% chance to have a guard.
4358 */
4359 rnum = zalloc_random_uniform32(0, 4 * 128);
4360 guards = rnum >= 128;
4361 } else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
4362 vm_offset_t rest;
4363
4364 /*
4365 * For types that are less susceptible to OOBs,
4366 * have a density of 1 guard every 64k, with a uniform
4367 * distribution.
4368 */
4369 rnum = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
4370 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
4371 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
4372 guards += rnum < rest;
4373 } else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
4374 /*
4375 * For chunks >= 32k, have a 75% chance of guard pages
4376 * between chunks.
4377 */
4378 rnum = zalloc_random_uniform32(65, 129);
4379 guards = runs * rnum / 128;
4380 } else {
4381 vm_offset_t rest;
4382
4383 /*
4384 * Otherwise, aim at 1 guard every 32k,
4385 * with a uniform distribution.
4386 */
4387 rnum = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
4388 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
4389 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
4390 guards += rnum < rest;
4391 }
4392 assert3u(guards, <=, runs);
4393
4394 guard_mask = 0;
4395
4396 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4397 uint32_t g = 0;
4398
4399 /*
4400 * Several exploitation strategies rely on a C/T (compromised
4401 * then target types) ordering of pages with a sub-page reach
4402 * from C into T.
4403 *
4404 * We want to reliably thwart such exploitations
4405 * and hence force a guard page between alternating
4406 * memory types.
4407 */
4408 guard_mask |= 1ull << (runs - 1);
4409 g++;
4410
4411 /*
4412 * While we randomize the chunks lengths, an attacker with
4413 * precise timing control can guess when overflows happen,
4414 * and "measure" the runs, which gives them an indication
4415 * of where the next run start offset is.
4416 *
4417 * In order to make this knowledge unusable, add a guard page
4418 * _before_ the new run with a 25% probability, regardless
4419 * of whether we had enough guard pages.
4420 */
4421 if ((rnum & 3) == 0) {
4422 lead_guard = true;
4423 g++;
4424 }
4425 if (guards > g) {
4426 guard_mask |= zalloc_random_bits(guards - g, runs - 1);
4427 } else {
4428 guards = g;
4429 }
4430 } else {
4431 assert3u(runs, ==, 1);
4432 assert3u(guards, <=, 1);
4433 guard_mask = guards << (runs - 1);
4434 }
4435 #else
4436 (void)rnum;
4437 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
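/*
 * Worked example (illustrative, assuming ZONE_GUARD_SPARSE is 64K as the
 * "1 guard every 64k" comment above implies): for a run where
 * ptoa(pages) = 160K, guards = 160K / 64K = 2, rest = 32K, and one extra
 * guard is added with probability rest / 64K = 50%, so the expected density
 * stays at one guard page per 64K.
 */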
4438
4439 if (zone_submap_is_sequestered(zsflags)) {
4440 kr = zone_submap_alloc_sequestered_va(zsflags,
4441 pages + guards, &addr);
4442 } else {
4443 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4444 kr = kmem_alloc(zone_submap(zsflags), &addr,
4445 ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
4446 }
4447
4448 if (kr != KERN_SUCCESS) {
4449 uint64_t zone_size = 0;
4450 zone_t zone_largest = zone_find_largest(&zone_size);
4451 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4452 "likely due to memory leak in zone [%s%s] "
4453 "(%u%c, %d elements allocated)",
4454 kr, zone_heap_name(z), zone_name(z),
4455 zone_heap_name(zone_largest), zone_name(zone_largest),
4456 mach_vm_size_pretty(zone_size),
4457 mach_vm_size_unit(zone_size),
4458 zone_count_allocated(zone_largest));
4459 }
4460
4461 meta = zone_meta_from_addr(addr);
4462 zone_meta_populate(addr, ptoa(pages + guards));
4463
4464 /*
4465 * Handle the leading guard page if any
4466 */
4467 if (lead_guard) {
4468 meta[0].zm_index = zone_index(z);
4469 meta[0].zm_chunk_len = ZM_PGZ_GUARD;
4470 meta[0].zm_guarded = true;
4471 meta++;
4472 }
4473
4474 for (uint32_t run = 0, n = 0; run < runs; run++) {
4475 bool guarded = (guard_mask >> run) & 1;
4476
4477 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4478 meta[n].zm_index = zone_index(z);
4479 meta[n].zm_guarded = guarded;
4480 }
4481 if (guarded) {
4482 meta[n].zm_index = zone_index(z);
4483 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4484 n++;
4485 }
4486 }
4487 if (guards) {
4488 os_atomic_add(&zone_guard_pages, guards, relaxed);
4489 }
4490
4491 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4492 if (__improbable(zone_caching_disabled < 0)) {
4493 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4494 chunk_pages, guard_mask);
4495 }
4496 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4497
4498 zone_lock(z);
4499
4500 for (uint32_t run = 0, n = 0; run < runs; run++) {
4501 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4502 n += chunk_pages + ((guard_mask >> run) & 1);
4503 }
4504 z->z_va_cur += z->z_percpu ? runs : pages;
4505 }
4506
4507 static inline void
4508 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4509 {
4510 #if DEBUG || DEVELOPMENT
4511 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4512 size, 0, 0, 0);
4513 #else
4514 (void)size;
4515 #endif
4516 }
4517
4518 static inline void
4519 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4520 {
4521 #if DEBUG || DEVELOPMENT
4522 task_t task = current_task_early();
4523 if (pages && task) {
4524 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4525 }
4526 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4527 pages, 0, 0, 0);
4528 #else
4529 (void)pages;
4530 #endif
4531 }
4532
4533 __attribute__((noinline))
4534 static void
4535 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4536 {
4537 uint64_t wait_start = 0;
4538 long mapped;
4539
4540 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4541
4542 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4543 return;
4544 }
4545
4546 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4547
4548 /*
4549 * If the zone map is really exhausted, wait on the GC thread,
4550 * donating our priority (which is important because the GC
4551 * thread is at a rather low priority).
4552 */
4553 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4554 uint32_t wait_ms = n * (n + 1) / 2;
4555 uint64_t interval;
4556
4557 if (n == 1) {
4558 wait_start = mach_absolute_time();
4559 } else {
4560 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4561 }
4562 if (zone_exhausted_timeout > 0 &&
4563 wait_ms > zone_exhausted_timeout) {
4564 panic("zone map exhaustion: waited for %dms "
4565 "(pages: %ld, max: %ld, wanted: %d)",
4566 wait_ms, mapped, zone_pages_wired_max, pgs);
4567 }
4568
4569 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4570 &interval);
4571
4572 lck_spin_lock(&zone_exhausted_lock);
4573 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4574 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4575 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4576
4577 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4578 }
4579 }
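/*
 * Note on the backoff above: wait_ms = n * (n + 1) / 2 grows quadratically,
 * so successive sleeps are 1ms, 3ms, 6ms, 10ms, ... until either the zone
 * map has room for `pgs` more pages, or wait_ms exceeds
 * zone_exhausted_timeout and the thread panics.
 */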
4580
4581 static bool
4582 zone_expand_wait_for_pages(bool waited)
4583 {
4584 if (waited) {
4585 return false;
4586 }
4587 #if DEBUG || DEVELOPMENT
4588 if (zalloc_simulate_vm_pressure) {
4589 return false;
4590 }
4591 #endif /* DEBUG || DEVELOPMENT */
4592 return !vm_pool_low();
4593 }
4594
4595 static inline void
4596 zone_expand_async_schedule_if_allowed(zone_t zone)
4597 {
4598 if (zone->z_async_refilling || zone->no_callout) {
4599 return;
4600 }
4601
4602 if (zone_exhausted(zone)) {
4603 return;
4604 }
4605
4606 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4607 return;
4608 }
4609
4610 if (!vm_pool_low() || zone_supports_vm(zone)) {
4611 zone->z_async_refilling = true;
4612 thread_call_enter(&zone_expand_callout);
4613 }
4614 }
4615
4616 __attribute__((noinline))
4617 static bool
4618 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4619 {
4620 struct zone_depot zd;
4621 zone_magazine_t mag = NULL;
4622
4623 if (z->z_depot_size) {
4624 z->z_depot_size = 0;
4625 z->z_depot_cleanup = true;
4626
4627 zone_depot_init(&zd);
4628 zone_depot_trim(z, 0, &zd);
4629
4630 zone_recirc_lock_nopreempt(z);
4631 if (zd.zd_full) {
4632 zone_depot_move_full(&z->z_recirc,
4633 &zd, zd.zd_full, NULL);
4634 }
4635 if (zd.zd_empty) {
4636 zone_depot_move_empty(&z->z_recirc,
4637 &zd, zd.zd_empty, NULL);
4638 }
4639 zone_recirc_unlock_nopreempt(z);
4640 }
4641
4642 zone_recirc_lock_nopreempt(z);
4643 if (z->z_recirc.zd_full) {
4644 mag = zone_depot_pop_head_full(&z->z_recirc, z);
4645 }
4646 zone_recirc_unlock_nopreempt(z);
4647
4648 if (mag) {
4649 zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4650 zone_magazine_free(mag);
4651 }
4652
4653 return mag != NULL;
4654 }
4655
4656 static bool
4657 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4658 {
4659 if (zone->z_elems_free > zone->z_elems_rsv) {
4660 return false;
4661 }
4662 if (!zone_exhausted(zone)) {
4663 return true;
4664 }
4665 if (zone->z_pcpu_cache && zone->z_depot_size) {
4666 if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4667 return false;
4668 }
4669 }
4670 return (flags & Z_NOFAIL) != 0;
4671 }
4672
4673 static void
4674 zone_wakeup_exhausted_waiters(zone_t z)
4675 {
4676 z->z_exhausted_wait = false;
4677 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, false);
4678 thread_wakeup(&z->z_expander);
4679 }
4680
4681 __attribute__((noinline))
4682 static void
4683 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4684 {
4685 if (z->z_pcpu_cache && z->z_depot_size &&
4686 zalloc_expand_drain_exhausted_caches_locked(z)) {
4687 return;
4688 }
4689
4690 if (!z->z_exhausted_wait) {
4691 zone_recirc_lock_nopreempt(z);
4692 z->z_exhausted_wait = true;
4693 zone_recirc_unlock_nopreempt(z);
4694 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, true);
4695 }
4696
4697 assert_wait(&z->z_expander, TH_UNINT);
4698 zone_unlock(z);
4699 thread_block(THREAD_CONTINUE_NULL);
4700 zone_lock(z);
4701 }
4702
4703 static pmap_mapping_type_t
4704 zone_mapping_type(zone_t z)
4705 {
4706 zone_security_flags_t zsflags = zone_security_config(z);
4707
4708 /*
4709 * If the zone's z_submap_idx is neither Z_SUBMAP_IDX_DATA nor
4710 * Z_SUBMAP_IDX_READ_ONLY, mark the corresponding mapping
4711 * type as PMAP_MAPPING_TYPE_RESTRICTED.
4712 */
4713 switch (zsflags.z_submap_idx) {
4714 case Z_SUBMAP_IDX_DATA:
4715 return PMAP_MAPPING_TYPE_DEFAULT;
4716 case Z_SUBMAP_IDX_READ_ONLY:
4717 return PMAP_MAPPING_TYPE_ROZONE;
4718 default:
4719 return PMAP_MAPPING_TYPE_RESTRICTED;
4720 }
4721 }
4722
4723 static vm_prot_t
4724 zone_page_prot(zone_security_flags_t zsflags)
4725 {
4726 switch (zsflags.z_submap_idx) {
4727 case Z_SUBMAP_IDX_READ_ONLY:
4728 return VM_PROT_READ;
4729 default:
4730 return VM_PROT_READ | VM_PROT_WRITE;
4731 }
4732 }
4733
4734 static void
4735 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4736 {
4737 zone_security_flags_t zsflags = zone_security_config(z);
4738 struct zone_expand ze = {
4739 .ze_thread = current_thread(),
4740 };
4741
4742 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4743 ze.ze_thread->options |= TH_OPT_VMPRIV;
4744 ze.ze_clear_priv = true;
4745 }
4746
4747 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4748 /*
4749 * When the thread is VM privileged,
4750 * vm_page_grab() will call VM_PAGE_WAIT()
4751 * without our knowledge, so we unfortunately must
4752 * assume it will be called.
4753 *
4754 * In practice it's not a big deal because
4755 * Z_NOPAGEWAIT is not really used on zones
4756 * that VM privileged threads are going to expand.
4757 */
4758 ze.ze_pg_wait = true;
4759 ze.ze_vm_priv = true;
4760 }
4761
4762 for (;;) {
4763 if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4764 goto out;
4765 }
4766
4767 if (z->z_expander == NULL) {
4768 z->z_expander = &ze;
4769 break;
4770 }
4771
4772 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4773 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4774 ze.ze_next = z->z_expander;
4775 z->z_expander = &ze;
4776 break;
4777 }
4778
4779 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4780 goto out;
4781 }
4782
4783 z->z_expanding_wait = true;
4784 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4785 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4786 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4787 }
4788
4789 do {
4790 struct zone_page_metadata *meta = NULL;
4791 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4792 vm_page_t page_list = NULL;
4793 vm_offset_t addr = 0;
4794 int waited = 0;
4795
4796 if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4797 __ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4798 continue; /* reevaluate if we really need it */
4799 }
4800
4801 /*
4802 * While we hold the zone lock, look if there's VA we can:
4803 * - complete from partial pages,
4804 * - reuse from the sequester list.
4805 *
4806 * When the page is being populated we pretend we allocated
4807 * an extra element so that zone_gc() can't attempt to free
4808 * the chunk (as it could become empty while we wait for pages).
4809 */
4810 if (zone_pva_is_null(z->z_pageq_va)) {
4811 zone_allocate_va_locked(z, flags);
4812 }
4813
4814 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4815 addr = zone_meta_to_addr(meta);
4816 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4817 cur_pages = meta->zm_page_index;
4818 meta -= cur_pages;
4819 addr -= ptoa(cur_pages);
4820 zone_meta_lock_in_partial(z, meta, cur_pages);
4821 }
4822 zone_unlock(z);
4823
4824 /*
4825 * And now allocate pages to populate our VA.
4826 */
4827 min_pages = z->z_chunk_pages;
4828 #if !KASAN_CLASSIC
4829 if (!z->z_percpu) {
4830 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4831 zone_elem_outer_size(z)));
4832 }
4833 #endif /* !KASAN_CLASSIC */
4834
4835 /*
4836 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4837 * if we're running out of zone memory
4838 */
4839 if (__improbable(zone_map_nearing_exhaustion())) {
4840 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4841 }
4842
4843 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4844
4845 while (pages < z->z_chunk_pages - cur_pages) {
4846 vm_page_t m = vm_page_grab();
4847
4848 if (m) {
4849 pages++;
4850 m->vmp_snext = page_list;
4851 page_list = m;
4852 vm_page_zero_fill(m);
4853 continue;
4854 }
4855
4856 if (pages >= min_pages &&
4857 !zone_expand_wait_for_pages(waited)) {
4858 break;
4859 }
4860
4861 if ((flags & Z_NOPAGEWAIT) == 0) {
4862 /*
4863 * The first time we're about to wait for pages,
4864 * mention that to waiters and wake them all.
4865 *
4866 * Set `ze_pg_wait` in our zone_expand context
4867 * so that waiters who care do not wait again.
4868 */
4869 if (!ze.ze_pg_wait) {
4870 zone_lock(z);
4871 if (z->z_expanding_wait) {
4872 z->z_expanding_wait = false;
4873 wakeup_all_with_inheritor(&z->z_expander,
4874 THREAD_AWAKENED);
4875 }
4876 ze.ze_pg_wait = true;
4877 zone_unlock(z);
4878 }
4879
4880 waited++;
4881 VM_PAGE_WAIT();
4882 continue;
4883 }
4884
4885 /*
4886 * Undo everything and bail out:
4887 *
4888 * - free pages
4889 * - undo the fake allocation if any
4890 * - put the VA back on the VA page queue.
4891 */
4892 vm_page_free_list(page_list, FALSE);
4893 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4894
4895 zone_lock(z);
4896
4897 zone_expand_async_schedule_if_allowed(z);
4898
4899 if (cur_pages) {
4900 zone_meta_unlock_from_partial(z, meta, cur_pages);
4901 }
4902 if (meta) {
4903 zone_meta_queue_push(z, &z->z_pageq_va,
4904 meta + cur_pages);
4905 }
4906 goto page_shortage;
4907 }
4908
4909 vm_object_t object;
4910 object = kernel_object_default;
4911 vm_object_lock(object);
4912
4913 kernel_memory_populate_object_and_unlock(object,
4914 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4915 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4916 zone_page_prot(zsflags), zone_mapping_type(z));
4917
4918 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4919
4920 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4921
4922 /*
4923 * permanent zones only try once,
4924 * the retry loop is in the caller
4925 */
4926 } while (!z->z_permanent && zalloc_needs_refill(z, flags));
4927
4928 page_shortage:
4929 if (z->z_expander == &ze) {
4930 z->z_expander = ze.ze_next;
4931 } else {
4932 assert(z->z_expander->ze_next == &ze);
4933 z->z_expander->ze_next = NULL;
4934 }
4935 if (z->z_expanding_wait) {
4936 z->z_expanding_wait = false;
4937 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4938 }
4939 out:
4940 if (ze.ze_clear_priv) {
4941 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4942 }
4943 }
4944
4945 static void
4946 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4947 {
4948 zone_foreach(z) {
4949 if (z->no_callout) {
4950 /* z_async_refilling will never be set */
4951 continue;
4952 }
4953
4954 if (!z->z_async_refilling) {
4955 /*
4956 * avoid locking all zones, because the one(s)
4957 * we're looking for have been set _before_
4958 * thread_call_enter() was called; if we fail
4959 * to observe the bit, it means the thread-call
4960 * has been "dinged" again and we'll notice it then.
4961 */
4962 continue;
4963 }
4964
4965 zone_lock(z);
4966 if (z->z_self && z->z_async_refilling) {
4967 zone_expand_locked(z, Z_WAITOK);
4968 /*
4969 * clearing _after_ we grow is important,
4970 * so that we avoid waking up the thread call
4971 * while we grow and cause it to run a second time.
4972 */
4973 z->z_async_refilling = false;
4974 }
4975 zone_unlock(z);
4976 }
4977 }
4978
4979 #endif /* !ZALLOC_TEST */
4980 #pragma mark zone jetsam integration
4981 #if !ZALLOC_TEST
4982
4983 /*
4984 * We're being very conservative here and picking a value of 95%. We might need to lower this if
4985 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4986 */
4987 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4988
4989 /*
4990 * Threshold above which largest zones should be included in the panic log
4991 */
4992 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4993
4994 /*
4995 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4996 * where X=zone_map_jetsam_limit.
4997 *
4998 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4999 */
5000 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
5001 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
5002
5003 kern_return_t
5004 zone_map_jetsam_set_limit(uint32_t value)
5005 {
5006 if (value <= 0 || value > 100) {
5007 return KERN_INVALID_VALUE;
5008 }
5009
5010 zone_map_jetsam_limit = value;
5011 os_atomic_store(&zone_pages_jetsam_threshold,
5012 zone_pages_wired_max * value / 100, relaxed);
5013 return KERN_SUCCESS;
5014 }
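/*
 * Example (illustrative numbers): with zone_pages_wired_max = 500,000 pages,
 * zone_map_jetsam_set_limit(90) moves zone_pages_jetsam_threshold to
 * 450,000 pages, so zone_map_nearing_exhaustion() starts reporting true once
 * 90% of the zone map is wired.
 */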
5015
5016 void
5017 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
5018 {
5019 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5020 *current_size = ptoa_64(phys_pages);
5021 *capacity = ptoa_64(zone_pages_wired_max);
5022 }
5023
5024 void
5025 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
5026 {
5027 zone_t largest_zone = zone_find_largest(zone_size);
5028
5029 /*
5030 * Append kalloc heap name to zone name (if zone is used by kalloc)
5031 */
5032 snprintf(zone_name, zone_name_len, "%s%s",
5033 zone_heap_name(largest_zone), largest_zone->z_name);
5034 }
5035
5036 static bool
5037 zone_map_nearing_threshold(unsigned int threshold)
5038 {
5039 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5040 return phys_pages * 100 > zone_pages_wired_max * threshold;
5041 }
5042
5043 bool
5044 zone_map_nearing_exhaustion(void)
5045 {
5046 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5047
5048 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5049 }
5050
5051
5052 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
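/*
 * E.g. (illustrative): with 1,000,000 allocated vm_object elements, the VM
 * map entries zone is considered "comparable" once it holds at least
 * 1,000,000 * 98 / 100 = 980,000 elements.
 */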
5053
5054 /*
5055 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5056 * to walk through the jetsam priority bands and kill processes.
5057 */
5058 static zone_t
5059 kill_process_in_largest_zone(void)
5060 {
5061 pid_t pid = -1;
5062 uint64_t zone_size = 0;
5063 zone_t largest_zone = zone_find_largest(&zone_size);
5064
5065 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5066 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5067 ptoa_64(zone_pages_wired_max),
5068 (uint64_t)zone_submaps_approx_size(),
5069 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5070 zone_map_jetsam_limit);
5071 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5072 largest_zone->z_name, (uintptr_t)zone_size);
5073
5074 /*
5075 * We want to make sure we don't call this function from userspace,
5076 * or we could end up trying to synchronously kill the process
5077 * whose context we're in, causing the system to hang.
5078 */
5079 assert(current_task() == kernel_task);
5080
5081 /*
5082 * If vm_object_zone is the largest, check to see if the number of
5083 * elements in vm_map_entry_zone is comparable.
5084 *
5085 * If so, consider vm_map_entry_zone as the largest. This lets us target
5086 * a specific process to jetsam to quickly recover from the zone map
5087 * bloat.
5088 */
5089 if (largest_zone == vm_object_zone) {
5090 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5091 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5092 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5093 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5094 largest_zone = vm_map_entry_zone;
5095 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5096 (uintptr_t)zone_size_wired(largest_zone));
5097 }
5098 }
5099
5100 /* TODO: Extend this to check for the largest process in other zones as well. */
5101 if (largest_zone == vm_map_entry_zone) {
5102 pid = find_largest_process_vm_map_entries();
5103 } else {
5104 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5105 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5106 largest_zone->z_name);
5107 }
5108 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5109 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5110 }
5111
5112 return largest_zone;
5113 }
5114
5115 #endif /* !ZALLOC_TEST */
5116 #pragma mark probabilistic gzalloc
5117 #if !ZALLOC_TEST
5118 #if CONFIG_PROB_GZALLOC
5119
5120 extern uint32_t random(void);
5121 struct pgz_backtrace {
5122 uint32_t pgz_depth;
5123 int32_t pgz_bt[MAX_ZTRACE_DEPTH];
5124 };
5125
5126 static int32_t PERCPU_DATA(pgz_sample_counter);
5127 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5128 static uint32_t pgz_uses; /* number of zones using PGZ */
5129 static int32_t pgz_slot_avail;
5130 #if OS_ATOMIC_HAS_LLSC
5131 struct zone_page_metadata *pgz_slot_head;
5132 #else
5133 static struct pgz_slot_head {
5134 uint32_t psh_count;
5135 uint32_t psh_slot;
5136 } pgz_slot_head;
5137 #endif
5138 struct zone_page_metadata *pgz_slot_tail;
5139 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5140
5141 static struct zone_page_metadata *
5142 pgz_meta(uint32_t index)
5143 {
5144 return &zone_info.zi_pgz_meta[2 * index + 1];
5145 }
5146
5147 static struct pgz_backtrace *
5148 pgz_bt(uint32_t slot, bool free)
5149 {
5150 return &pgz_backtraces[2 * slot + free];
5151 }
5152
5153 static void
5154 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5155 {
5156 struct backtrace_control ctl = {
5157 .btc_frame_addr = (uintptr_t)fp,
5158 };
5159
5160 bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5161 (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5162 }
5163
5164 static uint32_t
5165 pgz_slot(vm_offset_t addr)
5166 {
5167 return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5168 }
5169
5170 static vm_offset_t
5171 pgz_addr(uint32_t slot)
5172 {
5173 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5174 }
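/*
 * Layout note (illustrative): the PGZ range alternates guard and data pages,
 * so slot s maps to the page at zi_pgz_range.min_address + ptoa(2 * s + 1)
 * and pgz_slot() inverts this by dividing the offset by two pages:
 *
 *	pgz_addr(0) == min_address + PAGE_SIZE       // page 0 is a guard
 *	pgz_addr(1) == min_address + 3 * PAGE_SIZE
 *	pgz_slot(min_address + 3 * PAGE_SIZE + 0x40) == 1
 */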
5175
5176 static bool
5177 pgz_sample(vm_offset_t addr, vm_size_t esize)
5178 {
5179 int32_t *counterp, cnt;
5180
5181 if (zone_addr_size_crosses_page(addr, esize)) {
5182 return false;
5183 }
5184
5185 /*
5186 * Note: accessing pgz_sample_counter is racy but this is
5187 * kind of acceptable given that this is not
5188 * a security load bearing feature.
5189 */
5190
5191 counterp = PERCPU_GET(pgz_sample_counter);
5192 cnt = *counterp;
5193 if (__probable(cnt > 0)) {
5194 *counterp = cnt - 1;
5195 return false;
5196 }
5197
5198 if (pgz_slot_avail <= 0) {
5199 return false;
5200 }
5201
5202 /*
5203 * zalloc_random_uniform() might block, so when preemption is disabled,
5204 * set the counter to `-1` which will cause the next allocation
5205 * that can block to generate a new random value.
5206 *
5207 * No allocation on this CPU will sample until then.
5208 */
5209 if (get_preemption_level()) {
5210 *counterp = -1;
5211 } else {
5212 *counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5213 }
5214
5215 return cnt == 0;
5216 }
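/*
 * Sampling note (illustrative): the per-CPU counter is reset to a uniform
 * value in [0, 2 * pgz_sample_rate), so on average pgz_sample_rate
 * allocations elapse between two protected allocations on a given CPU,
 * without the periodicity that a fixed reload value would have.
 */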
5217
5218 static inline bool
5219 pgz_slot_alloc(uint32_t *slot)
5220 {
5221 struct zone_page_metadata *m;
5222 uint32_t tries = 100;
5223
5224 disable_preemption();
5225
5226 #if OS_ATOMIC_USE_LLSC
5227 int32_t ov, nv;
5228 os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5229 if (__improbable(ov <= 0)) {
5230 os_atomic_rmw_loop_give_up({
5231 enable_preemption();
5232 return false;
5233 });
5234 }
5235 nv = ov - 1;
5236 });
5237 #else
5238 if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5239 os_atomic_inc(&pgz_slot_avail, relaxed);
5240 enable_preemption();
5241 return false;
5242 }
5243 #endif
5244
5245 again:
5246 if (__improbable(tries-- == 0)) {
5247 /*
5248 * Too much contention,
5249 * extremely unlikely but do not stay stuck.
5250 */
5251 os_atomic_inc(&pgz_slot_avail, relaxed);
5252 enable_preemption();
5253 return false;
5254 }
5255
5256 #if OS_ATOMIC_HAS_LLSC
5257 do {
5258 m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5259 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5260 /*
5261 * Either we are waiting for an enqueuer (unlikely)
5262 * or we are competing with another core and
5263 * are looking at a popped element.
5264 */
5265 os_atomic_clear_exclusive();
5266 goto again;
5267 }
5268 } while (!os_atomic_store_exclusive(&pgz_slot_head,
5269 m->zm_pgz_slot_next, relaxed));
5270 #else
5271 struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5272 struct pgz_slot_head ov, nv;
5273 os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5274 m = &base[ov.psh_slot * 2];
5275 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5276 /*
5277 * Either we are waiting for an enqueuer (unlikely)
5278 * or we are competing with another core and
5279 * are looking at a popped element.
5280 */
5281 os_atomic_rmw_loop_give_up(goto again);
5282 }
5283 nv.psh_count = ov.psh_count + 1;
5284 nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5285 });
5286 #endif
5287
5288 enable_preemption();
5289
5290 m->zm_pgz_slot_next = NULL;
5291 *slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5292 return true;
5293 }
5294
5295 static inline bool
5296 pgz_slot_free(uint32_t slot)
5297 {
5298 struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5299 struct zone_page_metadata *t;
5300
5301 disable_preemption();
5302 t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5303 os_atomic_store(&t->zm_pgz_slot_next, m, release);
5304 os_atomic_inc(&pgz_slot_avail, relaxed);
5305 enable_preemption();
5306
5307 return true;
5308 }
5309
5310 /*!
5311 * @function pgz_protect()
5312 *
5313 * @brief
5314 * Try to protect an allocation with PGZ.
5315 *
5316 * @param zone The zone the allocation was made against.
5317 * @param addr An allocated element address to protect.
5318 * @param fp The caller frame pointer (for the backtrace).
5319 * @returns The new address for the element, or @c addr.
5320 */
5321 __attribute__((noinline))
5322 static vm_offset_t
5323 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5324 {
5325 kern_return_t kr;
5326 uint32_t slot;
5327
5328 if (!pgz_slot_alloc(&slot)) {
5329 return addr;
5330 }
5331
5332 /*
5333 * Try to double-map the page (may fail if Z_NOWAIT).
5334 * We will always find a PA because pgz_init() pre-expanded the pmap.
5335 */
5336 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5337 vm_offset_t new_addr = pgz_addr(slot);
5338 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5339 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5340 get_preemption_level() ? PMAP_OPTIONS_NOWAIT : 0, NULL,
5341 PMAP_MAPPING_TYPE_INFER);
5342
5343 if (__improbable(kr != KERN_SUCCESS)) {
5344 pgz_slot_free(slot);
5345 return addr;
5346 }
5347
5348 struct zone_page_metadata tmp = {
5349 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5350 .zm_index = zone_index(zone),
5351 };
5352 struct zone_page_metadata *meta = pgz_meta(slot);
5353
5354 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5355 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5356 pgz_backtrace(pgz_bt(slot, false), fp);
5357
5358 return new_addr + (addr & PAGE_MASK);
5359 }
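/*
 * Example (illustrative): if the element lives at offset 0x678 within
 * its page and slot 42 is granted, the caller gets back
 * pgz_addr(42) + 0x678, i.e. the same physical memory seen through the
 * guarded double mapping; the original address is stashed in
 * zm_pgz_orig_addr so pgz_unprotect() can hand it back on free:
 *
 *	vm_offset_t guarded = pgz_addr(slot) + (addr & PAGE_MASK);
 */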
5360
5361 /*!
5362 * @function pgz_unprotect()
5363 *
5364 * @brief
5365 * Releases a PGZ slot and returns the original address of a freed element.
5366 *
5367 * @param addr A PGZ protected element address.
5368 * @param fp The caller frame pointer (for the backtrace).
5369 * @returns The non-protected address for the element
5370 * that was passed to @c pgz_protect().
5371 */
5372 __attribute__((noinline))
5373 static vm_offset_t
5374 pgz_unprotect(vm_offset_t addr, void *fp)
5375 {
5376 struct zone_page_metadata *meta;
5377 struct zone_page_metadata tmp;
5378 uint32_t slot;
5379
5380 slot = pgz_slot(addr);
5381 meta = zone_meta_from_addr(addr);
5382 tmp = *meta;
5383 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5384 goto double_free;
5385 }
5386
5387 pmap_remove(kernel_pmap, vm_memtag_canonicalize_address(trunc_page(addr)),
5388 vm_memtag_canonicalize_address(trunc_page(addr) + PAGE_SIZE));
5389
5390 pgz_backtrace(pgz_bt(slot, true), fp);
5391
5392 tmp.zm_chunk_len = ZM_PGZ_FREE;
5393 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5394 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5395 goto double_free;
5396 }
5397
5398 pgz_slot_free(slot);
5399 return tmp.zm_pgz_orig_addr;
5400
5401 double_free:
5402 panic_fault_address = addr;
5403 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5404 panic("probabilistic gzalloc double free: %p", (void *)addr);
5405 }
5406
5407 bool
5408 pgz_owned(mach_vm_address_t addr)
5409 {
5410 return mach_vm_range_contains(&zone_info.zi_pgz_range, vm_memtag_canonicalize_address(addr));
5411 }
5412
5413
5414 __attribute__((always_inline))
5415 vm_offset_t
5416 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5417 {
5418 struct zone_page_metadata *meta;
5419
5420 if (__probable(!pgz_owned(addr))) {
5421 return (vm_offset_t)addr;
5422 }
5423
5424 if (zone_addr_size_crosses_page(addr, size)) {
5425 panic("invalid size for PGZ protected address %p:%p",
5426 (void *)addr, (void *)(addr + size));
5427 }
5428
5429 meta = zone_meta_from_addr((vm_offset_t)addr);
5430 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5431 panic_fault_address = (vm_offset_t)addr;
5432 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5433 }
5434
5435 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5436 }
5437
5438 __attribute__((always_inline))
5439 vm_offset_t
5440 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5441 {
5442 struct zone_page_metadata *meta;
5443 struct zone_page_metadata tmp;
5444
5445 if (__probable(!pgz_owned(addr))) {
5446 return addr;
5447 }
5448
5449 meta = zone_meta_from_addr(addr);
5450 tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5451
5452 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5453
5454 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5455 return 0;
5456 }
5457
5458 if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5459 return 0;
5460 }
5461
5462 return addr;
5463 }
5464
5465 static void
5466 pgz_zone_init(zone_t z)
5467 {
5468 char zn[MAX_ZONE_NAME];
5469 char zv[MAX_ZONE_NAME];
5470 char key[30];
5471
5472 if (zone_elem_inner_size(z) > PAGE_SIZE) {
5473 return;
5474 }
5475
5476 if (pgz_all) {
5477 os_atomic_inc(&pgz_uses, relaxed);
5478 z->z_pgz_tracked = true;
5479 return;
5480 }
5481
5482 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5483
5484 for (int i = 1;; i++) {
5485 snprintf(key, sizeof(key), "pgz%d", i);
5486 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5487 break;
5488 }
5489 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5490 os_atomic_inc(&pgz_uses, relaxed);
5491 z->z_pgz_tracked = true;
5492 break;
5493 }
5494 }
5495 }
5496
5497 __startup_func
5498 static vm_size_t
5499 pgz_get_size(void)
5500 {
5501 if (pgz_slots == UINT32_MAX) {
5502 /*
5503 * Scale with RAM size: ~256 slots per GiB (sane_size >> 22)
5504 */
5505 pgz_slots = (uint32_t)(sane_size >> 22);
5506 }
5507
5508 /*
5509 * Make sure that the slot allocation scheme works.
5510 * See pgz_slot_alloc() / pgz_slot_free().
5511 */
5512 if (pgz_slots < zpercpu_count() * 4) {
5513 pgz_slots = zpercpu_count() * 4;
5514 }
5515 if (pgz_slots >= UINT16_MAX) {
5516 pgz_slots = UINT16_MAX - 1;
5517 }
5518
5519 /*
5520 * Quarantine is 33% of slots by default, no more than 90%.
5521 */
5522 if (pgz_quarantine == 0) {
5523 pgz_quarantine = pgz_slots / 3;
5524 }
5525 if (pgz_quarantine > pgz_slots * 9 / 10) {
5526 pgz_quarantine = pgz_slots * 9 / 10;
5527 }
5528 pgz_slot_avail = pgz_slots - pgz_quarantine;
5529
5530 return ptoa(2 * pgz_slots + 1);
5531 }
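/*
 * Worked example (illustrative, assuming an 8 GiB device and no
 * overriding boot-args): pgz_slots = 8 GiB >> 22 = 2048,
 * pgz_quarantine = 2048 / 3 = 682, pgz_slot_avail = 2048 - 682 = 1366,
 * and the reserved VA range spans 2 * 2048 + 1 = 4097 pages
 * (each slot page is bracketed by guard pages).
 */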
5532
5533 __startup_func
5534 static void
5535 pgz_init(void)
5536 {
5537 if (!pgz_uses) {
5538 return;
5539 }
5540
5541 if (pgz_sample_rate == 0) {
5542 /*
5543 * If no rate was provided, pick a random one that scales
5544 * with the number of protected zones.
5545 *
5546 * Use a binomial distribution to avoid having too many
5547 * really fast sample rates.
5548 */
5549 uint32_t factor = MIN(pgz_uses, 10);
5550 uint32_t max_rate = 1000 * factor;
5551 uint32_t min_rate = 100 * factor;
5552
5553 pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5554 zalloc_random_uniform32(min_rate, max_rate)) / 2;
5555 }
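/*
 * Example (illustrative): with 10 or more tracked zones, factor = 10,
 * so pgz_sample_rate is the average of two draws from [1000, 10000),
 * which clusters around ~5500 and makes extremely aggressive (and
 * extremely lazy) sampling rates less likely than a single uniform
 * draw would.
 */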
5556
5557 struct mach_vm_range *r = &zone_info.zi_pgz_range;
5558 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5559 zone_meta_populate(r->min_address, mach_vm_range_size(r));
5560
5561 for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5562 zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5563 }
5564
5565 for (size_t i = 1; i < pgz_slots; i++) {
5566 zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5567 &zone_info.zi_pgz_meta[2 * i + 1];
5568 }
5569 #if OS_ATOMIC_HAS_LLSC
5570 pgz_slot_head = &zone_info.zi_pgz_meta[1];
5571 #endif
5572 pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5573
5574 pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5575 2 * pgz_slots, ZALIGN_PTR);
5576
5577 /*
5578 * Expand the pmap so that pmap_enter_options_addr()
5579 * in pgz_protect() never needs to call pmap_expand().
5580 */
5581 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5582 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5583 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5584 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5585 }
5586
5587 /* do this last as this will enable pgz */
5588 percpu_foreach(counter, pgz_sample_counter) {
5589 *counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5590 }
5591 }
5592 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
5593
5594 static void
5595 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5596 {
5597 struct pgz_backtrace *bt = pgz_bt(slot, free);
5598 const char *what = free ? "Free" : "Allocation";
5599 uintptr_t buf[MAX_ZTRACE_DEPTH];
5600
5601 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5602 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5603 return;
5604 }
5605
5606 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5607 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5608
5609 paniclog_append_noflush(" %s Backtrace:\n", what);
5610 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5611 if (has_syms) {
5612 paniclog_append_noflush(" %p ", (void *)buf[i]);
5613 panic_print_symbol_name(buf[i]);
5614 paniclog_append_noflush("\n");
5615 } else {
5616 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5617 }
5618 }
5619 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5620 }
5621
5622 static void
5623 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5624 {
5625 struct zone_page_metadata *meta;
5626 vm_offset_t elem, esize;
5627 const char *type;
5628 const char *prob;
5629 uint32_t slot;
5630 zone_t z;
5631
5632 slot = pgz_slot(addr);
5633 meta = pgz_meta(slot);
5634 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5635
5636 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5637
5638 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5639 meta->zm_index &&
5640 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5641 z = &zone_array[meta->zm_index];
5642 } else {
5643 paniclog_append_noflush(" Zone : <unknown>\n");
5644 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5645 paniclog_append_noflush("\n");
5646 return;
5647 }
5648
5649 esize = zone_elem_inner_size(z);
5650 paniclog_append_noflush(" Zone : %s%s\n",
5651 zone_heap_name(z), zone_name(z));
5652 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5653 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5654 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5655
5656 if (addr < elem) {
5657 type = "out-of-bounds(underflow) + use-after-free";
5658 prob = "low";
5659 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5660 type = "double-free";
5661 prob = "high";
5662 } else if (addr < elem + esize) {
5663 type = "use-after-free";
5664 prob = "high";
5665 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5666 type = "out-of-bounds + use-after-free";
5667 prob = "low";
5668 } else {
5669 type = "out-of-bounds";
5670 prob = "high";
5671 }
5672 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5673 type, prob);
5674 if (addr < elem) {
5675 paniclog_append_noflush(" Access : %d byte(s) before\n",
5676 (uint32_t)(elem - addr) + 1);
5677 } else if (addr < elem + esize) {
5678 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5679 (uint32_t)(addr - elem) + 1);
5680 } else {
5681 paniclog_append_noflush(" Access : %d byte(s) past\n",
5682 (uint32_t)(addr - (elem + esize)) + 1);
5683 }
5684
5685 panic_display_pgz_bt(has_syms, slot, false);
5686 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5687 panic_display_pgz_bt(has_syms, slot, true);
5688 }
5689
5690 paniclog_append_noflush("\n");
5691 }
5692
5693 #endif /* CONFIG_PROB_GZALLOC */
5694 #endif /* !ZALLOC_TEST */
5695 #pragma mark zfree
5696 #if !ZALLOC_TEST
5697
5698 /*!
5699 * @defgroup zfree
5700 * @{
5701 *
5702 * @brief
5703 * The codepath for zone frees.
5704 *
5705 * @discussion
5706 * There are 4 major ways to free memory back to the zone allocator:
5707 * - @c zfree()
5708 * - @c zfree_percpu()
5709 * - @c kfree*()
5710 * - @c zfree_permanent()
5711 *
5712 * While permanent zones have their own allocation scheme, all other codepaths
5713 * will eventually go through the @c zfree_ext() choking point.
5714 */
5715
5716 __header_always_inline void
5717 zfree_drop(zone_t zone, vm_offset_t addr)
5718 {
5719 vm_offset_t esize = zone_elem_outer_size(zone);
5720 struct zone_page_metadata *meta;
5721 vm_offset_t eidx;
5722
5723 meta = zone_element_resolve(zone, addr, &eidx);
5724
5725 if (!zone_meta_mark_free(meta, eidx)) {
5726 zone_meta_double_free_panic(zone, addr, __func__);
5727 }
5728
5729 vm_offset_t old_size = meta->zm_alloc_size;
5730 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5731 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5732
5733 if (new_size == 0) {
5734 /* whether the page was on the intermediate or all_used queue, move it to free */
5735 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5736 zone->z_wired_empty += meta->zm_chunk_len;
5737 } else if (old_size + esize > max_size) {
5738 /* first free element on page, move from all_used */
5739 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5740 }
5741
5742 if (__improbable(zone->z_exhausted_wait)) {
5743 zone_wakeup_exhausted_waiters(zone);
5744 }
5745 }
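/*
 * Illustrative summary of the requeue logic above: when the freed
 * element was the last allocated one on the chunk (new_size == 0), the
 * chunk moves to z_pageq_empty; when it was the first element freed on
 * a fully used chunk (old_size + esize > max_size), the chunk moves
 * from the all-used queue to z_pageq_partial; otherwise the chunk stays
 * where it is.
 */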
5746
5747 __attribute__((noinline))
5748 static void
5749 zfree_item(zone_t zone, vm_offset_t addr)
5750 {
5751 /* transfer preemption count to lock */
5752 zone_lock_nopreempt_check_contention(zone);
5753
5754 zfree_drop(zone, addr);
5755 zone->z_elems_free += 1;
5756
5757 zone_unlock(zone);
5758 }
5759
5760 static void
5761 zfree_cached_depot_recirculate(
5762 zone_t zone,
5763 uint32_t depot_max,
5764 zone_cache_t cache)
5765 {
5766 smr_t smr = zone_cache_smr(cache);
5767 smr_seq_t seq;
5768 uint32_t n;
5769
5770 zone_recirc_lock_nopreempt_check_contention(zone);
5771
5772 n = cache->zc_depot.zd_full;
5773 if (n >= depot_max) {
5774 /*
5775 * If SMR is in use, rotate the entire chunk of magazines.
5776 *
5777 * If the head of the recirculation layer is ready to be
5778 * reused, pull them back to refill a little.
5779 */
5780 seq = zone_depot_move_full(&zone->z_recirc,
5781 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5782
5783 if (smr) {
5784 smr_deferred_advance_commit(smr, seq);
5785 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5786 zone_depot_move_full(&cache->zc_depot,
5787 &zone->z_recirc, depot_max / 2, NULL);
5788 }
5789 }
5790 }
5791
5792 n = depot_max - cache->zc_depot.zd_full;
5793 if (n > zone->z_recirc.zd_empty) {
5794 n = zone->z_recirc.zd_empty;
5795 }
5796 if (n) {
5797 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5798 n, zone);
5799 }
5800
5801 zone_recirc_unlock_nopreempt(zone);
5802 }
5803
5804 static zone_cache_t
5805 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5806 {
5807 zone_magazine_t mag = NULL, tmp = NULL;
5808 smr_t smr = zone_cache_smr(cache);
5809 bool wakeup_exhausted = false;
5810
5811 if (zone->z_recirc.zd_empty == 0) {
5812 mag = zone_magazine_alloc(Z_NOWAIT);
5813 }
5814
5815 zone_recirc_lock_nopreempt_check_contention(zone);
5816
5817 if (mag == NULL && zone->z_recirc.zd_empty) {
5818 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5819 __builtin_assume(mag);
5820 }
5821 if (mag) {
5822 tmp = zone_magazine_replace(cache, mag, true);
5823 if (smr) {
5824 smr_deferred_advance_commit(smr, tmp->zm_seq);
5825 }
5826 if (zone_security_array[zone_index(zone)].z_lifo) {
5827 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5828 } else {
5829 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5830 }
5831
5832 wakeup_exhausted = zone->z_exhausted_wait;
5833 }
5834
5835 zone_recirc_unlock_nopreempt(zone);
5836
5837 if (__improbable(wakeup_exhausted)) {
5838 zone_lock_nopreempt(zone);
5839 if (zone->z_exhausted_wait) {
5840 zone_wakeup_exhausted_waiters(zone);
5841 }
5842 zone_unlock_nopreempt(zone);
5843 }
5844
5845 return mag ? cache : NULL;
5846 }
5847
5848 __attribute__((noinline))
5849 static zone_cache_t
5850 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5851 {
5852 zone_magazine_t mag = NULL, tmp = NULL;
5853 uint32_t depot_max;
5854
5855 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5856 if (depot_max) {
5857 zone_depot_lock_nopreempt(cache);
5858
5859 if (cache->zc_depot.zd_empty == 0) {
5860 zfree_cached_depot_recirculate(zone, depot_max, cache);
5861 }
5862
5863 if (__probable(cache->zc_depot.zd_empty)) {
5864 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5865 __builtin_assume(mag);
5866 } else {
5867 mag = zone_magazine_alloc(Z_NOWAIT);
5868 }
5869 if (mag) {
5870 tmp = zone_magazine_replace(cache, mag, true);
5871 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5872 }
5873
5874 zone_depot_unlock_nopreempt(cache);
5875
5876 return mag ? cache : NULL;
5877 }
5878
5879 return zfree_cached_recirculate(zone, cache);
5880 }
5881
5882 __attribute__((always_inline))
5883 static inline zone_cache_t
5884 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5885 {
5886 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5887
5888 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5889 return cache;
5890 }
5891
5892 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5893 zone_cache_swap_magazines(cache);
5894 return cache;
5895 }
5896
5897 return zfree_cached_trim(zone, cache);
5898 }
5899
5900 __attribute__((always_inline))
5901 static inline zone_cache_t
5902 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5903 {
5904 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5905 size_t idx = cache->zc_free_cur;
5906
5907 if (__probable(idx + 1 < zc_mag_size())) {
5908 return cache;
5909 }
5910
5911 /*
5912 * When SMR is in use, the bucket is tagged early with
5913 * @c smr_deferred_advance(), which costs a full barrier
5914 * but performs no store.
5915 *
5916 * When magazines hit the recirculation layer, the advance is committed,
5917 * under the recirculation lock (see zfree_cached_recirculate()).
5918 *
5919 * When done this way, the zone contention detection mechanism
5920 * will adjust the size of the per-cpu depots gracefully, which
5921 * mechanically reduces the pace of these commits as usage increases.
5922 */
5923
5924 if (__probable(idx + 1 == zc_mag_size())) {
5925 zone_magazine_t mag;
5926
5927 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5928 offsetof(struct zone_magazine, zm_elems));
5929 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5930 return cache;
5931 }
5932
5933 return zfree_cached_trim(zone, cache);
5934 }
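/*
 * Illustrative timeline for the SMR path above: when the per-CPU free
 * magazine is one element away from full, the magazine is stamped with
 * smr_deferred_advance(); the global sequence advance is only published
 * later, when that magazine reaches the recirculation depot in
 * zfree_cached_recirculate(), which amortizes the cost of the barrier.
 */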
5935
5936 __attribute__((always_inline))
5937 static inline vm_offset_t
5938 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5939 {
5940 struct zone_page_metadata *meta;
5941 vm_offset_t offs;
5942
5943 #pragma unused(combined_size)
5944 #if CONFIG_PROB_GZALLOC
5945 if (__improbable(pgz_owned(elem))) {
5946 elem = pgz_unprotect(elem, __builtin_frame_address(0));
5947 }
5948 #endif /* CONFIG_PROB_GZALLOC */
5949
5950 meta = zone_meta_from_addr(elem);
5951 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5952 zone_invalid_element_panic(zone, elem);
5953 }
5954
5955 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5956 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5957 offs += ptoa(meta->zm_page_index);
5958 }
5959
5960 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5961 zone_invalid_element_panic(zone, elem);
5962 }
5963
5964 #if VM_TAG_SIZECLASSES
5965 if (__improbable(zone->z_uses_tags)) {
5966 vm_tag_t *slot;
5967
5968 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5969 Z_FAST_QUO(offs, zone->z_quo_magic));
5970 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5971 -(long)ZFREE_ELEM_SIZE(combined_size));
5972 *slot = VM_KERN_MEMORY_NONE;
5973 }
5974 #endif /* VM_TAG_SIZECLASSES */
5975
5976 #if KASAN_CLASSIC
5977 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5978 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5979 zone->z_percpu, __builtin_frame_address(0));
5980 #endif
5981 #if CONFIG_KERNEL_TAGGING
5982 if (__probable(zone->z_tbi_tag)) {
5983 elem = zone_tag_element(zone, elem, ZFREE_ELEM_SIZE(combined_size));
5984 }
5985 #endif /* CONFIG_KERNEL_TAGGING */
5986
5987 return elem;
5988 }
5989
5990 __attribute__((always_inline))
5991 void *
5992 zcache_mark_invalid(zone_t zone, void *elem)
5993 {
5994 vm_size_t esize = zone_elem_inner_offs(zone);
5995
5996 ZFREE_LOG(zone, (vm_offset_t)elem, 1);
5997 return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
5998 }
5999
6000 /*
6001 * The function is noinline when zlog can be used so that the backtracing can
6002 * reliably skip the zfree_ext() and zfree_log()
6003 * boring frames.
6004 */
6005 #if ZALLOC_ENABLE_LOGGING
6006 __attribute__((noinline))
6007 #endif /* ZALLOC_ENABLE_LOGGING */
6008 void
6009 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
6010 {
6011 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
6012 vm_offset_t elem = (vm_offset_t)addr;
6013 int cpu;
6014
6015 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
6016
6017 ZFREE_LOG(zone, elem, 1);
6018 elem = __zcache_mark_invalid(zone, elem, combined_size);
6019
6020 disable_preemption();
6021 cpu = cpu_number();
6022 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
6023
6024 #if KASAN_CLASSIC
6025 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
6026 struct kasan_quarantine_result kqr;
6027
6028 kqr = kasan_quarantine(elem, esize);
6029 elem = kqr.addr;
6030 zone = kqr.zone;
6031 if (elem == 0) {
6032 return enable_preemption();
6033 }
6034 }
6035 #endif
6036
6037 if (zone->z_pcpu_cache) {
6038 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
6039
6040 if (__probable(cache)) {
6041 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6042 return enable_preemption();
6043 }
6044 }
6045
6046 return zfree_item(zone, elem);
6047 }
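/*
 * Usage sketch (illustrative; the zone and type are hypothetical):
 * every public free wrapper eventually funnels into zfree_ext() above,
 * e.g.:
 *
 *	struct my_obj *o = zalloc_flags(my_obj_zone, Z_WAITOK);
 *	...
 *	zfree(my_obj_zone, o);
 */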
6048
6049 __attribute__((always_inline))
6050 static inline zstack_t
6051 zcache_free_stack_to_cpu(
6052 zone_id_t zid,
6053 zone_cache_t cache,
6054 zstack_t stack,
6055 vm_size_t esize,
6056 zone_cache_ops_t ops,
6057 bool zero)
6058 {
6059 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
6060 vm_offset_t *p;
6061
6062 stack.z_count -= n;
6063 cache->zc_free_cur += n;
6064 p = cache->zc_free_elems + cache->zc_free_cur;
6065
6066 do {
6067 void *o = zstack_pop_no_delta(&stack);
6068
6069 if (ops) {
6070 o = ops->zc_op_mark_invalid(zid, o);
6071 } else {
6072 if (zero) {
6073 bzero(o, esize);
6074 }
6075 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
6076 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
6077 }
6078 *--p = (vm_offset_t)o;
6079 } while (--n > 0);
6080
6081 return stack;
6082 }
6083
6084 __attribute__((always_inline))
6085 static inline void
6086 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6087 {
6088 vm_offset_t elem = (vm_offset_t)addr;
6089 zone_cache_t cache;
6090 vm_size_t esize;
6091 zone_t zone = zone_by_id(zid);
6092 int cpu;
6093
6094 ZFREE_LOG(zone, elem, 1);
6095
6096 disable_preemption();
6097 cpu = cpu_number();
6098 esize = zone_elem_inner_size(zone);
6099 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6100 if (!ops) {
6101 addr = (void *)__zcache_mark_invalid(zone, elem,
6102 ZFREE_PACK_SIZE(esize, esize));
6103 }
6104 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6105 if (__probable(cache)) {
6106 if (ops) {
6107 addr = ops->zc_op_mark_invalid(zid, addr);
6108 }
6109 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6110 enable_preemption();
6111 } else if (ops) {
6112 enable_preemption();
6113 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
6114 ops->zc_op_free(zid, addr);
6115 } else {
6116 zfree_item(zone, elem);
6117 }
6118 }
6119
6120 __attribute__((always_inline))
6121 static inline void
6122 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
6123 {
6124 zone_t zone = zone_by_id(zid);
6125 zone_cache_t cache;
6126 vm_size_t esize;
6127 int cpu;
6128
6129 ZFREE_LOG(zone, stack.z_head, stack.z_count);
6130
6131 disable_preemption();
6132 cpu = cpu_number();
6133 esize = zone_elem_inner_size(zone);
6134 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
6135 stack.z_count * esize;
6136
6137 for (;;) {
6138 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6139 if (__probable(cache)) {
6140 stack = zcache_free_stack_to_cpu(zid, cache,
6141 stack, esize, ops, zero);
6142 enable_preemption();
6143 } else if (ops) {
6144 enable_preemption();
6145 os_atomic_dec(&zone->z_elems_avail, relaxed);
6146 ops->zc_op_free(zid, zstack_pop(&stack));
6147 } else {
6148 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6149
6150 if (zero) {
6151 bzero((void *)addr, esize);
6152 }
6153 addr = __zcache_mark_invalid(zone, addr,
6154 ZFREE_PACK_SIZE(esize, esize));
6155 zfree_item(zone, addr);
6156 }
6157
6158 if (stack.z_count == 0) {
6159 break;
6160 }
6161
6162 disable_preemption();
6163 cpu = cpu_number();
6164 }
6165 }
6166
6167 void
6168 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6169 {
6170 __builtin_assume(ops != NULL);
6171 zcache_free_1_ext(zid, addr, ops);
6172 }
6173
6174 void
6175 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6176 {
6177 __builtin_assume(ops != NULL);
6178 zcache_free_n_ext(zid, stack, ops, false);
6179 }
6180
6181 void
6182 (zfree_n)(zone_id_t zid, zstack_t stack)
6183 {
6184 zcache_free_n_ext(zid, stack, NULL, true);
6185 }
6186
6187 void
6188 (zfree_nozero)(zone_id_t zid, void *addr)
6189 {
6190 zcache_free_1_ext(zid, addr, NULL);
6191 }
6192
6193 void
6194 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6195 {
6196 zcache_free_n_ext(zid, stack, NULL, false);
6197 }
6198
6199 void
6200 (zfree)(zone_t zov, void *addr)
6201 {
6202 zone_t zone = zov->z_self;
6203 zone_stats_t zstats = zov->z_stats;
6204 vm_offset_t esize = zone_elem_inner_size(zone);
6205
6206 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6207 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6208
6209 vm_memtag_bzero(addr, esize);
6210
6211 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6212 }
6213
6214 __attribute__((noinline))
6215 void
6216 zfree_percpu(union zone_or_view zov, void *addr)
6217 {
6218 zone_t zone = zov.zov_view->zv_zone;
6219 zone_stats_t zstats = zov.zov_view->zv_stats;
6220 vm_offset_t esize = zone_elem_inner_size(zone);
6221
6222 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6223 assert(zone->z_percpu);
6224 addr = (void *)__zpcpu_demangle(addr);
6225 zpercpu_foreach_cpu(i) {
6226 vm_memtag_bzero((char *)addr + ptoa(i), esize);
6227 }
6228 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6229 }
6230
6231 void
6232 (zfree_id)(zone_id_t zid, void *addr)
6233 {
6234 (zfree)(&zone_array[zid], addr);
6235 }
6236
6237 void
6238 (zfree_ro)(zone_id_t zid, void *addr)
6239 {
6240 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6241 zone_t zone = zone_by_id(zid);
6242 zone_stats_t zstats = zone->z_stats;
6243 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6244
6245 #if ZSECURITY_CONFIG(READ_ONLY)
6246 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6247 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6248 #else
6249 (void)zid;
6250 bzero(addr, esize);
6251 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6252 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6253 }
6254
6255 __attribute__((noinline))
6256 static void
6257 zfree_item_smr(zone_t zone, vm_offset_t addr)
6258 {
6259 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6260 vm_size_t esize = zone_elem_inner_size(zone);
6261
6262 /*
6263 * This path should be taken extremely rarely:
6264 * it happens when we failed to allocate an empty bucket.
6265 */
6266 smr_synchronize(zone_cache_smr(cache));
6267
6268 cache->zc_free((void *)addr, esize);
6269 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6270
6271 zfree_item(zone, addr);
6272 }
6273
6274 void
6275 (zfree_smr)(zone_t zone, void *addr)
6276 {
6277 vm_offset_t elem = (vm_offset_t)addr;
6278 vm_offset_t esize;
6279 zone_cache_t cache;
6280 int cpu;
6281
6282 ZFREE_LOG(zone, elem, 1);
6283
6284 disable_preemption();
6285 cpu = cpu_number();
6286 #if MACH_ASSERT
6287 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6288 assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
6289 #endif
6290 esize = zone_elem_inner_size(zone);
6291 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6292 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6293 if (__probable(cache)) {
6294 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6295 enable_preemption();
6296 } else {
6297 zfree_item_smr(zone, elem);
6298 }
6299 }
6300
6301 void
6302 (zfree_id_smr)(zone_id_t zid, void *addr)
6303 {
6304 (zfree_smr)(&zone_array[zid], addr);
6305 }
6306
6307 void
6308 kfree_type_impl_internal(
6309 kalloc_type_view_t kt_view,
6310 void *ptr __unsafe_indexable)
6311 {
6312 zone_t zsig = kt_view->kt_zsig;
6313 zone_t z = kt_view->kt_zv.zv_zone;
6314 struct zone_page_metadata *meta = zone_meta_from_addr((vm_offset_t) ptr);
6315 zone_id_t zidx_meta = meta->zm_index;
6316 zone_security_flags_t zsflags_meta = zone_security_array[zidx_meta];
6317 zone_security_flags_t zsflags_z = zone_security_config(z);
6318 zone_security_flags_t zsflags_zsig;
6319
6320 if (NULL == ptr) {
6321 return;
6322 }
6323
6324 if ((zsflags_z.z_kheap_id == KHEAP_ID_DATA_BUFFERS) ||
6325 zone_has_index(z, zidx_meta)) {
6326 return (zfree)(&kt_view->kt_zv, ptr);
6327 }
6328 zsflags_zsig = zone_security_config(zsig);
6329 if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
6330 z = zone_array + zidx_meta;
6331 return (zfree)(z, ptr);
6332 }
6333
6334 return (zfree)(kt_view->kt_zshared, ptr);
6335 }
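/*
 * Usage sketch (illustrative; the type is hypothetical): kfree_type()
 * resolves the kalloc_type view for the type at compile time and lands
 * in kfree_type_impl_internal() above:
 *
 *	struct my_msg *m = kalloc_type(struct my_msg, Z_WAITOK);
 *	...
 *	kfree_type(struct my_msg, m);
 */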
6336
6337 /*! @} */
6338 #endif /* !ZALLOC_TEST */
6339 #pragma mark zalloc
6340 #if !ZALLOC_TEST
6341
6342 /*!
6343 * @defgroup zalloc
6344 * @{
6345 *
6346 * @brief
6347 * The codepath for zone allocations.
6348 *
6349 * @discussion
6350 * There are 4 major ways to allocate memory that end up in the zone allocator:
6351 * - @c zalloc(), @c zalloc_flags(), ...
6352 * - @c zalloc_percpu()
6353 * - @c kalloc*()
6354 * - @c zalloc_permanent()
6355 *
6356 * While permanent zones have their own allocation scheme, all other codepaths
6357 * will eventually go through the @c zalloc_ext() choking point.
6358 *
6359 * @c zalloc_return() is the final function everyone tail calls into,
6360 * which prepares the element for consumption by the caller and deals with
6361 * common treatment (zone logging, tags, kasan, validation, ...).
6362 */
6363
6364 /*!
6365 * @function zalloc_import
6366 *
6367 * @brief
6368 * Import @c n elements into the specified array, the opposite of @c zfree_drop().
6369 *
6370 * @param zone The zone to import elements from
6371 * @param elems The array to import into
6372 * @param n The number of elements to import. Must be non-zero,
6373 * and smaller than @c zone->z_elems_free.
6374 */
6375 __header_always_inline vm_size_t
6376 zalloc_import(
6377 zone_t zone,
6378 vm_offset_t *elems,
6379 zalloc_flags_t flags,
6380 uint32_t n)
6381 {
6382 vm_offset_t esize = zone_elem_outer_size(zone);
6383 vm_offset_t offs = zone_elem_inner_offs(zone);
6384 zone_stats_t zs;
6385 int cpu = cpu_number();
6386 uint32_t i = 0;
6387
6388 zs = zpercpu_get_cpu(zone->z_stats, cpu);
6389
6390 if (__improbable(zone_caching_disabled < 0)) {
6391 /*
6392 * In the first 10s after boot, mess with
6393 * the scan position in order to make early
6394 * allocation patterns less predictable.
6395 */
6396 zone_early_scramble_rr(zone, cpu, zs);
6397 }
6398
6399 do {
6400 vm_offset_t page, eidx, size = 0;
6401 struct zone_page_metadata *meta;
6402
6403 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6404 meta = zone_pva_to_meta(zone->z_pageq_partial);
6405 page = zone_pva_to_addr(zone->z_pageq_partial);
6406 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6407 meta = zone_pva_to_meta(zone->z_pageq_empty);
6408 page = zone_pva_to_addr(zone->z_pageq_empty);
6409 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6410 } else {
6411 zone_accounting_panic(zone, "z_elems_free corruption");
6412 }
6413
6414 zone_meta_validate(zone, meta, page);
6415
6416 vm_offset_t old_size = meta->zm_alloc_size;
6417 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6418
6419 do {
6420 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6421 elems[i++] = page + offs + eidx * esize;
6422 size += esize;
6423 } while (i < n && old_size + size + esize <= max_size);
6424
6425 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6426
6427 if (new_size + esize > max_size) {
6428 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6429 } else if (old_size == 0) {
6430 /* remove from free, move to intermediate */
6431 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6432 }
6433 } while (i < n);
6434
6435 n = zone_counter_sub(zone, z_elems_free, n);
6436 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6437 zone->z_elems_free_min = n;
6438 }
6439
6440 return zone_elem_inner_size(zone);
6441 }
6442
6443 __attribute__((always_inline))
6444 static inline vm_offset_t
6445 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6446 {
6447 #pragma unused(zone, flags)
6448 #if KASAN_CLASSIC || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6449 vm_offset_t esize = zone_elem_inner_size(zone);
6450 #endif
6451
6452 #if CONFIG_KERNEL_TAGGING
6453 if (__probable(zone->z_tbi_tag)) {
6454 /*
6455 * Retrieve the memory tag assigned on free and update the pointer
6456 * metadata.
6457 */
6458 addr = vm_memtag_fixup_ptr(addr);
6459 }
6460 #endif /* CONFIG_KERNEL_TAGGING */
6461
6462 #if VM_TAG_SIZECLASSES
6463 if (__improbable(zone->z_uses_tags)) {
6464 struct zone_page_metadata *meta;
6465 vm_offset_t offs;
6466 vm_tag_t *slot;
6467 vm_tag_t tag;
6468
6469 tag = zalloc_flags_get_tag(flags);
6470 meta = zone_meta_from_addr(addr);
6471 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6472 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6473 offs += ptoa(meta->zm_page_index);
6474 }
6475
6476 slot = zba_extra_ref_ptr(meta->zm_bitmap,
6477 Z_FAST_QUO(offs, zone->z_quo_magic));
6478 *slot = tag;
6479
6480 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6481 (long)esize);
6482 }
6483 #endif /* VM_TAG_SIZECLASSES */
6484
6485 #if CONFIG_PROB_GZALLOC
6486 if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6487 addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6488 }
6489 #endif
6490
6491 #if KASAN_CLASSIC
6492 /*
6493 * KASAN_CLASSIC integration of kalloc heaps is handled by kalloc_ext().
6494 */
6495 if ((flags & Z_SKIP_KASAN) == 0) {
6496 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6497 (flags & Z_PCPU), __builtin_frame_address(0));
6498 }
6499 #endif /* KASAN_CLASSIC */
6500
6501 return addr;
6502 }
6503
6504 __attribute__((always_inline))
6505 void *
6506 zcache_mark_valid(zone_t zone, void *addr)
6507 {
6508 addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
6509 ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
6510 return addr;
6511 }
6512
6513 /*!
6514 * @function zalloc_return
6515 *
6516 * @brief
6517 * Performs the tail-end of the work required on allocations before the caller
6518 * uses them.
6519 *
6520 * @discussion
6521 * This function is called without any zone lock held,
6522 * and with preemption restored to the state it had when @c zalloc_ext() was called.
6523 *
6524 * @param zone The zone we're allocating from.
6525 * @param addr The element we just allocated.
6526 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6527 * @param elem_size The element size for this zone.
6528 */
6529 __attribute__((always_inline))
6530 static struct kalloc_result
6531 zalloc_return(
6532 zone_t zone,
6533 vm_offset_t addr,
6534 zalloc_flags_t flags,
6535 vm_offset_t elem_size)
6536 {
6537 addr = __zcache_mark_valid(zone, addr, flags);
6538 #if ZALLOC_ENABLE_ZERO_CHECK
6539 zalloc_validate_element(zone, addr, elem_size, flags);
6540 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6541 ZALLOC_LOG(zone, addr, 1);
6542
6543 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6544 return (struct kalloc_result){ (void *)addr, elem_size };
6545 }
6546
6547 static vm_size_t
6548 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6549 {
6550 if (esize <= 512) {
6551 return zone_early_thres_mul * page_size / 4;
6552 } else if (esize < 2048) {
6553 return zone_early_thres_mul * esize * 8;
6554 }
6555 return zone_early_thres_mul * zone->z_chunk_elems * esize;
6556 }
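/*
 * Worked example (illustrative, assuming 16 KiB pages and
 * zone_early_thres_mul == 1): a zone of 256-byte elements gets a
 * shared threshold of page_size / 4 = 4 KiB, a zone of 1 KiB elements
 * gets 8 * 1 KiB = 8 KiB, and zones of 2 KiB or larger elements scale
 * with z_chunk_elems * esize.
 */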
6557
6558 __attribute__((noinline))
6559 static struct kalloc_result
6560 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6561 {
6562 vm_offset_t esize, addr;
6563 zone_stats_t zs;
6564
6565 zone_lock_nopreempt_check_contention(zone);
6566
6567 zs = zpercpu_get(zstats);
6568 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6569 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6570 zone_expand_async_schedule_if_allowed(zone);
6571 } else {
6572 zone_expand_locked(zone, flags);
6573 }
6574 if (__improbable(zone->z_elems_free == 0)) {
6575 zs->zs_alloc_fail++;
6576 zone_unlock(zone);
6577 if (__improbable(flags & Z_NOFAIL)) {
6578 zone_nofail_panic(zone);
6579 }
6580 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6581 return (struct kalloc_result){ };
6582 }
6583 }
6584
6585 esize = zalloc_import(zone, &addr, flags, 1);
6586 zs->zs_mem_allocated += esize;
6587
6588 if (__improbable(!zone_share_always &&
6589 !os_atomic_load(&zs->zs_alloc_not_shared, relaxed))) {
6590 if (flags & Z_SET_NOTSHARED) {
6591 vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6592
6593 if (zs->zs_mem_allocated >= shared_threshold) {
6594 zpercpu_foreach(zs_cpu, zstats) {
6595 os_atomic_store(&zs_cpu->zs_alloc_not_shared, 1, relaxed);
6596 }
6597 }
6598 }
6599 }
6600 zone_unlock(zone);
6601
6602 return zalloc_return(zone, addr, flags, esize);
6603 }
6604
6605 static void
6606 zalloc_cached_import(
6607 zone_t zone,
6608 zalloc_flags_t flags,
6609 zone_cache_t cache)
6610 {
6611 uint16_t n_elems = zc_mag_size();
6612
6613 zone_lock_nopreempt(zone);
6614
6615 if (__probable(!zone_caching_disabled &&
6616 zone->z_elems_free > zone->z_elems_rsv / 2)) {
6617 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6618 zone_expand_async_schedule_if_allowed(zone);
6619 }
6620 if (zone->z_elems_free < n_elems) {
6621 n_elems = (uint16_t)zone->z_elems_free;
6622 }
6623 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6624 cache->zc_alloc_cur = n_elems;
6625 }
6626
6627 zone_unlock_nopreempt(zone);
6628 }
6629
6630 static void
6631 zalloc_cached_depot_recirculate(
6632 zone_t zone,
6633 uint32_t depot_max,
6634 zone_cache_t cache,
6635 smr_t smr)
6636 {
6637 smr_seq_t seq;
6638 uint32_t n;
6639
6640 zone_recirc_lock_nopreempt_check_contention(zone);
6641
6642 n = cache->zc_depot.zd_empty;
6643 if (n >= depot_max) {
6644 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6645 n - depot_max / 2, NULL);
6646 }
6647
6648 n = cache->zc_depot.zd_full;
6649 if (smr && n) {
6650 /*
6651 * if SMR is in use, it means smr_poll() failed,
6652 * so rotate the entire chunk of magazines in order
6653 * to let the sequence numbers age.
6654 */
6655 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6656 n, NULL);
6657 smr_deferred_advance_commit(smr, seq);
6658 }
6659
6660 n = depot_max - cache->zc_depot.zd_empty;
6661 if (n > zone->z_recirc.zd_full) {
6662 n = zone->z_recirc.zd_full;
6663 }
6664
6665 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6666 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6667 n, zone);
6668 }
6669
6670 zone_recirc_unlock_nopreempt(zone);
6671 }
6672
6673 static void
6674 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6675 {
6676 zone_smr_free_cb_t zc_free = cache->zc_free;
6677 vm_size_t esize = zone_elem_inner_size(z);
6678
6679 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6680 vm_offset_t elem = mag->zm_elems[i];
6681
6682 zc_free((void *)elem, zone_elem_inner_size(z));
6683 elem = __zcache_mark_invalid(z, elem,
6684 ZFREE_PACK_SIZE(esize, esize));
6685 mag->zm_elems[i] = elem;
6686 }
6687 }
6688
6689 static void
6690 zalloc_cached_recirculate(
6691 zone_t zone,
6692 zone_cache_t cache)
6693 {
6694 zone_magazine_t mag = NULL;
6695
6696 zone_recirc_lock_nopreempt_check_contention(zone);
6697
6698 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6699 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6700 if (zone_cache_smr(cache)) {
6701 zalloc_cached_reuse_smr(zone, cache, mag);
6702 }
6703 mag = zone_magazine_replace(cache, mag, false);
6704 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6705 }
6706
6707 zone_recirc_unlock_nopreempt(zone);
6708 }
6709
6710 __attribute__((noinline))
6711 static zone_cache_t
6712 zalloc_cached_prime(
6713 zone_t zone,
6714 zone_cache_ops_t ops,
6715 zalloc_flags_t flags,
6716 zone_cache_t cache)
6717 {
6718 zone_magazine_t mag = NULL;
6719 uint32_t depot_max;
6720 smr_t smr;
6721
6722 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6723 if (depot_max) {
6724 smr = zone_cache_smr(cache);
6725
6726 zone_depot_lock_nopreempt(cache);
6727
6728 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6729 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6730 smr);
6731 }
6732
6733 if (__probable(cache->zc_depot.zd_full)) {
6734 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6735 if (zone_cache_smr(cache)) {
6736 zalloc_cached_reuse_smr(zone, cache, mag);
6737 }
6738 mag = zone_magazine_replace(cache, mag, false);
6739 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6740 }
6741
6742 zone_depot_unlock_nopreempt(cache);
6743 } else if (zone->z_recirc.zd_full) {
6744 zalloc_cached_recirculate(zone, cache);
6745 }
6746
6747 if (__probable(cache->zc_alloc_cur)) {
6748 return cache;
6749 }
6750
6751 if (ops == NULL) {
6752 zalloc_cached_import(zone, flags, cache);
6753 if (__probable(cache->zc_alloc_cur)) {
6754 return cache;
6755 }
6756 }
6757
6758 return NULL;
6759 }
6760
6761 __attribute__((always_inline))
6762 static inline zone_cache_t
6763 zalloc_cached_get_pcpu_cache(
6764 zone_t zone,
6765 zone_cache_ops_t ops,
6766 int cpu,
6767 zalloc_flags_t flags)
6768 {
6769 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6770
6771 if (__probable(cache->zc_alloc_cur != 0)) {
6772 return cache;
6773 }
6774
6775 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6776 zone_cache_swap_magazines(cache);
6777 return cache;
6778 }
6779
6780 return zalloc_cached_prime(zone, ops, flags, cache);
6781 }
6782
6783
6784 /*!
6785 * @function zalloc_ext
6786 *
6787 * @brief
6788 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6789 */
6790 struct kalloc_result
6791 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6792 {
6793 /*
6794 * KASan uses zalloc() for fakestack, which can be called anywhere.
6795 * However, we make sure these calls can never block.
6796 */
6797 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6798 #if KASAN_FAKESTACK
6799 zone->z_kasan_fakestacks ||
6800 #endif /* KASAN_FAKESTACK */
6801 ml_get_interrupts_enabled() ||
6802 ml_is_quiescing() ||
6803 debug_mode_active(),
6804 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6805
6806 /*
6807 * Make sure Z_NOFAIL was not obviously misused
6808 */
6809 if (flags & Z_NOFAIL) {
6810 assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6811 }
6812
6813 #if VM_TAG_SIZECLASSES
6814 if (__improbable(zone->z_uses_tags)) {
6815 vm_tag_t tag = zalloc_flags_get_tag(flags);
6816
6817 if (flags & Z_VM_TAG_BT_BIT) {
6818 tag = vm_tag_bt() ?: tag;
6819 }
6820 if (tag != VM_KERN_MEMORY_NONE) {
6821 tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6822 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6823 }
6824 if (tag == VM_KERN_MEMORY_NONE) {
6825 zone_security_flags_t zsflags = zone_security_config(zone);
6826
6827 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6828 tag = VM_KERN_MEMORY_KALLOC_DATA;
6829 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6830 zsflags.z_kalloc_type) {
6831 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6832 } else {
6833 tag = VM_KERN_MEMORY_KALLOC;
6834 }
6835 }
6836 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6837 }
6838 #endif /* VM_TAG_SIZECLASSES */
6839
6840 disable_preemption();
6841
6842 #if ZALLOC_ENABLE_ZERO_CHECK
6843 if (zalloc_skip_zero_check()) {
6844 flags |= Z_NOZZC;
6845 }
6846 #endif
6847
6848 if (zone->z_pcpu_cache) {
6849 zone_cache_t cache;
6850 vm_offset_t index, addr, esize;
6851 int cpu = cpu_number();
6852
6853 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6854 if (__probable(cache)) {
6855 esize = zone_elem_inner_size(zone);
6856 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6857 index = --cache->zc_alloc_cur;
6858 addr = cache->zc_alloc_elems[index];
6859 cache->zc_alloc_elems[index] = 0;
6860 enable_preemption();
6861 return zalloc_return(zone, addr, flags, esize);
6862 }
6863 }
6864
6865 __attribute__((musttail))
6866 return zalloc_item(zone, zstats, flags);
6867 }
6868
6869 __attribute__((always_inline))
6870 static inline zstack_t
6871 zcache_alloc_stack_from_cpu(
6872 zone_id_t zid,
6873 zone_cache_t cache,
6874 zstack_t stack,
6875 uint32_t n,
6876 zone_cache_ops_t ops)
6877 {
6878 vm_offset_t *p;
6879
6880 n = MIN(n, cache->zc_alloc_cur);
6881 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6882 cache->zc_alloc_cur -= n;
6883 stack.z_count += n;
6884
6885 do {
6886 vm_offset_t e = *--p;
6887
6888 *p = 0;
6889 if (ops) {
6890 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6891 } else {
6892 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6893 }
6894 zstack_push_no_delta(&stack, (void *)e);
6895 } while (--n > 0);
6896
6897 return stack;
6898 }
6899
6900 __attribute__((noinline))
6901 static zstack_t
6902 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6903 {
6904 zone_t zone = zone_by_id(zid);
6905 zone_stats_t zstats = zone->z_stats;
6906 int cpu;
6907
6908 count -= stack.z_count;
6909
6910 disable_preemption();
6911 cpu = cpu_number();
6912 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6913 count * zone_elem_inner_size(zone);
6914 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6915 enable_preemption();
6916
6917 return stack;
6918 }
6919
6920 #define ZCACHE_ALLOC_RETRY ((void *)-1)
6921
6922 __attribute__((noinline))
6923 static void *
6924 zcache_alloc_one(
6925 zone_id_t zid,
6926 zalloc_flags_t flags,
6927 zone_cache_ops_t ops)
6928 {
6929 zone_t zone = zone_by_id(zid);
6930 void *o;
6931
6932 /*
6933 * First try to allocate in rudimentary zones without ever going into
6934 * __ZONE_EXHAUSTED_AND_WAITING_HARD__() by clearing Z_NOFAIL.
6935 */
6936 enable_preemption();
6937 o = ops->zc_op_alloc(zid, flags & ~Z_NOFAIL);
6938 if (__probable(o)) {
6939 os_atomic_inc(&zone->z_elems_avail, relaxed);
6940 } else if (__probable(flags & Z_NOFAIL)) {
6941 zone_cache_t cache;
6942 vm_offset_t index;
6943 int cpu;
6944
6945 zone_lock(zone);
6946
6947 cpu = cpu_number();
6948 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6949 o = ZCACHE_ALLOC_RETRY;
6950 if (__probable(cache)) {
6951 index = --cache->zc_alloc_cur;
6952 o = (void *)cache->zc_alloc_elems[index];
6953 cache->zc_alloc_elems[index] = 0;
6954 o = ops->zc_op_mark_valid(zid, o);
6955 } else if (zone->z_elems_free == 0) {
6956 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone);
6957 }
6958
6959 zone_unlock(zone);
6960 }
6961
6962 return o;
6963 }
6964
6965 __attribute__((always_inline))
6966 static zstack_t
6967 zcache_alloc_n_ext(
6968 zone_id_t zid,
6969 uint32_t count,
6970 zalloc_flags_t flags,
6971 zone_cache_ops_t ops)
6972 {
6973 zstack_t stack = { };
6974 zone_cache_t cache;
6975 zone_t zone;
6976 int cpu;
6977
6978 disable_preemption();
6979 cpu = cpu_number();
6980 zone = zone_by_id(zid);
6981 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6982 count * zone_elem_inner_size(zone);
6983
6984 for (;;) {
6985 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6986 if (__probable(cache)) {
6987 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6988 count - stack.z_count, ops);
6989 enable_preemption();
6990 } else {
6991 void *o;
6992
6993 if (ops) {
6994 o = zcache_alloc_one(zid, flags, ops);
6995 } else {
6996 o = zalloc_item(zone, zone->z_stats, flags).addr;
6997 }
6998 if (__improbable(o == NULL)) {
6999 return zcache_alloc_fail(zid, stack, count);
7000 }
7001 if (ops == NULL || o != ZCACHE_ALLOC_RETRY) {
7002 zstack_push(&stack, o);
7003 }
7004 }
7005
7006 if (stack.z_count == count) {
7007 break;
7008 }
7009
7010 disable_preemption();
7011 cpu = cpu_number();
7012 }
7013
7014 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
7015
7016 return stack;
7017 }
7018
7019 zstack_t
7020 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
7021 {
7022 return zcache_alloc_n_ext(zid, count, flags, NULL);
7023 }
7024
7025 zstack_t
7026 (zcache_alloc_n)(
7027 zone_id_t zid,
7028 uint32_t count,
7029 zalloc_flags_t flags,
7030 zone_cache_ops_t ops)
7031 {
7032 __builtin_assume(ops != NULL);
7033 return zcache_alloc_n_ext(zid, count, flags, ops);
7034 }
7035
7036 __attribute__((always_inline))
7037 void *
7038 zalloc(zone_t zov)
7039 {
7040 return zalloc_flags(zov, Z_WAITOK);
7041 }
7042
7043 __attribute__((always_inline))
7044 void *
7045 zalloc_noblock(zone_t zov)
7046 {
7047 return zalloc_flags(zov, Z_NOWAIT);
7048 }
7049
7050 void *
7051 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
7052 {
7053 zone_t zone = zov->z_self;
7054 zone_stats_t zstats = zov->z_stats;
7055
7056 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7057 assert(!zone->z_percpu && !zone->z_permanent);
7058 return zalloc_ext(zone, zstats, flags).addr;
7059 }
7060
7061 __attribute__((always_inline))
7062 void *
7063 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
7064 {
7065 return (zalloc_flags)(zone_by_id(zid), flags);
7066 }
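/*
 * Usage sketch (illustrative; the zone id is hypothetical): subsystems
 * that registered a zone id can allocate and free by id without holding
 * a zone_t:
 *
 *	void *buf = zalloc_id(ZONE_ID_EXAMPLE, Z_WAITOK);
 *	...
 *	zfree_id(ZONE_ID_EXAMPLE, buf);
 */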
7067
7068 void *
7069 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
7070 {
7071 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7072 zone_t zone = zone_by_id(zid);
7073 zone_stats_t zstats = zone->z_stats;
7074 struct kalloc_result kr;
7075
7076 kr = zalloc_ext(zone, zstats, flags);
7077 #if ZSECURITY_CONFIG(READ_ONLY)
7078 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
7079 if (kr.addr) {
7080 zone_require_ro(zid, kr.size, kr.addr);
7081 }
7082 #endif
7083 return kr.addr;
7084 }
7085
7086 #if ZSECURITY_CONFIG(READ_ONLY)
7087
7088 __attribute__((always_inline))
7089 static bool
7090 from_current_stack(vm_offset_t addr, vm_size_t size)
7091 {
7092 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
7093 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
7094
7095 addr = vm_memtag_canonicalize_address(addr);
7096
7097 return (addr >= start) && (addr + size < end);
7098 }
7099
7100 /*
7101 * Check if an address is from const memory, i.e. TEXT or DATA CONST segments,
7102 * or the SECURITY_READ_ONLY_LATE section.
7103 */
7104 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7105 __attribute__((always_inline))
7106 static bool
7107 from_const_memory(const vm_offset_t addr, vm_size_t size)
7108 {
7109 return rorgn_contains(addr, size, true);
7110 }
7111 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7112 __attribute__((always_inline))
7113 static bool
7114 from_const_memory(const vm_offset_t addr, vm_size_t size)
7115 {
7116 #pragma unused(addr, size)
7117 return true;
7118 }
7119 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7120
7121 __abortlike
7122 static void
7123 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
7124 const vm_offset_t src, vm_size_t src_size)
7125 {
7126 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
7127 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
7128 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7129 extern vm_offset_t rorgn_begin;
7130 extern vm_offset_t rorgn_end;
7131 #else
7132 vm_offset_t const rorgn_begin = 0;
7133 vm_offset_t const rorgn_end = 0;
7134 #endif
7135
7136 if (from_ro_map(src, src_size)) {
7137 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
7138 zone_t dst_zone = &zone_array[zid];
7139 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
7140 " (expected: %s, actual: %s", (void *)src, elem, src_zone->z_name,
7141 dst_zone->z_name);
7142 }
7143
7144 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
7145 "current stack (%p - %p) or const memory (phys %p - %p)",
7146 (void *)src, (void*)kvtophys(src),
7147 (void *)zone_info.zi_ro_range.min_address,
7148 (void *)zone_info.zi_ro_range.max_address,
7149 (void *)stack_start, (void *)stack_end,
7150 (void *)rorgn_begin, (void *)rorgn_end);
7151 }
7152
7153 __attribute__((always_inline))
7154 static void
7155 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
7156 const vm_offset_t src, vm_size_t src_size)
7157 {
7158 if (from_current_stack(src, src_size) ||
7159 (from_ro_map(src, src_size) &&
7160 zid == zone_index_from_ptr((void *)src)) ||
7161 from_const_memory(src, src_size)) {
7162 return;
7163 }
7164 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
7165 }
7166
7167 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7168
7169 __attribute__((noinline))
7170 void
7171 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
7172 const void *new_data, vm_size_t new_data_size)
7173 {
7174 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7175
7176 #if ZSECURITY_CONFIG(READ_ONLY)
7177 bool skip_src_check = false;
7178
7179 /*
7180 * The OSEntitlements RO zone is treated a little differently. For more
7181 * information: rdar://100518485.
7182 */
7183 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
7184 code_signing_config_t cs_config = 0;
7185
7186 code_signing_configuration(NULL, &cs_config);
7187 if (cs_config & CS_CONFIG_CSM_ENABLED) {
7188 skip_src_check = true;
7189 }
7190 }
7191
7192 if (skip_src_check == false) {
7193 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
7194 new_data_size);
7195 }
7196 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
7197 (vm_offset_t) new_data, new_data_size);
7198 #else
7199 (void)zid;
7200 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
7201 #endif
7202 }
7203
7204 __attribute__((noinline))
7205 uint64_t
7206 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
7207 zro_atomic_op_t op, uint64_t value)
7208 {
7209 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7210
7211 #if ZSECURITY_CONFIG(READ_ONLY)
7212 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
7213 #else
7214 (void)zid;
7215 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
7216 #endif
7217 return value;
7218 }
7219
7220 void
7221 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
7222 {
7223 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7224 #if ZSECURITY_CONFIG(READ_ONLY)
7225 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
7226 #else
7227 (void)zid;
7228 bzero((void *)((uintptr_t)elem + offset), size);
7229 #endif
7230 }
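/*
 * Illustrative sketch (not part of the build): how a client of the
 * read-only zone API above might update a field of an RO-zone element.
 * The zone id, struct and field names here are hypothetical; real
 * users (e.g. the AMFI OSEntitlements zone) follow the same pattern.
 *
 *	struct ro_blob { uint32_t flags; uint64_t gen; };
 *
 *	static void
 *	ro_blob_set_flags(struct ro_blob *blob, uint32_t new_flags)
 *	{
 *		// zid_ro_blob is assumed to be a ZONE_ID__*_RO constant
 *		zalloc_ro_mut(zid_ro_blob, blob,
 *		    offsetof(struct ro_blob, flags),
 *		    &new_flags, sizeof(new_flags));
 *	}
 *
 * Atomic updates of a 64-bit field go through zalloc_ro_mut_atomic(),
 * and clearing a range goes through zalloc_ro_clear().
 */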
7231
7232 /*
7233 * This function will run in the PPL and needs to be robust
7234 * against an attacker with arbitrary kernel write.
7235 */
7236
7237 #if ZSECURITY_CONFIG(READ_ONLY)
7238
7239 __abortlike
7240 static void
7241 zone_id_require_ro_panic(zone_id_t zid, void *addr)
7242 {
7243 struct zone_size_params p = zone_ro_size_params[zid];
7244 vm_offset_t elem = (vm_offset_t)addr;
7245 uint32_t zindex;
7246 zone_t other;
7247 zone_t zone = &zone_array[zid];
7248
7249 if (!from_ro_map(addr, 1)) {
7250 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7251 }
7252
7253 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7254 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7255 }
7256
7257 zindex = zone_index_from_ptr(addr);
7258 other = &zone_array[zindex];
7259 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7260 panic("zone_require_ro failed: invalid zone index %d "
7261 "(addr: %p, expected: %s%s)", zindex,
7262 addr, zone_heap_name(zone), zone->z_name);
7263 } else {
7264 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7265 "(addr: %p, expected: %s%s)",
7266 zindex, zone_heap_name(other), other->z_name,
7267 addr, zone_heap_name(zone), zone->z_name);
7268 }
7269 }
7270
7271 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7272
7273 __attribute__((always_inline))
7274 void
7275 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7276 {
7277 #if ZSECURITY_CONFIG(READ_ONLY)
7278 struct zone_size_params p = zone_ro_size_params[zid];
7279 vm_offset_t elem = (vm_offset_t)addr;
7280
7281 if (!from_ro_map(addr, 1) ||
7282 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7283 zid != zone_meta_from_addr(elem)->zm_index) {
7284 zone_id_require_ro_panic(zid, addr);
7285 }
7286 #else
7287 #pragma unused(zid, addr)
7288 #endif
7289 }
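/*
 * Illustrative sketch (not part of the build): subsystems that hand out
 * pointers into an RO zone are expected to validate them before trusting
 * their contents; names below are hypothetical.
 *
 *	struct ro_blob *blob = ro_blob_lookup(handle);
 *	zone_require_ro(zid_ro_blob, sizeof(struct ro_blob), blob);
 *	// blob now provably points at a properly aligned element of the
 *	// expected RO zone, or the kernel has panicked.
 */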
7290
7291 void *
7292 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7293 {
7294 zone_t zone = zov.zov_view->zv_zone;
7295 zone_stats_t zstats = zov.zov_view->zv_stats;
7296
7297 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7298 assert(zone->z_percpu);
7299 flags |= Z_PCPU;
7300 return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7301 }
7302
7303 static void *
7304 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7305 {
7306 struct zone_page_metadata *page_meta;
7307 vm_offset_t offs, addr;
7308 zone_pva_t pva;
7309
7310 assert(ml_get_interrupts_enabled() ||
7311 ml_is_quiescing() ||
7312 debug_mode_active() ||
7313 startup_phase < STARTUP_SUB_EARLY_BOOT);
7314
7315 size = (size + mask) & ~mask;
7316 assert(size <= PAGE_SIZE);
7317
7318 zone_lock(zone);
7319 assert(zone->z_self == zone);
7320
7321 for (;;) {
7322 pva = zone->z_pageq_partial;
7323 while (!zone_pva_is_null(pva)) {
7324 page_meta = zone_pva_to_meta(pva);
7325 if (page_meta->zm_bump + size <= PAGE_SIZE) {
7326 goto found;
7327 }
7328 pva = page_meta->zm_page_next;
7329 }
7330
7331 zone_expand_locked(zone, Z_WAITOK);
7332 }
7333
7334 found:
7335 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7336 page_meta->zm_bump = (uint16_t)(offs + size);
7337 page_meta->zm_alloc_size += size;
7338 zone->z_elems_free -= size;
7339 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7340
7341 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7342 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7343 }
7344
7345 zone_unlock(zone);
7346
7347 if (zone->z_tbi_tag) {
7348 addr = vm_memtag_fixup_ptr(offs + zone_pva_to_addr(pva));
7349 } else {
7350 addr = offs + zone_pva_to_addr(pva);
7351 }
7352
7353 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7354 return (void *)addr;
7355 }
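/*
 * Worked example for the rounding above (illustrative): a request of
 * size 12 with mask 7 (8-byte alignment) becomes (12 + 7) & ~7 == 16,
 * and zm_bump is rounded up the same way before the element is carved
 * out, so each permanent element starts on the requested alignment.
 */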
7356
7357 static void *
7358 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7359 {
7360 vm_offset_t addr;
7361
7362 kernel_memory_allocate(kernel_map, &addr, size, mask,
7363 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7364
7365 return (void *)addr;
7366 }
7367
7368 void *
7369 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7370 {
7371 if (size <= PAGE_SIZE) {
7372 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7373 return _zalloc_permanent(zone, size, mask);
7374 }
7375 return _zalloc_permanent_large(size, mask, tag);
7376 }
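/*
 * Illustrative sketch (not part of the build): permanent allocations are
 * never freed, so they suit boot-time tables sized exactly once. A
 * hypothetical caller might do:
 *
 *	struct table_entry *table;
 *
 *	table = zalloc_permanent_tag(count * sizeof(struct table_entry),
 *	    _Alignof(struct table_entry) - 1, VM_KERN_MEMORY_KALLOC);
 *
 * Requests up to PAGE_SIZE are bump-allocated from the permanent zone;
 * larger ones fall through to _zalloc_permanent_large() above.
 */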
7377
7378 void *
7379 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7380 {
7381 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7382 return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7383 }
7384
7385 /*! @} */
7386 #endif /* !ZALLOC_TEST */
7387 #pragma mark zone GC / trimming
7388 #if !ZALLOC_TEST
7389
7390 static thread_call_data_t zone_trim_callout;
7391 EVENT_DEFINE(ZONE_EXHAUSTED);
7392
7393 static void
7394 zone_reclaim_chunk(
7395 zone_t z,
7396 struct zone_page_metadata *meta,
7397 uint32_t free_count)
7398 {
7399 vm_address_t page_addr;
7400 vm_size_t size_to_free;
7401 uint32_t bitmap_ref;
7402 uint32_t page_count;
7403 zone_security_flags_t zsflags = zone_security_config(z);
7404 bool sequester = !z->z_destroyed;
7405 bool oob_guard = false;
7406
7407 if (zone_submap_is_sequestered(zsflags)) {
7408 /*
7409 * If the entire map is sequestered, we can't return the VA.
7410 * It stays pinned to the zone forever.
7411 */
7412 sequester = true;
7413 }
7414
7415 zone_meta_queue_pop(z, &z->z_pageq_empty);
7416
7417 page_addr = zone_meta_to_addr(meta);
7418 page_count = meta->zm_chunk_len;
7419 oob_guard = meta->zm_guarded;
7420
7421 if (meta->zm_alloc_size) {
7422 zone_metadata_corruption(z, meta, "alloc_size");
7423 }
7424 if (z->z_percpu) {
7425 if (page_count != 1) {
7426 zone_metadata_corruption(z, meta, "page_count");
7427 }
7428 size_to_free = ptoa(z->z_chunk_pages);
7429 zone_remove_wired_pages(z, z->z_chunk_pages);
7430 } else {
7431 if (page_count > z->z_chunk_pages) {
7432 zone_metadata_corruption(z, meta, "page_count");
7433 }
7434 if (page_count < z->z_chunk_pages) {
7435 /* Dequeue non populated VA from z_pageq_va */
7436 zone_meta_remqueue(z, meta + page_count);
7437 }
7438 size_to_free = ptoa(page_count);
7439 zone_remove_wired_pages(z, page_count);
7440 }
7441
7442 zone_counter_sub(z, z_elems_free, free_count);
7443 zone_counter_sub(z, z_elems_avail, free_count);
7444 zone_counter_sub(z, z_wired_empty, page_count);
7445 zone_counter_sub(z, z_wired_cur, page_count);
7446
7447 if (z->z_pcpu_cache == NULL) {
7448 if (z->z_elems_free_min < free_count) {
7449 z->z_elems_free_min = 0;
7450 } else {
7451 z->z_elems_free_min -= free_count;
7452 }
7453 }
7454 if (z->z_elems_free_wma < free_count) {
7455 z->z_elems_free_wma = 0;
7456 } else {
7457 z->z_elems_free_wma -= free_count;
7458 }
7459
7460 bitmap_ref = 0;
7461 if (sequester) {
7462 if (meta->zm_inline_bitmap) {
7463 for (int i = 0; i < meta->zm_chunk_len; i++) {
7464 meta[i].zm_bitmap = 0;
7465 }
7466 } else {
7467 bitmap_ref = meta->zm_bitmap;
7468 meta->zm_bitmap = 0;
7469 }
7470 meta->zm_chunk_len = 0;
7471 } else {
7472 if (!meta->zm_inline_bitmap) {
7473 bitmap_ref = meta->zm_bitmap;
7474 }
7475 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7476 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7477 }
7478
7479 #if CONFIG_ZLEAKS
7480 if (__improbable(zleak_should_disable_for_zone(z) &&
7481 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7482 thread_call_enter(&zone_leaks_callout);
7483 }
7484 #endif /* CONFIG_ZLEAKS */
7485
7486 zone_unlock(z);
7487
7488 if (bitmap_ref) {
7489 zone_bits_free(bitmap_ref);
7490 }
7491
7492 /* Free the pages for metadata and account for them */
7493 #if KASAN_CLASSIC
7494 if (z->z_percpu) {
7495 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7496 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7497 zone_elem_outer_size(z),
7498 zone_elem_outer_offs(z),
7499 zone_elem_redzone(z));
7500 }
7501 } else {
7502 kasan_zmem_remove(page_addr, size_to_free,
7503 zone_elem_outer_size(z),
7504 zone_elem_outer_offs(z),
7505 zone_elem_redzone(z));
7506 }
7507 #endif /* KASAN_CLASSIC */
7508
7509 if (sequester) {
7510 kernel_memory_depopulate(page_addr, size_to_free,
7511 KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7512 } else {
7513 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7514 kmem_free(zone_submap(zsflags), page_addr,
7515 ptoa(z->z_chunk_pages + oob_guard));
7516 if (oob_guard) {
7517 os_atomic_dec(&zone_guard_pages, relaxed);
7518 }
7519 }
7520
7521 thread_yield_to_preemption();
7522
7523 zone_lock(z);
7524
7525 if (sequester) {
7526 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7527 }
7528 }
7529
7530 static void
7531 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7532 {
7533 z_debug_assert(n <= zc_mag_size());
7534
7535 for (uint16_t i = 0; i < n; i++) {
7536 vm_offset_t addr = elems[i];
7537 elems[i] = 0;
7538 zfree_drop(z, addr);
7539 }
7540
7541 z->z_elems_free += n;
7542 }
7543
7544 static void
7545 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7546 {
7547 z_debug_assert(n <= zc_mag_size());
7548 zone_cache_ops_t ops = zcache_ops[zid];
7549
7550 for (uint16_t i = 0; i < n; i++) {
7551 vm_offset_t addr = elems[i];
7552 elems[i] = 0;
7553 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7554 ops->zc_op_free(zid, (void *)addr);
7555 }
7556
7557 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7558 }
7559
7560 static void
7561 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7562 {
7563 zpercpu_foreach(zc, z->z_pcpu_cache) {
7564 zone_depot_lock(zc);
7565
7566 if (zc->zc_depot.zd_full > (target + 1) / 2) {
7567 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7568 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7569 }
7570
7571 if (zc->zc_depot.zd_empty > target / 2) {
7572 uint32_t n = zc->zc_depot.zd_empty - target / 2;
7573 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7574 }
7575
7576 zone_depot_unlock(zc);
7577 }
7578 }
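/*
 * Worked example for the split above (illustrative): with target == 5,
 * each per-CPU depot keeps at most (5 + 1) / 2 == 3 full magazines and
 * 5 / 2 == 2 empty ones; anything beyond that is moved into @c zd for
 * the caller to reclaim.
 */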
7579
7580 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7581 ZONE_RECLAIM_TRIM,
7582 ZONE_RECLAIM_DRAIN,
7583 ZONE_RECLAIM_DESTROY,
7584 });
7585
7586 static void
7587 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7588 {
7589 uint32_t depot_max = 0;
7590 bool cleanup = mode != ZONE_RECLAIM_TRIM;
7591
7592 if (z->z_depot_cleanup) {
7593 z->z_depot_cleanup = false;
7594 depot_max = z->z_depot_size;
7595 cleanup = true;
7596 }
7597
7598 if (cleanup) {
7599 zone_depot_trim(z, depot_max, zd);
7600 }
7601
7602 if (mode == ZONE_RECLAIM_DESTROY) {
7603 zpercpu_foreach(zc, z->z_pcpu_cache) {
7604 zone_reclaim_elements(z, zc->zc_alloc_cur,
7605 zc->zc_alloc_elems);
7606 zone_reclaim_elements(z, zc->zc_free_cur,
7607 zc->zc_free_elems);
7608 zc->zc_alloc_cur = zc->zc_free_cur = 0;
7609 }
7610
7611 z->z_recirc_empty_min = 0;
7612 z->z_recirc_empty_wma = 0;
7613 z->z_recirc_full_min = 0;
7614 z->z_recirc_full_wma = 0;
7615 z->z_recirc_cont_cur = 0;
7616 z->z_recirc_cont_wma = 0;
7617 }
7618 }
7619
7620 static void
7621 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
7622 {
7623 assert(zd->zd_empty == 0);
7624 assert(zd->zd_full == 0);
7625
7626 zone_recirc_lock_nopreempt(z);
7627
7628 *zd = z->z_recirc;
7629 if (zd->zd_full == 0) {
7630 zd->zd_tail = &zd->zd_head;
7631 }
7632 zone_depot_init(&z->z_recirc);
7633 z->z_recirc_empty_min = 0;
7634 z->z_recirc_empty_wma = 0;
7635 z->z_recirc_full_min = 0;
7636 z->z_recirc_full_wma = 0;
7637
7638 zone_recirc_unlock_nopreempt(z);
7639 }
7640
7641 static void
7642 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7643 {
7644 for (;;) {
7645 uint32_t budget = zc_free_batch_size();
7646 uint32_t count;
7647 bool done = true;
7648
7649 zone_recirc_lock_nopreempt(z);
7650 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7651 z->z_recirc_empty_min);
7652 assert(count <= z->z_recirc.zd_empty);
7653
7654 if (count > budget) {
7655 count = budget;
7656 done = false;
7657 }
7658 if (count) {
7659 budget -= count;
7660 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7661 z->z_recirc_empty_min -= count;
7662 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7663 }
7664
7665 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7666 z->z_recirc_full_min);
7667 assert(count <= z->z_recirc.zd_full);
7668
7669 if (count > budget) {
7670 count = budget;
7671 done = false;
7672 }
7673 if (count) {
7674 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7675 z->z_recirc_full_min -= count;
7676 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7677 }
7678
7679 zone_recirc_unlock_nopreempt(z);
7680
7681 if (done) {
7682 return;
7683 }
7684
7685 /*
7686 * If the number of magazines to reclaim is too large,
7687 * we might be keeping preemption disabled for too long.
7688 *
7689 * Drop and retake the lock to allow for preemption to occur.
7690 */
7691 zone_unlock(z);
7692 zone_lock(z);
7693 }
7694 }
7695
7696 /*!
7697 * @function zone_reclaim
7698 *
7699 * @brief
7700 * Drains or trims the zone.
7701 *
7702 * @discussion
7703 * Draining the zone frees all of its elements.
7704 *
7705 * Trimming the zone tries to respect the working set size, and avoids draining
7706 * the depot when it's not necessary.
7707 *
7708 * @param z The zone to reclaim from
7709 * @param mode The purpose of this reclaim.
7710 */
7711 static void
7712 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7713 {
7714 struct zone_depot zd;
7715
7716 zone_depot_init(&zd);
7717
7718 zone_lock(z);
7719
7720 if (mode == ZONE_RECLAIM_DESTROY) {
7721 if (!z->z_destructible || z->z_elems_rsv) {
7722 panic("zdestroy: Zone %s%s isn't destructible",
7723 zone_heap_name(z), z->z_name);
7724 }
7725
7726 if (!z->z_self || z->z_expander ||
7727 z->z_async_refilling || z->z_expanding_wait) {
7728 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7729 zone_heap_name(z), z->z_name);
7730 }
7731
7732 #if !KASAN_CLASSIC
7733 /*
7734 * Unset the valid bit. We'll hit an assert failure on further
7735 * operations on this zone, until zinit() is called again.
7736 *
7737 * Leave the zone valid for KASan as we will see zfree's on
7738 * quarantined free elements even after the zone is destroyed.
7739 */
7740 z->z_self = NULL;
7741 #endif
7742 z->z_destroyed = true;
7743 } else if (z->z_destroyed) {
7744 return zone_unlock(z);
7745 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7746 /* If the zone is under its reserve level, leave it alone. */
7747 return zone_unlock(z);
7748 }
7749
7750 if (z->z_pcpu_cache) {
7751 zone_magazine_t mag;
7752 uint32_t freed = 0;
7753
7754 /*
7755 * This is all done with the zone lock held on purpose.
7756 * The work here is O(ncpu), which should still be short.
7757 *
7758 * We need to keep the lock held until we have reclaimed
7759 * at least a few magazines, otherwise if the zone has no
7760 * free elements outside of the depot, a thread performing
7761 * a concurrent allocation could try to grow the zone
7762 * while we're trying to drain it.
7763 */
7764 if (mode == ZONE_RECLAIM_TRIM) {
7765 zone_reclaim_recirc_trim(z, &zd);
7766 } else {
7767 zone_reclaim_recirc_drain(z, &zd);
7768 }
7769 zone_reclaim_pcpu(z, mode, &zd);
7770
7771 if (z->z_chunk_elems) {
7772 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7773 smr_t smr = zone_cache_smr(cache);
7774
7775 while (zd.zd_full) {
7776 mag = zone_depot_pop_head_full(&zd, NULL);
7777 if (smr) {
7778 smr_wait(smr, mag->zm_seq);
7779 zalloc_cached_reuse_smr(z, cache, mag);
7780 freed += zc_mag_size();
7781 }
7782 zone_reclaim_elements(z, zc_mag_size(),
7783 mag->zm_elems);
7784 zone_depot_insert_head_empty(&zd, mag);
7785
7786 freed += zc_mag_size();
7787 if (freed >= zc_free_batch_size()) {
7788 zone_unlock(z);
7789 zone_magazine_free_list(&zd);
7790 thread_yield_to_preemption();
7791 zone_lock(z);
7792 freed = 0;
7793 }
7794 }
7795 } else {
7796 zone_id_t zid = zone_index(z);
7797
7798 zone_unlock(z);
7799
7800 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7801
7802 while (zd.zd_full) {
7803 mag = zone_depot_pop_head_full(&zd, NULL);
7804 zcache_reclaim_elements(zid, zc_mag_size(),
7805 mag->zm_elems);
7806 zone_magazine_free(mag);
7807 }
7808
7809 goto cleanup;
7810 }
7811 }
7812
7813 while (!zone_pva_is_null(z->z_pageq_empty)) {
7814 struct zone_page_metadata *meta;
7815 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7816
7817 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7818 limit = MAX(limit, z->z_elems_free -
7819 MIN(z->z_elems_free_min, z->z_elems_free_wma));
7820 }
7821
7822 meta = zone_pva_to_meta(z->z_pageq_empty);
7823 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7824
7825 if (zone_count_free(z) - count < limit) {
7826 break;
7827 }
7828
7829 zone_reclaim_chunk(z, meta, count);
7830 }
7831
7832 zone_unlock(z);
7833
7834 cleanup:
7835 zone_magazine_free_list(&zd);
7836 }
7837
7838 void
7839 zone_drain(zone_t zone)
7840 {
7841 current_thread()->options |= TH_OPT_ZONE_PRIV;
7842 lck_mtx_lock(&zone_gc_lock);
7843 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7844 lck_mtx_unlock(&zone_gc_lock);
7845 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7846 }
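/*
 * Illustrative note: zone_drain() is the heavyweight, synchronous path a
 * subsystem can use after releasing a large batch of elements, e.g.
 *
 *	zone_drain(my_zone);	// my_zone is hypothetical
 *
 * For routine reclaim, the working-set based trim driven by
 * compute_zone_working_set_size() below is preferred.
 */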
7847
7848 void
7849 zcache_drain(zone_id_t zid)
7850 {
7851 zone_drain(zone_by_id(zid));
7852 }
7853
7854 static void
7855 zone_reclaim_all(zone_reclaim_mode_t mode)
7856 {
7857 /*
7858 * Start with zcaches, so that they flow into the regular zones.
7859 *
7860 * Then reclaim the zones with VA sequestering, since depopulating
7861 * pages does not need to allocate vm map entries for holes,
7862 * which gives memory back to the system faster.
7863 */
7864 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7865 zone_t z = zone_by_id(zid);
7866
7867 if (z->z_self && z->z_chunk_elems == 0) {
7868 zone_reclaim(z, mode);
7869 }
7870 }
7871 zone_index_foreach(zid) {
7872 zone_t z = zone_by_id(zid);
7873
7874 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7875 continue;
7876 }
7877 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7878 z->collectable) {
7879 zone_reclaim(z, mode);
7880 }
7881 }
7882
7883 zone_index_foreach(zid) {
7884 zone_t z = zone_by_id(zid);
7885
7886 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7887 continue;
7888 }
7889 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7890 z->collectable) {
7891 zone_reclaim(z, mode);
7892 }
7893 }
7894
7895 zone_reclaim(zc_magazine_zone, mode);
7896 }
7897
7898 void
7899 zone_userspace_reboot_checks(void)
7900 {
7901 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7902 if (label_zone_size != 0) {
7903 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7904 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7905 }
7906 }
7907
7908 void
7909 zone_gc(zone_gc_level_t level)
7910 {
7911 zone_reclaim_mode_t mode;
7912 zone_t largest_zone = NULL;
7913
7914 switch (level) {
7915 case ZONE_GC_TRIM:
7916 mode = ZONE_RECLAIM_TRIM;
7917 break;
7918 case ZONE_GC_DRAIN:
7919 mode = ZONE_RECLAIM_DRAIN;
7920 break;
7921 case ZONE_GC_JETSAM:
7922 largest_zone = kill_process_in_largest_zone();
7923 mode = ZONE_RECLAIM_TRIM;
7924 break;
7925 }
7926
7927 current_thread()->options |= TH_OPT_ZONE_PRIV;
7928 lck_mtx_lock(&zone_gc_lock);
7929
7930 zone_reclaim_all(mode);
7931
7932 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7933 /*
7934 * If we possibly killed a process, but we're still critical,
7935 * we need to drain harder.
7936 */
7937 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7938 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7939 }
7940
7941 lck_mtx_unlock(&zone_gc_lock);
7942 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7943 }
7944
7945 void
7946 zone_gc_trim(void)
7947 {
7948 zone_gc(ZONE_GC_TRIM);
7949 }
7950
7951 void
7952 zone_gc_drain(void)
7953 {
7954 zone_gc(ZONE_GC_DRAIN);
7955 }
7956
7957 static bool
7958 zone_trim_needed(zone_t z)
7959 {
7960 if (z->z_depot_cleanup) {
7961 return true;
7962 }
7963
7964 if (z->z_async_refilling) {
7965 /* Don't fight with refill */
7966 return false;
7967 }
7968
7969 if (z->z_pcpu_cache) {
7970 uint32_t e_n, f_n;
7971
7972 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7973 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7974
7975 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7976 return true;
7977 }
7978
7979 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7980 f_n * zc_mag_size() * zone_elem_inner_size(z) >
7981 zc_autotrim_size() * Z_WMA_UNIT) {
7982 return true;
7983 }
7984
7985 return false;
7986 }
7987
7988 if (!zone_pva_is_null(z->z_pageq_empty)) {
7989 uint32_t n;
7990
7991 n = MIN(z->z_elems_free_wma, z->z_elems_free_min);
7992
7993 return n >= z->z_elems_rsv + z->z_chunk_elems;
7994 }
7995
7996 return false;
7997 }
7998
7999 static void
8000 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
8001 {
8002 current_thread()->options |= TH_OPT_ZONE_PRIV;
8003
8004 zone_foreach(z) {
8005 if (!z->collectable || z == zc_magazine_zone) {
8006 continue;
8007 }
8008
8009 if (zone_trim_needed(z)) {
8010 lck_mtx_lock(&zone_gc_lock);
8011 zone_reclaim(z, ZONE_RECLAIM_TRIM);
8012 lck_mtx_unlock(&zone_gc_lock);
8013 }
8014 }
8015
8016 if (zone_trim_needed(zc_magazine_zone)) {
8017 lck_mtx_lock(&zone_gc_lock);
8018 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
8019 lck_mtx_unlock(&zone_gc_lock);
8020 }
8021
8022 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
8023 }
8024
8025 void
8026 compute_zone_working_set_size(__unused void *param)
8027 {
8028 uint32_t zc_auto = zc_enable_level();
8029 bool needs_trim = false;
8030
8031 /*
8032 * Keep zone caching disabled until the first proc is made.
8033 */
8034 if (__improbable(zone_caching_disabled < 0)) {
8035 return;
8036 }
8037
8038 zone_caching_disabled = vm_pool_low();
8039
8040 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
8041 zc_auto = 0;
8042 }
8043
8044 zone_foreach(z) {
8045 uint32_t old, wma, cur;
8046 bool needs_caching = false;
8047
8048 if (z->z_self != z) {
8049 continue;
8050 }
8051
8052 zone_lock(z);
8053
8054 zone_recirc_lock_nopreempt(z);
8055
8056 if (z->z_pcpu_cache) {
8057 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
8058 z->z_recirc_empty_min = z->z_recirc.zd_empty;
8059 z->z_recirc_empty_wma = wma;
8060 } else {
8061 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
8062 z->z_elems_free_min = z->z_elems_free;
8063 z->z_elems_free_wma = wma;
8064 }
8065
8066 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
8067 z->z_recirc_full_min = z->z_recirc.zd_full;
8068 z->z_recirc_full_wma = wma;
8069
8070 /* fixed point decimal of contentions per second */
8071 old = z->z_recirc_cont_wma;
8072 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
8073 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
8074 cur = (3 * old + cur) / 4;
8075 zone_recirc_unlock_nopreempt(z);
8076
8077 if (z->z_pcpu_cache) {
8078 uint16_t size = z->z_depot_size;
8079
8080 if (zone_exhausted(z)) {
8081 if (z->z_depot_size) {
8082 z->z_depot_size = 0;
8083 z->z_depot_cleanup = true;
8084 }
8085 } else if (size < z->z_depot_limit && cur > zc_grow_level()) {
8086 /*
8087 * lose history on purpose now
8088 * that we just grew, to give
8089 * the system time to adjust.
8090 */
8091 cur = (zc_grow_level() + zc_shrink_level()) / 2;
8092 size = size ? (3 * size + 2) / 2 : 2;
8093 z->z_depot_size = MIN(z->z_depot_limit, size);
8094 } else if (size > 0 && cur <= zc_shrink_level()) {
8095 /*
8096 * lose history on purpose now
8097 * that we just shrunk, to give
8098 * the system time to adjust.
8099 */
8100 cur = (zc_grow_level() + zc_shrink_level()) / 2;
8101 z->z_depot_size = size - 1;
8102 z->z_depot_cleanup = true;
8103 }
8104 } else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
8105 old >= zc_auto && cur >= zc_auto) {
8106 needs_caching = true;
8107 }
8108
8109 z->z_recirc_cont_wma = cur;
8110 z->z_recirc_cont_cur = 0;
8111
8112 if (!needs_trim && zone_trim_needed(z)) {
8113 needs_trim = true;
8114 }
8115
8116 zone_unlock(z);
8117
8118 if (needs_caching) {
8119 zone_enable_caching(z);
8120 }
8121 }
8122
8123 if (needs_trim) {
8124 thread_call_enter(&zone_trim_callout);
8125 }
8126 }
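/*
 * Worked example of the fixed-point averaging above (illustrative):
 * z_recirc_cont_cur counts depot lock contentions since the last update.
 * With Z_WMA_UNIT as the fixed-point scale, the per-period rate is
 *
 *	cur = contentions * Z_WMA_UNIT / (ncpu * ZONE_WSS_UPDATE_PERIOD)
 *
 * and the running average keeps a 3/4 weight on history:
 *
 *	wma' = (3 * wma + cur) / 4
 *
 * so a zone that suddenly stops contending sees its average decay to
 * roughly 32% of its old value after 4 periods ((3/4)^4).
 */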
8127
8128 #endif /* !ZALLOC_TEST */
8129 #pragma mark vm integration, MIG routines
8130 #if !ZALLOC_TEST
8131
8132 extern unsigned int stack_total;
8133 #if defined (__x86_64__)
8134 extern unsigned int inuse_ptepages_count;
8135 #endif
8136
8137 static const char *
8138 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
8139 bool is_kt_var)
8140 {
8141 if (is_kt_var) {
8142 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
8143 return cur.ktv_var->kt_name;
8144 } else {
8145 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
8146 return cur.ktv_fixed->kt_zv.zv_name;
8147 }
8148 }
8149
8150 static void
8151 panic_print_types_in_zone(zone_t z, const char* debug_str)
8152 {
8153 kalloc_type_views_t kt_cur = {};
8154 const char *prev_type = "";
8155 size_t skip_over_site = sizeof("site.") - 1;
8156 zone_security_flags_t zsflags = zone_security_config(z);
8157 bool is_kt_var = false;
8158
8159 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8160 uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
8161 kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
8162 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
8163 is_kt_var = true;
8164 } else {
8165 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
8166 }
8167
8168 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
8169 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
8170
8171 while (kt_cur.ktv_fixed) {
8172 kalloc_type_views_t kt_next = {};
8173 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
8174 is_kt_var) + skip_over_site;
8175 if (strcmp(typename, prev_type) != 0) {
8176 paniclog_append_noflush("\t%-50s\n", typename);
8177 prev_type = typename;
8178 }
8179 kt_cur = kt_next;
8180 }
8181 paniclog_append_noflush("\n");
8182 }
8183
8184 static void
8185 panic_display_kalloc_types(void)
8186 {
8187 if (kalloc_type_src_zone) {
8188 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
8189 }
8190 if (kalloc_type_dst_zone) {
8191 panic_print_types_in_zone(kalloc_type_dst_zone,
8192 "addr is being freed to");
8193 }
8194 }
8195
8196 static void
8197 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
8198 uint64_t *zone_size)
8199 {
8200 zone_index_foreach(zid) {
8201 zone_t z = &zone_array[zid];
8202 vm_offset_t size = zone_size_wired(z);
8203
8204 if (zid == ZONE_ID_VM_PAGES) {
8205 continue;
8206 }
8207 for (uint32_t i = 0; i < n; i++) {
8208 if (size > zone_size[i]) {
8209 largest_zones[i] = z;
8210 zone_size[i] = size;
8211 break;
8212 }
8213 }
8214 }
8215 }
8216
8217 #define NUM_LARGEST_ZONES 5
8218 static void
8219 panic_display_largest_zones(void)
8220 {
8221 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
8222 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
8223
8224 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
8225 (uint64_t *) &largest_size);
8226
8227 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
8228 "Zone Name", "Cur Size", "Free Size");
8229 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
8230 zone_t z = largest_zones[i];
8231 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
8232 zone_heap_name(z), z->z_name,
8233 mach_vm_size_pretty(largest_size[i]),
8234 mach_vm_size_unit(largest_size[i]),
8235 mach_vm_size_pretty(zone_size_free(z)),
8236 mach_vm_size_unit(zone_size_free(z)));
8237 }
8238 }
8239
8240 static void
8241 panic_display_zprint(void)
8242 {
8243 panic_display_largest_zones();
8244 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
8245 (uintptr_t)(kernel_stack_size * stack_total));
8246 #if defined (__x86_64__)
8247 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
8248 (uintptr_t)ptoa(inuse_ptepages_count));
8249 #endif
8250 paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
8251 counter_load(&kalloc_large_total));
8252
8253 if (panic_kext_memory_info) {
8254 mach_memory_info_t *mem_info = panic_kext_memory_info;
8255
8256 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
8257 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
8258 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
8259 continue;
8260 }
8261 if (mem_info[i].size > (1024 * 1024)) {
8262 paniclog_append_noflush("%-5lld %10lld\n",
8263 mem_info[i].site, mem_info[i].size);
8264 }
8265 }
8266 }
8267 }
8268
8269 static void
8270 panic_display_zone_info(void)
8271 {
8272 paniclog_append_noflush("Zone info:\n");
8273 paniclog_append_noflush(" Zone map: %p - %p\n",
8274 (void *)zone_info.zi_map_range.min_address,
8275 (void *)zone_info.zi_map_range.max_address);
8276 #if CONFIG_PROB_GZALLOC
8277 if (pgz_submap) {
8278 paniclog_append_noflush(" . PGZ : %p - %p\n",
8279 (void *)pgz_submap->min_offset,
8280 (void *)pgz_submap->max_offset);
8281 }
8282 #endif /* CONFIG_PROB_GZALLOC */
8283 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8284 vm_map_t map = zone_submaps[i];
8285
8286 if (map == VM_MAP_NULL) {
8287 continue;
8288 }
8289 paniclog_append_noflush(" . %-6s: %p - %p\n",
8290 zone_submaps_names[i],
8291 (void *)map->min_offset,
8292 (void *)map->max_offset);
8293 }
8294 paniclog_append_noflush(" Metadata: %p - %p\n"
8295 " Bitmaps : %p - %p\n"
8296 " Extra : %p - %p\n"
8297 "\n",
8298 (void *)zone_info.zi_meta_range.min_address,
8299 (void *)zone_info.zi_meta_range.max_address,
8300 (void *)zone_info.zi_bits_range.min_address,
8301 (void *)zone_info.zi_bits_range.max_address,
8302 (void *)zone_info.zi_xtra_range.min_address,
8303 (void *)zone_info.zi_xtra_range.max_address);
8304 }
8305
8306 static void
8307 panic_display_zone_fault(vm_offset_t addr)
8308 {
8309 struct zone_page_metadata meta = { };
8310 vm_map_t map = VM_MAP_NULL;
8311 vm_offset_t oob_offs = 0, size = 0;
8312 int map_idx = -1;
8313 zone_t z = NULL;
8314 const char *kind = "wild deref";
8315 bool oob = false;
8316
8317 /*
8318 * First: look if we bumped into guard pages between submaps
8319 */
8320 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8321 map = zone_submaps[i];
8322 if (map == VM_MAP_NULL) {
8323 continue;
8324 }
8325
8326 if (addr >= map->min_offset && addr < map->max_offset) {
8327 map_idx = i;
8328 break;
8329 }
8330 }
8331
8332 if (map_idx == -1) {
8333 /* this really shouldn't happen, submaps are back to back */
8334 return;
8335 }
8336
8337 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8338
8339 /*
8340 * Second: look if there's just no metadata at all
8341 */
8342 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8343 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8344 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8345 zone_array[meta.zm_index].z_self == NULL) {
8346 paniclog_append_noflush(" Zone : <unknown>\n");
8347 kind = "wild deref, missing or invalid metadata";
8348 } else {
8349 z = &zone_array[meta.zm_index];
8350 paniclog_append_noflush(" Zone : %s%s\n",
8351 zone_heap_name(z), zone_name(z));
8352 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8353 kind = "out-of-bounds (high confidence)";
8354 oob = true;
8355 size = zone_element_size((void *)addr,
8356 &z, false, &oob_offs);
8357 } else {
8358 kind = "use-after-free (medium confidence)";
8359 }
8360 }
8361
8362 paniclog_append_noflush(" Address : %p\n", (void *)addr);
8363 if (oob) {
8364 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
8365 (void *)(trunc_page(addr) - (size - oob_offs)),
8366 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8367 }
8368 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
8369 zone_submaps_names[map_idx],
8370 (void *)map->min_offset, (void *)map->max_offset);
8371 paniclog_append_noflush(" Kind : %s\n", kind);
8372 if (oob) {
8373 paniclog_append_noflush(" Access : %d byte(s) past\n",
8374 (uint32_t)(addr & PAGE_MASK) + 1);
8375 }
8376 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
8377 "0x%04x 0x%08x 0x%08x 0x%08x\n",
8378 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8379 meta.zm_alloc_size, meta.zm_bitmap,
8380 meta.zm_page_next.packed_address,
8381 meta.zm_page_prev.packed_address);
8382 paniclog_append_noflush("\n");
8383 }
8384
8385 void
8386 panic_display_zalloc(void)
8387 {
8388 bool keepsyms = false;
8389
8390 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8391
8392 panic_display_zone_info();
8393
8394 if (panic_fault_address) {
8395 #if CONFIG_PROB_GZALLOC
8396 if (pgz_owned(panic_fault_address)) {
8397 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8398 } else
8399 #endif /* CONFIG_PROB_GZALLOC */
8400 if (zone_maps_owned(panic_fault_address, 1)) {
8401 panic_display_zone_fault(panic_fault_address);
8402 }
8403 }
8404
8405 if (panic_include_zprint) {
8406 panic_display_zprint();
8407 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8408 panic_display_largest_zones();
8409 }
8410 #if CONFIG_ZLEAKS
8411 if (zleak_active) {
8412 panic_display_zleaks(keepsyms);
8413 }
8414 #endif
8415 if (panic_include_kalloc_types) {
8416 panic_display_kalloc_types();
8417 }
8418 }
8419
8420 /*
8421 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8422 * requesting zone information.
8423 * Frees unused pages towards the end of the region, and zeroes out unused
8424 * space on the last page.
8425 */
8426 static vm_map_copy_t
8427 create_vm_map_copy(
8428 vm_offset_t start_addr,
8429 vm_size_t total_size,
8430 vm_size_t used_size)
8431 {
8432 kern_return_t kr;
8433 vm_offset_t end_addr;
8434 vm_size_t free_size;
8435 vm_map_copy_t copy;
8436
8437 if (used_size != total_size) {
8438 end_addr = start_addr + used_size;
8439 free_size = total_size - (round_page(end_addr) - start_addr);
8440
8441 if (free_size >= PAGE_SIZE) {
8442 kmem_free(ipc_kernel_map,
8443 round_page(end_addr), free_size);
8444 }
8445 bzero((char *) end_addr, round_page(end_addr) - end_addr);
8446 }
8447
8448 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8449 (vm_map_size_t)used_size, TRUE, &copy);
8450 assert(kr == KERN_SUCCESS);
8451
8452 return copy;
8453 }
8454
8455 static boolean_t
8456 get_zone_info(
8457 zone_t z,
8458 mach_zone_name_t *zn,
8459 mach_zone_info_t *zi)
8460 {
8461 struct zone zcopy;
8462 vm_size_t cached = 0;
8463
8464 assert(z != ZONE_NULL);
8465 zone_lock(z);
8466 if (!z->z_self) {
8467 zone_unlock(z);
8468 return FALSE;
8469 }
8470 zcopy = *z;
8471 if (z->z_pcpu_cache) {
8472 zpercpu_foreach(zc, z->z_pcpu_cache) {
8473 cached += zc->zc_alloc_cur + zc->zc_free_cur;
8474 cached += zc->zc_depot.zd_full * zc_mag_size();
8475 }
8476 }
8477 zone_unlock(z);
8478
8479 if (zn != NULL) {
8480 /*
8481 * Append kalloc heap name to zone name (if zone is used by kalloc)
8482 */
8483 char temp_zone_name[MAX_ZONE_NAME] = "";
8484 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8485 zone_heap_name(z), z->z_name);
8486
8487 /* assuming here the name data is static */
8488 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8489 strlen(temp_zone_name) + 1);
8490 }
8491
8492 if (zi != NULL) {
8493 *zi = (mach_zone_info_t) {
8494 .mzi_count = zone_count_allocated(&zcopy) - cached,
8495 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8496 // max_size for zprint is now high-watermark of pages used
8497 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8498 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8499 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8500 .mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
8501 };
8502 if (zcopy.z_chunk_pages == 0) {
8503 /* this is a zcache */
8504 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8505 }
8506 zpercpu_foreach(zs, zcopy.z_stats) {
8507 zi->mzi_sum_size += zs->zs_mem_allocated;
8508 }
8509 if (zcopy.collectable) {
8510 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8511 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8512 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8513 }
8514 }
8515
8516 return TRUE;
8517 }
8518
8519 /* mach_memory_info entitlement */
8520 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8521
8522 /* macro needed to rate-limit mach_memory_info */
8523 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8524
8525 /* declarations necessary to call kauth_cred_issuser() */
8526 struct ucred;
8527 extern int kauth_cred_issuser(struct ucred *);
8528 extern struct ucred *kauth_cred_get(void);
8529
8530 static kern_return_t
8531 mach_memory_info_internal(
8532 host_t host,
8533 mach_zone_name_array_t *namesp,
8534 mach_msg_type_number_t *namesCntp,
8535 mach_zone_info_array_t *infop,
8536 mach_msg_type_number_t *infoCntp,
8537 mach_memory_info_array_t *memoryInfop,
8538 mach_msg_type_number_t *memoryInfoCntp,
8539 bool redact_info);
8540
8541 static kern_return_t
8542 mach_memory_info_security_check(bool redact_info)
8543 {
8544 /* If not root, only allow redacted calls. */
8545 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8546 return KERN_NO_ACCESS;
8547 }
8548
8549 if (PE_srd_fused) {
8550 return KERN_SUCCESS;
8551 }
8552
8553 /* If the caller does not have the memory entitlement, fail. */
8554 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8555 if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8556 return KERN_DENIED;
8557 }
8558
8559 /*
8560 * On release non-mac arm devices, allow mach_memory_info
8561 * to be called twice per day per boot. memorymaintenanced
8562 * calls it once per day, which leaves room for a sysdiagnose.
8563 * Allow redacted version to be called without rate limit.
8564 */
8565
8566 if (!redact_info) {
8567 static uint64_t first_call = 0, second_call = 0;
8568 uint64_t now = 0;
8569 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8570
8571 if (!first_call) {
8572 first_call = now;
8573 } else if (!second_call) {
8574 second_call = now;
8575 } else if (first_call + NSEC_DAY > now) {
8576 return KERN_DENIED;
8577 } else if (first_call + NSEC_DAY < now) {
8578 first_call = now;
8579 second_call = 0;
8580 }
8581 }
8582 #endif
8583
8584 return KERN_SUCCESS;
8585 }
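/*
 * Illustrative timeline for the rate limit above: the first and second
 * unredacted calls after boot always succeed; a third call is denied
 * until a full NSEC_DAY has elapsed since the first, at which point the
 * window slides (first_call = now) and the second slot frees up again.
 */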
8586
8587 kern_return_t
8588 mach_zone_info(
8589 mach_port_t host_port,
8590 mach_zone_name_array_t *namesp,
8591 mach_msg_type_number_t *namesCntp,
8592 mach_zone_info_array_t *infop,
8593 mach_msg_type_number_t *infoCntp)
8594 {
8595 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8596 }
8597
8598 kern_return_t
8599 mach_memory_info(
8600 mach_port_t host_port,
8601 mach_zone_name_array_t *namesp,
8602 mach_msg_type_number_t *namesCntp,
8603 mach_zone_info_array_t *infop,
8604 mach_msg_type_number_t *infoCntp,
8605 mach_memory_info_array_t *memoryInfop,
8606 mach_msg_type_number_t *memoryInfoCntp)
8607 {
8608 bool redact_info = false;
8609 host_t host = HOST_NULL;
8610
8611 host = convert_port_to_host_priv(host_port);
8612 if (host == HOST_NULL) {
8613 redact_info = true;
8614 host = convert_port_to_host(host_port);
8615 }
8616
8617 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8618 }
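/*
 * Illustrative sketch (not part of the build): from user space these MIG
 * routines are what tools like zprint use, roughly:
 *
 *	mach_zone_name_array_t names;
 *	mach_zone_info_array_t info;
 *	mach_msg_type_number_t name_cnt, info_cnt;
 *
 *	kern_return_t kr = mach_zone_info(mach_host_self(),
 *	    &names, &name_cnt, &info, &info_cnt);
 *
 * An unprivileged host port yields the redacted view assembled below;
 * the host-priv port (plus the memoryinfo entitlement where required)
 * unlocks the full per-zone and per-site data.
 */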
8619
8620 static void
8621 zone_info_redact(mach_zone_info_t *zi)
8622 {
8623 zi->mzi_cur_size = 0;
8624 zi->mzi_max_size = 0;
8625 zi->mzi_alloc_size = 0;
8626 zi->mzi_sum_size = 0;
8627 zi->mzi_collectable = 0;
8628 }
8629
8630 static bool
8631 zone_info_needs_to_be_coalesced(int zone_index)
8632 {
8633 zone_security_flags_t zsflags = zone_security_array[zone_index];
8634 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8635 return true;
8636 }
8637 return false;
8638 }
8639
8640 static bool
8641 zone_info_find_coalesce_zone(
8642 mach_zone_info_t *zi,
8643 mach_zone_info_t *info,
8644 int *coalesce,
8645 int coalesce_count,
8646 int *coalesce_index)
8647 {
8648 for (int i = 0; i < coalesce_count; i++) {
8649 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8650 *coalesce_index = coalesce[i];
8651 return true;
8652 }
8653 }
8654
8655 return false;
8656 }
8657
8658 static void
8659 zone_info_coalesce(
8660 mach_zone_info_t *info,
8661 int coalesce_index,
8662 mach_zone_info_t *zi)
8663 {
8664 info[coalesce_index].mzi_count += zi->mzi_count;
8665 }
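/*
 * Illustrative example of the redaction above: every kalloc_type zone
 * sharing an element size (say the kalloc.type zones of size 128) is
 * folded into a single "kalloc.128" row whose mzi_count is the sum of
 * the individual zones, while its sizes have been zeroed out by
 * zone_info_redact().
 */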
8666
8667 static kern_return_t
8668 mach_memory_info_internal(
8669 host_t host,
8670 mach_zone_name_array_t *namesp,
8671 mach_msg_type_number_t *namesCntp,
8672 mach_zone_info_array_t *infop,
8673 mach_msg_type_number_t *infoCntp,
8674 mach_memory_info_array_t *memoryInfop,
8675 mach_msg_type_number_t *memoryInfoCntp,
8676 bool redact_info)
8677 {
8678 mach_zone_name_t *names;
8679 vm_offset_t names_addr;
8680 vm_size_t names_size;
8681
8682 mach_zone_info_t *info;
8683 vm_offset_t info_addr;
8684 vm_size_t info_size;
8685
8686 int *coalesce;
8687 vm_offset_t coalesce_addr;
8688 vm_size_t coalesce_size;
8689 int coalesce_count = 0;
8690
8691 mach_memory_info_t *memory_info;
8692 vm_offset_t memory_info_addr;
8693 vm_size_t memory_info_size;
8694 vm_size_t memory_info_vmsize;
8695 unsigned int num_info;
8696
8697 unsigned int max_zones, used_zones, i;
8698 mach_zone_name_t *zn;
8699 mach_zone_info_t *zi;
8700 kern_return_t kr;
8701
8702 uint64_t zones_collectable_bytes = 0;
8703
8704 if (host == HOST_NULL) {
8705 return KERN_INVALID_HOST;
8706 }
8707
8708 kr = mach_memory_info_security_check(redact_info);
8709 if (kr != KERN_SUCCESS) {
8710 return kr;
8711 }
8712
8713 /*
8714 * We assume that zones aren't freed once allocated.
8715 * We won't pick up any zones that are allocated later.
8716 */
8717
8718 max_zones = os_atomic_load(&num_zones, relaxed);
8719
8720 names_size = round_page(max_zones * sizeof *names);
8721 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8722 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8723 if (kr != KERN_SUCCESS) {
8724 return kr;
8725 }
8726 names = (mach_zone_name_t *) names_addr;
8727
8728 info_size = round_page(max_zones * sizeof *info);
8729 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8730 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8731 if (kr != KERN_SUCCESS) {
8732 kmem_free(ipc_kernel_map,
8733 names_addr, names_size);
8734 return kr;
8735 }
8736 info = (mach_zone_info_t *) info_addr;
8737
8738 if (redact_info) {
8739 coalesce_size = round_page(max_zones * sizeof *coalesce);
8740 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8741 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8742 if (kr != KERN_SUCCESS) {
8743 kmem_free(ipc_kernel_map,
8744 names_addr, names_size);
8745 kmem_free(ipc_kernel_map,
8746 info_addr, info_size);
8747 return kr;
8748 }
8749 coalesce = (int *)coalesce_addr;
8750 }
8751
8752 zn = &names[0];
8753 zi = &info[0];
8754
8755 used_zones = 0;
8756 for (i = 0; i < max_zones; i++) {
8757 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8758 continue;
8759 }
8760
8761 if (!redact_info) {
8762 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8763 zn++;
8764 zi++;
8765 used_zones++;
8766 continue;
8767 }
8768
8769 zone_info_redact(zi);
8770 if (!zone_info_needs_to_be_coalesced(i)) {
8771 zn++;
8772 zi++;
8773 used_zones++;
8774 continue;
8775 }
8776
8777 int coalesce_index;
8778 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8779 coalesce, coalesce_count, &coalesce_index);
8780
8781 /* Didn't find a zone to coalesce */
8782 if (!found_coalesce_zone) {
8783 /* Updates the zone name */
8784 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8785 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8786 (int)zi->mzi_elem_size);
8787
8788 coalesce[coalesce_count] = used_zones;
8789 coalesce_count++;
8790 zn++;
8791 zi++;
8792 used_zones++;
8793 continue;
8794 }
8795
8796 zone_info_coalesce(info, coalesce_index, zi);
8797 }
8798
8799 if (redact_info) {
8800 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8801 }
8802
8803 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8804 *namesCntp = used_zones;
8805
8806 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8807 *infoCntp = used_zones;
8808
8809 num_info = 0;
8810 memory_info_addr = 0;
8811
8812 if (memoryInfop && memoryInfoCntp) {
8813 vm_map_copy_t copy;
8814 num_info = vm_page_diagnose_estimate();
8815 memory_info_size = num_info * sizeof(*memory_info);
8816 memory_info_vmsize = round_page(memory_info_size);
8817 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8818 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8819 if (kr != KERN_SUCCESS) {
8820 return kr;
8821 }
8822
8823 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8824 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8825 assert(kr == KERN_SUCCESS);
8826
8827 memory_info = (mach_memory_info_t *) memory_info_addr;
8828 vm_page_diagnose(memory_info, num_info, zones_collectable_bytes, redact_info);
8829
8830 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8831 assert(kr == KERN_SUCCESS);
8832
8833 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8834 (vm_map_size_t)memory_info_size, TRUE, &copy);
8835 assert(kr == KERN_SUCCESS);
8836
8837 *memoryInfop = (mach_memory_info_t *) copy;
8838 *memoryInfoCntp = num_info;
8839 }
8840
8841 return KERN_SUCCESS;
8842 }
8843
8844 kern_return_t
8845 mach_zone_info_for_zone(
8846 host_priv_t host,
8847 mach_zone_name_t name,
8848 mach_zone_info_t *infop)
8849 {
8850 zone_t zone_ptr;
8851
8852 if (host == HOST_NULL) {
8853 return KERN_INVALID_HOST;
8854 }
8855
8856 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8857 if (!PE_i_can_has_debugger(NULL)) {
8858 return KERN_INVALID_HOST;
8859 }
8860 #endif
8861
8862 if (infop == NULL) {
8863 return KERN_INVALID_ARGUMENT;
8864 }
8865
8866 zone_ptr = ZONE_NULL;
8867 zone_foreach(z) {
8868 /*
8869 * Append kalloc heap name to zone name (if zone is used by kalloc)
8870 */
8871 char temp_zone_name[MAX_ZONE_NAME] = "";
8872 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8873 zone_heap_name(z), z->z_name);
8874
8875 /* Find the requested zone by name */
8876 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8877 zone_ptr = z;
8878 break;
8879 }
8880 }
8881
8882 /* No zones found with the requested zone name */
8883 if (zone_ptr == ZONE_NULL) {
8884 return KERN_INVALID_ARGUMENT;
8885 }
8886
8887 if (get_zone_info(zone_ptr, NULL, infop)) {
8888 return KERN_SUCCESS;
8889 }
8890 return KERN_FAILURE;
8891 }
8892
8893 kern_return_t
8894 mach_zone_info_for_largest_zone(
8895 host_priv_t host,
8896 mach_zone_name_t *namep,
8897 mach_zone_info_t *infop)
8898 {
8899 if (host == HOST_NULL) {
8900 return KERN_INVALID_HOST;
8901 }
8902
8903 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8904 if (!PE_i_can_has_debugger(NULL)) {
8905 return KERN_INVALID_HOST;
8906 }
8907 #endif
8908
8909 if (namep == NULL || infop == NULL) {
8910 return KERN_INVALID_ARGUMENT;
8911 }
8912
8913 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8914 return KERN_SUCCESS;
8915 }
8916 return KERN_FAILURE;
8917 }
8918
8919 uint64_t
8920 get_zones_collectable_bytes(void)
8921 {
8922 uint64_t zones_collectable_bytes = 0;
8923 mach_zone_info_t zi;
8924
8925 zone_foreach(z) {
8926 if (get_zone_info(z, NULL, &zi)) {
8927 zones_collectable_bytes +=
8928 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8929 }
8930 }
8931
8932 return zones_collectable_bytes;
8933 }
8934
8935 kern_return_t
8936 mach_zone_get_zlog_zones(
8937 host_priv_t host,
8938 mach_zone_name_array_t *namesp,
8939 mach_msg_type_number_t *namesCntp)
8940 {
8941 #if ZALLOC_ENABLE_LOGGING
8942 unsigned int max_zones, logged_zones, i;
8943 kern_return_t kr;
8944 zone_t zone_ptr;
8945 mach_zone_name_t *names;
8946 vm_offset_t names_addr;
8947 vm_size_t names_size;
8948
8949 if (host == HOST_NULL) {
8950 return KERN_INVALID_HOST;
8951 }
8952
8953 if (namesp == NULL || namesCntp == NULL) {
8954 return KERN_INVALID_ARGUMENT;
8955 }
8956
8957 max_zones = os_atomic_load(&num_zones, relaxed);
8958
8959 names_size = round_page(max_zones * sizeof *names);
8960 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8961 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8962 if (kr != KERN_SUCCESS) {
8963 return kr;
8964 }
8965 names = (mach_zone_name_t *) names_addr;
8966
8967 zone_ptr = ZONE_NULL;
8968 logged_zones = 0;
8969 for (i = 0; i < max_zones; i++) {
8970 zone_t z = &(zone_array[i]);
8971 assert(z != ZONE_NULL);
8972
8973 /* Copy out the zone name if zone logging is enabled */
8974 if (z->z_btlog) {
8975 get_zone_info(z, &names[logged_zones], NULL);
8976 logged_zones++;
8977 }
8978 }
8979
8980 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8981 *namesCntp = logged_zones;
8982
8983 return KERN_SUCCESS;
8984
8985 #else /* ZALLOC_ENABLE_LOGGING */
8986 #pragma unused(host, namesp, namesCntp)
8987 return KERN_FAILURE;
8988 #endif /* ZALLOC_ENABLE_LOGGING */
8989 }
8990
8991 kern_return_t
8992 mach_zone_get_btlog_records(
8993 host_priv_t host,
8994 mach_zone_name_t name,
8995 zone_btrecord_array_t *recsp,
8996 mach_msg_type_number_t *numrecs)
8997 {
8998 #if ZALLOC_ENABLE_LOGGING
8999 zone_btrecord_t *recs;
9000 kern_return_t kr;
9001 vm_address_t addr;
9002 vm_size_t size;
9003 zone_t zone_ptr;
9004 vm_map_copy_t copy;
9005
9006 if (host == HOST_NULL) {
9007 return KERN_INVALID_HOST;
9008 }
9009
9010 if (recsp == NULL || numrecs == NULL) {
9011 return KERN_INVALID_ARGUMENT;
9012 }
9013
9014 zone_ptr = ZONE_NULL;
9015 zone_foreach(z) {
9016 /*
9017 * Append kalloc heap name to zone name (if zone is used by kalloc)
9018 */
9019 char temp_zone_name[MAX_ZONE_NAME] = "";
9020 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
9021 zone_heap_name(z), z->z_name);
9022
9023 /* Find the requested zone by name */
9024 if (track_this_zone(temp_zone_name, name.mzn_name)) {
9025 zone_ptr = z;
9026 break;
9027 }
9028 }
9029
9030 /* No zones found with the requested zone name */
9031 if (zone_ptr == ZONE_NULL) {
9032 return KERN_INVALID_ARGUMENT;
9033 }
9034
9035 /* Logging not turned on for the requested zone */
9036 if (!zone_ptr->z_btlog) {
9037 return KERN_FAILURE;
9038 }
9039
9040 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
9041 if (kr != KERN_SUCCESS) {
9042 return kr;
9043 }
9044
9045 addr = (vm_address_t)recs;
9046 size = sizeof(zone_btrecord_t) * *numrecs;
9047
9048 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
9049 assert(kr == KERN_SUCCESS);
9050
9051 *recsp = (zone_btrecord_t *)copy;
9052 return KERN_SUCCESS;
9053
9054 #else /* !ZALLOC_ENABLE_LOGGING */
9055 #pragma unused(host, name, recsp, numrecs)
9056 return KERN_FAILURE;
9057 #endif /* !ZALLOC_ENABLE_LOGGING */
9058 }
9059
9060
9061 kern_return_t
9062 mach_zone_force_gc(
9063 host_t host)
9064 {
9065 if (host == HOST_NULL) {
9066 return KERN_INVALID_HOST;
9067 }
9068
9069 #if DEBUG || DEVELOPMENT
9070 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
9071 /* Callout to buffer cache GC to drop elements in the apfs zones */
9072 if (consider_buffer_cache_collect != NULL) {
9073 (void)(*consider_buffer_cache_collect)(0);
9074 }
9075 zone_gc(ZONE_GC_DRAIN);
9076 #endif /* DEBUG || DEVELOPMENT */
9077 return KERN_SUCCESS;
9078 }
9079
9080 zone_t
9081 zone_find_largest(uint64_t *zone_size)
9082 {
9083 zone_t largest_zone = 0;
9084 uint64_t largest_zone_size = 0;
9085 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
9086 if (zone_size) {
9087 *zone_size = largest_zone_size;
9088 }
9089 return largest_zone;
9090 }
9091
9092 void
9093 zone_get_stats(
9094 zone_t zone,
9095 struct zone_basic_stats *stats)
9096 {
9097 stats->zbs_avail = zone->z_elems_avail;
9098
9099 stats->zbs_alloc_fail = 0;
9100 zpercpu_foreach(zs, zone->z_stats) {
9101 stats->zbs_alloc_fail += zs->zs_alloc_fail;
9102 }
9103
9104 stats->zbs_cached = 0;
9105 if (zone->z_pcpu_cache) {
9106 zpercpu_foreach(zc, zone->z_pcpu_cache) {
9107 stats->zbs_cached += zc->zc_alloc_cur +
9108 zc->zc_free_cur +
9109 zc->zc_depot.zd_full * zc_mag_size();
9110 }
9111 }
9112
9113 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
9114
9115 /*
9116 * Since we don't take any locks, deal with possible inconsistencies
9117 * as the counters may have changed.
9118 */
9119 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
9120 &stats->zbs_alloc)) {
9121 stats->zbs_avail = stats->zbs_free;
9122 stats->zbs_alloc = 0;
9123 }
9124 }
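
/*
 * Worked example of the clamp above (hypothetical counts): if a concurrent
 * free momentarily makes zbs_free (say 130) exceed zbs_avail (say 128), the
 * unsigned subtraction would wrap; instead the snapshot is reported with
 * zbs_avail == zbs_free and zbs_alloc == 0.
 */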
9125
9126 #endif /* !ZALLOC_TEST */
9127 #pragma mark zone creation, configuration, destruction
9128 #if !ZALLOC_TEST
9129
9130 static zone_t
9131 zone_init_defaults(zone_id_t zid)
9132 {
9133 zone_t z = &zone_array[zid];
9134
9135 z->z_wired_max = ~0u;
9136 z->collectable = true;
9137
9138 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
9139 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
9140 zone_depot_init(&z->z_recirc);
9141 return z;
9142 }
9143
9144 void
9145 zone_set_exhaustible(zone_t zone, vm_size_t nelems, bool exhausts_by_design)
9146 {
9147 zone_lock(zone);
9148 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
9149 zone->z_exhausts = exhausts_by_design;
9150 zone_unlock(zone);
9151 }
9152
9153 void
9154 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
9155 {
9156 zone_t zone = zov.zov_zone;
9157
9158 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
9159 zone = zov.zov_view->zv_zone;
9160 } else {
9161 zone = zov.zov_zone;
9162 }
9163
9164 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
9165 }
9166
9167 /**
9168 * @function zone_create_find
9169 *
9170 * @abstract
9171 * Finds an unused zone for the given name and element size.
9172 *
9173 * @param name the zone name
9174 * @param size the element size (including redzones, ...)
9175 * @param flags the flags passed to @c zone_create*
9176 * @param zid_inout the desired zone ID or ZONE_ID_ANY
9177 *
9178 * @returns a zone to initialize further.
9179 */
9180 static zone_t
9181 zone_create_find(
9182 const char *name,
9183 vm_size_t size,
9184 zone_create_flags_t flags,
9185 zone_id_t *zid_inout)
9186 {
9187 zone_id_t nzones, zid = *zid_inout;
9188 zone_t z;
9189
9190 simple_lock(&all_zones_lock, &zone_locks_grp);
9191
9192 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
9193 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
9194
9195 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
9196 /*
9197 * The first time around, make sure the reserved zone IDs
9198 * have an initialized lock as zone_index_foreach() will
9199 * enumerate them.
9200 */
9201 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
9202 zone_init_defaults(nzones++);
9203 }
9204
9205 os_atomic_store(&num_zones, nzones, release);
9206 }
9207
9208 if (zid != ZONE_ID_ANY) {
9209 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
9210 panic("zone_create: invalid desired zone ID %d for %s",
9211 zid, name);
9212 }
9213 if (flags & ZC_DESTRUCTIBLE) {
9214 panic("zone_create: ID %d (%s) must be permanent", zid, name);
9215 }
9216 if (zone_array[zid].z_self) {
9217 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
9218 }
9219 z = &zone_array[zid];
9220 } else {
9221 if (flags & ZC_DESTRUCTIBLE) {
9222 /*
9223 * If possible, find a previously zdestroy'ed zone in the
9224 * zone_array that we can reuse.
9225 */
9226 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
9227 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
9228 z = &zone_array[i];
9229
9230 /*
9231 * If the zone name and the element size are the
9232 * same, we can just reuse the old zone struct.
9233 */
9234 if (strcmp(z->z_name, name) ||
9235 zone_elem_outer_size(z) != size) {
9236 continue;
9237 }
9238 bitmap_clear(zone_destroyed_bitmap, i);
9239 z->z_destroyed = false;
9240 z->z_self = z;
9241 zid = (zone_id_t)i;
9242 goto out;
9243 }
9244 }
9245
9246 zid = nzones++;
9247 z = zone_init_defaults(zid);
9248
9249 /*
9250 * The release barrier pairs with the acquire in
9251 * zone_index_foreach() and makes sure that enumeration loops
9252 * always see an initialized zone lock.
9253 */
9254 os_atomic_store(&num_zones, nzones, release);
9255 }
9256
9257 out:
9258 num_zones_in_use++;
9259 simple_unlock(&all_zones_lock);
9260
9261 *zid_inout = zid;
9262 return z;
9263 }
9264
9265 __abortlike
9266 static void
9267 zone_create_panic(const char *name, const char *f1, const char *f2)
9268 {
9269 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
9270 name, f1, f2);
9271 }
9272 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
9273 if ((flags) & forbidden_flag) { \
9274 zone_create_panic(name, #current_flag, #forbidden_flag); \
9275 }
9276
9277 /*
9278 * Adjusts the size of the element based on minimum size, alignment
9279 * and kasan redzones
9280 */
9281 static vm_size_t
9282 zone_elem_adjust_size(
9283 const char *name __unused,
9284 vm_size_t elem_size,
9285 zone_create_flags_t flags __unused,
9286 uint16_t *redzone __unused)
9287 {
9288 vm_size_t size;
9289
9290 /*
9291 * Adjust element size for minimum size and pointer alignment
9292 */
9293 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9294 if (size < ZONE_MIN_ELEM_SIZE) {
9295 size = ZONE_MIN_ELEM_SIZE;
9296 }
9297
9298 #if KASAN_CLASSIC
9299 /*
9300 * Expand the zone allocation size to include the redzones.
9301 *
9302 * For page-multiple zones add a full guard page because they
9303 * likely require alignment.
9304 */
9305 uint16_t redzone_tmp;
9306 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9307 redzone_tmp = 0;
9308 } else if ((size & PAGE_MASK) == 0) {
9309 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9310 			panic("zone_create: zone %s can't provide more than PAGE_SIZE "
9311 			    "alignment", name);
9312 }
9313 redzone_tmp = PAGE_SIZE;
9314 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
9315 redzone_tmp = 0;
9316 } else {
9317 redzone_tmp = KASAN_GUARD_SIZE;
9318 }
9319 size += redzone_tmp;
9320 if (redzone) {
9321 *redzone = redzone_tmp;
9322 }
9323 #endif
9324 return size;
9325 }
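
/*
 * Illustrative sketch of the rounding above, using hypothetical constants
 * (an 8-byte alignment and a 16-byte minimum element size); the real values
 * come from ZONE_ALIGN_SIZE and ZONE_MIN_ELEM_SIZE.
 */
#if 0
static vm_size_t
zone_elem_round_example(vm_size_t elem_size)
{
	/* (20 + 7) & ~7 == 24: round up to the next 8-byte boundary */
	vm_size_t size = (elem_size + 8 - 1) & ~(vm_size_t)7;

	/* a 4-byte element is clamped up to the 16-byte minimum */
	if (size < 16) {
		size = 16;
	}
	return size;
}
#endif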
9326
9327 /*
9328  * Returns the allocation chunk size that has the least fragmentation
9329 */
9330 static vm_size_t
9331 zone_get_min_alloc_granule(
9332 vm_size_t elem_size,
9333 zone_create_flags_t flags)
9334 {
9335 vm_size_t alloc_granule = PAGE_SIZE;
9336 if (flags & ZC_PERCPU) {
9337 alloc_granule = PAGE_SIZE * zpercpu_count();
9338 if (PAGE_SIZE % elem_size > 256) {
9339 panic("zone_create: per-cpu zone has too much fragmentation");
9340 }
9341 } else if (flags & ZC_READONLY) {
9342 alloc_granule = PAGE_SIZE;
9343 } else if ((elem_size & PAGE_MASK) == 0) {
9344 /* zero fragmentation by definition */
9345 alloc_granule = elem_size;
9346 } else if (alloc_granule % elem_size == 0) {
9347 /* zero fragmentation by definition */
9348 } else {
9349 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9350 vm_size_t alloc_tmp = PAGE_SIZE;
9351 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9352
9353 #if __arm64__
9354 /*
9355 * Increase chunk size to 48K for sizes larger than 4K on 16k
9356 		 * machines, so as to reduce internal fragmentation for kalloc
9357 * zones with sizes 12K and 24K.
9358 */
9359 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9360 max_chunk_size = 48 * 1024;
9361 }
9362 #endif
9363 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9364 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9365 if (frag_tmp < frag) {
9366 frag = frag_tmp;
9367 alloc_granule = alloc_tmp;
9368 }
9369 }
9370 }
9371 return alloc_granule;
9372 }
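
/*
 * Worked example for the search above (assuming 16K pages): a 12K element
 * wastes 4K per page, i.e. 25% fragmentation; growing the chunk to 48K fits
 * four 12K elements exactly, which is why the arm64 case raises
 * max_chunk_size to 48K for such sizes.
 */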
9373
9374 vm_size_t
9375 zone_get_early_alloc_size(
9376 const char *name __unused,
9377 vm_size_t elem_size,
9378 zone_create_flags_t flags,
9379 vm_size_t min_elems)
9380 {
9381 vm_size_t adjusted_size, alloc_granule, chunk_elems;
9382
9383 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9384 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9385 chunk_elems = alloc_granule / adjusted_size;
9386
9387 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9388 }
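
/*
 * Worked example (hypothetical sizes): with a 64-byte adjusted element and a
 * 16K allocation granule, one chunk holds 256 elements, so a request for 100
 * elements rounds up to a single 16K chunk, and a request for 300 elements
 * rounds up to two chunks (32K).
 */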
9389
9390 zone_t
9391 zone_create_ext(
9392 const char *name,
9393 vm_size_t size,
9394 zone_create_flags_t flags,
9395 zone_id_t zid,
9396 void (^extra_setup)(zone_t))
9397 {
9398 zone_security_flags_t *zsflags;
9399 uint16_t redzone;
9400 zone_t z;
9401
9402 if (size > ZONE_MAX_ALLOC_SIZE) {
9403 panic("zone_create: element size too large: %zd", (size_t)size);
9404 }
9405
9406 if (size < 2 * sizeof(vm_size_t)) {
9407 /* Elements are too small for kasan. */
9408 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9409 }
9410
9411 size = zone_elem_adjust_size(name, size, flags, &redzone);
9412
9413 /*
9414 * Allocate the zone slot, return early if we found an older match.
9415 */
9416 z = zone_create_find(name, size, flags, &zid);
9417 if (__improbable(z->z_self)) {
9418 /* We found a zone to reuse */
9419 return z;
9420 }
9421 zsflags = &zone_security_array[zid];
9422
9423 /*
9424 * Initialize the zone properly.
9425 */
9426
9427 /*
9428 * If the kernel is post lockdown, copy the zone name passed in.
9429 * Else simply maintain a pointer to the name string as it can only
9430 * be a core XNU zone (no unloadable kext exists before lockdown).
9431 */
9432 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9433 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9434 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9435 strlcpy(buf, name, nsz);
9436 z->z_name = buf;
9437 } else {
9438 z->z_name = name;
9439 }
9440 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9441 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9442 } else {
9443 /*
9444 * zone_init() hasn't run yet, use the storage provided by
9445 * zone_stats_startup(), and zone_init() will replace it
9446 * with the final value once the PERCPU zone exists.
9447 */
9448 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9449 }
9450
9451 if (flags & ZC_OBJ_CACHE) {
9452 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9453 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9454 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9455 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9456
9457 z->z_elem_size = (uint16_t)size;
9458 z->z_chunk_pages = 0;
9459 z->z_quo_magic = 0;
9460 z->z_align_magic = 0;
9461 z->z_chunk_elems = 0;
9462 z->z_elem_offs = 0;
9463 z->no_callout = true;
9464 zsflags->z_lifo = true;
9465 } else {
9466 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9467
9468 z->z_elem_size = (uint16_t)(size - redzone);
9469 z->z_chunk_pages = (uint16_t)atop(alloc);
9470 z->z_quo_magic = Z_MAGIC_QUO(size);
9471 z->z_align_magic = Z_MAGIC_ALIGNED(size);
9472 if (flags & ZC_PERCPU) {
9473 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9474 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9475 } else {
9476 z->z_chunk_elems = (uint16_t)(alloc / size);
9477 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9478 }
9479 }
9480
9481 /*
9482 * Handle KPI flags
9483 */
9484
9485 /* ZC_CACHING applied after all configuration is done */
9486 if (flags & ZC_NOCACHING) {
9487 z->z_nocaching = true;
9488 }
9489
9490 if (flags & ZC_READONLY) {
9491 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9492 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9493 assert(zid <= ZONE_ID__LAST_RO);
9494 #if ZSECURITY_CONFIG(READ_ONLY)
9495 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9496 #endif
9497 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9498 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9499 assert(size <= PAGE_SIZE);
9500 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9501 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9502 (uint32_t)size, zone_heap_name(z), z->z_name);
9503 }
9504 }
9505
9506 if (flags & ZC_PERCPU) {
9507 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9508 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9509 z->z_percpu = true;
9510 }
9511 if (flags & ZC_NOGC) {
9512 z->collectable = false;
9513 }
9514 /*
9515 * Handle ZC_NOENCRYPT from xnu only
9516 */
9517 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9518 zsflags->z_noencrypt = true;
9519 }
9520 if (flags & ZC_NOCALLOUT) {
9521 z->no_callout = true;
9522 }
9523 if (flags & ZC_DESTRUCTIBLE) {
9524 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9525 z->z_destructible = true;
9526 }
9527 /*
9528 * Handle Internal flags
9529 */
9530 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9531 if (flags & ZC_PGZ_USE_GUARDS) {
9532 /*
9533 * Try to turn on guard pages only for zones
9534 * with a chance of OOB.
9535 */
9536 if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9537 zsflags->z_pgz_use_guards = true;
9538 }
9539 z->z_pgz_use_guards = true;
9540 }
9541 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9542 if (!(flags & ZC_NOTBITAG)) {
9543 z->z_tbi_tag = true;
9544 }
9545 if (flags & ZC_KALLOC_TYPE) {
9546 zsflags->z_kalloc_type = true;
9547 }
9548 if (flags & ZC_VM) {
9549 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9550 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9551 }
9552 if (flags & ZC_DATA) {
9553 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9554 }
9555 #if KASAN_CLASSIC
9556 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9557 z->z_kasan_quarantine = true;
9558 }
9559 z->z_kasan_redzone = redzone;
9560 #endif /* KASAN_CLASSIC */
9561 #if KASAN_FAKESTACK
9562 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9563 z->z_kasan_fakestacks = true;
9564 }
9565 #endif /* KASAN_FAKESTACK */
9566
9567 /*
9568 * Then if there's extra tuning, do it
9569 */
9570 if (extra_setup) {
9571 extra_setup(z);
9572 }
9573
9574 /*
9575 * Configure debugging features
9576 */
9577 #if CONFIG_PROB_GZALLOC
9578 if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9579 pgz_zone_init(z);
9580 }
9581 #endif
9582 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9583 #if ZALLOC_ENABLE_LOGGING
9584 /*
9585 * Check for and set up zone leak detection
9586 * if requested via boot-args.
9587 */
9588 zone_setup_logging(z);
9589 #endif /* ZALLOC_ENABLE_LOGGING */
9590 #if KASAN_TBI
9591 zone_setup_kasan_logging(z);
9592 #endif /* KASAN_TBI */
9593 }
9594
9595 #if VM_TAG_SIZECLASSES
9596 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9597 static uint16_t sizeclass_idx;
9598
9599 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9600 z->z_uses_tags = true;
9601 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9602 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9603 z->z_tags_sizeclass = sizeclass_idx++;
9604 } else {
9605 uint16_t i = 0;
9606 for (; i < sizeclass_idx; i++) {
9607 if (size == zone_tags_sizeclasses[i]) {
9608 z->z_tags_sizeclass = i;
9609 break;
9610 }
9611 }
9612
9613 /*
9614 * Size class wasn't found, add it to zone_tags_sizeclasses
9615 */
9616 if (i == sizeclass_idx) {
9617 assert(i < VM_TAG_SIZECLASSES);
9618 zone_tags_sizeclasses[i] = (uint16_t)size;
9619 z->z_tags_sizeclass = sizeclass_idx++;
9620 }
9621 }
9622 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9623 }
9624 #endif
9625
9626 /*
9627 * Finally, fixup properties based on security policies, boot-args, ...
9628 */
9629 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9630 /*
9631 * We use LIFO in the data map, because workloads like network
9632 * usage or similar tend to rotate through allocations very
9633 		 * quickly, with sometimes exploding working sets, and using
9634 		 * a FIFO policy might cause massive TLB thrashing with rather
9635 * dramatic performance impacts.
9636 */
9637 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9638 zsflags->z_lifo = true;
9639 }
9640
9641 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9642 /*
9643 * No zone made before zone_init() can have ZC_CACHING set.
9644 */
9645 assert(zc_magazine_zone);
9646 zone_enable_caching(z);
9647 }
9648
9649 zone_lock(z);
9650 z->z_self = z;
9651 zone_unlock(z);
9652
9653 return z;
9654 }
9655
9656 void
9657 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9658 {
9659 zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9660 }
9661
9662 zone_id_t
9663 zone_get_sig_eq(zone_t zone)
9664 {
9665 return zone_security_array[zone_index(zone)].z_sig_eq;
9666 }
9667
9668 void
9669 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9670 {
9671 /* moving to SMR must be done before the zone has ever been used */
9672 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9673 assert(!zone_security_array[zone_index(zone)].z_lifo);
9674 assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9675
9676 if (!zone->z_pcpu_cache) {
9677 zone_enable_caching(zone);
9678 }
9679
9680 zone_lock(zone);
9681
9682 zpercpu_foreach(it, zone->z_pcpu_cache) {
9683 it->zc_smr = smr;
9684 it->zc_free = free_cb;
9685 }
9686 zone->z_smr = true;
9687
9688 zone_unlock(zone);
9689 }
9690
9691 __startup_func
9692 void
9693 zone_create_startup(struct zone_create_startup_spec *spec)
9694 {
9695 zone_t z;
9696
9697 z = zone_create_ext(spec->z_name, spec->z_size,
9698 spec->z_flags, spec->z_zid, spec->z_setup);
9699 if (spec->z_var) {
9700 *spec->z_var = z;
9701 }
9702 }
9703
9704 /*
9705  * The first 4 fields of a zone_view and a zone alias, so that the zone_or_view_t
9706  * union works. Trust but verify.
9707 */
9708 #define zalloc_check_zov_alias(f1, f2) \
9709 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9710 zalloc_check_zov_alias(z_self, zv_zone);
9711 zalloc_check_zov_alias(z_stats, zv_stats);
9712 zalloc_check_zov_alias(z_name, zv_name);
9713 zalloc_check_zov_alias(z_views, zv_next);
9714 #undef zalloc_check_zov_alias
9715
9716 __startup_func
9717 void
9718 zone_view_startup_init(struct zone_view_startup_spec *spec)
9719 {
9720 struct kalloc_heap *heap = NULL;
9721 zone_view_t zv = spec->zv_view;
9722 zone_t z;
9723 zone_security_flags_t zsflags;
9724
9725 switch (spec->zv_heapid) {
9726 case KHEAP_ID_DATA_BUFFERS:
9727 heap = KHEAP_DATA_BUFFERS;
9728 break;
9729 default:
9730 heap = NULL;
9731 }
9732
9733 if (heap) {
9734 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9735 } else {
9736 z = *spec->zv_zone;
9737 assert(spec->zv_size <= zone_elem_inner_size(z));
9738 }
9739
9740 assert(z);
9741
9742 zv->zv_zone = z;
9743 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9744 zv->zv_next = z->z_views;
9745 zsflags = zone_security_config(z);
9746 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9747 /*
9748 * count the raw view for zones not in a heap,
9749 * kalloc_heap_init() already counts it for its members.
9750 */
9751 zone_view_count += 2;
9752 } else {
9753 zone_view_count += 1;
9754 }
9755 z->z_views = zv;
9756 }
9757
9758 zone_t
9759 zone_create(
9760 const char *name,
9761 vm_size_t size,
9762 zone_create_flags_t flags)
9763 {
9764 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9765 }
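
/*
 * Illustrative usage sketch (not part of the allocator): a typical client
 * creates its zone once at startup and then allocates and frees elements
 * from it. The "example_widget" type and the zone name below are
 * hypothetical.
 */
#if 0
struct example_widget {
	uint64_t ew_id;
	void    *ew_data;
};

static zone_t example_widget_zone;

static void
example_widget_demo(void)
{
	example_widget_zone = zone_create("example.widget",
	    sizeof(struct example_widget), ZC_ZFREE_CLEARMEM);

	struct example_widget *w;

	/* blocking, zeroed allocation from the zone */
	w = zalloc_flags(example_widget_zone, Z_WAITOK | Z_ZERO);
	w->ew_id = 1;
	zfree(example_widget_zone, w);
}
#endif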
9766
9767 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9768
9769 zone_id_t
9770 zone_create_ro(
9771 const char *name,
9772 vm_size_t size,
9773 zone_create_flags_t flags,
9774 zone_create_ro_id_t zc_ro_id)
9775 {
9776 assert(zc_ro_id <= ZC_RO_ID__LAST);
9777 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9778 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9779 return reserved_zid;
9780 }
9781
9782 zone_t
9783 zinit(
9784 vm_size_t size, /* the size of an element */
9785 vm_size_t max __unused, /* maximum memory to use */
9786 vm_size_t alloc __unused, /* allocation size */
9787 const char *name) /* a name for the zone */
9788 {
9789 return zone_create(name, size, ZC_DESTRUCTIBLE);
9790 }
9791
9792 void
9793 zdestroy(zone_t z)
9794 {
9795 unsigned int zindex = zone_index(z);
9796 zone_security_flags_t zsflags = zone_security_array[zindex];
9797
9798 current_thread()->options |= TH_OPT_ZONE_PRIV;
9799 lck_mtx_lock(&zone_gc_lock);
9800
9801 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9802
9803 lck_mtx_unlock(&zone_gc_lock);
9804 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9805
9806 zone_lock(z);
9807
9808 if (!zone_submap_is_sequestered(zsflags)) {
9809 while (!zone_pva_is_null(z->z_pageq_va)) {
9810 struct zone_page_metadata *meta;
9811
9812 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9813 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9814 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9815 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9816 zone_unlock(z);
9817 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9818 ptoa(z->z_chunk_pages));
9819 zone_lock(z);
9820 }
9821 }
9822
9823 #if !KASAN_CLASSIC
9824 /* Assert that all counts are zero */
9825 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9826 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9827 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9828 zone_heap_name(z), z->z_name);
9829 }
9830
9831 /* consistency check: make sure everything is indeed empty */
9832 assert(zone_pva_is_null(z->z_pageq_empty));
9833 assert(zone_pva_is_null(z->z_pageq_partial));
9834 assert(zone_pva_is_null(z->z_pageq_full));
9835 if (!zone_submap_is_sequestered(zsflags)) {
9836 assert(zone_pva_is_null(z->z_pageq_va));
9837 }
9838 #endif
9839
9840 zone_unlock(z);
9841
9842 simple_lock(&all_zones_lock, &zone_locks_grp);
9843
9844 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9845 /* Mark the zone as empty in the bitmap */
9846 bitmap_set(zone_destroyed_bitmap, zindex);
9847 num_zones_in_use--;
9848 assert(num_zones_in_use > 0);
9849
9850 simple_unlock(&all_zones_lock);
9851 }
9852
9853 #endif /* !ZALLOC_TEST */
9854 #pragma mark zalloc module init
9855 #if !ZALLOC_TEST
9856
9857 /*
9858 * Initialize the "zone of zones" which uses fixed memory allocated
9859 * earlier in memory initialization. zone_bootstrap is called
9860 * before zone_init.
9861 */
9862 __startup_func
9863 void
9864 zone_bootstrap(void)
9865 {
9866 #if DEBUG || DEVELOPMENT
9867 #if __x86_64__
9868 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9869 /*
9870 * rdar://79781535 Disable early gaps while running kernPOST on Intel
9871 * the fp faulting code gets triggered and deadlocks.
9872 */
9873 zone_caching_disabled = 1;
9874 }
9875 #endif /* __x86_64__ */
9876 #endif /* DEBUG || DEVELOPMENT */
9877
9878 /* Validate struct zone_packed_virtual_address expectations */
9879 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9880 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9881 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9882 }
9883
9884 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9885 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9886 /*
9887 * Scale zc_mag_size() per machine.
9888 *
9889 * - wide machines get 128B magazines to avoid all false sharing
9890 * - smaller machines but with enough RAM get a bit bigger
9891 * buckets (empirically affects networking performance)
9892 */
9893 if (zpercpu_early_count >= 10) {
9894 _zc_mag_size = 14;
9895 } else if ((sane_size >> 30) >= 4) {
9896 _zc_mag_size = 10;
9897 }
9898 }
9899
9900 /*
9901 * Initialize random used to scramble early allocations
9902 */
9903 zpercpu_foreach_cpu(cpu) {
9904 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9905 }
9906
9907 #if CONFIG_PROB_GZALLOC
9908 /*
9909 * Set pgz_sample_counter on the boot CPU so that we do not sample
9910 * any allocation until PGZ has been properly setup (in pgz_init()).
9911 */
9912 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9913 #endif /* CONFIG_PROB_GZALLOC */
9914
9915 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9916 /*
9917 * Randomly assign zones to one of the 4 general submaps,
9918 	 * and pick whether they allocate from the beginning
9919 * or the end of it.
9920 *
9921 * A lot of OOB exploitation relies on precise interleaving
9922 * of specific types in the heap.
9923 *
9924 * Woops, you can't guarantee that anymore.
9925 */
9926 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9927 uint32_t r = zalloc_random_uniform32(0,
9928 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9929
9930 zone_security_array[i].z_submap_from_end = (r & 1);
9931 zone_security_array[i].z_submap_idx += (r >> 1);
9932 }
9933 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9934
9935 thread_call_setup_with_options(&zone_expand_callout,
9936 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9937 THREAD_CALL_OPTIONS_ONCE);
9938
9939 thread_call_setup_with_options(&zone_trim_callout,
9940 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9941 THREAD_CALL_OPTIONS_ONCE);
9942 }
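
/*
 * Rough sizing note for the zc_mag_size() scaling in zone_bootstrap() above
 * (assuming 8-byte pointers and a 16-byte magazine header, both assumptions):
 * 14 slots put the magazine element at roughly 128 bytes, i.e. two 64-byte
 * cache lines, which is what the "128B magazines" remark refers to.
 */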
9943
9944 #define ZONE_GUARD_SIZE (64UL << 10)
9945
9946 __startup_func
9947 static void
9948 zone_tunables_fixup(void)
9949 {
9950 int wdt = 0;
9951
9952 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9953 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9954 PE_parse_boot_argn("pgz1", NULL, 0)) {
9955 /*
9956 * if pgz1= was used, but pgz= was not,
9957 * then the more specific pgz1 takes precedence.
9958 */
9959 pgz_all = false;
9960 }
9961 #endif
9962
9963 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9964 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9965 }
9966 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9967 !PE_parse_boot_argn("zet", NULL, 0)) {
9968 zone_exhausted_timeout = -1;
9969 }
9970 }
9971 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9972
9973 __startup_func
9974 static void
9975 zone_submap_init(
9976 mach_vm_offset_t *submap_min,
9977 zone_submap_idx_t idx,
9978 uint64_t zone_sub_map_numer,
9979 uint64_t *remaining_denom,
9980 vm_offset_t *remaining_size)
9981 {
9982 vm_map_create_options_t vmco;
9983 vm_map_address_t addr;
9984 vm_offset_t submap_start, submap_end;
9985 vm_size_t submap_size;
9986 vm_map_t submap;
9987 vm_prot_t prot = VM_PROT_DEFAULT;
9988 vm_prot_t prot_max = VM_PROT_ALL;
9989 kern_return_t kr;
9990
9991 submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9992 *remaining_denom);
9993 submap_start = *submap_min;
9994
9995 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9996 vm_offset_t submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9997 submap_start += submap_padding;
9998 submap_size = pmap_ro_zone_align(submap_size);
9999 assert(*remaining_size >= (submap_padding + submap_size));
10000 *remaining_size -= submap_padding;
10001 *submap_min = submap_start;
10002 }
10003
10004 submap_end = submap_start + submap_size;
10005 if (idx == Z_SUBMAP_IDX_VM) {
10006 vm_packing_verify_range("vm_compressor",
10007 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
10008 vm_packing_verify_range("vm_page",
10009 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
10010 }
10011
10012 vmco = VM_MAP_CREATE_NEVER_FAULTS;
10013 if (!zone_submap_is_sequestered(idx)) {
10014 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
10015 }
10016
10017 vm_map_will_allocate_early_map(&zone_submaps[idx]);
10018 submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
10019 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
10020 VM_KERN_MEMORY_ZONE).kmr_submap;
10021
10022 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
10023 zone_info.zi_ro_range.min_address = submap_start;
10024 zone_info.zi_ro_range.max_address = submap_end;
10025 prot_max = prot = VM_PROT_NONE;
10026 }
10027
10028 addr = submap_start;
10029 vm_object_t kobject = kernel_object_default;
10030 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
10031 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
10032 kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10033 if (kr != KERN_SUCCESS) {
10034 panic("ksubmap[%s]: failed to make first entry (%d)",
10035 zone_submaps_names[idx], kr);
10036 }
10037
10038 addr = submap_end - ZONE_GUARD_SIZE / 2;
10039 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
10040 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
10041 kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10042 if (kr != KERN_SUCCESS) {
10043 panic("ksubmap[%s]: failed to make last entry (%d)",
10044 zone_submaps_names[idx], kr);
10045 }
10046
10047 #if DEBUG || DEVELOPMENT
10048 printf("zone_init: map %-5s %p:%p (%u%c)\n",
10049 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
10050 mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
10051 #endif /* DEBUG || DEVELOPMENT */
10052
10053 zone_submaps[idx] = submap;
10054 *submap_min = submap_end;
10055 *remaining_size -= submap_size;
10056 *remaining_denom -= zone_sub_map_numer;
10057 }
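
/*
 * Worked example of the ratio split: with submap_ratios[] summing to 100, a
 * submap with ratio 15 receives 15% of the remaining zone VA. Because both
 * *remaining_denom and *remaining_size shrink after each call, rounding loss
 * from trunc_page() is absorbed by the submaps carved out later rather than
 * being dropped.
 */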
10058
10059 static inline void
10060 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
10061 {
10062 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
10063 pva->packed_address += delta;
10064 }
10065 }
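
/*
 * Note on the relocation above: packed page addresses are page-granular, so
 * adding the delta (the relocation distance expressed in pages, sign-extended
 * by the caller) rebases a bootstrap-time pva onto its new mapping; null pvas
 * and queue markers are left untouched.
 */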
10066
10067 /*
10068 * Allocate metadata array and migrate bootstrap initial metadata and memory.
10069 */
10070 __startup_func
10071 static void
10072 zone_metadata_init(void)
10073 {
10074 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
10075 vm_map_entry_t first;
10076
10077 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
10078 vm_size_t early_sz;
10079 vm_offset_t reloc_base;
10080
10081 /*
10082 * Step 1: Allocate the metadata + bitmaps range
10083 *
10084 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
10085 * of physical memory (16M per 1G).
10086 *
10087 * Let's preallocate for the worst to avoid weird panics.
10088 */
10089 vm_map_will_allocate_early_map(&zone_meta_map);
10090 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
10091 zone_meta_size + zone_bits_size + zone_xtra_size,
10092 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10093 VM_KERN_MEMORY_ZONE, &zone_meta_map);
10094 meta_r.min_address += ZONE_GUARD_SIZE;
10095 meta_r.max_address -= ZONE_GUARD_SIZE;
10096 if (zone_xtra_size) {
10097 xtra_r.max_address = meta_r.max_address;
10098 meta_r.max_address -= zone_xtra_size;
10099 xtra_r.min_address = meta_r.max_address;
10100 } else {
10101 xtra_r.min_address = xtra_r.max_address = 0;
10102 }
10103 bits_r.max_address = meta_r.max_address;
10104 meta_r.max_address -= zone_bits_size;
10105 bits_r.min_address = meta_r.max_address;
10106
10107 #if DEBUG || DEVELOPMENT
10108 printf("zone_init: metadata %p:%p (%u%c)\n",
10109 (void *)meta_r.min_address, (void *)meta_r.max_address,
10110 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
10111 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
10112 printf("zone_init: metabits %p:%p (%u%c)\n",
10113 (void *)bits_r.min_address, (void *)bits_r.max_address,
10114 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
10115 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
10116 printf("zone_init: extra %p:%p (%u%c)\n",
10117 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
10118 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
10119 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
10120 #endif /* DEBUG || DEVELOPMENT */
10121
10122 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
10123 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
10124
10125 /*
10126 * Step 2: Install new ranges.
10127 * Relocate metadata and bits.
10128 */
10129 early_r = zone_info.zi_map_range;
10130 early_sz = mach_vm_range_size(&early_r);
10131
10132 zone_info.zi_map_range = zone_map_range;
10133 zone_info.zi_meta_range = meta_r;
10134 zone_info.zi_bits_range = bits_r;
10135 zone_info.zi_xtra_range = xtra_r;
10136 zone_info.zi_meta_base = (struct zone_page_metadata *)meta_r.min_address -
10137 zone_pva_from_addr(zone_map_range.min_address).packed_address;
10138
10139 vm_map_lock(vm_map);
10140 first = vm_map_first_entry(vm_map);
10141 reloc_base = first->vme_end;
10142 first->vme_end += early_sz;
10143 vm_map->size += early_sz;
10144 vm_map_unlock(vm_map);
10145
10146 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
10147 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
10148 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
10149 /* this needs to sign extend */
10150 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
10151
10152 zone_meta_populate(reloc_base, early_sz);
10153 memcpy(new_meta, early_meta,
10154 atop(early_sz) * sizeof(struct zone_page_metadata));
10155 for (uint32_t i = 0; i < atop(early_sz); i++) {
10156 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
10157 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
10158 }
10159
10160 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
10161 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
10162
10163 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
10164 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
10165 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
10166 }
10167
10168 zba_populate(0, false);
10169 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
10170 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
10171
10172 /*
10173 	 * Step 3: Relocate the bootstrap VM structs
10174 * (including rewriting their content).
10175 */
10176
10177 kernel_memory_populate(reloc_base, early_sz,
10178 KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL | KMA_TAG,
10179 VM_KERN_MEMORY_OSFMK);
10180 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
10181
10182 #if KASAN
10183 kasan_notify_address(reloc_base, early_sz);
10184 #if KASAN_TBI
10185 kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
10186 #endif /* KASAN_TBI */
10187 #endif /* KASAN */
10188
10189 vm_map_relocate_early_maps(reloc_delta);
10190
10191 for (uint32_t i = 0; i < atop(early_sz); i++) {
10192 zone_id_t zid = new_meta[i].zm_index;
10193 zone_t z = &zone_array[zid];
10194 vm_size_t esize = zone_elem_outer_size(z);
10195 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
10196 vm_address_t addr;
10197
10198 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
10199 continue;
10200 }
10201
10202 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
10203 if (zone_meta_is_free(&new_meta[i], eidx)) {
10204 continue;
10205 }
10206
10207 addr = vm_memtag_fixup_ptr(base + eidx * esize);
10208 #if KASAN_CLASSIC
10209 kasan_alloc(addr,
10210 zone_elem_inner_size(z), zone_elem_inner_size(z),
10211 zone_elem_redzone(z), false,
10212 __builtin_frame_address(0));
10213 #endif
10214 vm_map_relocate_early_elem(zid, addr, reloc_delta);
10215 }
10216 }
10217 }
10218
10219 __startup_data
10220 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
10221 #if ZSECURITY_CONFIG(READ_ONLY)
10222 [Z_SUBMAP_IDX_VM] = 15,
10223 [Z_SUBMAP_IDX_READ_ONLY] = 5,
10224 #else
10225 [Z_SUBMAP_IDX_VM] = 20,
10226 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10227 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10228 [Z_SUBMAP_IDX_GENERAL_0] = 15,
10229 [Z_SUBMAP_IDX_GENERAL_1] = 15,
10230 [Z_SUBMAP_IDX_GENERAL_2] = 15,
10231 [Z_SUBMAP_IDX_GENERAL_3] = 15,
10232 [Z_SUBMAP_IDX_DATA] = 20,
10233 #else
10234 [Z_SUBMAP_IDX_GENERAL_0] = 60,
10235 [Z_SUBMAP_IDX_DATA] = 20,
10236 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10237 };
10238
10239 __startup_func
10240 static inline uint16_t
10241 zone_submap_ratios_denom(void)
10242 {
10243 uint16_t denom = 0;
10244
10245 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10246 denom += submap_ratios[idx];
10247 }
10248
10249 assert(denom == 100);
10250
10251 return denom;
10252 }
10253
10254 __startup_func
10255 static inline vm_offset_t
10256 zone_restricted_va_max(void)
10257 {
10258 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
10259 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
10260
10261 return trunc_page(MIN(compressor_max, vm_page_max));
10262 }
10263
10264 __startup_func
10265 static void
10266 zone_set_map_sizes(void)
10267 {
10268 vm_size_t zsize;
10269 vm_size_t zsizearg;
10270
10271 /*
10272 * Compute the physical limits for the zone map
10273 */
10274
10275 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10276 zsize = zsizearg * (1024ULL * 1024);
10277 } else {
10278 /* Set target zone size as 1/4 of physical memory */
10279 zsize = (vm_size_t)(sane_size >> 2);
10280 zsize += zsize >> 1;
10281 }
10282
10283 if (zsize < CONFIG_ZONE_MAP_MIN) {
10284 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
10285 }
10286 if (zsize > sane_size >> 1) {
10287 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10288 }
10289 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10290 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10291 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10292 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10293 zsize = ZONE_MAP_MAX;
10294 }
10295
10296 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10297
10298
10299 /*
10300 * Declare restrictions on zone max
10301 */
10302 vm_offset_t vm_submap_size = round_page(
10303 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10304 zone_submap_ratios_denom());
10305
10306 #if CONFIG_PROB_GZALLOC
10307 vm_submap_size += pgz_get_size();
10308 #endif /* CONFIG_PROB_GZALLOC */
10309 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10310 &zone_map_range.min_address)) {
10311 zone_map_range.min_address = 0;
10312 }
10313
10314 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10315 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10316
10317 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10318 ZBA_PTR_MASK + 1);
10319 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10320 (CHAR_BIT * KALLOC_MINSIZE));
10321
10322 #if VM_TAG_SIZECLASSES
10323 if (zone_tagging_on) {
10324 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10325 }
10326 if (zba_xtra_shift) {
10327 /*
10328 * if we need the extra space range, then limit the size of the
10329 * bitmaps to something reasonable instead of a theoretical
10330 * worst case scenario of all zones being for the smallest
10331 * allocation granule, in order to avoid fake VA pressure on
10332 * other parts of the system.
10333 */
10334 zone_bits_size = round_page(zone_bits_size / 8);
10335 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10336 }
10337 #endif /* VM_TAG_SIZECLASSES */
10338 }
10339 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
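
/*
 * Worked example for zone_set_map_sizes() (no "zsize" boot-arg, 8GB of RAM):
 * the target starts at 1/4 of physical memory (2GB), is grown by half of that
 * to 3GB, and is then clamped against CONFIG_ZONE_MAP_MIN, half of RAM and
 * ZONE_MAP_MAX as applicable.
 */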
10340
10341 /*
10342 * Can't use zone_info.zi_map_range at this point as it is being used to
10343 * store the range of early pmap memory that was stolen to bootstrap the
10344 * necessary VM zones.
10345 */
10346 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10347 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10348 return zone_meta_size + zone_bits_size + zone_xtra_size;
10349 });
10350
10351 /*
10352 * Global initialization of Zone Allocator.
10353 * Runs after zone_bootstrap.
10354 */
10355 __startup_func
10356 static void
10357 zone_init(void)
10358 {
10359 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
10360 mach_vm_offset_t submap_min = 0;
10361 uint64_t denom = zone_submap_ratios_denom();
10362 /*
10363 * And now allocate the various pieces of VA and submaps.
10364 */
10365
10366 submap_min = zone_map_range.min_address;
10367
10368 #if CONFIG_PROB_GZALLOC
10369 vm_size_t pgz_size = pgz_get_size();
10370
10371 vm_map_will_allocate_early_map(&pgz_submap);
10372 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10373 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10374 VM_KERN_MEMORY_ZONE, &pgz_submap);
10375
10376 submap_min += pgz_size;
10377 remaining_size -= pgz_size;
10378 #if DEBUG || DEVELOPMENT
10379 printf("zone_init: pgzalloc %p:%p (%u%c) [%d slots]\n",
10380 (void *)zone_info.zi_pgz_range.min_address,
10381 (void *)zone_info.zi_pgz_range.max_address,
10382 mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10383 pgz_slots);
10384 #endif /* DEBUG || DEVELOPMENT */
10385 #endif /* CONFIG_PROB_GZALLOC */
10386
10387 /*
10388 * Allocate the submaps
10389 */
10390 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10391 if (submap_ratios[idx] == 0) {
10392 zone_submaps[idx] = VM_MAP_NULL;
10393 } else {
10394 zone_submap_init(&submap_min, idx, submap_ratios[idx],
10395 &denom, &remaining_size);
10396 }
10397 }
10398
10399 zone_metadata_init();
10400
10401 #if VM_TAG_SIZECLASSES
10402 if (zone_tagging_on) {
10403 vm_allocation_zones_init();
10404 }
10405 #endif /* VM_TAG_SIZECLASSES */
10406
10407 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10408 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10409
10410 (void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NOTBITAG,
10411 ZONE_ID_PERMANENT, ^(zone_t z) {
10412 z->z_permanent = true;
10413 z->z_elem_size = 1;
10414 });
10415 (void)zone_create_ext("vm.permanent.percpu", 1,
10416 kma_flags | ZC_PERCPU | ZC_NOTBITAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10417 z->z_permanent = true;
10418 z->z_elem_size = 1;
10419 });
10420
10421 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10422 zc_mag_size() * sizeof(vm_offset_t),
10423 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10424 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10425
10426 /*
10427 * Now migrate the startup statistics into their final storage,
10428 * and enable logging for early zones (that zone_create_ext() skipped).
10429 */
10430 int cpu = cpu_number();
10431 zone_index_foreach(idx) {
10432 zone_t tz = &zone_array[idx];
10433
10434 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10435 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10436
10437 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10438 tz->z_stats = zs;
10439 }
10440 if (tz->z_self == tz) {
10441 #if ZALLOC_ENABLE_LOGGING
10442 zone_setup_logging(tz);
10443 #endif /* ZALLOC_ENABLE_LOGGING */
10444 #if KASAN_TBI
10445 zone_setup_kasan_logging(tz);
10446 #endif /* KASAN_TBI */
10447 }
10448 }
10449 }
10450 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10451
10452 void
10453 zalloc_iokit_lockdown(void)
10454 {
10455 zone_share_always = false;
10456 }
10457
10458 void
10459 zalloc_first_proc_made(void)
10460 {
10461 zone_caching_disabled = 0;
10462 zone_early_thres_mul = 1;
10463 }
10464
10465 __startup_func
10466 vm_offset_t
10467 zone_early_mem_init(vm_size_t size)
10468 {
10469 vm_offset_t mem;
10470
10471 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10472
10473 /*
10474 * The zone that is used early to bring up the VM is stolen here.
10475 *
10476 * When the zone subsystem is actually initialized,
10477 * zone_metadata_init() will be called, and those pages
10478 * and the elements they contain, will be relocated into
10479 * the VM submap (even for architectures when those zones
10480 * do not live there).
10481 */
10482 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10483 mem = (vm_offset_t)zone_early_pages_to_cram;
10484
10485 zone_info.zi_meta_base = zone_early_meta_array_startup -
10486 zone_pva_from_addr(mem).packed_address;
10487 zone_info.zi_map_range.min_address = mem;
10488 zone_info.zi_map_range.max_address = mem + size;
10489
10490 zone_info.zi_bits_range = (struct mach_vm_range){
10491 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10492 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10493 sizeof(zba_chunk_startup),
10494 };
10495
10496 zba_meta()->zbam_left = 1;
10497 zba_meta()->zbam_right = 1;
10498 zba_init_chunk(0, false);
10499
10500 return mem;
10501 }
10502
10503 #endif /* !ZALLOC_TEST */
10504 #pragma mark - tests
10505 #if DEBUG || DEVELOPMENT
10506
10507 /*
10508 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10509 * thread goes through at a time.
10510 *
10511  * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10512 * before zdestroy()), which could lead us to run out of zones.
10513 */
10514 static bool any_zone_test_running = FALSE;
10515
10516 static uintptr_t *
10517 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10518 {
10519 vm_offset_t elem_size = zone_elem_outer_size(z);
10520 vm_offset_t base;
10521 struct zone_page_metadata *meta;
10522
10523 while (!zone_pva_is_null(page_index)) {
10524 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10525 meta = zone_pva_to_meta(page_index);
10526
10527 if (meta->zm_inline_bitmap) {
10528 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10529 uint32_t map = meta[i].zm_bitmap;
10530
10531 for (; map; map &= map - 1) {
10532 *elems++ = INSTANCE_PUT(base +
10533 elem_size * __builtin_clz(map));
10534 }
10535 base += elem_size * 32;
10536 }
10537 } else {
10538 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10539 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10540 for (size_t i = 0; i < (1u << order); i++) {
10541 uint64_t map = bits[i];
10542
10543 for (; map; map &= map - 1) {
10544 *elems++ = INSTANCE_PUT(base +
10545 elem_size * __builtin_clzll(map));
10546 }
10547 base += elem_size * 64;
10548 }
10549 }
10550
10551 page_index = meta->zm_page_next;
10552 }
10553 return elems;
10554 }
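
/*
 * Illustrative sketch (hypothetical helper): the "map &= map - 1" idiom used
 * above clears exactly one set bit per iteration, so each inner loop runs
 * once per element recorded in the occupancy bitmap.
 */
#if 0
static unsigned
popcount_example(uint64_t map)
{
	unsigned n = 0;

	for (; map; map &= map - 1) {
		n++;	/* one iteration per set bit */
	}
	return n;
}
#endif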
10555
10556 kern_return_t
10557 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10558 {
10559 zone_t zone = NULL;
10560 uintptr_t * array;
10561 uintptr_t * next;
10562 uintptr_t element;
10563 uint32_t idx, count, found;
10564 uint32_t nobtcount;
10565 uint32_t elemSize;
10566 size_t maxElems;
10567
10568 zone_foreach(z) {
10569 if (!z->z_name) {
10570 continue;
10571 }
10572 if (!strncmp(zoneName, z->z_name, nameLen)) {
10573 zone = z;
10574 break;
10575 }
10576 }
10577 if (zone == NULL) {
10578 return KERN_INVALID_NAME;
10579 }
10580
10581 elemSize = (uint32_t)zone_elem_inner_size(zone);
10582 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10583
10584 array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10585 if (array == NULL) {
10586 return KERN_RESOURCE_SHORTAGE;
10587 }
10588
10589 zone_lock(zone);
10590
10591 next = array;
10592 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10593 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10594 count = (uint32_t)(next - array);
10595
10596 zone_unlock(zone);
10597
10598 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10599 assert(found <= count);
10600
10601 for (idx = 0; idx < count; idx++) {
10602 element = array[idx];
10603 if (kInstanceFlagReferenced & element) {
10604 continue;
10605 }
10606 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10607 }
10608
10609 #if ZALLOC_ENABLE_LOGGING
10610 if (zone->z_btlog && !corruption_debug_flag) {
10611 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10612 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10613 btlog_copy_backtraces_for_elements(zone->z_btlog,
10614 (vm_address_t *)array, &count, elemSize, proc);
10615 }
10616 #endif /* ZALLOC_ENABLE_LOGGING */
10617
10618 for (nobtcount = idx = 0; idx < count; idx++) {
10619 element = array[idx];
10620 if (!element) {
10621 continue;
10622 }
10623 if (kInstanceFlagReferenced & element) {
10624 continue;
10625 }
10626 nobtcount++;
10627 }
10628 if (nobtcount) {
10629 proc(nobtcount, elemSize, BTREF_NULL);
10630 }
10631
10632 kfree_type(vm_offset_t, maxElems, array);
10633 return KERN_SUCCESS;
10634 }
10635
10636 static int
10637 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10638 {
10639 zone_security_flags_t zsflags;
10640 uint32_t x = 4;
10641 uint32_t *test_ptr;
10642
10643 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10644 printf("zone_ro_basic_test: Test already running.\n");
10645 return EALREADY;
10646 }
10647
10648 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10649
10650 for (int i = 0; i < 3; i++) {
10651 #if ZSECURITY_CONFIG(READ_ONLY)
10652 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10653 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10654 printf("zone_ro_basic_test: create a sub-page size zone\n");
10655
10656 printf("zone_ro_basic_test: verify flags were set\n");
10657 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10658
10659 printf("zone_ro_basic_test: zalloc an element\n");
10660 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10661 assert(test_ptr);
10662
10663 printf("zone_ro_basic_test: verify we can't write to it\n");
10664 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10665
10666 x = 4;
10667 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10668 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10669 assert(test_ptr);
10670 assert(*(uint32_t*)test_ptr == x);
10671
10672 x = 5;
10673 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10674 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10675 assert(test_ptr);
10676 assert(*(uint32_t*)test_ptr == x);
10677
10678 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10679 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10680
10681 printf("zone_ro_basic_test: free elem\n");
10682 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10683 assert(!test_ptr);
10684 #else
10685 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10686
10687 printf("zone_ro_basic_test: verify flags were set\n");
10688 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10689
10690 printf("zone_ro_basic_test: zalloc an element\n");
10691 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10692 assert(test_ptr);
10693
10694 x = 4;
10695 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10696 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10697 assert(test_ptr);
10698 assert(*(uint32_t*)test_ptr == x);
10699
10700 x = 5;
10701 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10702 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10703 assert(test_ptr);
10704 assert(*(uint32_t*)test_ptr == x);
10705
10706 printf("zone_ro_basic_test: free elem\n");
10707 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10708 assert(!test_ptr);
10709 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10710 }
10711
10712 printf("zone_ro_basic_test: garbage collection\n");
10713 zone_gc(ZONE_GC_DRAIN);
10714
10715 printf("zone_ro_basic_test: Test passed\n");
10716
10717 *out = 1;
10718 os_atomic_store(&any_zone_test_running, false, relaxed);
10719 return 0;
10720 }
10721 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10722
10723 static int
10724 zone_basic_test_run(__unused int64_t in, int64_t *out)
10725 {
10726 static zone_t test_zone_ptr = NULL;
10727
10728 unsigned int i = 0, max_iter = 5;
10729 void * test_ptr;
10730 zone_t test_zone;
10731 int rc = 0;
10732
10733 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10734 printf("zone_basic_test: Test already running.\n");
10735 return EALREADY;
10736 }
10737
10738 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10739
10740 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10741 do {
10742 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10743 assert(test_zone);
10744
10745 #if KASAN_CLASSIC
10746 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10747 #else
10748 if (test_zone->z_elems_free != 0)
10749 #endif
10750 {
10751 printf("zone_basic_test: free count is not zero\n");
10752 rc = EIO;
10753 goto out;
10754 }
10755
10756 if (test_zone_ptr == NULL) {
10757 			/* Stash the zone pointer returned on the first zinit */
10758 printf("zone_basic_test: zone created for the first time\n");
10759 test_zone_ptr = test_zone;
10760 } else if (test_zone != test_zone_ptr) {
10761 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10762 rc = EIO;
10763 goto out;
10764 }
10765
10766 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10767 zfree(test_zone, test_ptr);
10768
10769 zdestroy(test_zone);
10770 i++;
10771
10772 printf("zone_basic_test: Iteration %d successful\n", i);
10773 } while (i < max_iter);
10774
10775 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10776 /* test Z_VA_SEQUESTER */
10777 {
10778 zone_t test_pcpu_zone;
10779 kern_return_t kr;
10780 int idx, num_allocs = 8;
10781 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10782 void *allocs[num_allocs];
10783 void **allocs_pcpu;
10784 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10785
10786 test_zone = zone_create("test_zone_sysctl", elem_size,
10787 ZC_DESTRUCTIBLE);
10788 assert(test_zone);
10789
10790 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10791 ZC_DESTRUCTIBLE | ZC_PERCPU);
10792 assert(test_pcpu_zone);
10793
10794 for (idx = 0; idx < num_allocs; idx++) {
10795 allocs[idx] = zalloc(test_zone);
10796 assert(NULL != allocs[idx]);
10797 printf("alloc[%d] %p\n", idx, allocs[idx]);
10798 }
10799 for (idx = 0; idx < num_allocs; idx++) {
10800 zfree(test_zone, allocs[idx]);
10801 }
10802 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10803
10804 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10805 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10806 assert(kr == KERN_SUCCESS);
10807
10808 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10809 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10810 Z_WAITOK | Z_ZERO);
10811 assert(NULL != allocs_pcpu[idx]);
10812 }
10813 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10814 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10815 }
10816 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10817
10818 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10819 vm_page_wire_count, vm_page_free_count,
10820 100L * phys_pages / zone_pages_wired_max);
10821 zone_gc(ZONE_GC_DRAIN);
10822 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10823 vm_page_wire_count, vm_page_free_count,
10824 100L * phys_pages / zone_pages_wired_max);
10825
10826 unsigned int allva = 0;
10827
10828 zone_foreach(z) {
10829 zone_lock(z);
10830 allva += z->z_wired_cur;
10831 if (zone_pva_is_null(z->z_pageq_va)) {
10832 zone_unlock(z);
10833 continue;
10834 }
10835 unsigned count = 0;
10836 uint64_t size;
10837 zone_pva_t pg = z->z_pageq_va;
10838 struct zone_page_metadata *page_meta;
10839 while (pg.packed_address) {
10840 page_meta = zone_pva_to_meta(pg);
10841 count += z->z_percpu ? 1 : z->z_chunk_pages;
10842 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10843 count -= page_meta->zm_page_index;
10844 }
10845 pg = page_meta->zm_page_next;
10846 }
10847 size = zone_size_wired(z);
10848 if (!size) {
10849 size = 1;
10850 }
10851 printf("%s%s: seq %d, res %d, %qd %%\n",
10852 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10853 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10854 zone_unlock(z);
10855 }
10856
10857 printf("total va: %d\n", allva);
10858
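/* Both test zones should now hold only sequestered VA, no populated pages */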
10859 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10860 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10861 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10862 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10863 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10864 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10865
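/* The old allocations must no longer be backed by physical pages */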
10866 for (idx = 0; idx < num_allocs; idx++) {
10867 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10868 }
10869
10870 /* make sure the zone is still usable after a GC */
10871
10872 for (idx = 0; idx < num_allocs; idx++) {
10873 allocs[idx] = zalloc(test_zone);
10874 assert(allocs[idx]);
10875 printf("alloc[%d] %p\n", idx, allocs[idx]);
10876 }
10877 for (idx = 0; idx < num_allocs; idx++) {
10878 zfree(test_zone, allocs[idx]);
10879 }
10880
10881 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10882 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10883 Z_WAITOK | Z_ZERO);
10884 assert(NULL != allocs_pcpu[idx]);
10885 }
10886 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10887 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10888 }
10889
10890 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10891
10892 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10893
10894 zdestroy(test_zone);
10895 zdestroy(test_pcpu_zone);
10896 }
10897 #endif /* KASAN_CLASSIC */
10898
10899 printf("zone_basic_test: Test passed\n");
10900
10901
10902 *out = 1;
10903 out:
10904 os_atomic_store(&any_zone_test_running, false, relaxed);
10905 return rc;
10906 }
10907 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
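/*
 * Note: these tests are driven from user space through the sysctl registered
 * above and are only built on DEBUG/DEVELOPMENT kernels. The exact sysctl node
 * name depends on how SYSCTL_TEST_REGISTER wires it up; an invocation would
 * look something like `sysctl debug.test.zone_basic_test=1` (node name assumed
 * here for illustration).
 */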
10908
10909 struct zone_stress_obj {
10910 TAILQ_ENTRY(zone_stress_obj) zso_link;
10911 };
10912
10913 struct zone_stress_ctx {
10914 thread_t zsc_leader;
10915 lck_mtx_t zsc_lock;
10916 zone_t zsc_zone;
10917 uint64_t zsc_end;
10918 uint32_t zsc_workers;
10919 };
10920
10921 static void
10922 zone_stress_worker(void *arg, wait_result_t __unused wr)
10923 {
10924 struct zone_stress_ctx *ctx = arg;
10925 bool leader = ctx->zsc_leader == current_thread();
10926 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10927 struct zone_bool_gen bg = { };
10928 struct zone_stress_obj *obj;
10929 uint32_t allocs = 0;
10930
10931 random_bool_init(&bg.zbg_bg);
10932
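/*
 * Until the deadline passes, run a random mix of operations: allocate to the
 * head or tail of a local list (capped at 10000 live elements), free from
 * either end, and, on the leader thread only, occasionally force a drain GC.
 */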
10933 do {
10934 for (int i = 0; i < 2000; i++) {
10935 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10936 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10937 switch (what) {
10938 case 0:
10939 case 1:
10940 if (allocs < 10000) {
10941 obj = zalloc(ctx->zsc_zone);
10942 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10943 allocs++;
10944 }
10945 break;
10946 case 2:
10947 case 3:
10948 if (allocs < 10000) {
10949 obj = zalloc(ctx->zsc_zone);
10950 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10951 allocs++;
10952 }
10953 break;
10954 case 4:
10955 if (leader) {
10956 zone_gc(ZONE_GC_DRAIN);
10957 }
10958 break;
10959 case 5:
10960 case 6:
10961 if (!TAILQ_EMPTY(&head)) {
10962 obj = TAILQ_FIRST(&head);
10963 TAILQ_REMOVE(&head, obj, zso_link);
10964 zfree(ctx->zsc_zone, obj);
10965 allocs--;
10966 }
10967 break;
10968 case 7:
10969 if (!TAILQ_EMPTY(&head)) {
10970 obj = TAILQ_LAST(&head, zone_stress_head);
10971 TAILQ_REMOVE(&head, obj, zso_link);
10972 zfree(ctx->zsc_zone, obj);
10973 allocs--;
10974 }
10975 break;
10976 }
10977 }
10978 } while (mach_absolute_time() < ctx->zsc_end);
10979
10980 while (!TAILQ_EMPTY(&head)) {
10981 obj = TAILQ_FIRST(&head);
10982 TAILQ_REMOVE(&head, obj, zso_link);
10983 zfree(ctx->zsc_zone, obj);
10984 }
10985
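/* Rendezvous: the last thread to finish wakes the leader, which sleeps until every worker is done */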
10986 lck_mtx_lock(&ctx->zsc_lock);
10987 if (--ctx->zsc_workers == 0) {
10988 thread_wakeup(ctx);
10989 } else if (leader) {
10990 while (ctx->zsc_workers) {
10991 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10992 THREAD_UNINT);
10993 }
10994 }
10995 lck_mtx_unlock(&ctx->zsc_lock);
10996
10997 if (!leader) {
10998 thread_terminate_self();
10999 __builtin_unreachable();
11000 }
11001 }
11002
11003 static int
11004 zone_stress_test_run(__unused int64_t in, int64_t *out)
11005 {
11006 struct zone_stress_ctx ctx = {
11007 .zsc_leader = current_thread(),
11008 .zsc_workers = 3,
11009 };
11010 kern_return_t kr;
11011 thread_t th;
11012
11013 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11014 printf("zone_stress_test: Test already running.\n");
11015 return EALREADY;
11016 }
11017
11018 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11019 ctx.zsc_zone = zone_create("test_zone_344", 344,
11020 ZC_DESTRUCTIBLE | ZC_NOCACHING);
11021 assert(ctx.zsc_zone->z_chunk_pages > 1);
11022
11023 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
11024
11025 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
11026
11027 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11028
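/* Spawn the extra workers; the calling thread participates as the leader */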
11029 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
11030 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
11031 BASEPRI_DEFAULT, &th);
11032 if (kr == KERN_SUCCESS) {
11033 printf("zone_stress_test: thread %d: %p\n", i, th);
11034 thread_deallocate(th);
11035 } else {
11036 ctx.zsc_workers--;
11037 }
11038 }
11039
11040 zone_stress_worker(&ctx, 0);
11041
11042 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
11043
11044 zdestroy(ctx.zsc_zone);
11045
11046 printf("zone_stress_test: Done\n");
11047
11048 *out = 1;
11049 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11050 os_atomic_store(&any_zone_test_running, false, relaxed);
11051 return 0;
11052 }
11053 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
11054
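/*
 * Each GC stress element is padded to 64 pointer-sized words so that a burst
 * of allocations spans many pages quickly.
 */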
11055 struct zone_gc_stress_obj {
11056 STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
11057 uintptr_t zgso_pad[63];
11058 };
11059 STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);
11060
11061 #define ZONE_GC_OBJ_PER_PAGE (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))
11062
11063 KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);
11064
11065 struct zone_gc_stress_ctx {
11066 bool zgsc_done;
11067 lck_mtx_t zgsc_lock;
11068 zone_t zgsc_zone;
11069 uint64_t zgsc_end;
11070 uint32_t zgsc_workers;
11071 };
11072
11073 static void
11074 zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
11075 {
11076 struct zone_gc_stress_obj *obj;
11077
11078 for (size_t i = 0; i < n; i++) {
11079 obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
11080 STAILQ_INSERT_TAIL(head, obj, zgso_link);
11081 }
11082 }
11083
11084 static void
11085 zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
11086 {
11087 struct zone_gc_stress_obj *obj;
11088
11089 while ((obj = STAILQ_FIRST(head))) {
11090 STAILQ_REMOVE_HEAD(head, zgso_link);
11091 zfree(zone_gc_stress_zone, obj);
11092 }
11093 }
11094
11095 __dead2
11096 static void
11097 zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
11098 {
11099 struct zone_gc_stress_ctx *ctx = arg;
11100 struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11101
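/* Churn four pages worth of elements at a time until the leader signals completion */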
11102 while (!ctx->zgsc_done) {
11103 zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
11104 zone_gc_stress_test_free_n(&head);
11105 }
11106
11107 lck_mtx_lock(&ctx->zgsc_lock);
11108 if (--ctx->zgsc_workers == 0) {
11109 thread_wakeup(ctx);
11110 }
11111 lck_mtx_unlock(&ctx->zgsc_lock);
11112
11113 thread_terminate_self();
11114 __builtin_unreachable();
11115 }
11116
11117 static int
11118 zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
11119 {
11120 struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11121 struct zone_gc_stress_ctx ctx = {
11122 .zgsc_workers = 3,
11123 };
11124 kern_return_t kr;
11125 thread_t th;
11126
11127 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11128 printf("zone_gc_stress_test: Test already running.\n");
11129 return EALREADY;
11130 }
11131
11132 lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11133 lck_mtx_lock(&ctx.zgsc_lock);
11134
11135 printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());
11136
11137 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11138
11139 for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
11140 kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
11141 BASEPRI_DEFAULT, &th);
11142 if (kr == KERN_SUCCESS) {
11143 printf("zone_gc_stress_test: thread %d: %p\n", i, th);
11144 thread_deallocate(th);
11145 } else {
11146 ctx.zgsc_workers--;
11147 }
11148 }
11149
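/*
 * Each round allocates a batch sized from the magazine and free-batch
 * tunables (at least 20 pages worth of elements), frees it all, then trims
 * the zone under zone_gc_lock while the worker threads keep churning.
 */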
11150 for (uint64_t i = 0; i < in; i++) {
11151 size_t count = zc_mag_size() * zc_free_batch_size() * 10;
11152
11153 if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
11154 count = ZONE_GC_OBJ_PER_PAGE * 20;
11155 }
11156
11157 zone_gc_stress_test_alloc_n(&head, count);
11158 zone_gc_stress_test_free_n(&head);
11159
11160 lck_mtx_lock(&zone_gc_lock);
11161 zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11162 ZONE_RECLAIM_TRIM);
11163 lck_mtx_unlock(&zone_gc_lock);
11164
11165 printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
11166 }
11167
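/* Tell the workers to stop, then sleep until the last one signals completion */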
11168 os_atomic_thread_fence(seq_cst);
11169 ctx.zgsc_done = true;
11170 lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
11171 lck_mtx_unlock(&ctx.zgsc_lock);
11172
11173 lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);
11174
11175 lck_mtx_lock(&zone_gc_lock);
11176 zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11177 ZONE_RECLAIM_DRAIN);
11178 lck_mtx_unlock(&zone_gc_lock);
11179
11180 printf("zone_gc_stress_test: Done\n");
11181
11182 *out = 1;
11183 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11184 os_atomic_store(&any_zone_test_running, false, relaxed);
11185 return 0;
11186 }
11187 SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);
11188
11189 /*
11190 * Routines to test that zone garbage collection and zone replenish threads
11191 * running at the same time don't cause problems.
11192 */
11193
11194 static int
11195 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
11196 {
11197 zone_gc(ZONE_GC_DRAIN);
11198 *out = 1;
11199 return 0;
11200 }
11201 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
11202
11203 static int
11204 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
11205 {
11206 zone_t z = vm_map_entry_zone;
11207 struct data { struct data *next; } *node, *list = NULL;
11208
11209 if (z == NULL) {
11210 printf("Couldn't find a replenish zone\n");
11211 return EIO;
11212 }
11213
11214 /* big enough to go past replenishment */
11215 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
11216 node = zalloc(z);
11217 node->next = list;
11218 list = node;
11219 }
11220
11221 /*
11222 * release the memory we allocated
11223 */
11224 while (list != NULL) {
11225 node = list;
11226 list = list->next;
11227 zfree(z, node);
11228 }
11229
11230 *out = 1;
11231 return 0;
11232 }
11233 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
11234
11235 #endif /* DEBUG || DEVELOPMENT */
11236