1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/host_statistics.h>
93 #include <kern/misc_protos.h>
94 #include <kern/thread_call.h>
95 #include <kern/zalloc_internal.h>
96 #include <kern/kalloc.h>
97 #include <kern/debug.h>
98 #include <kern/smr.h>
99
100 #include <prng/random.h>
101
102 #include <vm/pmap.h>
103 #include <vm/vm_map_internal.h>
104 #include <vm/vm_memtag.h>
105 #include <vm/vm_kern_internal.h>
106 #include <vm/vm_kern_xnu.h>
107 #include <vm/vm_page_internal.h>
108 #include <vm/vm_pageout_internal.h>
109 #include <vm/vm_compressor_xnu.h> /* C_SLOT_PACKED_PTR* */
110 #include <vm/vm_far.h>
111
112 #include <pexpert/pexpert.h>
113
114 #include <machine/machparam.h>
115 #include <machine/machine_routines.h> /* ml_cpu_get_info */
116
117 #include <os/atomic.h>
118 #include <os/log.h>
119
120 #include <libkern/OSDebug.h>
121 #include <libkern/OSAtomic.h>
122 #include <libkern/section_keywords.h>
123 #include <sys/kdebug.h>
124 #include <sys/kern_memorystatus_xnu.h>
125 #include <sys/code_signing.h>
126
127 #include <san/kasan.h>
128 #include <libsa/stdlib.h>
129 #include <sys/errno.h>
130 #include <sys/code_signing.h>
131
132 #include <IOKit/IOBSD.h>
133 #include <arm64/amcc_rorgn.h>
134
135 #if DEBUG
136 #define z_debug_assert(expr) assert(expr)
137 #else
138 #define z_debug_assert(expr) (void)(expr)
139 #endif
140
141 /* Returns pid of the task with the largest number of VM map entries. */
142 extern pid_t find_largest_process_vm_map_entries(void);
143
144 extern zone_t vm_object_zone;
145 extern zone_t ipc_service_port_label_zone;
146
147 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
148 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
149
150 #if ZSECURITY_CONFIG(ZONE_TAGGING)
151 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
152 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
153 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
154 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
155 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
156 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
157
158 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
159 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
160 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
161 #define ZONE_MAX_CHUNK_ALLOC_NUM (10)
162 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
163
164 #if XNU_PLATFORM_MacOSX
165 #define ZONE_MAP_MAX (32ULL << 30)
166 #define ZONE_MAP_VA_SIZE (128ULL << 30)
167 #else
168 #define ZONE_MAP_MAX (8ULL << 30)
169 #define ZONE_MAP_VA_SIZE (24ULL << 30)
170 #endif
171
172 __enum_closed_decl(zm_len_t, uint16_t, {
173 ZM_CHUNK_FREE = 0x0,
174 /* 1 through 8 are valid lengths */
175 ZM_CHUNK_LEN_MAX = 0x8,
176
177 /* PGZ magical values */
178 ZM_PGZ_GUARD = 0xb, /* oo[b] */
179
180 /* secondary page markers */
181 ZM_SECONDARY_PAGE = 0xe,
182 ZM_SECONDARY_PCPU_PAGE = 0xf,
183 });
184
185 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
186
187 struct zone_page_metadata {
188 union {
189 struct {
190 /* The index of the zone this metadata page belongs to */
191 zone_id_t zm_index : 10;
192
193 /*
194 * This chunk ends with a guard page.
195 */
196 uint16_t zm_guarded : 1;
197
198 /*
199 * Whether `zm_bitmap` is an inline bitmap
200 * or a packed bitmap reference
201 */
202 uint16_t zm_inline_bitmap : 1;
203
204 /*
205 * Zones allocate in "chunks" of zone_t::z_chunk_pages
206 * consecutive pages, or zpercpu_count() pages if the
207 * zone is percpu.
208 *
209 * The first page of a chunk has its metadata set with:
210 * - 0 if none of the pages are currently wired
211 * - the number of wired pages in the chunk
212 * (not scaled for percpu).
213 *
214 * Other pages in the chunk have their zm_chunk_len set
215 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
216 * depending on whether the zone is percpu or not.
217 * For those, zm_page_index holds the index of that page
218 * in the run, and zm_subchunk_len the remaining length
219 * within the chunk.
220 */
221 zm_len_t zm_chunk_len : 4;
222 };
223 uint16_t zm_bits;
224 };
225
226 union {
227 #define ZM_ALLOC_SIZE_LOCK 1u
228 uint16_t zm_alloc_size; /* first page only */
229 struct {
230 uint8_t zm_page_index; /* secondary pages only */
231 uint8_t zm_subchunk_len; /* secondary pages only */
232 };
233 uint16_t zm_oob_offs; /* in guard pages */
234 };
235 union {
236 uint32_t zm_bitmap; /* most zones */
237 uint32_t zm_bump; /* permanent zones */
238 };
239
240 union {
241 struct {
242 zone_pva_t zm_page_next;
243 zone_pva_t zm_page_prev;
244 };
245 };
246 };
247 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
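/*
 * A minimal sketch of how the layout above is meant to be walked (the real
 * resolution code lives further down in this file): secondary pages point
 * back to the chunk head via zm_page_index, so
 *
 *     if (meta->zm_chunk_len == ZM_SECONDARY_PAGE ||
 *         meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
 *         meta -= meta->zm_page_index;   // back to the chunk's first page
 *     }
 *
 * yields the metadata of the chunk's first page, whose zm_chunk_len and
 * zm_alloc_size describe the whole chunk.
 */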
248
249 /*!
250 * @typedef zone_magazine_t
251 *
252 * @brief
253 * Magazine of cached allocations.
254 *
255 * @field zm_next linkage used by magazine depots.
256 * @field zm_elems an array of @c zc_mag_size() elements.
257 */
258 struct zone_magazine {
259 zone_magazine_t zm_next;
260 smr_seq_t zm_seq;
261 vm_offset_t zm_elems[0];
262 };
263
264 /*!
265 * @typedef zone_cache_t
266 *
267 * @brief
268 * Magazine of cached allocations.
269 *
270 * @discussion
271 * Below is a diagram of the caching system. This design is inspired by the
272 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
273 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
274 * zone allocator (itself derived from this seminal work).
275 *
276 * It is divided into 3 layers:
277 * - the per-cpu layer,
278 * - the recirculation depot layer,
279 * - the Zone Allocator.
280 *
281 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
282 * which are stacks of up to @c zc_mag_size() elements.
283 *
284 * <h2>CPU layer</h2>
285 *
286 * The CPU layer (@c zone_cache_t) looks like this:
287 *
288 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
289 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
290 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
291 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
292 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
293 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
294 * ╰─────────┴─────────────────────────────╯
295 *
296 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
297 * or free to. Serialization is achieved through disabling preemption, and only
298 * the current CPU can access those allocations. This is represented on the left
299 * hand side of the diagram above.
300 *
301 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
302 * full magazines, and is protected by the @c zm_depot_lock for access.
303 * The lock is expected to absolutely never be contended, as only the local CPU
304 * tends to access the local per-cpu depot in regular operation mode.
305 *
306 * However unlike UMA, our implementation allows for the zone GC to reclaim
307 * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
308 *
309 *
310 * <h2>Recirculation Depot</h2>
311 *
312 * The recirculation depot layer is a list similar to the per-cpu depot,
313 * however it is different in two fundamental ways:
314 *
315 * - it is protected by the regular zone lock,
316 * - elements referenced by the magazines in that layer appear free
317 * to the zone layer.
318 *
319 *
320 * <h2>Magazine circulation and sizing</h2>
321 *
322 * The caching system sizes itself dynamically. Operations that allocate/free
323 * a single element call @c zone_lock_nopreempt_check_contention() which records
324 * contention on the lock by doing a trylock and recording its success.
325 *
326 * This information is stored in the @c z_recirc_cont_cur field of the zone,
327 * and a windowed moving average is maintained in @c z_contention_wma.
328 * The periodically run function @c compute_zone_working_set_size() will then
329 * take this into account to decide to grow the number of buckets allowed
330 * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
331 * thresholds.
332 *
333 * The per-cpu layer will attempt to work with its depot, finding both full and
334 * empty magazines cached there. If it can't get what it needs, then it will
335 * mediate with the zone recirculation layer. Such recirculation is done in
336 * batches in order to amortize lock holds.
337 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
338 *
339 * The recirculation layer keeps track of the minimum number of magazines
340 * it held over time for each of the full and empty queues. This allows for
341 * @c compute_zone_working_set_size() to return memory to the system when a zone
342 * stops being used as much.
343 *
344 * <h2>Security considerations</h2>
345 *
346 * The zone caching layer has been designed to avoid returning elements in
347 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
348 * and @c zfree() free to the (f) magazine, and only swap them when the
349 * requested operation cannot be fulfilled.
350 *
351 * The per-cpu overflow depot or the recirculation depots are similarly used
352 * in FIFO order.
353 *
354 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
355 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
356 * @field zc_free_cur denormalized number of elements in the (f) magazine
357 * @field zc_alloc_elems a pointer to the array of elements in (a)
358 * @field zc_free_elems a pointer to the array of elements in (f)
359 *
360 * @field zc_depot a list of @c zc_depot_cur full magazines
361 */
362 typedef struct zone_cache {
363 hw_lck_ticket_t zc_depot_lock;
364 uint16_t zc_alloc_cur;
365 uint16_t zc_free_cur;
366 vm_offset_t *zc_alloc_elems;
367 vm_offset_t *zc_free_elems;
368 struct zone_depot zc_depot;
369 smr_t zc_smr;
370 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
371 } __attribute__((aligned(64))) * zone_cache_t;
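/*
 * A minimal sketch of the per-CPU allocation fast path implied by the
 * diagram above (preemption already disabled, SMR and depot/recirculation
 * refill omitted; the real code lives in the zalloc/zfree fast paths below):
 *
 *     if (cache->zc_alloc_cur == 0) {
 *         if (cache->zc_free_cur == 0) {
 *             // both magazines empty: refill from the per-cpu depot,
 *             // then the recirculation depot, then the zone itself.
 *         } else {
 *             // swap (a) and (f) instead of refilling, which is also
 *             // what keeps reuse from being strictly LIFO.
 *             swap(cache->zc_alloc_elems, cache->zc_free_elems);
 *             swap(cache->zc_alloc_cur, cache->zc_free_cur);
 *         }
 *     }
 *     addr = cache->zc_alloc_elems[--cache->zc_alloc_cur];
 */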
372
373 #if !__x86_64__
374 static
375 #endif
376 __security_const_late struct {
377 struct mach_vm_range zi_map_range; /* all zone submaps */
378 struct mach_vm_range zi_ro_range; /* read-only range */
379 struct mach_vm_range zi_meta_range; /* debugging only */
380 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
381 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
382
383 /*
384 * The metadata lives within the zi_meta_range address range.
385 *
386 * The correct formula to find a metadata index is:
387 * absolute_page_index - page_index(zi_map_range.min_address)
388 *
389 * And then this index is used to dereference zi_meta_range.min_address
390 * as a `struct zone_page_metadata` array.
391 *
392 * To avoid doing that subtraction all the time in the various fast-paths,
393 * zi_meta_base is pre-offset with that minimum page index so that the
394 * math does not have to be redone on every lookup.
395 */
396 struct zone_page_metadata *zi_meta_base;
397 } zone_info;
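/*
 * A minimal sketch of the lookup this enables (ignoring the sign-extension
 * handling done by the zone_pva_* helpers below): because zi_meta_base is
 * pre-offset by page_index(zi_map_range.min_address),
 *
 *     meta = &zone_info.zi_meta_base[atop(addr)];
 *
 * is all a fast path needs to find the metadata of the page containing
 * `addr`, without any per-lookup subtraction.
 */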
398
399 __startup_data static struct mach_vm_range zone_map_range;
400 __startup_data static vm_map_size_t zone_meta_size;
401 __startup_data static vm_map_size_t zone_bits_size;
402 __startup_data static vm_map_size_t zone_xtra_size;
403 #if MACH_ASSERT
404 __startup_data static vm_map_size_t vm_submap_restriction_size_debug;
405 #endif /* MACH_ASSERT */
406
407 /*
408 * Initial array of metadata for stolen memory.
409 *
410 * The numbers here have to be kept in sync with vm_map_steal_memory()
411 * so that we have reserved enough metadata.
412 *
413 * After zone_init() has run (which happens while the kernel is still single
414 * threaded), the metadata is moved to its final dynamic location, and
415 * this array is unmapped with the rest of __startup_data at lockdown.
416 */
417 #define ZONE_EARLY_META_INLINE_COUNT 64
418 __startup_data
419 static struct zone_page_metadata
420 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
421
422
423 __startup_data __attribute__((aligned(PAGE_MAX_SIZE)))
424 static uint8_t zone_early_pages_to_cram[PAGE_MAX_SIZE * 16];
425
426 /*
427 * The zone_locks_grp allows for collecting lock statistics.
428 * All locks are associated with this group in zinit.
429 * Look at tools/lockstat for debugging lock contention.
430 */
431 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
432 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
433
434 /*
435 * The zone metadata lock protects:
436 * - metadata faulting,
437 * - VM submap VA allocations,
438 * - early gap page queue list
439 */
440 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
441 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
442
443 /*
444 * Exclude more than one concurrent garbage collection
445 */
446 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
447 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
448 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
449
450 /*
451 * Panic logging metadata
452 */
453 bool panic_include_zprint = false;
454 bool panic_include_kalloc_types = false;
455 zone_t kalloc_type_src_zone = ZONE_NULL;
456 zone_t kalloc_type_dst_zone = ZONE_NULL;
457 mach_memory_info_t *panic_kext_memory_info = NULL;
458 vm_size_t panic_kext_memory_size = 0;
459 vm_offset_t panic_fault_address = 0;
460
461 /*
462 * Protects zone_array, num_zones, num_zones_in_use, and
463 * zone_destroyed_bitmap
464 */
465 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
466 static zone_id_t num_zones_in_use;
467 zone_id_t _Atomic num_zones;
468 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
469
470 /*
471 * Initial globals for zone stats until we can allocate the real ones.
472 * Those get migrated into the per-CPU ones during zone_init() and
473 * this array is unmapped with the rest of __startup_data at lockdown.
474 */
475
476 /* zone to allocate zone_magazine structs from */
477 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
478 /*
479 * Zone caching is off until pid 1 is made, that is, until
480 * compute_zone_working_set_size() runs for the first time.
481 *
482 * -1 represents the "never enabled yet" value.
483 */
484 static int8_t zone_caching_disabled = -1;
485
486 __startup_data
487 static struct zone_stats zone_stats_startup[MAX_ZONES];
488 struct zone zone_array[MAX_ZONES];
489 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
490 [0 ... MAX_ZONES - 1] = {
491 .z_kheap_id = KHEAP_ID_NONE,
492 .z_noencrypt = false,
493 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
494 .z_kalloc_type = false,
495 .z_sig_eq = 0,
496 #if ZSECURITY_CONFIG(ZONE_TAGGING)
497 .z_tag = 1,
498 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
499 .z_tag = 0,
500 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
501 },
502 };
503 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
504 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
505
506 #if DEBUG || DEVELOPMENT
507 unsigned int
508 zone_max_zones(void)
509 {
510 return MAX_ZONES;
511 }
512 #endif
513
514 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
515 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
516
517 /* Used to keep track of destroyed slots in the zone_array */
518 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
519
520 /* number of zone mapped pages used by all zones */
521 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
522 size_t zone_pages_wired;
523 size_t zone_guard_pages;
524
525 /* Time (in ms) after which we panic for zone exhaustions */
526 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
527 static bool zone_share_always = true;
528 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
529
530 #if VM_TAG_SIZECLASSES
531 /*
532 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
533 * zones only.
534 *
535 * There are 3 kinds of tags that can be used:
536 * - pre-registered VM_KERN_MEMORY_*
537 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
538 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
539 *
540 * The VM tracks the statistics in lazily allocated structures.
541 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
542 *
543 * If for some reason the requested tag cannot be accounted for,
544 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
545 *
546 * Each allocated element also remembers the tag it was assigned,
547 * which lets zalloc/zfree update statistics correctly.
548 */
549
550 /* enable tags for zones that ask for it */
551 static TUNABLE(bool, zone_tagging_on, "-zt", false);
552
553 /*
554 * Array of all sizeclasses used by kalloc variants so that we can
555 * have accounting per size class for each kalloc callsite
556 */
557 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
558 #endif /* VM_TAG_SIZECLASSES */
559
560 #if DEBUG || DEVELOPMENT
561 static int zalloc_simulate_vm_pressure;
562 #endif /* DEBUG || DEVELOPMENT */
563
564 #define Z_TUNABLE(t, n, d) \
565 TUNABLE(t, _##n, #n, d); \
566 __pure2 static inline t n(void) { return _##n; }
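/*
 * For example, `Z_TUNABLE(uint16_t, zc_mag_size, 8)` below expands to
 * roughly:
 *
 *     TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *     __pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 *
 * i.e. each tunable gets a boot-arg backed variable plus a constant-foldable
 * accessor of the same name.
 */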
567
568 /*
569 * Zone caching tunables
570 *
571 * zc_mag_size():
572 * size of magazines, larger to reduce contention at the expense of memory
573 *
574 * zc_enable_level
575 * number of contentions per second after which zone caching engages
576 * automatically.
577 *
578 * 0 to disable.
579 *
580 * zc_grow_level
581 * number of contentions per second x cpu after which the number of magazines
582 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
583 *
584 * zc_shrink_level
585 * number of contentions per second x cpu below which the number of magazines
586 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
587 *
588 * zc_pcpu_max
589 * maximum memory size in bytes that can hang from a CPU,
590 * which will affect how many magazines are allowed in the depot.
591 *
592 * The alloc/free magazines are assumed to be on average half-empty
593 * and to count for "1" unit of magazines.
594 *
595 * zc_autotrim_size
596 * Size allowed to hang extra from the recirculation depot before
597 * auto-trim kicks in.
598 *
599 * zc_autotrim_buckets
600 *
601 * How many buckets in excess of the working-set are allowed
602 * before auto-trim kicks in for empty buckets.
603 *
604 * zc_free_batch_size
605 * The size of batches of frees/reclaim that can be done before we
606 * check if we have kept the zone lock held (and preemption disabled)
607 * for too long.
608 *
609 * zc_free_batch_timeout
610 * The number of mach ticks that may elapse before we will drop and
611 * reacquire the zone lock.
612 */
613 Z_TUNABLE(uint16_t, zc_mag_size, 8);
614 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
615 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
616 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
617 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
618 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
619 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
620 static Z_TUNABLE(uint32_t, zc_free_batch_size, 64);
621 static Z_TUNABLE(uint64_t, zc_free_batch_timeout, 9600); // 400us
622
623 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
624 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
625 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
626 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
627 [Z_SUBMAP_IDX_VM] = "VM",
628 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
629 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
630 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
631 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
632 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
633 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
634 #else
635 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
636 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
637 [Z_SUBMAP_IDX_DATA] = "DATA",
638 };
639
640 #if __x86_64__
641 #define ZONE_ENTROPY_CNT 8
642 #else
643 #define ZONE_ENTROPY_CNT 2
644 #endif
645 static struct zone_bool_gen {
646 struct bool_gen zbg_bg;
647 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
648 } zone_bool_gen[MAX_CPUS];
649
650 static zone_t zone_find_largest(uint64_t *zone_size);
651
652 #endif /* !ZALLOC_TEST */
653 #pragma mark Zone metadata
654 #if !ZALLOC_TEST
655
656 static inline bool
657 zone_has_index(zone_t z, zone_id_t zid)
658 {
659 return zone_array + zid == z;
660 }
661
662 __abortlike
663 void
664 zone_invalid_panic(zone_t zone)
665 {
666 panic("zone %p isn't in the zone_array", zone);
667 }
668
669 __abortlike
670 static void
671 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
672 const char *kind)
673 {
674 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
675 kind, meta, zone_heap_name(zone), zone->z_name);
676 }
677
678 __abortlike
679 static void
680 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
681 {
682 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
683 (void *)addr, zone_heap_name(zone), zone->z_name);
684 }
685
686 __abortlike
687 static void
688 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
689 struct zone_page_metadata *meta)
690 {
691 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
692 zone_id_t zidx;
693 zone_t src_zone;
694
695 if (zsflags.z_kalloc_type) {
696 panic_include_kalloc_types = true;
697 kalloc_type_dst_zone = zone;
698 }
699
700 zidx = meta->zm_index;
701 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
702 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
703 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
704 zidx);
705 }
706
707 src_zone = &zone_array[zidx];
708 src_zsflags = zone_security_array[zidx];
709 if (src_zsflags.z_kalloc_type) {
710 panic_include_kalloc_types = true;
711 kalloc_type_src_zone = src_zone;
712 }
713
714 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
715 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
716 zone_heap_name(src_zone), src_zone->z_name, zidx);
717 }
718
719 __abortlike
720 static void
721 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
722 {
723 panic("metadata list corruption through element %p detected in zone %s%s",
724 meta, zone_heap_name(zone), zone->z_name);
725 }
726
727 __abortlike
728 static void
729 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
730 const char *kind)
731 {
732 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
733 zone_heap_name(zone), zone->z_name, meta);
734 }
735
736 __abortlike
737 static void
738 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
739 {
740 panic("%s: double free of %p to zone %s%s", caller,
741 (void *)addr, zone_heap_name(zone), zone->z_name);
742 }
743
744 __abortlike
745 static void
746 zone_accounting_panic(zone_t zone, const char *kind)
747 {
748 panic("accounting mismatch (%s) for zone %s%s", kind,
749 zone_heap_name(zone), zone->z_name);
750 }
751
752 #define zone_counter_sub(z, stat, value) ({ \
753 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
754 zone_accounting_panic(z, #stat " wrap-around"); \
755 } \
756 (z)->stat; \
757 })
758
759 static inline uint16_t
760 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
761 vm_offset_t esize)
762 {
763 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
764 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
765 }
766 return m->zm_alloc_size;
767 }
768
769 static inline uint16_t
770 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
771 vm_offset_t esize)
772 {
773 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
774 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
775 }
776 return m->zm_alloc_size;
777 }
778
779 __abortlike
780 static void
781 zone_nofail_panic(zone_t zone)
782 {
783 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
784 zone_heap_name(zone), zone->z_name);
785 }
786
787 __header_always_inline bool
788 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
789 {
790 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
791 struct mach_vm_range r = { addr_start, addr_end };
792
793 return mach_vm_range_intersects(ro_r, &r);
794 }
795
796 #define from_range(r, addr, size) \
797 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
798 mach_vm_range_contains(r, vm_memtag_canonicalize_kernel((mach_vm_offset_t)(addr))), \
799 mach_vm_range_contains(r, vm_memtag_canonicalize_kernel((mach_vm_offset_t)(addr)), size))
800
801 #define from_ro_map(addr, size) \
802 from_range(&zone_info.zi_ro_range, addr, size)
803
804 #define from_zone_map(addr, size) \
805 from_range(&zone_info.zi_map_range, addr, size)
806
807 __header_always_inline bool
808 zone_pva_is_null(zone_pva_t page)
809 {
810 return page.packed_address == 0;
811 }
812
813 __header_always_inline bool
814 zone_pva_is_queue(zone_pva_t page)
815 {
816 // actual kernel pages have the top bit set
817 return (int32_t)page.packed_address > 0;
818 }
819
820 __header_always_inline bool
821 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
822 {
823 return pva1.packed_address == pva2.packed_address;
824 }
825
826 __header_always_inline zone_pva_t *
827 zone_pageq_base(void)
828 {
829 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
830
831 /*
832 * `-1` so that if the first __DATA variable is a page queue,
833 * it gets a non-zero index
834 */
835 return data_seg_start - 1;
836 }
837
838 __header_always_inline void
839 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
840 struct zone_page_metadata *meta)
841 {
842 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
843
844 if (!zone_pva_is_equal(*queue_head, oldv)) {
845 zone_page_metadata_list_corruption(z, meta);
846 }
847 *queue_head = meta->zm_page_next;
848 }
849
850 __header_always_inline zone_pva_t
851 zone_queue_encode(zone_pva_t *headp)
852 {
853 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
854 }
855
856 __header_always_inline zone_pva_t
857 zone_pva_from_addr(vm_address_t addr)
858 {
859 // cannot use atop() because we want to maintain the sign bit
860 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
861 }
862
863 __header_always_inline vm_address_t
864 zone_pva_to_addr(zone_pva_t page)
865 {
866 // cause sign extension so that we end up with the right address
867 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
868 }
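/*
 * Packed VAs store a (signed) page number: real kernel pages have the top
 * bit set and therefore read as negative, which is what zone_pva_is_queue()
 * relies on. A quick round-trip sketch:
 *
 *     zone_pva_t pva = zone_pva_from_addr(addr);      // addr >> PAGE_SHIFT
 *     assert(zone_pva_to_addr(pva) == trunc_page(addr));
 *     assert(!zone_pva_is_queue(pva));                // kernel VAs are "negative"
 */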
869
870 __header_always_inline struct zone_page_metadata *
871 zone_pva_to_meta(zone_pva_t page)
872 {
873 return VM_FAR_ADD_PTR_UNBOUNDED(
874 zone_info.zi_meta_base, page.packed_address);
875 }
876
877 __header_always_inline zone_pva_t
878 zone_pva_from_meta(struct zone_page_metadata *meta)
879 {
880 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
881 }
882
883 __header_always_inline struct zone_page_metadata *
884 zone_meta_from_addr(vm_offset_t addr)
885 {
886 return zone_pva_to_meta(zone_pva_from_addr(addr));
887 }
888
889 __header_always_inline zone_id_t
890 zone_index_from_ptr(const void *ptr)
891 {
892 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
893 }
894
895 __header_always_inline vm_offset_t
896 zone_meta_to_addr(struct zone_page_metadata *meta)
897 {
898 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
899 }
900
901 __attribute__((overloadable))
902 __header_always_inline void
903 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
904 {
905 if (!zone_has_index(z, meta->zm_index)) {
906 zone_page_metadata_index_confusion_panic(z, addr, meta);
907 }
908 }
909
910 __attribute__((overloadable))
911 __header_always_inline void
912 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
913 {
914 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
915 }
916
917 __header_always_inline void
918 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
919 struct zone_page_metadata *meta)
920 {
921 zone_pva_t head = *headp;
922 zone_pva_t queue_pva = zone_queue_encode(headp);
923 struct zone_page_metadata *tmp;
924
925 meta->zm_page_next = head;
926 if (!zone_pva_is_null(head)) {
927 tmp = zone_pva_to_meta(head);
928 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
929 zone_page_metadata_list_corruption(z, meta);
930 }
931 tmp->zm_page_prev = zone_pva_from_meta(meta);
932 }
933 meta->zm_page_prev = queue_pva;
934 *headp = zone_pva_from_meta(meta);
935 }
936
937 __header_always_inline struct zone_page_metadata *
938 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
939 {
940 zone_pva_t head = *headp;
941 struct zone_page_metadata *meta = zone_pva_to_meta(head);
942 struct zone_page_metadata *tmp;
943
944 zone_meta_validate(z, meta);
945
946 if (!zone_pva_is_null(meta->zm_page_next)) {
947 tmp = zone_pva_to_meta(meta->zm_page_next);
948 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
949 zone_page_metadata_list_corruption(z, meta);
950 }
951 tmp->zm_page_prev = meta->zm_page_prev;
952 }
953 *headp = meta->zm_page_next;
954
955 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
956
957 return meta;
958 }
959
960 __header_always_inline void
961 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
962 {
963 zone_pva_t meta_pva = zone_pva_from_meta(meta);
964 struct zone_page_metadata *tmp;
965
966 if (!zone_pva_is_null(meta->zm_page_next)) {
967 tmp = zone_pva_to_meta(meta->zm_page_next);
968 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
969 zone_page_metadata_list_corruption(z, meta);
970 }
971 tmp->zm_page_prev = meta->zm_page_prev;
972 }
973 if (zone_pva_is_queue(meta->zm_page_prev)) {
974 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
975 } else {
976 tmp = zone_pva_to_meta(meta->zm_page_prev);
977 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
978 zone_page_metadata_list_corruption(z, meta);
979 }
980 tmp->zm_page_next = meta->zm_page_next;
981 }
982
983 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
984 }
985
986 __header_always_inline void
987 zone_meta_requeue(zone_t z, zone_pva_t *headp,
988 struct zone_page_metadata *meta)
989 {
990 zone_meta_remqueue(z, meta);
991 zone_meta_queue_push(z, headp, meta);
992 }
993
994 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
995 static inline void
996 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
997 {
998 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
999
1000 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1001 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1002 zone_meta_requeue(z, &z->z_pageq_partial, m);
1003 zone_counter_sub(z, z_wired_empty, len);
1004 }
1005 }
1006
1007 /* allows a given metadata to reach the z_pageq_empty queue again */
1008 static inline void
1009 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1010 {
1011 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1012
1013 assert(new_size % sizeof(vm_offset_t) == 0);
1014 if (new_size == 0) {
1015 zone_meta_requeue(z, &z->z_pageq_empty, m);
1016 z->z_wired_empty += len;
1017 }
1018 }
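/*
 * Note on the ZM_ALLOC_SIZE_LOCK trick above: zm_alloc_size otherwise only
 * moves in multiples of the (pointer-aligned) element size, so its low bit
 * is free to act as a lock preventing the chunk from looking empty. E.g.:
 *
 *     zone_meta_lock_in_partial(z, m, len);     // 0 -> 1, chunk moved to z_pageq_partial
 *     ...
 *     zone_meta_unlock_from_partial(z, m, len); // 1 -> 0, chunk may go back to z_pageq_empty
 */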
1019
1020 /*
1021 * Routine to populate a page backing metadata in the zone_metadata_region.
1022 * Must be called without the zone lock held as it might potentially block.
1023 */
1024 static void
1025 zone_meta_populate(vm_offset_t base, vm_size_t size)
1026 {
1027 struct zone_page_metadata *from = zone_meta_from_addr(base);
1028 struct zone_page_metadata *to = from + atop(size);
1029 vm_offset_t page_addr = trunc_page(from);
1030
1031 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1032 #if !KASAN
1033 /*
1034 * This can race with another thread doing a populate on the same metadata
1035 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1036 * fault in the shadow when we first access the metadata page. Avoid this
1037 * by always synchronizing on the zone_metadata_region lock with KASan.
1038 */
1039 if (pmap_find_phys(kernel_pmap, page_addr)) {
1040 continue;
1041 }
1042 #endif
1043
1044 for (;;) {
1045 kern_return_t ret = KERN_SUCCESS;
1046
1047 /*
1048 * All updates to the zone_metadata_region are done
1049 * under the zone_metadata_region_lck
1050 */
1051 zone_meta_lock();
1052 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1053 ret = kernel_memory_populate(page_addr,
1054 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1055 VM_KERN_MEMORY_OSFMK);
1056 }
1057 zone_meta_unlock();
1058
1059 if (ret == KERN_SUCCESS) {
1060 break;
1061 }
1062
1063 /*
1064 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1065 * to bad system deadlocks, so if the allocation failed,
1066 * we need to do the VM_PAGE_WAIT() outside of the lock.
1067 */
1068 VM_PAGE_WAIT();
1069 }
1070 }
1071 }
1072
1073 __abortlike
1074 static void
1075 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1076 {
1077 struct zone_page_metadata *meta;
1078 const char *from_cache = "";
1079 vm_offset_t page;
1080
1081 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1082 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1083 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1084 }
1085 page = trunc_page(addr);
1086 meta = zone_meta_from_addr(addr);
1087
1088 if (!zone_has_index(zone, meta->zm_index)) {
1089 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1090 }
1091
1092 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1093 panic("metadata %p corresponding to addr %p being freed to "
1094 "zone %s%s%s, is marked as secondary per cpu page",
1095 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1096 from_cache);
1097 }
1098 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1099 page -= ptoa(meta->zm_page_index);
1100 meta -= meta->zm_page_index;
1101 }
1102
1103 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1104 panic("metadata %p corresponding to addr %p being freed to "
1105 "zone %s%s%s, has chunk len greater than max",
1106 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1107 from_cache);
1108 }
1109
1110 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1111 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1112 "zone element size", (void *)addr, zone_heap_name(zone),
1113 zone->z_name, from_cache);
1114 }
1115
1116 zone_invalid_element_addr_panic(zone, addr);
1117 }
1118
1119 __attribute__((always_inline))
1120 static struct zone_page_metadata *
1121 zone_element_resolve(
1122 zone_t zone,
1123 vm_offset_t addr,
1124 vm_offset_t *idx)
1125 {
1126 struct zone_page_metadata *meta;
1127 vm_offset_t offs, eidx;
1128
1129 meta = zone_meta_from_addr(addr);
1130 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1131 zone_invalid_element_panic(zone, addr);
1132 }
1133
1134 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1135 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1136 offs += ptoa(meta->zm_page_index);
1137 meta -= meta->zm_page_index;
1138 }
1139
1140 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1141 if (eidx * zone_elem_outer_size(zone) != offs) {
1142 zone_invalid_element_panic(zone, addr);
1143 }
1144
1145 *idx = eidx;
1146 return meta;
1147 }
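/*
 * The check above uses the precomputed z_quo_magic to divide by the outer
 * element size; ignoring that optimization, it is equivalent to the plain
 * form:
 *
 *     eidx = offs / zone_elem_outer_size(zone);
 *     if (offs % zone_elem_outer_size(zone) != 0) {
 *         zone_invalid_element_panic(zone, addr);
 *     }
 */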
1148
1149 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1150 void *
1151 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1152 {
1153 vm_offset_t addr = (vm_offset_t)ptr;
1154 vm_offset_t end = addr + elem_size;
1155 vm_offset_t offs;
1156
1157 /*
1158 * 0-sized allocations in a KALLOC_MINSIZE bucket
1159 * would be offset to the next allocation which is incorrect.
1160 */
1161 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1162
1163 /*
1164 * Given how chunks work, for a zone with PGZ guards on,
1165 * there's a single element which ends precisely
1166 * at the page boundary: the last one.
1167 */
1168 if (req_size == elem_size ||
1169 (end & PAGE_MASK) ||
1170 !zone_meta_from_addr(addr)->zm_guarded) {
1171 return ptr;
1172 }
1173
1174 offs = elem_size - req_size;
1175 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1176
1177 return (char *)addr + offs;
1178 }
1179 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
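/*
 * Worked example with hypothetical sizes (assuming KALLOC_MINALIGN == 16):
 * for elem_size = 256 and req_size = 200, req_size rounds up to 208,
 * offs = 48, and the caller gets back addr + 48. The 208 usable bytes then
 * end exactly at the trailing guard page, so small out-of-bounds accesses
 * fault immediately, and the 48-byte shift is remembered in the guard
 * page's zm_oob_offs so that zone_element_size()/zfree() can recover the
 * element's true start.
 */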
1180
1181 __abortlike
1182 static void
1183 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1184 {
1185 struct zone_page_metadata *meta;
1186 vm_offset_t offs, size, page;
1187 zone_t zone;
1188
1189 page = trunc_page(addr);
1190 meta = zone_meta_from_addr(addr);
1191 zone = &zone_array[meta->zm_index];
1192
1193 if (zone->z_percpu) {
1194 panic("zone bound checks: address %p is a per-cpu allocation",
1195 (void *)addr);
1196 }
1197
1198 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1199 page -= ptoa(meta->zm_page_index);
1200 meta -= meta->zm_page_index;
1201 }
1202
1203 size = zone_elem_outer_size(zone);
1204 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1205 zone->z_quo_magic, size);
1206 panic("zone bound checks: buffer %p of length %zd overflows "
1207 "object %p of size %zd in zone %p[%s%s]",
1208 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1209 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1210 }
1211
1212 void
1213 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1214 {
1215 struct zone_page_metadata *meta;
1216 vm_offset_t offs, size;
1217 zone_t zone;
1218
1219 if (!from_zone_map(addr, 1)) {
1220 return;
1221 }
1222
1223 meta = zone_meta_from_addr(addr);
1224 zone = zone_by_id(meta->zm_index);
1225
1226 if (zone->z_percpu) {
1227 zone_element_bounds_check_panic(addr, len);
1228 }
1229
1230 if (zone->z_permanent) {
1231 /* We don't know bounds for those */
1232 return;
1233 }
1234
1235 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1236 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1237 offs += ptoa(meta->zm_page_index);
1238 }
1239 size = zone_elem_outer_size(zone);
1240 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1241 if (len + zone_elem_redzone(zone) > size - offs) {
1242 zone_element_bounds_check_panic(addr, len);
1243 }
1244 }
1245
1246 /*
1247 * Routine to get the size of a zone allocated address.
1248 * If the address doesn't belong to the zone maps, returns 0.
1249 */
1250 vm_size_t
1251 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1252 {
1253 vm_address_t addr = (vm_address_t)elem;
1254 struct zone_page_metadata *meta;
1255 vm_size_t esize, offs, end;
1256 zone_t zone;
1257
1258 if (from_zone_map(addr, sizeof(void *))) {
1259 meta = zone_meta_from_addr(addr);
1260 zone = zone_by_id(meta->zm_index);
1261 esize = zone_elem_inner_size(zone);
1262 end = vm_memtag_canonicalize_kernel(addr + esize);
1263 offs = 0;
1264
1265 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1266 /*
1267 * If the chunk uses guards, and (addr + esize)
1268 * either crosses a page boundary or is at the boundary,
1269 * we need to look harder.
1270 */
1271 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1272 uint32_t chunk_pages = zone->z_chunk_pages;
1273
1274 /*
1275 * Because in the vast majority of cases the element
1276 * size is sub-page, and meta[1] must already be faulted,
1277 * we can quickly peek at whether it's a guard.
1278 *
1279 * For elements larger than a page, finding the guard
1280 * page requires a little more effort.
1281 */
1282 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1283 offs = meta[1].zm_oob_offs;
1284 if (clear_oob) {
1285 meta[1].zm_oob_offs = 0;
1286 }
1287 } else if (esize > PAGE_SIZE) {
1288 struct zone_page_metadata *gmeta;
1289
1290 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1291 gmeta = meta + meta->zm_subchunk_len;
1292 } else {
1293 gmeta = meta + chunk_pages;
1294 }
1295 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1296
1297 if (end >= zone_meta_to_addr(gmeta)) {
1298 offs = gmeta->zm_oob_offs;
1299 if (clear_oob) {
1300 gmeta->zm_oob_offs = 0;
1301 }
1302 }
1303 }
1304 }
1305 #else
1306 #pragma unused(end, clear_oob)
1307 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1308
1309 if (oob_offs) {
1310 *oob_offs = offs;
1311 }
1312 if (z) {
1313 *z = zone;
1314 }
1315 return esize;
1316 }
1317
1318 if (oob_offs) {
1319 *oob_offs = 0;
1320 }
1321
1322 return 0;
1323 }
1324
1325 zone_id_t
1326 zone_id_for_element(void *addr, vm_size_t esize)
1327 {
1328 zone_id_t zid = ZONE_ID_INVALID;
1329 if (from_zone_map(addr, esize)) {
1330 zid = zone_index_from_ptr(addr);
1331 __builtin_assume(zid != ZONE_ID_INVALID);
1332 }
1333 return zid;
1334 }
1335
1336 /* This function just formats the reason for the panics by redoing the checks */
1337 __abortlike
1338 static void
1339 zone_require_panic(zone_t zone, void *addr)
1340 {
1341 uint32_t zindex;
1342 zone_t other;
1343
1344 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1345 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1346 }
1347
1348 zindex = zone_index_from_ptr(addr);
1349 other = &zone_array[zindex];
1350 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1351 panic("zone_require failed: invalid zone index %d "
1352 "(addr: %p, expected: %s%s)", zindex,
1353 addr, zone_heap_name(zone), zone->z_name);
1354 } else {
1355 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1356 "(addr: %p, expected: %s%s)",
1357 zindex, zone_heap_name(other), other->z_name,
1358 addr, zone_heap_name(zone), zone->z_name);
1359 }
1360 }
1361
1362 __abortlike
1363 static void
1364 zone_id_require_panic(zone_id_t zid, void *addr)
1365 {
1366 zone_require_panic(&zone_array[zid], addr);
1367 }
1368
1369 /*
1370 * Routines to panic if a pointer is not mapped to an expected zone.
1371 * This can be used as a means of pinning an object to the zone it is expected
1372 * to be a part of. Causes a panic if the address does not belong to any
1373 * specified zone, does not belong to any zone, has been freed and therefore
1374 * unmapped from the zone, or the pointer contains an uninitialized value that
1375 * does not belong to any zone.
1376 */
1377 __mockable void
1378 zone_require(zone_t zone, void *addr)
1379 {
1380 vm_size_t esize = zone_elem_inner_size(zone);
1381
1382 if (from_zone_map(addr, esize) &&
1383 zone_has_index(zone, zone_index_from_ptr(addr))) {
1384 return;
1385 }
1386 zone_require_panic(zone, addr);
1387 }
1388
1389 __mockable void
1390 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1391 {
1392 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1393 return;
1394 }
1395 zone_id_require_panic(zid, addr);
1396 }
1397
1398 void
1399 zone_id_require_aligned(zone_id_t zid, void *addr)
1400 {
1401 zone_t zone = zone_by_id(zid);
1402 vm_offset_t elem, offs;
1403
1404 elem = (vm_offset_t)addr;
1405 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
1406
1407 if (from_zone_map(addr, 1)) {
1408 struct zone_page_metadata *meta;
1409
1410 meta = zone_meta_from_addr(elem);
1411 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1412 offs += ptoa(meta->zm_page_index);
1413 }
1414
1415 if (zid == meta->zm_index &&
1416 Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
1417 return;
1418 }
1419 }
1420
1421 zone_invalid_element_panic(zone, elem);
1422 }
1423
1424 bool
1425 zone_owns(zone_t zone, void *addr)
1426 {
1427 vm_size_t esize = zone_elem_inner_size(zone);
1428
1429 if (from_zone_map(addr, esize)) {
1430 return zone_has_index(zone, zone_index_from_ptr(addr));
1431 }
1432 return false;
1433 }
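/*
 * Typical (hypothetical) usage of the pinning helpers above, with
 * my_object_zone / MY_OBJECT_ZONE_ID standing in for the caller's zone:
 *
 *     zone_require(my_object_zone, obj);                     // pin by zone_t
 *     zone_id_require(MY_OBJECT_ZONE_ID, sizeof(*obj), obj); // pin by zone id
 *
 * Both panic rather than return if `obj` did not come from that zone.
 */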
1434
1435 static inline struct mach_vm_range
1436 zone_kmem_suballoc(
1437 mach_vm_offset_t addr,
1438 vm_size_t size,
1439 int flags,
1440 vm_tag_t tag,
1441 vm_map_t *new_map)
1442 {
1443 struct mach_vm_range r;
1444 #ifndef __BUILDING_XNU_LIB_UNITTEST__
1445 /* Don't create the zalloc submap; unit tests mock all zalloc functionality */
1446 *new_map = kmem_suballoc(kernel_map, &addr, size,
1447 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1448 flags, KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT, tag).kmr_submap;
1449 #else
1450 #pragma unused(flags, tag, new_map)
1451 #endif
1452 r.min_address = addr;
1453 r.max_address = addr + size;
1454 return r;
1455 }
1456
1457 #endif /* !ZALLOC_TEST */
1458 #pragma mark Zone bits allocator
1459
1460 /*!
1461 * @defgroup Zone Bitmap allocator
1462 * @{
1463 *
1464 * @brief
1465 * Functions implementing the zone bitmap allocator
1466 *
1467 * @discussion
1468 * The zone allocator maintains which elements are allocated or free in bitmaps.
1469 *
1470 * When the number of elements per page is smaller than 32, it is stored inline
1471 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1472 * and @c zm_bitmap used for storage).
1473 *
1474 * When the number of elements is larger, then a bitmap is allocated from
1475 * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1476 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1477 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1478 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1479 * cannot be larger than 1024 bytes (8192 bits).
1480 *
1481 * This buddy allocator can actually accommodate allocations as large
1482 * as 8k on 16k systems and 2k on 4k systems.
1483 *
1484 * Note: @c zba_* functions are implementation details not meant to be used
1485 * outside of the allocator itself. Interfaces to the rest of
1486 * the zone allocator are documented and not @c zba_* prefixed.
1487 */
1488
1489 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1490 #define ZBA_GRANULE sizeof(uint64_t)
1491 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1492 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1493 #define ZBA_MAX_ALLOC_ORDER 7
1494 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1495 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1496 #define ZBA_PTR_MASK 0x0fffffff
1497 #define ZBA_ORDER_SHIFT 29
1498 #define ZBA_HAS_EXTRA_BIT 0x10000000
1499
1500 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1501 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
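/*
 * Worked numbers, assuming a 16K PAGE_MAX_SIZE (PAGE_MAX_SHIFT == 14):
 * ZBA_GRANULE == 8 and ZBA_MAX_ORDER == 10, so the first assert checks
 * 2 * 8 << 10 == 16384 == ZBA_CHUNK_SIZE, i.e. the largest buddy block is
 * half a chunk (8K), while the largest allocation the packed reference can
 * encode is order ZBA_MAX_ALLOC_ORDER == 7, i.e. 8 << 7 == 1024 bytes
 * (8192 bits), matching the discussion above.
 */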
1502
1503 struct zone_bits_chain {
1504 uint32_t zbc_next;
1505 uint32_t zbc_prev;
1506 } __attribute__((aligned(ZBA_GRANULE)));
1507
1508 struct zone_bits_head {
1509 uint32_t zbh_next;
1510 uint32_t zbh_unused;
1511 } __attribute__((aligned(ZBA_GRANULE)));
1512
1513 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1514 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1515
1516 struct zone_bits_allocator_meta {
1517 uint32_t zbam_left;
1518 uint32_t zbam_right;
1519 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1520 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1521 };
1522
1523 struct zone_bits_allocator_header {
1524 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1525 };
1526
1527 #if ZALLOC_TEST
1528 static struct zalloc_bits_allocator_test_setup {
1529 vm_offset_t zbats_base;
1530 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1531 } zba_test_info;
1532
1533 static struct zone_bits_allocator_header *
1534 zba_base_header(void)
1535 {
1536 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1537 }
1538
1539 static kern_return_t
1540 zba_populate(uint32_t n, bool with_extra __unused)
1541 {
1542 vm_address_t base = zba_test_info.zbats_base;
1543 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1544
1545 return KERN_SUCCESS;
1546 }
1547 #else
1548 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1549 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1550
1551 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1552 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1553
1554 static struct zone_bits_allocator_header *
1555 zba_base_header(void)
1556 {
1557 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1558 }
1559
1560 static void
1561 zba_lock(void)
1562 {
1563 lck_mtx_lock(&zba_mtx);
1564 }
1565
1566 static void
1567 zba_unlock(void)
1568 {
1569 lck_mtx_unlock(&zba_mtx);
1570 }
1571
1572 __abortlike
1573 static void
1574 zba_memory_exhausted(void)
1575 {
1576 uint64_t zsize = 0;
1577 zone_t z = zone_find_largest(&zsize);
1578 panic("zba_populate: out of bitmap space, "
1579 "likely due to memory leak in zone [%s%s] "
1580 "(%u%c, %d elements allocated)",
1581 zone_heap_name(z), zone_name(z),
1582 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1583 zone_count_allocated(z));
1584 }
1585
1586
1587 static kern_return_t
1588 zba_populate(uint32_t n, bool with_extra)
1589 {
1590 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1591 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1592 vm_address_t bits_addr;
1593 vm_address_t xtra_addr;
1594 kern_return_t kr;
1595
1596 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1597 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1598
1599 kr = kernel_memory_populate(bits_addr, bits_size,
1600 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1601 VM_KERN_MEMORY_OSFMK);
1602 if (kr != KERN_SUCCESS) {
1603 return kr;
1604 }
1605
1606
1607 if (with_extra) {
1608 kr = kernel_memory_populate(xtra_addr, xtra_size,
1609 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1610 VM_KERN_MEMORY_OSFMK);
1611 if (kr != KERN_SUCCESS) {
1612 kernel_memory_depopulate(bits_addr, bits_size,
1613 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1614 VM_KERN_MEMORY_OSFMK);
1615 }
1616 }
1617
1618 return kr;
1619 }
1620 #endif
1621
1622 __pure2
1623 static struct zone_bits_allocator_meta *
1624 zba_meta(void)
1625 {
1626 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1627 }
1628
1629 __pure2
1630 static uint64_t *
1631 zba_slot_base(void)
1632 {
1633 return (uint64_t *)zba_base_header();
1634 }
1635
1636 __pure2
1637 static struct zone_bits_head *
1638 zba_head(uint32_t order, bool with_extra)
1639 {
1640 if (with_extra) {
1641 return &zba_meta()->zbam_lists_with_extra[order];
1642 } else {
1643 return &zba_meta()->zbam_lists[order];
1644 }
1645 }
1646
1647 __pure2
1648 static uint32_t
1649 zba_head_index(struct zone_bits_head *hd)
1650 {
1651 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1652 }
1653
1654 __pure2
1655 static struct zone_bits_chain *
1656 zba_chain_for_index(uint32_t index)
1657 {
1658 return (struct zone_bits_chain *)(zba_slot_base() + index);
1659 }
1660
1661 __pure2
1662 static uint32_t
zba_chain_to_index(const struct zone_bits_chain *zbc)
1664 {
1665 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1666 }
1667
1668 __abortlike
1669 static void
zba_head_corruption_panic(uint32_t order, bool with_extra)
1671 {
1672 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1673 order, with_extra, zba_head(order, with_extra));
1674 }
1675
1676 __abortlike
1677 static void
zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1679 {
1680 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1681 }
1682
1683 static void
zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1685 {
1686 struct zone_bits_head *hd = zba_head(order, with_extra);
1687 uint32_t hd_index = zba_head_index(hd);
1688 uint32_t index = zba_chain_to_index(zbc);
1689 struct zone_bits_chain *next;
1690
1691 if (hd->zbh_next) {
1692 next = zba_chain_for_index(hd->zbh_next);
1693 if (next->zbc_prev != hd_index) {
1694 zba_head_corruption_panic(order, with_extra);
1695 }
1696 next->zbc_prev = index;
1697 }
1698 zbc->zbc_next = hd->zbh_next;
1699 zbc->zbc_prev = hd_index;
1700 hd->zbh_next = index;
1701 }
1702
1703 static void
zba_remove_block(struct zone_bits_chain *zbc)
1705 {
1706 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1707 uint32_t index = zba_chain_to_index(zbc);
1708
1709 if (prev->zbc_next != index) {
1710 zba_chain_corruption_panic(prev, zbc);
1711 }
1712 if ((prev->zbc_next = zbc->zbc_next)) {
1713 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1714 if (next->zbc_prev != index) {
1715 zba_chain_corruption_panic(zbc, next);
1716 }
1717 next->zbc_prev = zbc->zbc_prev;
1718 }
1719 }
1720
1721 static vm_address_t
zba_try_pop_block(uint32_t order, bool with_extra)
1723 {
1724 struct zone_bits_head *hd = zba_head(order, with_extra);
1725 struct zone_bits_chain *zbc;
1726
1727 if (hd->zbh_next == 0) {
1728 return 0;
1729 }
1730
1731 zbc = zba_chain_for_index(hd->zbh_next);
1732 zba_remove_block(zbc);
1733 return (vm_address_t)zbc;
1734 }
1735
1736 static struct zone_bits_allocator_header *
zba_header(vm_offset_t addr)
1738 {
1739 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1740 return (struct zone_bits_allocator_header *)addr;
1741 }
1742
1743 static size_t
zba_node_parent(size_t node)
1745 {
1746 return (node - 1) / 2;
1747 }
1748
1749 static size_t
zba_node_left_child(size_t node)
1751 {
1752 return node * 2 + 1;
1753 }
1754
1755 static size_t
zba_node_buddy(size_t node)
1757 {
1758 return ((node - 1) ^ 1) + 1;
1759 }
1760
1761 static size_t
zba_node(vm_offset_t addr, uint32_t order)
1763 {
1764 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1765 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1766 }
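
/*
 * Worked example of the node indexing above (illustrative values only:
 * assume ZBA_MAX_ORDER == 6 for the sake of the arithmetic). A granule at
 * offset index 12 within its chunk, taken at order 0, maps to node
 * 12 + (1 << 7) - 1 == 139. Its buddy is ((139 - 1) ^ 1) + 1 == 140, both
 * share parent (139 - 1) / 2 == 69, and the left child of 69 is
 * 69 * 2 + 1 == 139, as expected.
 */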
1767
1768 static struct zone_bits_chain *
zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1770 {
1771 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1772 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1773 }
1774
1775 static void
zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1777 {
1778 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1779 }
1780
1781 static bool
zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1783 {
1784 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1785 }
1786
1787 static void
zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1789 {
1790 struct zone_bits_allocator_header *zbah = zba_header(addr);
1791 struct zone_bits_chain *zbc;
1792 size_t node = zba_node(addr, order);
1793
1794 while (node) {
1795 size_t parent = zba_node_parent(node);
1796
1797 zba_node_flip_split(zbah, parent);
1798 if (zba_node_is_split(zbah, parent)) {
1799 break;
1800 }
1801
1802 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1803 zba_remove_block(zbc);
1804 order++;
1805 node = parent;
1806 }
1807
1808 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1809 }
1810
1811 static vm_size_t
zba_chunk_header_size(uint32_t n)
1813 {
1814 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1815 if (n == 0) {
1816 hdr_size += sizeof(struct zone_bits_allocator_meta);
1817 }
1818 return hdr_size;
1819 }
1820
1821 static void
zba_init_chunk(uint32_t n, bool with_extra)
1823 {
1824 vm_size_t hdr_size = zba_chunk_header_size(n);
1825 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1826 struct zone_bits_allocator_header *zbah = zba_header(page);
1827 vm_size_t size = ZBA_CHUNK_SIZE;
1828 size_t node;
1829
1830 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1831 if (size < hdr_size + (ZBA_GRANULE << o)) {
1832 continue;
1833 }
1834 size -= ZBA_GRANULE << o;
1835 node = zba_node(page + size, o);
1836 zba_node_flip_split(zbah, zba_node_parent(node));
1837 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1838 }
1839 }
1840
1841 __attribute__((noinline))
1842 static void
zba_grow(bool with_extra)
1844 {
1845 struct zone_bits_allocator_meta *meta = zba_meta();
1846 kern_return_t kr = KERN_SUCCESS;
1847 uint32_t chunk;
1848
1849 #if !ZALLOC_TEST
1850 if (meta->zbam_left >= meta->zbam_right) {
1851 zba_memory_exhausted();
1852 }
1853 #endif
1854
1855 if (with_extra) {
1856 chunk = meta->zbam_right - 1;
1857 } else {
1858 chunk = meta->zbam_left;
1859 }
1860
1861 kr = zba_populate(chunk, with_extra);
1862 if (kr == KERN_SUCCESS) {
1863 if (with_extra) {
1864 meta->zbam_right -= 1;
1865 } else {
1866 meta->zbam_left += 1;
1867 }
1868
1869 zba_init_chunk(chunk, with_extra);
1870 #if !ZALLOC_TEST
1871 } else {
1872 /*
 * zba_populate() has to be allowed to fail populating:
1874 * as we are under a global lock, we need to do the
1875 * VM_PAGE_WAIT() outside of the lock.
1876 */
1877 assert(kr == KERN_RESOURCE_SHORTAGE);
1878 zba_unlock();
1879 VM_PAGE_WAIT();
1880 zba_lock();
1881 #endif
1882 }
1883 }
1884
1885 static vm_offset_t
zba_alloc(uint32_t order, bool with_extra)
1887 {
1888 struct zone_bits_allocator_header *zbah;
1889 uint32_t cur = order;
1890 vm_address_t addr;
1891 size_t node;
1892
1893 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1894 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1895 zba_grow(with_extra);
1896 cur = order;
1897 }
1898 }
1899
1900 zbah = zba_header(addr);
1901 node = zba_node(addr, cur);
1902 zba_node_flip_split(zbah, zba_node_parent(node));
1903 while (cur > order) {
1904 cur--;
1905 zba_node_flip_split(zbah, node);
1906 node = zba_node_left_child(node);
1907 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1908 cur, with_extra);
1909 }
1910
1911 return addr;
1912 }
1913
1914 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
1915 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
1916 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
1917 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
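
/*
 * Worked example of the helpers above: for n == 37 with a uint32_t map,
 * zba_map_index() selects word 37 / 32 == 1, zba_map_bit() is
 * 1u << (37 % 32) == 0x20, zba_map_mask_lt() is 0x1f (indices 32..36, the
 * ones below 37 within that word), and zba_map_mask_ge() is 0xffffffe0
 * (indices 37 and up within that word).
 */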
1918
1919 #if !ZALLOC_TEST
1920 #if VM_TAG_SIZECLASSES
1921
1922 static void *
zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1924 {
1925 vm_offset_t base = zone_info.zi_xtra_range.min_address;
1926 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1927
1928 return (void *)(base + ((offs + idx) << zba_xtra_shift));
1929 }
1930
1931 #endif /* VM_TAG_SIZECLASSES */
1932
1933 static uint32_t
zba_bits_ref_order(uint32_t bref)
1935 {
1936 return bref >> ZBA_ORDER_SHIFT;
1937 }
1938
1939 static bitmap_t *
zba_bits_ref_ptr(uint32_t bref)
1941 {
1942 return zba_slot_base() + (bref & ZBA_PTR_MASK);
1943 }
1944
1945 static vm_offset_t
zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
    zalloc_flags_t flags, vm_offset_t eidx)
1948 {
1949 size_t i = eidx / 32;
1950 uint32_t map;
1951
1952 if (eidx % 32) {
1953 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
1954 if (map) {
1955 eidx = __builtin_ctz(map);
1956 meta[i].zm_bitmap ^= 1u << eidx;
1957 return i * 32 + eidx;
1958 }
1959 i++;
1960 }
1961
1962 uint32_t chunk_len = meta->zm_chunk_len;
1963 if (flags & Z_PCPU) {
1964 chunk_len = zpercpu_count();
1965 }
1966 for (int j = 0; j < chunk_len; j++, i++) {
1967 if (i >= chunk_len) {
1968 i = 0;
1969 }
1970 if (__probable(map = meta[i].zm_bitmap)) {
1971 meta[i].zm_bitmap &= map - 1;
1972 return i * 32 + __builtin_ctz(map);
1973 }
1974 }
1975
1976 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
1977 }
1978
1979 static vm_offset_t
zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
    vm_offset_t eidx)
1982 {
1983 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
1984 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
1985 size_t i = eidx / 64;
1986 uint64_t map;
1987
1988 if (eidx % 64) {
1989 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
1990 if (map) {
1991 eidx = __builtin_ctzll(map);
1992 bits[i] ^= 1ull << eidx;
1993 return i * 64 + eidx;
1994 }
1995 i++;
1996 }
1997
1998 for (int j = 0; j < bits_size; i++, j++) {
1999 if (i >= bits_size) {
2000 i = 0;
2001 }
2002 if (__probable(map = bits[i])) {
2003 bits[i] &= map - 1;
2004 return i * 64 + __builtin_ctzll(map);
2005 }
2006 }
2007
2008 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2009 }
2010
2011 /*!
2012 * @function zone_meta_find_and_clear_bit
2013 *
2014 * @brief
2015 * The core of the bitmap allocator: find a bit set in the bitmaps.
2016 *
2017 * @discussion
 * This method will round-robin through available allocations,
 * with a per-core memory of the last allocated element index.
2020 *
2021 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2022 * double-free bugs way too practical.
2023 *
2024 * @param zone The zone we're allocating from.
2025 * @param meta The main metadata for the chunk being allocated from.
2026 * @param flags the alloc flags (for @c Z_PCPU).
2027 */
2028 static vm_offset_t
zone_meta_find_and_clear_bit(
    zone_t zone,
    zone_stats_t zs,
    struct zone_page_metadata *meta,
    zalloc_flags_t flags)
2034 {
2035 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2036
2037 if (meta->zm_inline_bitmap) {
2038 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2039 } else {
2040 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2041 }
2042 zs->zs_alloc_rr = (uint16_t)eidx;
2043 return eidx;
2044 }
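
/*
 * Illustrative sketch (excluded from the build): the round-robin
 * find-and-clear performed by zba_scan_bitmap_inline(), reduced to a flat
 * array of 32-bit words. All names here are made up for the example; the
 * real code additionally handles Z_PCPU chunks and panics on accounting
 * mismatches instead of returning -1.
 */
#if 0
static int
zba_example_rr_find_and_clear(uint32_t *words, size_t nwords, size_t hint)
{
	size_t i = (hint / 32) % nwords;

	/* finish the word the hint points into, looking only at bits >= hint */
	if (hint % 32) {
		uint32_t map = words[i] & ~((1u << (hint % 32)) - 1);

		if (map) {
			uint32_t bit = (uint32_t)__builtin_ctz(map);

			words[i] ^= 1u << bit;
			return (int)(i * 32 + bit);
		}
		i = (i + 1) % nwords;
	}

	/* then wrap around all the words once */
	for (size_t j = 0; j < nwords; j++, i = (i + 1) % nwords) {
		uint32_t map = words[i];

		if (map) {
			words[i] &= map - 1;	/* clear the lowest set bit */
			return (int)(i * 32 + (uint32_t)__builtin_ctz(map));
		}
	}

	return -1;	/* no free element */
}
#endif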
2045
2046 /*!
2047 * @function zone_meta_bits_init_inline
2048 *
2049 * @brief
2050 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2051 *
2052 * @param meta The main metadata for the initialized chunk.
2053 * @param count The number of elements the chunk can hold
2054 * (which might be partial for partially populated chunks).
2055 */
2056 static void
zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2058 {
2059 /*
2060 * We're called with the metadata zm_bitmap fields already zeroed out.
2061 */
2062 for (size_t i = 0; i < count / 32; i++) {
2063 meta[i].zm_bitmap = ~0u;
2064 }
2065 if (count % 32) {
2066 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2067 }
2068 }
2069
2070 /*!
2071 * @function zone_meta_bits_alloc_init
2072 *
2073 * @brief
2074 * Allocates a zm_bitmap field for a newly assigned chunk.
2075 *
2076 * @param count The number of elements the chunk can hold
2077 * (which might be partial for partially populated chunks).
 * @param nbits The maximum number of bits that will be used.
2079 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2080 */
2081 static uint32_t
zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2083 {
2084 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2085 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2086
2087 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2088 uint64_t *bits;
2089 size_t i = 0;
2090
2091 assert(order <= ZBA_MAX_ALLOC_ORDER);
2092 assert(count <= ZBA_GRANULE_BITS << order);
2093
2094 zba_lock();
2095 bits = (uint64_t *)zba_alloc(order, with_extra);
2096 zba_unlock();
2097
2098 while (i < count / 64) {
2099 bits[i++] = ~0ull;
2100 }
2101 if (count % 64) {
2102 bits[i++] = zba_map_mask_lt(uint64_t, count);
2103 }
2104 while (i < 1u << order) {
2105 bits[i++] = 0;
2106 }
2107
2108 return (uint32_t)(bits - zba_slot_base()) +
2109 (order << ZBA_ORDER_SHIFT) +
2110 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2111 }
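
/*
 * Worked example (assuming ZBA_GRANULE_BITS == 64): for nbits == 200,
 * order == flsll(199 / 64) == flsll(3) == 2, i.e. a 4-granule / 256-bit
 * bitmap is allocated. With count == 150, granules 0 and 1 are fully set,
 * granule 2 gets the low 150 % 64 == 22 bits, and granule 3 is cleared.
 */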
2112
2113 /*!
2114 * @function zone_meta_bits_merge
2115 *
2116 * @brief
2117 * Adds elements <code>[start, end)</code> to a chunk being extended.
2118 *
2119 * @param meta The main metadata for the extended chunk.
2120 * @param start The index of the first element to add to the chunk.
 * @param end The index one past the last element to add (exclusive).
2122 */
2123 static void
zone_meta_bits_merge(struct zone_page_metadata *meta,
    uint32_t start, uint32_t end)
2126 {
2127 if (meta->zm_inline_bitmap) {
2128 while (start < end) {
2129 size_t s_i = start / 32;
2130 size_t s_e = end / 32;
2131
2132 if (s_i == s_e) {
2133 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2134 zba_map_mask_ge(uint32_t, start);
2135 break;
2136 }
2137
2138 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2139 start += 32 - (start % 32);
2140 }
2141 } else {
2142 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2143
2144 while (start < end) {
2145 size_t s_i = start / 64;
2146 size_t s_e = end / 64;
2147
2148 if (s_i == s_e) {
2149 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2150 zba_map_mask_ge(uint64_t, start);
2151 break;
2152 }
2153 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2154 start += 64 - (start % 64);
2155 }
2156 }
2157 }
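
/*
 * Worked example for an inline bitmap: merging [start, end) == [10, 50)
 * first ORs zba_map_mask_ge(uint32_t, 10) into meta[0] (elements 10..31),
 * advances start to 32, then ORs
 * zba_map_mask_lt(uint32_t, 50) & zba_map_mask_ge(uint32_t, 32)
 * into meta[1] (elements 32..49) and stops.
 */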
2158
2159 /*!
2160 * @function zone_bits_free
2161 *
2162 * @brief
2163 * Frees a bitmap to the zone bitmap allocator.
2164 *
2165 * @param bref
 * A bitmap reference set by @c zone_meta_bits_alloc_init() in a @c zm_bitmap field.
2167 */
2168 static void
zone_bits_free(uint32_t bref)
2170 {
2171 zba_lock();
2172 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2173 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2174 zba_unlock();
2175 }
2176
2177 /*!
2178 * @function zone_meta_is_free
2179 *
2180 * @brief
2181 * Returns whether a given element appears free.
2182 */
2183 static bool
zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2185 {
2186 if (meta->zm_inline_bitmap) {
2187 uint32_t bit = zba_map_bit(uint32_t, eidx);
2188 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2189 } else {
2190 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2191 uint64_t bit = zba_map_bit(uint64_t, eidx);
2192 return bits[zba_map_index(uint64_t, eidx)] & bit;
2193 }
2194 }
2195
2196 /*!
2197 * @function zone_meta_mark_free
2198 *
2199 * @brief
2200 * Marks an element as free and returns whether it was marked as used.
2201 */
2202 static bool
zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2204 {
2205 if (meta->zm_inline_bitmap) {
2206 uint32_t bit = zba_map_bit(uint32_t, eidx);
2207 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2208 return false;
2209 }
2210 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2211 } else {
2212 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2213 uint64_t bit = zba_map_bit(uint64_t, eidx);
2214 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2215 return false;
2216 }
2217 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2218 }
2219 return true;
2220 }
2221
2222 #if VM_TAG_SIZECLASSES
2223
2224 __startup_func
2225 void
__zone_site_register(vm_allocation_site_t *site)
2227 {
2228 if (zone_tagging_on) {
2229 vm_tag_alloc(site);
2230 }
2231 }
2232
2233 uint16_t
zone_index_from_tag_index(uint32_t sizeclass_idx)
2235 {
2236 return zone_tags_sizeclasses[sizeclass_idx];
2237 }
2238
2239 #endif /* VM_TAG_SIZECLASSES */
2240 #endif /* !ZALLOC_TEST */
2241 /*! @} */
2242 #pragma mark zalloc helpers
2243 #if !ZALLOC_TEST
2244
2245 static inline void *
zstack_tbi_fix(vm_offset_t elem)
2247 {
2248 elem = vm_memtag_load_tag(elem);
2249 return (void *)elem;
2250 }
2251
2252 static inline vm_offset_t
zstack_tbi_fill(void *addr)
2254 {
2255 vm_offset_t elem = (vm_offset_t)addr;
2256
2257 return vm_memtag_canonicalize_kernel(elem);
2258 }
2259
2260 __attribute__((always_inline))
2261 static inline void
zstack_push_no_delta(zstack_t *stack, void *addr)
2263 {
2264 vm_offset_t elem = zstack_tbi_fill(addr);
2265
2266 *(vm_offset_t *)addr = stack->z_head - elem;
2267 stack->z_head = elem;
2268 }
2269
2270 __attribute__((always_inline))
2271 void
zstack_push(zstack_t *stack, void *addr)
2273 {
2274 zstack_push_no_delta(stack, addr);
2275 stack->z_count++;
2276 }
2277
2278 __attribute__((always_inline))
2279 static inline void *
zstack_pop_no_delta(zstack_t *stack)
2281 {
2282 void *addr = zstack_tbi_fix(stack->z_head);
2283
2284 stack->z_head += *(vm_offset_t *)addr;
2285 *(vm_offset_t *)addr = 0;
2286
2287 return addr;
2288 }
2289
2290 __attribute__((always_inline))
2291 void *
zstack_pop(zstack_t *stack)
2293 {
2294 stack->z_count--;
2295 return zstack_pop_no_delta(stack);
2296 }
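
/*
 * Illustrative sketch (excluded from the build) of the delta encoding used
 * by zstack_push()/zstack_pop() above, without the memory tagging fixups:
 * every free element stores (previous head - its own address) in its first
 * word, so adding the stored delta to the current head recovers the next
 * element. Names below are made up for the example.
 */
#if 0
struct zstack_example {
	uintptr_t head;
	uint32_t  count;
};

static void
zstack_example_push(struct zstack_example *st, void *elem)
{
	uintptr_t e = (uintptr_t)elem;

	*(uintptr_t *)elem = st->head - e;	/* delta to the previous head */
	st->head = e;
	st->count++;
}

static void *
zstack_example_pop(struct zstack_example *st)
{
	void *elem = (void *)st->head;		/* caller must check count first */

	st->head += *(uintptr_t *)elem;		/* previous head = elem + delta */
	*(uintptr_t *)elem = 0;
	st->count--;
	return elem;
}
#endif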
2297
2298 static inline void
zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2300 {
2301 uint32_t ticket;
2302
2303 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2304 &ticket, &zone_locks_grp))) {
2305 return;
2306 }
2307
2308 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2309
2310 /*
2311 * If zone caching has been disabled due to memory pressure,
 * then recording contention is not useful; give the system
2313 * time to recover.
2314 */
2315 if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2316 zone->z_recirc_cont_cur++;
2317 }
2318 }
2319
2320 static inline void
zone_recirc_lock_nopreempt(zone_t zone)
2322 {
2323 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2324 }
2325
2326 static inline void
zone_recirc_unlock_nopreempt(zone_t zone)
2328 {
2329 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2330 }
2331
2332 static inline void
zone_lock_nopreempt_check_contention(zone_t zone)
2334 {
2335 uint32_t ticket;
2336 #if KASAN_FAKESTACK
2337 spl_t s = 0;
2338 if (zone->z_kasan_fakestacks) {
2339 s = splsched();
2340 }
2341 #endif /* KASAN_FAKESTACK */
2342
2343 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2344 &zone_locks_grp))) {
2345 #if KASAN_FAKESTACK
2346 zone->z_kasan_spl = s;
2347 #endif /* KASAN_FAKESTACK */
2348 return;
2349 }
2350
2351 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2352 #if KASAN_FAKESTACK
2353 zone->z_kasan_spl = s;
2354 #endif /* KASAN_FAKESTACK */
2355
2356 /*
2357 * If zone caching has been disabled due to memory pressure,
 * then recording contention is not useful; give the system
2359 * time to recover.
2360 */
2361 if (__probable(!zone_caching_disabled &&
2362 !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2363 zone->z_recirc_cont_cur++;
2364 }
2365 }
2366
2367 static inline void
zone_lock_nopreempt(zone_t zone)
2369 {
2370 #if KASAN_FAKESTACK
2371 spl_t s = 0;
2372 if (zone->z_kasan_fakestacks) {
2373 s = splsched();
2374 }
2375 #endif /* KASAN_FAKESTACK */
2376 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2377 #if KASAN_FAKESTACK
2378 zone->z_kasan_spl = s;
2379 #endif /* KASAN_FAKESTACK */
2380 }
2381
2382 static inline void
zone_unlock_nopreempt(zone_t zone)
2384 {
2385 #if KASAN_FAKESTACK
2386 spl_t s = zone->z_kasan_spl;
2387 zone->z_kasan_spl = 0;
2388 #endif /* KASAN_FAKESTACK */
2389 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2390 #if KASAN_FAKESTACK
2391 if (zone->z_kasan_fakestacks) {
2392 splx(s);
2393 }
2394 #endif /* KASAN_FAKESTACK */
2395 }
2396
2397 static inline void
zone_depot_lock_nopreempt(zone_cache_t zc)
2399 {
2400 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2401 }
2402
2403 static inline void
zone_depot_unlock_nopreempt(zone_cache_t zc)
2405 {
2406 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2407 }
2408
2409 static inline void
zone_depot_lock(zone_cache_t zc)
2411 {
2412 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2413 }
2414
2415 static inline void
zone_depot_unlock(zone_cache_t zc)
2417 {
2418 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2419 }
2420
2421 zone_t
zone_by_id(size_t zid)
2423 {
2424 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2425 }
2426
2427 static inline bool
zone_supports_vm(zone_t z)
2429 {
2430 /*
2431 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2432 * to overcommit because they're used to reclaim memory
2433 * (VM support).
2434 */
2435 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2436 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2437 }
2438
2439 const char *
zone_name(zone_t z)
2441 {
2442 return z->z_name;
2443 }
2444
2445 const char *
zone_heap_name(zone_t z)
2447 {
2448 zone_security_flags_t zsflags = zone_security_config(z);
2449 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2450 return kalloc_heap_names[zsflags.z_kheap_id];
2451 }
2452 return "invalid";
2453 }
2454
2455 static uint32_t
zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2457 {
2458 vm_size_t elem_count, chunks;
2459
2460 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2461 zone_elem_outer_size(z);
2462 chunks = (max_elems + elem_count - 1) / elem_count;
2463
2464 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2465 }
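
/*
 * Worked example with made-up numbers: for a zone with 4 KB pages,
 * z_chunk_pages == 4 and a 96-byte outer element size, one chunk holds
 * 16384 / 96 == 170 elements, so asking for 1000 elements rounds up to
 * 6 chunks, i.e. 24 pages.
 */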
2466
2467 static inline vm_size_t
zone_submaps_approx_size(void)
2469 {
2470 vm_size_t size = 0;
2471
2472 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2473 if (zone_submaps[idx] != VM_MAP_NULL) {
2474 size += zone_submaps[idx]->size;
2475 }
2476 }
2477
2478 return size;
2479 }
2480
2481 static inline void
zone_depot_init(struct zone_depot *zd)
2483 {
2484 *zd = (struct zone_depot){
2485 .zd_tail = &zd->zd_head,
2486 };
2487 }
2488
2489 static inline void
zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2491 {
2492 if (zd->zd_full++ == 0) {
2493 zd->zd_tail = &mag->zm_next;
2494 }
2495 mag->zm_next = zd->zd_head;
2496 zd->zd_head = mag;
2497 }
2498
2499 static inline void
zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2501 {
2502 zd->zd_full++;
2503 mag->zm_next = *zd->zd_tail;
2504 *zd->zd_tail = mag;
2505 zd->zd_tail = &mag->zm_next;
2506 }
2507
2508 static inline void
zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2510 {
2511 zd->zd_empty++;
2512 mag->zm_next = *zd->zd_tail;
2513 *zd->zd_tail = mag;
2514 }
2515
2516 static inline zone_magazine_t
zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2518 {
2519 zone_magazine_t mag = zd->zd_head;
2520
2521 assert(zd->zd_full);
2522
2523 zd->zd_full--;
2524 if (z && z->z_recirc_full_min > zd->zd_full) {
2525 z->z_recirc_full_min = zd->zd_full;
2526 }
2527 zd->zd_head = mag->zm_next;
2528 if (zd->zd_full == 0) {
2529 zd->zd_tail = &zd->zd_head;
2530 }
2531
2532 mag->zm_next = NULL;
2533 return mag;
2534 }
2535
2536 static inline zone_magazine_t
zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2538 {
2539 zone_magazine_t mag = *zd->zd_tail;
2540
2541 assert(zd->zd_empty);
2542
2543 zd->zd_empty--;
2544 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2545 z->z_recirc_empty_min = zd->zd_empty;
2546 }
2547 *zd->zd_tail = mag->zm_next;
2548
2549 mag->zm_next = NULL;
2550 return mag;
2551 }
2552
2553 static inline smr_seq_t
zone_depot_move_full(
    struct zone_depot *dst,
    struct zone_depot *src,
    uint32_t n,
    zone_t z)
2559 {
2560 zone_magazine_t head, last;
2561
2562 assert(n);
2563 assert(src->zd_full >= n);
2564
2565 src->zd_full -= n;
2566 if (z && z->z_recirc_full_min > src->zd_full) {
2567 z->z_recirc_full_min = src->zd_full;
2568 }
2569 head = last = src->zd_head;
2570 for (uint32_t i = n; i-- > 1;) {
2571 last = last->zm_next;
2572 }
2573
2574 src->zd_head = last->zm_next;
2575 if (src->zd_full == 0) {
2576 src->zd_tail = &src->zd_head;
2577 }
2578
2579 if (z && zone_security_array[zone_index(z)].z_lifo) {
2580 if (dst->zd_full == 0) {
2581 dst->zd_tail = &last->zm_next;
2582 }
2583 last->zm_next = dst->zd_head;
2584 dst->zd_head = head;
2585 } else {
2586 last->zm_next = *dst->zd_tail;
2587 *dst->zd_tail = head;
2588 dst->zd_tail = &last->zm_next;
2589 }
2590 dst->zd_full += n;
2591
2592 return last->zm_seq;
2593 }
2594
2595 static inline void
zone_depot_move_empty(
    struct zone_depot *dst,
    struct zone_depot *src,
    uint32_t n,
    zone_t z)
2601 {
2602 zone_magazine_t head, last;
2603
2604 assert(n);
2605 assert(src->zd_empty >= n);
2606
2607 src->zd_empty -= n;
2608 if (z && z->z_recirc_empty_min > src->zd_empty) {
2609 z->z_recirc_empty_min = src->zd_empty;
2610 }
2611 head = last = *src->zd_tail;
2612 for (uint32_t i = n; i-- > 1;) {
2613 last = last->zm_next;
2614 }
2615
2616 *src->zd_tail = last->zm_next;
2617
2618 dst->zd_empty += n;
2619 last->zm_next = *dst->zd_tail;
2620 *dst->zd_tail = head;
2621 }
2622
2623 static inline bool
zone_depot_poll(struct zone_depot *depot, smr_t smr)
2625 {
2626 if (depot->zd_full == 0) {
2627 return false;
2628 }
2629
2630 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2631 }
2632
2633 static void
zone_cache_swap_magazines(zone_cache_t cache)
2635 {
2636 uint16_t count_a = cache->zc_alloc_cur;
2637 uint16_t count_f = cache->zc_free_cur;
2638 vm_offset_t *elems_a = cache->zc_alloc_elems;
2639 vm_offset_t *elems_f = cache->zc_free_elems;
2640
2641 z_debug_assert(count_a <= zc_mag_size());
2642 z_debug_assert(count_f <= zc_mag_size());
2643
2644 cache->zc_alloc_cur = count_f;
2645 cache->zc_free_cur = count_a;
2646 cache->zc_alloc_elems = elems_f;
2647 cache->zc_free_elems = elems_a;
2648 }
2649
2650 __pure2
2651 static smr_t
zone_cache_smr(zone_cache_t cache)
2653 {
2654 return cache->zc_smr;
2655 }
2656
2657 /*!
2658 * @function zone_magazine_replace
2659 *
2660 * @brief
 * Unload a magazine and load a new one in its place.
2662 */
2663 static zone_magazine_t
zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2665 {
2666 zone_magazine_t old;
2667 vm_offset_t **elems;
2668
2669 mag->zm_seq = SMR_SEQ_INVALID;
2670
2671 if (empty) {
2672 elems = &zc->zc_free_elems;
2673 zc->zc_free_cur = 0;
2674 } else {
2675 elems = &zc->zc_alloc_elems;
2676 zc->zc_alloc_cur = zc_mag_size();
2677 }
2678 old = (zone_magazine_t)((uintptr_t)*elems -
2679 offsetof(struct zone_magazine, zm_elems));
2680 *elems = mag->zm_elems;
2681
2682 return old;
2683 }
2684
2685 static zone_magazine_t
zone_magazine_alloc(zalloc_flags_t flags)
2687 {
2688 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2689 }
2690
2691 static void
zone_magazine_free(zone_magazine_t mag)
2693 {
2694 (zfree)(zc_magazine_zone, mag);
2695 }
2696
2697 static void
zone_magazine_free_list(struct zone_depot *zd)
2699 {
2700 zone_magazine_t tmp, mag = *zd->zd_tail;
2701
2702 while (mag) {
2703 tmp = mag->zm_next;
2704 zone_magazine_free(mag);
2705 mag = tmp;
2706 }
2707
2708 *zd->zd_tail = NULL;
2709 zd->zd_empty = 0;
2710 }
2711
2712 __mockable void
zone_enable_caching(zone_t zone)
2714 {
2715 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2716 zone_cache_t caches;
2717 size_t depot_limit;
2718
2719 depot_limit = zc_pcpu_max() / size_per_mag;
2720 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2721
2722 caches = zalloc_percpu_permanent_type(struct zone_cache);
2723 zpercpu_foreach(zc, caches) {
2724 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2725 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2726 zone_depot_init(&zc->zc_depot);
2727 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2728 }
2729
2730 zone_lock(zone);
2731 assert(zone->z_pcpu_cache == NULL);
2732 zone->z_pcpu_cache = caches;
2733 zone->z_recirc_cont_cur = 0;
2734 zone->z_recirc_cont_wma = 0;
2735 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2736 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2737 zone_unlock(zone);
2738 }
2739
2740 bool
zone_maps_owned(vm_address_t addr, vm_size_t size)
2742 {
2743 return from_zone_map(addr, size);
2744 }
2745
2746 #if KASAN_LIGHT
2747 bool
kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2749 {
2750 return from_zone_map(addr, size) ||
2751 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2752 }
2753 #endif /* KASAN_LIGHT */
2754
2755 void
zone_map_sizes(
    vm_map_size_t *psize,
    vm_map_size_t *pfree,
    vm_map_size_t *plargest_free)
2760 {
2761 vm_map_size_t size, free, largest;
2762
2763 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2764
2765 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2766 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2767 *psize += size;
2768 *pfree += free;
2769 *plargest_free = MAX(*plargest_free, largest);
2770 }
2771 }
2772
2773 __attribute__((always_inline))
2774 vm_map_t
zone_submap(zone_security_flags_t zsflags)
2776 {
2777 return zone_submaps[zsflags.z_submap_idx];
2778 }
2779
2780 unsigned
zpercpu_count(void)
2782 {
2783 return zpercpu_early_count;
2784 }
2785
2786 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
2787 /*
2788 * Returns a random number of a given bit-width.
2789 *
2790 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2791 *
 * This uses Intel's rdrand because random() uses FP registers,
 * which causes FP faults and allocations, neither of which is something
 * we can do from zalloc itself due to reentrancy problems.
 *
 * For pre-rdrand machines, we fall back to a biased random generator
 * that doesn't use FP. Such hardware is no longer supported, but VMs of
 * newer OSes running on that older bare metal are made to limp along
 * (with reduced security) this way.
2800 */
2801 static uint64_t
zalloc_random_mask64(uint32_t bits)
2803 {
2804 uint64_t mask = ~0ull >> (64 - bits);
2805 uint64_t v;
2806
2807 #if __x86_64__
2808 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2809 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2810 v &= mask;
2811 } else {
2812 disable_preemption();
2813 int cpu = cpu_number();
2814 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2815 zone_bool_gen[cpu].zbg_entropy,
2816 ZONE_ENTROPY_CNT, bits);
2817 enable_preemption();
2818 }
2819 #else
2820 v = early_random() & mask;
2821 #endif
2822
2823 return v;
2824 }
2825
2826 /*
2827 * Returns a random number within [bound_min, bound_max)
2828 *
2829 * This isn't _exactly_ uniform, but the skew is small enough
2830 * not to matter for the consumers of this interface.
2831 *
2832 * Values within [bound_min, 2^64 % (bound_max - bound_min))
2833 * will be returned (bound_max - bound_min) / 2^64 more often
2834 * than values within [2^64 % (bound_max - bound_min), bound_max).
2835 */
2836 static uint32_t
zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2838 {
2839 uint64_t delta = bound_max - bound_min;
2840
2841 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2842 }
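
/*
 * Worked example of the skew described above: with bound_min == 0 and
 * bound_max == 3, delta == 3 and 2^64 % 3 == 1, so the value 0 is produced
 * for one extra input out of the 2^64 possible 64-bit draws compared to
 * the values 1 and 2 -- a bias far too small to matter here.
 */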
2843
2844 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
2845 #if ZALLOC_ENABLE_LOGGING
2846 /*
2847 * Track all kalloc zones of specified size for zlog name
2848 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2849 *
2850 * Additionally track all early kalloc zones with early.kalloc
2851 */
2852 static bool
track_kalloc_zones(zone_t z, const char *logname)
2854 {
2855 const char *prefix;
2856 size_t len;
2857 zone_security_flags_t zsflags = zone_security_config(z);
2858
2859 prefix = "kalloc.type.var.";
2860 len = strlen(prefix);
2861 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2862 strncmp(logname, prefix, len) == 0) {
2863 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2864
2865 return zone_elem_inner_size(z) == sizeclass;
2866 }
2867
2868 prefix = "kalloc.type.";
2869 len = strlen(prefix);
2870 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2871 strncmp(logname, prefix, len) == 0) {
2872 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2873
2874 return zone_elem_inner_size(z) == sizeclass;
2875 }
2876
2877 prefix = "kalloc.";
2878 len = strlen(prefix);
2879 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2880 strncmp(logname, prefix, len) == 0) {
2881 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2882
2883 return zone_elem_inner_size(z) == sizeclass;
2884 }
2885
2886 prefix = "early.kalloc";
2887 if ((zsflags.z_kheap_id == KHEAP_ID_EARLY) &&
2888 (strcmp(logname, prefix) == 0)) {
2889 return true;
2890 }
2891
2892 return false;
2893 }
2894 #endif
2895
2896 int
track_this_zone(const char *zonename, const char *logname)
2898 {
2899 unsigned int len;
2900 const char *zc = zonename;
2901 const char *lc = logname;
2902
2903 /*
2904 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2905 */
2906
2907 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2908 /*
2909 * If the current characters don't match, check for a space in
 * the zone name and a corresponding period in the log name.
2911 * If that's not there, then the strings don't match.
2912 */
2913
2914 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2915 break;
2916 }
2917
2918 /*
2919 * The strings are equal so far. If we're at the end, then it's a match.
2920 */
2921
2922 if (*zc == '\0') {
2923 return TRUE;
2924 }
2925 }
2926
2927 return FALSE;
2928 }
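
/*
 * Usage example (hypothetical zone name): a zone named "vm objects" is
 * matched by a boot-arg value of either "vm objects" or "vm.objects",
 * since a space in the zone name may be spelled as a period in the log
 * name, but "vm object" does not match because the zone name has an
 * extra trailing character.
 */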
2929
2930 #if DEBUG || DEVELOPMENT
2931
2932 vm_size_t
zone_element_info(void *addr, vm_tag_t *ptag)
2934 {
2935 vm_size_t size = 0;
2936 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2937 struct zone *src_zone;
2938
2939 if (from_zone_map(addr, sizeof(void *))) {
2940 src_zone = zone_by_id(zone_index_from_ptr(addr));
2941 size = zone_elem_inner_size(src_zone);
2942 #if VM_TAG_SIZECLASSES
2943 if (__improbable(src_zone->z_uses_tags)) {
2944 struct zone_page_metadata *meta;
2945 vm_offset_t eidx;
2946 vm_tag_t *slot;
2947
2948 meta = zone_element_resolve(src_zone,
2949 (vm_offset_t)addr, &eidx);
2950 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
2951 tag = *slot;
2952 }
2953 #endif /* VM_TAG_SIZECLASSES */
2954 }
2955
2956 *ptag = tag;
2957 return size;
2958 }
2959
2960 #endif /* DEBUG || DEVELOPMENT */
2961 #if KASAN_CLASSIC
2962
2963 vm_size_t
kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
2965 {
2966 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
2967
2968 *zonep = zone;
2969 return zone_elem_inner_size(zone);
2970 }
2971
2972 #endif /* KASAN_CLASSIC */
2973 #endif /* !ZALLOC_TEST */
2974 #pragma mark Zone zeroing and early random
2975 #if !ZALLOC_TEST
2976
2977 /*
2978 * Zone zeroing
2979 *
 * All allocations from zones are zeroed on free and are additionally
 * checked to still be zero on alloc. The check is always on
 * on embedded devices. A perf regression was detected on Intel
 * because we can't use the vectorized implementation of
 * memcmp_zero_ptr_aligned due to cyclic dependencies between
 * initialization and allocation. Therefore, on Intel, we perform
 * the check on only 20% of allocations.
2987 */
2988 #if ZALLOC_ENABLE_ZERO_CHECK
2989 #if defined(__x86_64__)
2990 /*
 * Perform zero validation on every 5th allocation
2992 */
2993 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
2994 static uint32_t PERCPU_DATA(zzc_decrementer);
2995 #endif /* defined(__x86_64__) */
2996
2997 /*
2998 * Determine if zero validation for allocation should be skipped
2999 */
3000 static bool
zalloc_skip_zero_check(void)
3002 {
3003 #if defined(__x86_64__)
3004 uint32_t *counterp, cnt;
3005
3006 counterp = PERCPU_GET(zzc_decrementer);
3007 cnt = *counterp;
3008 if (__probable(cnt > 0)) {
3009 *counterp = cnt - 1;
3010 return true;
3011 }
3012 *counterp = zzc_rate - 1;
#endif /* defined(__x86_64__) */
3014 return false;
3015 }
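
/*
 * With the default zzc_rate of 5, the per-CPU counter cycles through
 * 4, 3, 2, 1, 0: the four non-zero values skip validation and the zero
 * reloads the counter and validates, i.e. 1 in 5 (20%) of allocations
 * are checked on x86_64. On other architectures this function always
 * returns false, so every allocation is checked.
 */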
3016
3017 __abortlike
3018 static void
zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3020 {
3021 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3022 uint32_t first_offs = ~0u;
3023 uintptr_t first_bits = 0, v;
3024 char buf[1024];
3025 int pos = 0;
3026
3027 buf[0] = '\0';
3028
3029 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3030 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3031 continue;
3032 }
3033 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3034 "%5d: 0x%016lx", o, v);
3035 if (first_offs > o) {
3036 first_offs = o;
3037 first_bits = v;
3038 }
3039 }
3040
3041 (panic)("[%s%s]: element modified after free "
3042 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3043 zone_heap_name(z), zone_name(z),
3044 first_offs, first_bits, esize, (void *)elem, buf);
3045 }
3046
3047 static void
zalloc_validate_element(
    zone_t zone,
    vm_offset_t elem,
    vm_size_t size,
    zalloc_flags_t flags)
3053 {
3054 if (flags & Z_NOZZC) {
3055 return;
3056 }
3057 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3058 zalloc_uaf_panic(zone, elem, size);
3059 }
3060 if (flags & Z_PCPU) {
3061 for (size_t i = zpercpu_count(); --i > 0;) {
3062 elem += PAGE_SIZE;
3063 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3064 zalloc_uaf_panic(zone, elem, size);
3065 }
3066 }
3067 }
3068 }
3069
3070 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3071
3072 __attribute__((noinline))
3073 static void
zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3075 {
3076 #if KASAN_FAKESTACK
3077 /*
3078 * This can cause re-entrancy with kasan fakestacks
3079 */
3080 #pragma unused(zone, cpu, zs)
3081 #else
3082 uint32_t bits;
3083
3084 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3085 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3086
3087 zs->zs_alloc_rr += bits;
3088 zs->zs_alloc_rr %= zone->z_chunk_elems;
3089 #endif
3090 }
3091
3092 #endif /* !ZALLOC_TEST */
3093 #pragma mark Zone Leak Detection
3094 #if !ZALLOC_TEST
3095 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3096
3097 /*
3098 * Zone leak debugging code
3099 *
3100 * When enabled, this code keeps a log to track allocations to a particular
3101 * zone that have not yet been freed.
3102 *
3103 * Examining this log will reveal the source of a zone leak.
3104 *
3105 * The log is allocated only when logging is enabled (it is off by default),
3106 * so there is no effect on the system when it's turned off.
3107 *
3108 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3109 * zone name to log, with n starting at 1.
3110 *
3111 * Leaks debugging utilizes 2 tunables:
 * - zlsize (in kB), which describes how much allocated memory the records
 *   should cover (zones with smaller elements get more records; default is 4M).
3114 *
3115 * - zlfreq (in bytes) which describes a sample rate in cumulative allocation
3116 * size at which automatic leak detection will sample allocations.
3117 * (default is 8k)
3118 *
3119 *
3120 * Zone corruption logging
3121 *
3122 * Logging can also be used to help identify the source of a zone corruption.
3123 *
3124 * First, identify the zone that is being corrupted,
3125 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3126 *
3127 * When -zc is used in conjunction with zlog,
3128 * it changes the logging style to track both allocations and frees to the zone.
3129 *
3130 * When the corruption is detected, examining the log will show you the stack
3131 * traces of the callers who last allocated and freed any particular element in
3132 * the zone.
3133 *
3134 * Corruption debugging logs will have zrecs records
3135 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3136 */
3137
3138 #define ZRECORDS_MAX (256u << 10)
3139 #define ZRECORDS_DEFAULT (16u << 10)
3140 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3141 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3142 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3143
3144 __startup_func
3145 static void
zone_leaks_init_zrecs(void)
3147 {
3148 /*
3149 * Don't allow more than ZRECORDS_MAX records,
3150 * even if the user asked for more.
3151 *
3152 * This prevents accidentally hogging too much kernel memory
3153 * and making the system unusable.
3154 */
3155 if (zrecs == 0) {
3156 zrecs = ZRECORDS_DEFAULT *
3157 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3158 }
3159 if (zrecs > ZRECORDS_MAX) {
3160 zrecs = ZRECORDS_MAX;
3161 }
3162 }
3163 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3164
3165 static uint32_t
zone_leaks_record_count(zone_t z)
3167 {
3168 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3169
3170 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3171 }
3172
3173 static uint32_t
zone_leaks_sample_rate(zone_t z)
3175 {
3176 return zlfreq / zone_elem_inner_size(z);
3177 }
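
/*
 * Worked example with the default tunables (zlsize == 4096 kB,
 * zlfreq == 8192 bytes): a zone with 64-byte elements gets
 * (4096 << 10) / 64 == 65536 records and samples roughly one allocation
 * out of every 8192 / 64 == 128; a zone with 1024-byte elements computes
 * 4096 records, which is clamped up to ZRECORDS_DEFAULT (16384).
 */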
3178
3179 #if ZALLOC_ENABLE_LOGGING
3180 /* Log allocations and frees to help debug a zone element corruption */
3181 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3182
3183 /*
3184 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3185 */
3186 #define MAX_ZONES_LOG_REQUESTS 10
3187
3188 /**
3189 * @function zone_setup_logging
3190 *
3191 * @abstract
3192 * Optionally sets up a zone for logging.
3193 *
3194 * @discussion
 * We recognize the following boot-args:
3196 *
3197 * zlog=<zone_to_log>
3198 * zrecs=<num_records_in_log>
3199 * zlsize=<memory to cover for leaks>
3200 *
3201 * The zlog arg is used to specify the zone name that should be logged,
3202 * and zrecs/zlsize is used to control the size of the log.
3203 */
3204 static void
zone_setup_logging(zone_t z)
3206 {
3207 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3208 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3209 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3210 bool logging_on = false;
3211
3212 /*
3213 * Append kalloc heap name to zone name (if zone is used by kalloc)
3214 */
3215 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3216
3217 /* zlog0 isn't allowed. */
3218 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3219 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3220
3221 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3222 if (track_this_zone(zone_name, zlog_val) ||
3223 track_kalloc_zones(z, zlog_val)) {
3224 logging_on = true;
3225 break;
3226 }
3227 }
3228 }
3229
3230 /*
 * Backwards compat. with the old boot-arg used to specify single zone
 * logging, i.e. zlog. This needs to happen after the newer zlog<n> checks
 * because the "zlog" prefix also matches all of the zlog<n> boot-args.
3235 */
3236 if (!logging_on &&
3237 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3238 if (track_this_zone(zone_name, zlog_val) ||
3239 track_kalloc_zones(z, zlog_val)) {
3240 logging_on = true;
3241 }
3242 }
3243
3244 /*
3245 * If we want to log a zone, see if we need to allocate buffer space for
3246 * the log.
3247 *
3248 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3249 * we have to defer allocation in that case.
3250 *
3251 * zone_init() will finish the job.
3252 *
3253 * If we want to log one of the VM related zones that's set up early on,
3254 * we will skip allocation of the log until zinit is called again later
3255 * on some other zone.
3256 */
3257 if (logging_on) {
3258 if (corruption_debug_flag) {
3259 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3260 } else {
3261 z->z_btlog = btlog_create(BTLOG_HASH,
3262 zone_leaks_record_count(z), 0);
3263 }
3264 if (z->z_btlog) {
3265 z->z_log_on = true;
3266 printf("zone[%s%s]: logging enabled\n",
3267 zone_heap_name(z), z->z_name);
3268 } else {
3269 printf("zone[%s%s]: failed to enable logging\n",
3270 zone_heap_name(z), z->z_name);
3271 }
3272 }
3273 }
3274
3275 #endif /* ZALLOC_ENABLE_LOGGING */
3276 #if KASAN_TBI
3277 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3278
3279 __startup_func
3280 static void
kasan_tbi_init_zrecs(void)
3282 {
3283 /*
3284 * Don't allow more than ZRECORDS_MAX records,
3285 * even if the user asked for more.
3286 *
3287 * This prevents accidentally hogging too much kernel memory
3288 * and making the system unusable.
3289 */
3290 if (kasan_zrecs == 0) {
3291 kasan_zrecs = ZRECORDS_DEFAULT *
3292 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3293 }
3294 if (kasan_zrecs > ZRECORDS_MAX) {
3295 kasan_zrecs = ZRECORDS_MAX;
3296 }
3297 }
3298 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3299
3300 static void
zone_setup_kasan_logging(zone_t z)
3302 {
3303 if (!z->z_tbi_tag) {
3304 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3305 zone_heap_name(z), z->z_name);
3306 return;
3307 }
3308
3309 z->z_log_on = true;
3310 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3311 if (!z->z_btlog) {
3312 printf("zone[%s%s]: failed to enable kasan logging\n",
3313 zone_heap_name(z), z->z_name);
3314 }
3315 }
3316
3317 #endif /* KASAN_TBI */
3318 #if CONFIG_ZLEAKS
3319
3320 static thread_call_data_t zone_leaks_callout;
3321
3322 /*
3323 * The zone leak detector, abbreviated 'zleak', keeps track
3324 * of a subset of the currently outstanding allocations
3325 * made by the zone allocator.
3326 *
 * Zones that use more than zleak_pages_per_zone_wired_threshold
3328 * pages will get a BTLOG_HASH btlog with sampling to minimize
3329 * perf impact, yet receive statistical data about the backtrace
3330 * that is the most likely to cause the leak.
3331 *
 * If the zone's usage falls far enough back under the threshold, the log
 * is disabled and its backtraces are freed. Data can be collected
3334 * from userspace with the zlog(1) command.
3335 */
3336
3337 uint32_t zleak_active;
3338 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3339
3340 /* Size a zone will have before we will collect data on it */
3341 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3342 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3343
3344 static inline bool
zleak_should_enable_for_zone(zone_t z)
3346 {
3347 if (z->z_log_on) {
3348 return false;
3349 }
3350 if (z->z_btlog) {
3351 return false;
3352 }
3353 if (z->z_exhausts) {
3354 return false;
3355 }
3356 if (zone_exhaustible(z)) {
3357 return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3358 }
3359 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3360 }
3361
3362 static inline bool
zleak_should_disable_for_zone(zone_t z)
3364 {
3365 if (z->z_log_on) {
3366 return false;
3367 }
3368 if (!z->z_btlog) {
3369 return false;
3370 }
3371 if (zone_exhaustible(z)) {
3372 return z->z_wired_cur * 8 < z->z_wired_max * 7;
3373 }
3374 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3375 }
3376
3377 static void
zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3379 {
3380 btlog_t log;
3381
3382 zone_foreach(z) {
3383 if (zleak_should_disable_for_zone(z)) {
3384 log = z->z_btlog;
3385 z->z_btlog = NULL;
3386 assert(z->z_btlog_disabled == NULL);
3387 btlog_disable(log);
3388 z->z_btlog_disabled = log;
3389 os_atomic_dec(&zleak_active, relaxed);
3390 }
3391
3392 if (zleak_should_enable_for_zone(z)) {
3393 log = z->z_btlog_disabled;
3394 if (log == NULL) {
3395 log = btlog_create(BTLOG_HASH,
3396 zone_leaks_record_count(z),
3397 zone_leaks_sample_rate(z));
3398 } else if (btlog_enable(log) == KERN_SUCCESS) {
3399 z->z_btlog_disabled = NULL;
3400 } else {
3401 log = NULL;
3402 }
3403 os_atomic_store(&z->z_btlog, log, release);
3404 os_atomic_inc(&zleak_active, relaxed);
3405 }
3406 }
3407 }
3408
3409 __startup_func
3410 static void
zleak_init(void)
3412 {
3413 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3414
3415 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3416 zleak_max_zonemap_size / 8);
3417
3418 thread_call_setup_with_options(&zone_leaks_callout,
3419 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3420 THREAD_CALL_OPTIONS_ONCE);
3421 }
3422 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3423
3424 kern_return_t
zleak_update_threshold(vm_size_t *arg, uint64_t value)
3426 {
3427 if (value >= zleak_max_zonemap_size) {
3428 return KERN_INVALID_VALUE;
3429 }
3430
3431 if (arg == &zleak_per_zone_tracking_threshold) {
3432 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3433 zleak_pages_per_zone_wired_threshold = atop(value);
3434 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3435 thread_call_enter(&zone_leaks_callout);
3436 }
3437 return KERN_SUCCESS;
3438 }
3439
3440 return KERN_INVALID_ARGUMENT;
3441 }
3442
3443 static void
panic_display_zleaks(bool has_syms)
3445 {
3446 bool did_header = false;
3447 vm_address_t bt[BTLOG_MAX_DEPTH];
3448 uint32_t len, count;
3449
3450 zone_foreach(z) {
3451 btlog_t log = z->z_btlog;
3452
3453 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3454 continue;
3455 }
3456
3457 count = btlog_guess_top(log, bt, &len);
3458 if (count == 0) {
3459 continue;
3460 }
3461
3462 if (!did_header) {
3463 paniclog_append_noflush("Zone (suspected) leak report:\n");
3464 did_header = true;
3465 }
3466
3467 paniclog_append_noflush(" Zone: %s%s\n",
3468 zone_heap_name(z), zone_name(z));
3469 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3470 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3471 paniclog_append_noflush(" Size: %ld\n",
3472 (long)zone_size_wired(z));
3473 paniclog_append_noflush(" Top backtrace:\n");
3474 for (uint32_t i = 0; i < len; i++) {
3475 if (has_syms) {
3476 paniclog_append_noflush(" %p ", (void *)bt[i]);
3477 panic_print_symbol_name(bt[i]);
3478 paniclog_append_noflush("\n");
3479 } else {
3480 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3481 }
3482 }
3483
3484 kmod_panic_dump(bt, len);
3485 paniclog_append_noflush("\n");
3486 }
3487 }
3488 #endif /* CONFIG_ZLEAKS */
3489
#endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS */
3491 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3492
3493 #if !KASAN_TBI
3494 __cold
3495 #endif
3496 static void
zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3498 {
3499 btlog_t log = zone->z_btlog;
3500 btref_get_flags_t flags = 0;
3501 btref_t ref;
3502
3503 #if !KASAN_TBI
3504 if (!log || !btlog_sample(log)) {
3505 return;
3506 }
3507 #endif
3508 if (get_preemption_level() || zone_supports_vm(zone)) {
3509 /*
3510 * VM zones can be used by btlog, avoid reentrancy issues.
3511 */
3512 flags = BTREF_GET_NOWAIT;
3513 }
3514
3515 ref = btref_get(fp, flags);
3516 while (count-- > 0) {
3517 if (count) {
3518 btref_retain(ref);
3519 }
3520 addr = (vm_offset_t)zstack_tbi_fix(addr);
3521 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3522 addr += *(vm_offset_t *)addr;
3523 }
3524 }
3525
3526 #define ZALLOC_LOG(zone, addr, count) ({ \
3527 if ((zone)->z_btlog) { \
3528 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3529 } \
3530 })
3531
3532 #if !KASAN_TBI
3533 __cold
3534 #endif
3535 static void
zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3537 {
3538 btlog_t log = zone->z_btlog;
3539 btref_get_flags_t flags = 0;
3540 btref_t ref;
3541
3542 #if !KASAN_TBI
3543 if (!log) {
3544 return;
3545 }
3546 #endif
3547
3548 /*
3549 * See if we're doing logging on this zone.
3550 *
3551 * There are two styles of logging used depending on
3552 * whether we're trying to catch a leak or corruption.
3553 */
3554 #if !KASAN_TBI
3555 if (btlog_get_type(log) == BTLOG_HASH) {
3556 /*
3557 * We're logging to catch a leak.
3558 *
3559 * Remove any record we might have for this element
3560 * since it's being freed. Note that we may not find it
3561 * if the buffer overflowed and that's OK.
3562 *
3563 * Since the log is of a limited size, old records get
3564 * overwritten if there are more zallocs than zfrees.
3565 */
3566 while (count-- > 0) {
3567 addr = (vm_offset_t)zstack_tbi_fix(addr);
3568 btlog_erase(log, (void *)addr);
3569 addr += *(vm_offset_t *)addr;
3570 }
3571 return;
3572 }
3573 #endif /* !KASAN_TBI */
3574
3575 if (get_preemption_level() || zone_supports_vm(zone)) {
3576 /*
3577 * VM zones can be used by btlog, avoid reentrancy issues.
3578 */
3579 flags = BTREF_GET_NOWAIT;
3580 }
3581
3582 ref = btref_get(fp, flags);
3583 while (count-- > 0) {
3584 if (count) {
3585 btref_retain(ref);
3586 }
3587 addr = (vm_offset_t)zstack_tbi_fix(addr);
3588 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3589 addr += *(vm_offset_t *)addr;
3590 }
3591 }
3592
3593 #define ZFREE_LOG(zone, addr, count) ({ \
3594 if ((zone)->z_btlog) { \
3595 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3596 } \
3597 })
3598
3599 #else
3600 #define ZALLOC_LOG(...) ((void)0)
3601 #define ZFREE_LOG(...) ((void)0)
3602 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3603 #endif /* !ZALLOC_TEST */
3604 #pragma mark zone (re)fill
3605 #if !ZALLOC_TEST
3606
3607 /*!
3608 * @defgroup Zone Refill
3609 * @{
3610 *
3611 * @brief
3612 * Functions handling the zone refill machinery.
3613 *
3614 * @discussion
3615 * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3616 *
3617 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3618 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3619 *
3620 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3621 *
3622 * - wake up the asynchronous expansion thread call if the caller is not allowed
3623 * to block, or if the reserve becomes depleted.
3624 *
3625 *
3626 * <h2>Synchronous expansion</h2>
3627 *
3628 * This mechanism is actually the only one that may refill a zone, and all the
3629 * other ones funnel through this one eventually.
3630 *
3631 * @c zone_expand_locked() implements the core of the expansion mechanism,
3632 * and will do so while a caller specified predicate is true.
3633 *
3634 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3635 * - one VM privileged thread,
3636 * - one regular thread.
3637 *
3638 * Regular threads that refill will put down their identity in @c z_expander,
3639 * so that priority inversion avoidance can be implemented.
3640 *
3641 * However, VM privileged threads are allowed to use VM page reserves,
3642 * which allows for the system to recover from extreme memory pressure
3643 * situations, allowing for the few allocations that @c zone_gc() or
3644 * killing processes require.
3645 *
3646 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3647 * is set. @c z_expander is not necessarily the identity of this VM privileged
3648 * thread (it is if the VM privileged thread came in first; otherwise it won't
3649 * be, and could even be @c THREAD_NULL).
3650 *
3651 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3652 * spending a whole pointer on priority inheritance for VM privileged threads
3653 * (and other issues related to having two owners), we use the rwlock boost as
3654 * a stop gap to avoid priority inversions.
3655 *
3656 *
3657 * <h2>Chunk wiring policies</h2>
3658 *
3659 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3660 * to try to minimize fragmentation relative to element sizes not aligning with
3661 * a chunk size well. However, this can grow large and be hard to fulfill on
3662 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3663 * 4k page systems).
3664 *
3665 * This is why, under memory pressure, the system allows chunks to be
3666 * partially populated. The metadata of the first page in the chunk maintains
3667 * the count of actually populated pages.
3668 *
3669 * The metadata for addresses assigned to a zone is found on one of 4 queues:
3670 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3671 * elements (those can be targeted by @c zone_gc()),
3672 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3673 * used,
3674 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3675 * left,
3676 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3677 * the zone forever, or the first secondary metadata for a chunk whose
3678 * corresponding page is not populated in the chunk.
3679 *
3680 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3681 * queues are preferred.
3682 *
3683 *
3684 * <h2>Asynchronous expansion</h2>
3685 *
3686 * This mechanism allows for refilling zones used mostly by non-blocking
3687 * callers. It relies on a thread call (@c zone_expand_callout) which will
3688 * iterate all zones and refill the ones marked with @c z_async_refilling.
3689 *
3690 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3691 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3692 *
3693 *
3694 * <h2>Dealing with zone allocations from the mach VM code</h2>
3695 *
3696 * The implementation of the mach VM itself uses the zone allocator
3697 * for things like the vm_map_entry data structure. In order to prevent
3698 * a recursion problem when adding more pages to a zone, the VM zones
3699 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3700 * or any VM map functions to allocate.
3701 *
3702 * Instead, a really simple coalescing first-fit allocator is used
3703 * for this submap, and nothing other than zalloc can allocate from it.
3704 *
3705 * Memory is directly populated which doesn't require allocation of
3706 * VM map entries, and avoids recursion. The cost of this scheme however,
3707 * is that `vm_map_lookup_entry` will not function on those addresses
3708 * (nor any API relying on it).
3709 */
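/*
 * Illustrative sketch of the trigger described above (simplified, not the
 * actual zalloc_ext() code; `caller_may_block` stands in for the real flag
 * checks):
 *
 *     if (z->z_elems_free <= z->z_elems_rsv / 2) {
 *         if (caller_may_block(flags)) {
 *             zone_expand_locked(z, flags);              // direct expansion
 *         } else {
 *             zone_expand_async_schedule_if_allowed(z);  // async expansion
 *         }
 *     }
 */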
3710
3711 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3712 static void zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd);
3713 static thread_call_data_t zone_expand_callout;
3714
3715 __attribute__((overloadable))
3716 static inline bool
3717 zone_submap_is_sequestered(zone_submap_idx_t idx)
3718 {
3719 return idx != Z_SUBMAP_IDX_DATA;
3720 }
3721
3722 __attribute__((overloadable))
3723 static inline bool
3724 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3725 {
3726 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3727 }
3728
3729 static inline kma_flags_t
3730 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3731 {
3732 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3733
3734 if (zsflags.z_noencrypt) {
3735 kmaflags |= KMA_NOENCRYPT;
3736 }
3737
3738 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
3739 kmaflags |= KMA_DATA;
3740 } else if ((zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED) ||
3741 (zsflags.z_submap_idx == Z_SUBMAP_IDX_DATA)) {
3742 /*
3743 * assume zones which are manually placed in the data heap,
3744 * like mbufs, are going to be shared somehow.
3745 */
3746 kmaflags |= KMA_DATA_SHARED;
3747 }
3748
3749 if (flags & Z_NOPAGEWAIT) {
3750 kmaflags |= KMA_NOPAGEWAIT;
3751 }
3752 if (z->z_permanent || (!z->z_destructible &&
3753 zone_submap_is_sequestered(zsflags))) {
3754 kmaflags |= KMA_PERMANENT;
3755 }
3756 if (zsflags.z_submap_from_end) {
3757 kmaflags |= KMA_LAST_FREE;
3758 }
3759
3760
3761 return kmaflags;
3762 }
3763
3764 static inline void
3765 zone_add_wired_pages(zone_t z, uint32_t pages)
3766 {
3767 os_atomic_add(&zone_pages_wired, pages, relaxed);
3768
3769 #if CONFIG_ZLEAKS
3770 if (__improbable(zleak_should_enable_for_zone(z) &&
3771 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3772 thread_call_enter(&zone_leaks_callout);
3773 }
3774 #else
3775 (void)z;
3776 #endif
3777 }
3778
3779 static inline void
3780 zone_remove_wired_pages(zone_t z, uint32_t pages)
3781 {
3782 os_atomic_sub(&zone_pages_wired, pages, relaxed);
3783
3784 #if CONFIG_ZLEAKS
3785 if (__improbable(zleak_should_disable_for_zone(z) &&
3786 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3787 thread_call_enter(&zone_leaks_callout);
3788 }
3789 #else
3790 (void)z;
3791 #endif
3792 }
3793
3794 #if ZSECURITY_CONFIG(ZONE_TAGGING)
3795
3796 static inline void
3797 zone_tag_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3798 {
3799 if (zone->z_percpu) {
3800 zpercpu_foreach_cpu(index) {
3801 vm_memtag_store_tag(addr + ptoa(index), elem_size);
3802 }
3803 }
3804 }
3805
3806 static inline caddr_t
3807 zone_tag_free_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3808 {
3809 if (__improbable((uintptr_t)addr > 0xFF00000000000000ULL)) {
3810 return addr;
3811 }
3812
3813 addr = vm_memtag_generate_and_store_tag(addr, elem_size);
3814 zone_tag_element(zone, addr, elem_size);
3815
3816 return addr;
3817 }
3818
3819 static inline void
3820 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3821 {
3822 zone_security_flags_t *zsflags = &zone_security_array[zone_index(zone)];
3823
3824 if (!zsflags->z_tag) {
3825 return;
3826 }
3827
3828 vm_size_t elem_size = zone_elem_outer_size(zone);
3829 vm_size_t oob_offs = zone_elem_outer_offs(zone);
3830
3831
3832 for (uint32_t i = start; i < end; i++) {
3833 caddr_t elem_addr = (caddr_t)(base + oob_offs + i * elem_size);
3834
3835 elem_addr = vm_memtag_generate_and_store_tag(elem_addr, elem_size);
3836 zone_tag_element(zone, elem_addr, elem_size);
3837 }
3838 }
3839 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3840 #define zone_tag_free_element(z, a, s) (a)
3841 #define zcram_memtag_init(z, b, s, e) do {} while (0)
3842 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3843
3844 /*!
3845 * @function zcram_and_lock()
3846 *
3847 * @brief
3848 * Prepare some memory for being usable for allocation purposes.
3849 *
3850 * @discussion
3851 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3852 * to be usable in the zone.
3853 *
3854 * This function assumes the metadata is already populated for the range.
3855 *
3856 * Calling this function with @c pg_start being 0 means that the memory
3857 * is either a partial chunk or a full chunk that isn't published anywhere,
3858 * so the initialization can happen without locks held.
3859 *
3860 * Calling this function with a non zero @c pg_start means that we are extending
3861 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3862 * is already usable and published in the zone, so extending it requires holding
3863 * the zone lock.
3864 *
3865 * @param zone The zone to cram new populated pages into
3866 * @param addr The base address for the chunk(s)
3867 * @param pg_va_new The number of virtual pages newly assigned to the zone
3868 * @param pg_start The first newly populated page relative to @a addr.
3869 * @param pg_end The after-last newly populated page relative to @a addr.
3870 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3871 */
3872 static void
3873 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3874 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3875 {
3876 zone_id_t zindex = zone_index(zone);
3877 vm_offset_t elem_size = zone_elem_outer_size(zone);
3878 uint32_t free_start = 0, free_end = 0;
3879 uint32_t oob_offs = zone_elem_outer_offs(zone);
3880
3881 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3882 uint32_t chunk_pages = zone->z_chunk_pages;
3883 bool guarded = meta->zm_guarded;
3884
3885 assert(pg_start < pg_end && pg_end <= chunk_pages);
3886
3887 if (pg_start == 0) {
3888 uint16_t chunk_len = (uint16_t)pg_end;
3889 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3890 bool inline_bitmap = false;
3891
3892 if (zone->z_percpu) {
3893 chunk_len = 1;
3894 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3895 assert(pg_end == zpercpu_count());
3896 }
3897 if (!zone->z_permanent && !zone->z_uses_tags) {
3898 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3899 }
3900
3901 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3902
3903 meta[0] = (struct zone_page_metadata){
3904 .zm_index = zindex,
3905 .zm_guarded = guarded,
3906 .zm_inline_bitmap = inline_bitmap,
3907 .zm_chunk_len = chunk_len,
3908 .zm_alloc_size = lock,
3909 };
3910
3911 if (!zone->z_permanent && !inline_bitmap) {
3912 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3913 zone->z_chunk_elems, zone->z_uses_tags);
3914 }
3915
3916 for (uint16_t i = 1; i < chunk_pages; i++) {
3917 meta[i] = (struct zone_page_metadata){
3918 .zm_index = zindex,
3919 .zm_guarded = guarded,
3920 .zm_inline_bitmap = inline_bitmap,
3921 .zm_chunk_len = secondary_len,
3922 .zm_page_index = (uint8_t)i,
3923 .zm_bitmap = meta[0].zm_bitmap,
3924 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
3925 };
3926 }
3927
3928 if (inline_bitmap) {
3929 zone_meta_bits_init_inline(meta, free_end);
3930 }
3931 } else {
3932 assert(!zone->z_percpu && !zone->z_permanent);
3933
3934 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3935 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3936 }
3937
3938 zcram_memtag_init(zone, addr, free_start, free_end);
3939
3940 #if KASAN_CLASSIC
3941 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3942 if (zone->z_permanent) {
3943 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3944 } else if (zone->z_percpu) {
3945 for (uint32_t i = 0; i < pg_end; i++) {
3946 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3947 zone_elem_outer_size(zone),
3948 zone_elem_outer_offs(zone),
3949 zone_elem_redzone(zone));
3950 }
3951 } else {
3952 kasan_zmem_add(addr, ptoa(pg_end),
3953 zone_elem_outer_size(zone),
3954 zone_elem_outer_offs(zone),
3955 zone_elem_redzone(zone));
3956 }
3957 #endif /* KASAN_CLASSIC */
3958
3959 /*
3960 * Insert the initialized pages / metadatas into the right lists.
3961 */
3962
3963 zone_lock(zone);
3964 assert(zone->z_self == zone);
3965
3966 if (pg_start != 0) {
3967 assert(meta->zm_chunk_len == pg_start);
3968
3969 zone_meta_bits_merge(meta, free_start, free_end);
3970 meta->zm_chunk_len = (uint16_t)pg_end;
3971
3972 /*
3973 * consume the zone_meta_lock_in_partial()
3974 * done in zone_expand_locked()
3975 */
3976 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
3977 zone_meta_remqueue(zone, meta);
3978 }
3979
3980 if (zone->z_permanent || meta->zm_alloc_size) {
3981 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
3982 } else {
3983 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
3984 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
3985 }
3986 if (pg_end < chunk_pages) {
3987 /* push any non populated residual VA on z_pageq_va */
3988 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
3989 }
3990
3991 zone->z_elems_free += free_end - free_start;
3992 zone->z_elems_avail += free_end - free_start;
3993 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
3994 if (pg_va_new) {
3995 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
3996 }
3997 if (zone->z_wired_hwm < zone->z_wired_cur) {
3998 zone->z_wired_hwm = zone->z_wired_cur;
3999 }
4000
4001 #if CONFIG_ZLEAKS
4002 if (__improbable(zleak_should_enable_for_zone(zone) &&
4003 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4004 thread_call_enter(&zone_leaks_callout);
4005 }
4006 #endif /* CONFIG_ZLEAKS */
4007
4008 zone_add_wired_pages(zone, pg_end - pg_start);
4009 }
4010
4011 static void
4012 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4013 {
4014 uint32_t chunk_pages = zone->z_chunk_pages;
4015
4016 assert(pages % chunk_pages == 0);
4017 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4018 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4019 zone_unlock(zone);
4020 }
4021 }
4022
4023 __startup_func
4024 void
4025 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4026 {
4027 uint32_t pages = (uint32_t)atop(size);
4028
4029 assert(from_zone_map(newmem, size));
4030 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4031 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4032
4033 /*
4034 * The early pages we move at the pmap layer can't be "depopulated"
4035 * because there's no vm_page_t for them.
4036 *
4037 * "Lock" them so that they never hit z_pageq_empty.
4038 */
4039 vm_memtag_bzero_unchecked((void *)newmem, size);
4040 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4041 }
4042
4043 /*!
4044 * @function zone_submap_alloc_sequestered_va
4045 *
4046 * @brief
4047 * Allocates VA without using vm_find_space().
4048 *
4049 * @discussion
4050 * Allocate VA quickly without using the slower vm_find_space() for cases
4051 * when the submaps are fully sequestered.
4052 *
4053 * The VM submap is used to implement the VM itself so it is always sequestered,
4054 * as it can't use kmem_alloc(), which always needs to allocate VM map entries.
4055 * However, it can use vm_map_enter(), whose entry coalescing always succeeds
4056 * here, so the VM map only ever needs 2 entries (one for each end).
4057 *
4058 * The RO submap is similarly always sequestered if it exists (as a non
4059 * sequestered RO submap makes very little sense).
4060 *
4061 * The allocator is a very simple bump-allocator
4062 * that allocates from either end.
4063 */
4064 static kern_return_t
4065 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4066 vm_offset_t *addrp)
4067 {
4068 vm_size_t size = ptoa(pages);
4069 vm_map_t map = zone_submap(zsflags);
4070 vm_map_entry_t first, last;
4071 vm_map_offset_t addr;
4072
4073 vmlp_api_start(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA);
4074
4075 vm_map_lock(map);
4076
4077 first = vm_map_first_entry(map);
4078 last = vm_map_last_entry(map);
4079
4080 if (zsflags.z_submap_from_end) {
4081 vmlp_range_event(map, last->vme_start - size, size);
4082 } else {
4083 vmlp_range_event(map, first->vme_end, size);
4084 }
4085
4086 if (first->vme_end + size > last->vme_start) {
4087 vm_map_unlock(map);
4088 vmlp_api_end(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA, KERN_NO_SPACE);
4089 return KERN_NO_SPACE;
4090 }
4091
4092 if (zsflags.z_submap_from_end) {
4093 last->vme_start -= size;
4094 addr = last->vme_start;
4095 VME_OFFSET_SET(last, addr);
4096 } else {
4097 addr = first->vme_end;
4098 first->vme_end += size;
4099 }
4100 map->size += size;
4101
4102 vm_map_unlock(map);
4103
4104 *addrp = addr;
4105 vmlp_api_end(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA, KERN_SUCCESS);
4106 return KERN_SUCCESS;
4107 }
4108
4109 void
4110 zone_fill_initially(zone_t zone, vm_size_t nelems)
4111 {
4112 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4113 kern_return_t kr;
4114 vm_offset_t addr;
4115 uint32_t pages;
4116 zone_security_flags_t zsflags = zone_security_config(zone);
4117
4118 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4119 assert(zone->z_elems_avail == 0);
4120
4121 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4122 pages = zone_alloc_pages_for_nelems(zone, nelems);
4123 if (zone_submap_is_sequestered(zsflags)) {
4124 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4125 if (kr != KERN_SUCCESS) {
4126 panic("zone_submap_alloc_sequestered_va() "
4127 "of %u pages failed", pages);
4128 }
4129 kernel_memory_populate(addr, ptoa(pages),
4130 kmaflags, VM_KERN_MEMORY_ZONE);
4131 } else {
4132 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4133 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4134 kmaflags, VM_KERN_MEMORY_ZONE);
4135 }
4136
4137 zone_meta_populate(addr, ptoa(pages));
4138 zcram(zone, addr, pages, 0);
4139 }
4140
4141 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4142 __attribute__((noinline))
4143 static void
4144 zone_scramble_va_and_unlock(
4145 zone_t z,
4146 struct zone_page_metadata *meta,
4147 uint32_t runs,
4148 uint32_t pages,
4149 uint32_t chunk_pages,
4150 uint64_t guard_mask)
4151 {
4152 struct zone_page_metadata *arr[ZONE_MAX_CHUNK_ALLOC_NUM];
4153
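/*
 * Rebuild the array of run heads: a guarded run is followed by a
 * chunk-sized guard region, so it spans 2 * chunk_pages of VA.
 */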
4154 for (uint32_t run = 0, n = 0; run < runs; run++) {
4155 arr[run] = meta + n;
4156 n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4157 }
4158
4159 /*
4160 * Fisher–Yates shuffle, for an array with indices [0, n)
4161 *
4162 * for i from n−1 downto 1 do
4163 * j ← random integer such that 0 ≤ j ≤ i
4164 * exchange a[j] and a[i]
4165 *
4166 * The point here is that early allocations aren't at a fixed
4167 * distance from each other.
4168 */
4169 for (uint32_t i = runs - 1; i > 0; i--) {
4170 uint32_t j = zalloc_random_uniform32(0, i + 1);
4171
4172 meta = arr[j];
4173 arr[j] = arr[i];
4174 arr[i] = meta;
4175 }
4176
4177 zone_lock(z);
4178
4179 for (uint32_t i = 0; i < runs; i++) {
4180 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4181 }
4182 z->z_va_cur += z->z_percpu ? runs : pages;
4183 }
4184
4185 static inline uint32_t
4186 dist_u32(uint32_t a, uint32_t b)
4187 {
4188 return a < b ? b - a : a - b;
4189 }
4190
4191 static uint64_t
4192 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4193 {
4194 for (; n-- > 0; pop--) {
4195 uint32_t bit = zalloc_random_uniform32(0, pop);
4196 uint64_t m = mask;
4197
4198 for (; bit; bit--) {
4199 m &= m - 1;
4200 }
4201
4202 mask ^= 1ull << __builtin_ctzll(m);
4203 }
4204
4205 return mask;
4206 }
4207
4208 /**
4209 * @function zalloc_random_bits
4210 *
4211 * @brief
4212 * Compute a random number with a specified number of bits set in a given width.
4213 *
4214 * @discussion
4215 * This function generates a "uniform" distribution of sets of bits set in
4216 * a given width, with typically less than width/4 calls to random.
4217 *
4218 * @param pop the target number of bits set.
4219 * @param width the number of bits in the random integer to generate.
4220 */
4221 static uint64_t
4222 zalloc_random_bits(uint32_t pop, uint32_t width)
4223 {
4224 uint64_t w_mask = (1ull << width) - 1;
4225 uint64_t mask;
4226 uint32_t cur;
4227
4228 if (3 * width / 4 <= pop) {
4229 mask = w_mask;
4230 cur = width;
4231 } else if (pop <= width / 4) {
4232 mask = 0;
4233 cur = 0;
4234 } else {
4235 /*
4236 * Choosing a random number this way will overwhelmingly
4237 * yield a popcount around `width / 2`, +/- a few bits.
4238 */
4239 mask = zalloc_random_mask64(width);
4240 cur = __builtin_popcountll(mask);
4241
4242 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4243 /*
4244 * If the opposite mask has a closer popcount,
4245 * then start with that one as the seed.
4246 */
4247 cur = width - cur;
4248 mask ^= w_mask;
4249 }
4250 }
4251
4252 if (cur < pop) {
4253 /*
4254 * Setting `pop - cur` bits is really clearing that many from
4255 * the opposite mask.
4256 */
4257 mask ^= w_mask;
4258 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4259 mask ^= w_mask;
4260 } else if (pop < cur) {
4261 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4262 }
4263
4264 return mask;
4265 }
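/*
 * Worked example with illustrative values: zalloc_random_bits(3, 8) falls
 * into the last branch; if the random seed mask is 0b01101011 (popcount 5),
 * its complement 0b10010100 (popcount 3) is closer to the target and is
 * kept as the seed, and since that already matches `pop`, no further bits
 * need to be set or cleared.
 */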
4266 #endif
4267
4268 static void
4269 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4270 {
4271 zone_security_flags_t zsflags = zone_security_config(z);
4272 struct zone_page_metadata *meta;
4273 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4274 uint32_t chunk_pages = z->z_chunk_pages;
4275 uint32_t runs, pages, guards, guard_pages, rnum;
4276 uint64_t guard_mask = 0;
4277 bool lead_guard = false;
4278 zone_id_t zidx = zone_index(z);
4279 kern_return_t kr;
4280 vm_offset_t addr;
4281
4282 zone_unlock(z);
4283
4284 /*
4285 * A lot of OOB exploitation techniques rely on precise placement
4286 * and interleaving of zone pages. The layout that is sought
4287 * by attackers will be C/P/T types, where:
4288 * - (C)ompromised is the type for which attackers have a bug,
4289 * - (P)adding is used to pad memory,
4290 * - (T)arget is the type that the attacker will attempt to corrupt
4291 * by exploiting (C).
4292 *
4293 * Note that in some cases C==T and P isn't needed.
4294 *
4295 * In order to make those placement games much harder,
4296 * we grow zones by random runs of memory, up to 10 chunks.
4297 * This makes predicting the precise layout of the heap
4298 * considerably more complicated.
4299 *
4300 * Note: this function makes very heavy use of random,
4301 * however, it is mostly limited to sequestered zones,
4302 * and eventually the layout will be fixed,
4303 * and the usage of random vastly reduced.
4304 *
4305 * For non sequestered zones, there's a single call
4306 * to random in order to decide whether we want
4307 * a guard page or not.
4308 */
4309 pages = chunk_pages;
4310 guards = 0;
4311 runs = 1;
4312 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4313 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4314 runs = ZONE_MAX_CHUNK_ALLOC_NUM;
4315 runs = zalloc_random_uniform32(1, runs + 1);
4316 pages = runs * chunk_pages;
4317 }
4318 static_assert(ZONE_MAX_CHUNK_ALLOC_NUM <= 10,
4319 "make sure that `runs` will never exceed 10");
4320 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4321
4322 /*
4323 * For zones that are susceptible to OOB,
4324 * guards might be added after each chunk.
4325 *
4326 * Those guard pages are marked with the ZM_PGZ_GUARD
4327 * magical chunk len, and their zm_oob_offs field
4328 * is used to remember optional shift applied
4329 * to returned elements, in order to right-align-them
4330 * as much as possible.
4331 *
4332 * In an adversarial context, while guard pages
4333 * are extremely effective against linear overflow,
4334 * using a predictable frequency of guard pages feels like
4335 * a missed opportunity, which is why we choose to insert
4336 * one guard region (chunk_pages guard pages) with 25% probability,
4337 * with a goal of having ~20% of the VA allocated consist of guard pages.
4338 */
4339 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4340 if (!z->z_percpu) {
4341 /*
4342 * Don't bother with adding guard regions for per-CPU zones, as
4343 * they're not interesting to attackers.
4344 */
4345 for (uint32_t run = 0; run < runs; run++) {
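/* 128 out of [0, 512): the 25% per-run guard probability mentioned above */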
4346 rnum = zalloc_random_uniform32(0, 4 * 128);
4347 guards += (rnum < 128);
4348 }
4349 }
4350 assert3u(guards, <=, runs);
4351
4352 guard_mask = 0;
4353
4354 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4355 /*
4356 * Several exploitation strategies rely on a C/T (compromised
4357 * then target types) ordering of pages with a sub-page reach
4358 * from C into T.
4359 *
4360 * We want to reliably thwart such exploitations
4361 * and hence force a guard page between alternating
4362 * memory types.
4363 *
4364 * Note: this counts towards the number of guard pages we want.
4365 */
4366 guard_mask |= 1ull << (runs - 1);
4367
4368 if (guards > 1) {
4369 guard_mask |= zalloc_random_bits(guards - 1, runs - 1);
4370 } else {
4371 guards = 1;
4372 }
4373
4374 /*
4375 * While we randomize the chunk lengths, an attacker with
4376 * precise timing control can guess when overflows happen,
4377 * and "measure" the runs, which gives them an indication
4378 * of where the next run starts.
4379 *
4380 * In order to make this knowledge unusable, add a guard page
4381 * _before_ the new run with a 25% probability, regardless
4382 * of whether we had enough guard pages.
4383 */
4384 if ((rnum & 3) == 0) {
4385 lead_guard = true;
4386 guards++;
4387 }
4388 } else {
4389 assert3u(runs, ==, 1);
4390 assert3u(guards, <=, 1);
4391 guard_mask = guards << (runs - 1);
4392 }
4393 #else
4394 (void)rnum;
4395 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4396
4397 /* We want guards to be at least the size of the chunk. */
4398 guard_pages = guards * chunk_pages;
4399 if (zone_submap_is_sequestered(zsflags)) {
4400 kr = zone_submap_alloc_sequestered_va(zsflags,
4401 pages + guard_pages, &addr);
4402 } else {
4403 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4404 kr = kmem_alloc(zone_submap(zsflags), &addr,
4405 ptoa(pages + guard_pages), kmaflags, VM_KERN_MEMORY_ZONE);
4406 }
4407
4408 if (kr != KERN_SUCCESS) {
4409 uint64_t zone_size = 0;
4410 zone_t zone_largest = zone_find_largest(&zone_size);
4411 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4412 "likely due to memory leak in zone [%s%s] "
4413 "(%u%c, %d elements allocated)",
4414 kr, zone_heap_name(z), zone_name(z),
4415 zone_heap_name(zone_largest), zone_name(zone_largest),
4416 mach_vm_size_pretty(zone_size),
4417 mach_vm_size_unit(zone_size),
4418 zone_count_allocated(zone_largest));
4419 }
4420
4421 meta = zone_meta_from_addr(addr);
4422 zone_meta_populate(addr, ptoa(pages + guard_pages));
4423
4424 /*
4425 * Handle the leading guard page, if any
4426 */
4427 if (lead_guard) {
4428 for (uint32_t i = 0; i < chunk_pages; i++) {
4429 meta[i].zm_index = zidx;
4430 meta[i].zm_chunk_len = ZM_PGZ_GUARD;
4431 meta[i].zm_guarded = true;
4432 meta++;
4433 }
4434 }
4435
4436 for (uint32_t run = 0, n = 0; run < runs; run++) {
4437 bool guarded = (guard_mask >> run) & 1;
4438
4439 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4440 meta[n].zm_index = zidx;
4441 meta[n].zm_guarded = guarded;
4442 }
4443 if (guarded) {
4444 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4445 meta[n].zm_index = zidx;
4446 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4447 }
4448 }
4449 }
4450 if (guards) {
4451 os_atomic_add(&zone_guard_pages, guard_pages, relaxed);
4452 }
4453
4454 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4455 if (__improbable(zone_caching_disabled < 0)) {
4456 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4457 chunk_pages, guard_mask);
4458 }
4459 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4460
4461 zone_lock(z);
4462
4463 for (uint32_t run = 0, n = 0; run < runs; run++) {
4464 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4465 n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4466 }
4467 z->z_va_cur += z->z_percpu ? runs : pages;
4468 }
4469
4470 static inline void
4471 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4472 {
4473 #if DEBUG || DEVELOPMENT
4474 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_START,
4475 size, 0, 0, 0);
4476 #else
4477 (void)size;
4478 #endif
4479 }
4480
4481 static inline void
4482 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4483 {
4484 task_t task = current_task_early();
4485 if (pages) {
4486 if (task) {
4487 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4488 }
4489 counter_add(&vm_page_grab_count_kern, pages);
4490 }
4491 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
4492 pages, 0, 0, 0);
4493 }
4494
4495 __attribute__((noinline))
4496 static void
4497 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4498 {
4499 uint64_t wait_start = 0;
4500 long mapped;
4501
4502 sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4503
4504 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4505 return;
4506 }
4507
4508 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4509
4510 /*
4511 * If the zone map is really exhausted, wait on the GC thread,
4512 * donating our priority (which is important because the GC
4513 * thread is at a rather low priority).
4514 */
4515 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
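/* triangular backoff between GC nudges: 1ms, 3ms, 6ms, 10ms, ... */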
4516 uint32_t wait_ms = n * (n + 1) / 2;
4517 uint64_t interval;
4518
4519 if (n == 1) {
4520 wait_start = mach_absolute_time();
4521 } else {
4522 sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4523 }
4524 if (zone_exhausted_timeout > 0 &&
4525 wait_ms > zone_exhausted_timeout) {
4526 panic("zone map exhaustion: waited for %dms "
4527 "(pages: %ld, max: %ld, wanted: %d)",
4528 wait_ms, mapped, zone_pages_wired_max, pgs);
4529 }
4530
4531 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4532 &interval);
4533
4534 lck_spin_lock(&zone_exhausted_lock);
4535 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4536 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4537 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4538
4539 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4540 }
4541 }
4542
4543 static bool
4544 zone_expand_wait_for_pages(bool waited)
4545 {
4546 if (waited) {
4547 return false;
4548 }
4549 #if DEBUG || DEVELOPMENT
4550 if (zalloc_simulate_vm_pressure) {
4551 return false;
4552 }
4553 #endif /* DEBUG || DEVELOPMENT */
4554 return !vm_pool_low();
4555 }
4556
4557 static inline void
4558 zone_expand_async_schedule_if_allowed(zone_t zone)
4559 {
4560 if (zone->z_async_refilling || zone->no_callout) {
4561 return;
4562 }
4563
4564 if (zone_exhausted(zone)) {
4565 return;
4566 }
4567
4568 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4569 return;
4570 }
4571
4572 if (!vm_pool_low() || zone_supports_vm(zone)) {
4573 zone->z_async_refilling = true;
4574 thread_call_enter(&zone_expand_callout);
4575 }
4576 }
4577
4578 __attribute__((noinline))
4579 static bool
4580 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4581 {
4582 struct zone_depot zd;
4583 zone_magazine_t mag = NULL;
4584
4585 if (z->z_depot_size) {
4586 z->z_depot_size = 0;
4587 z->z_depot_cleanup = true;
4588
4589 zone_depot_init(&zd);
4590 zone_depot_trim(z, 0, &zd);
4591
4592 zone_recirc_lock_nopreempt(z);
4593 if (zd.zd_full) {
4594 zone_depot_move_full(&z->z_recirc,
4595 &zd, zd.zd_full, NULL);
4596 }
4597 if (zd.zd_empty) {
4598 zone_depot_move_empty(&z->z_recirc,
4599 &zd, zd.zd_empty, NULL);
4600 }
4601 zone_recirc_unlock_nopreempt(z);
4602 }
4603
4604 zone_recirc_lock_nopreempt(z);
4605 if (z->z_recirc.zd_full) {
4606 mag = zone_depot_pop_head_full(&z->z_recirc, z);
4607 }
4608 zone_recirc_unlock_nopreempt(z);
4609
4610 if (mag) {
4611 zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4612 zone_magazine_free(mag);
4613 }
4614
4615 return mag != NULL;
4616 }
4617
4618 static bool
4619 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4620 {
4621 if (zone->z_elems_free > zone->z_elems_rsv) {
4622 return false;
4623 }
4624 if (!zone_exhausted(zone)) {
4625 return true;
4626 }
4627 if (zone->z_pcpu_cache && zone->z_depot_size) {
4628 if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4629 return false;
4630 }
4631 }
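/* the zone is exhausted: only Z_NOFAIL allocations may keep growing it */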
4632 return (flags & Z_NOFAIL) != 0;
4633 }
4634
4635 static void
4636 zone_wakeup_exhausted_waiters(zone_t z)
4637 {
4638 z->z_exhausted_wait = false;
4639 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, false);
4640 thread_wakeup(&z->z_expander);
4641 }
4642
4643 __attribute__((noinline))
4644 static void
4645 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4646 {
4647 if (z->z_pcpu_cache && z->z_depot_size &&
4648 zalloc_expand_drain_exhausted_caches_locked(z)) {
4649 return;
4650 }
4651
4652 if (!z->z_exhausted_wait) {
4653 zone_recirc_lock_nopreempt(z);
4654 z->z_exhausted_wait = true;
4655 zone_recirc_unlock_nopreempt(z);
4656 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, true);
4657 }
4658
4659 assert_wait(&z->z_expander, TH_UNINT);
4660 zone_unlock(z);
4661 thread_block(THREAD_CONTINUE_NULL);
4662 zone_lock(z);
4663 }
4664
4665 static pmap_mapping_type_t
4666 zone_mapping_type(zone_t z)
4667 {
4668 zone_security_flags_t zsflags = zone_security_config(z);
4669
4670 /*
4671 * If the zone's z_submap_idx is neither Z_SUBMAP_IDX_DATA nor
4672 * Z_SUBMAP_IDX_READ_ONLY, mark the corresponding mapping
4673 * type as PMAP_MAPPING_TYPE_RESTRICTED.
4674 */
4675 switch (zsflags.z_submap_idx) {
4676 case Z_SUBMAP_IDX_DATA:
4677 return PMAP_MAPPING_TYPE_DEFAULT;
4678 case Z_SUBMAP_IDX_READ_ONLY:
4679 return PMAP_MAPPING_TYPE_ROZONE;
4680 default:
4681 return PMAP_MAPPING_TYPE_RESTRICTED;
4682 }
4683 }
4684
4685 static vm_prot_t
4686 zone_page_prot(zone_security_flags_t zsflags)
4687 {
4688 switch (zsflags.z_submap_idx) {
4689 case Z_SUBMAP_IDX_READ_ONLY:
4690 return VM_PROT_READ;
4691 default:
4692 return VM_PROT_READ | VM_PROT_WRITE;
4693 }
4694 }
4695
4696 static void
4697 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4698 {
4699 zone_security_flags_t zsflags = zone_security_config(z);
4700 struct zone_expand ze = {
4701 .ze_thread = current_thread(),
4702 };
4703
4704 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4705 ze.ze_thread->options |= TH_OPT_VMPRIV;
4706 ze.ze_clear_priv = true;
4707 }
4708
4709 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4710 /*
4711 * When the thread is VM privileged,
4712 * vm_page_grab() will call VM_PAGE_WAIT()
4713 * without our knowledge, so unfortunately we
4714 * must assume it will be called.
4715 *
4716 * In practice it's not a big deal because
4717 * Z_NOPAGEWAIT is not really used on zones
4718 * that VM privileged threads are going to expand.
4719 */
4720 ze.ze_pg_wait = true;
4721 ze.ze_vm_priv = true;
4722 }
4723
4724 for (;;) {
4725 if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4726 goto out;
4727 }
4728
4729 if (z->z_expander == NULL) {
4730 z->z_expander = &ze;
4731 break;
4732 }
4733
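/*
 * A VM privileged expander may jump ahead of a regular one and become
 * the new head of the expander list (see "Synchronous expansion" above).
 */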
4734 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4735 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4736 ze.ze_next = z->z_expander;
4737 z->z_expander = &ze;
4738 break;
4739 }
4740
4741 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4742 goto out;
4743 }
4744
4745 z->z_expanding_wait = true;
4746 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4747 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4748 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4749 }
4750
4751 do {
4752 struct zone_page_metadata *meta = NULL;
4753 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4754 vm_page_t page_list = NULL;
4755 vm_offset_t addr = 0;
4756 int waited = 0;
4757
4758 if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4759 __ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4760 continue; /* reevaluate if we really need it */
4761 }
4762
4763 /*
4764 * While we hold the zone lock, look if there's VA we can:
4765 * - complete from partial pages,
4766 * - reuse from the sequester list.
4767 *
4768 * When the page is being populated we pretend we allocated
4769 * an extra element so that zone_gc() can't attempt to free
4770 * the chunk (as it could become empty while we wait for pages).
4771 */
4772 if (zone_pva_is_null(z->z_pageq_va)) {
4773 zone_allocate_va_locked(z, flags);
4774 }
4775
4776 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4777 addr = zone_meta_to_addr(meta);
4778 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4779 cur_pages = meta->zm_page_index;
4780 meta -= cur_pages;
4781 addr -= ptoa(cur_pages);
4782 zone_meta_lock_in_partial(z, meta, cur_pages);
4783 }
4784 zone_unlock(z);
4785
4786 /*
4787 * And now allocate pages to populate our VA.
4788 */
4789 min_pages = z->z_chunk_pages;
4790 #if !KASAN_CLASSIC
4791 if (!z->z_percpu) {
4792 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4793 zone_elem_outer_size(z)));
4794 }
4795 #endif /* !KASAN_CLASSIC */
4796
4797 /*
4798 * Trigger jetsams via VM_pageout GC
4799 * if we're running out of zone memory
4800 */
4801 if (__improbable(zone_map_nearing_exhaustion())) {
4802 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4803 }
4804
4805 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4806
4807 while (pages < z->z_chunk_pages - cur_pages) {
4808 vm_grab_options_t grab_options = VM_PAGE_GRAB_NOPAGEWAIT;
4809 vm_page_t m;
4810
4811 m = vm_page_grab_options(grab_options);
4812
4813 if (m) {
4814 pages++;
4815 m->vmp_snext = page_list;
4816 page_list = m;
4817 vm_page_zero_fill(
4818 m
4819 );
4820 continue;
4821 }
4822
4823 if (pages >= min_pages &&
4824 !zone_expand_wait_for_pages(waited)) {
4825 break;
4826 }
4827
4828 if ((flags & Z_NOPAGEWAIT) == 0) {
4829 /*
4830 * The first time we're about to wait for pages,
4831 * mention that to waiters and wake them all.
4832 *
4833 * Set `ze_pg_wait` in our zone_expand context
4834 * so that waiters who care do not wait again.
4835 */
4836 if (!ze.ze_pg_wait) {
4837 zone_lock(z);
4838 if (z->z_expanding_wait) {
4839 z->z_expanding_wait = false;
4840 wakeup_all_with_inheritor(&z->z_expander,
4841 THREAD_AWAKENED);
4842 }
4843 ze.ze_pg_wait = true;
4844 zone_unlock(z);
4845 }
4846
4847 waited++;
4848 VM_PAGE_WAIT();
4849 continue;
4850 }
4851
4852 /*
4853 * Undo everything and bail out:
4854 *
4855 * - free pages
4856 * - undo the fake allocation if any
4857 * - put the VA back on the VA page queue.
4858 */
4859 vm_page_free_list(page_list, FALSE);
4860 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4861
4862 zone_lock(z);
4863
4864 zone_expand_async_schedule_if_allowed(z);
4865
4866 if (cur_pages) {
4867 zone_meta_unlock_from_partial(z, meta, cur_pages);
4868 }
4869 if (meta) {
4870 zone_meta_queue_push(z, &z->z_pageq_va,
4871 meta + cur_pages);
4872 }
4873 goto page_shortage;
4874 }
4875 vm_object_t object;
4876 object = kernel_object_default;
4877 vm_object_lock(object);
4878
4879 kernel_memory_populate_object_and_unlock(object,
4880 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4881 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4882 zone_page_prot(zsflags), zone_mapping_type(z));
4883
4884 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4885
4886 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4887
4888 /*
4889 * permanent zones only try once,
4890 * the retry loop is in the caller
4891 */
4892 } while (!z->z_permanent && zalloc_needs_refill(z, flags));
4893
4894 page_shortage:
4895 if (z->z_expander == &ze) {
4896 z->z_expander = ze.ze_next;
4897 } else {
4898 assert(z->z_expander->ze_next == &ze);
4899 z->z_expander->ze_next = NULL;
4900 }
4901 if (z->z_expanding_wait) {
4902 z->z_expanding_wait = false;
4903 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4904 }
4905 out:
4906 if (ze.ze_clear_priv) {
4907 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4908 }
4909 }
4910
4911 static void
4912 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4913 {
4914 zone_foreach(z) {
4915 if (z->no_callout) {
4916 /* z_async_refilling will never be set */
4917 continue;
4918 }
4919
4920 if (!z->z_async_refilling) {
4921 /*
4922 * avoid locking all zones, because the one(s)
4923 * we're looking for have been set _before_
4924 * thread_call_enter() was called; if we fail
4925 * to observe the bit, it means the thread-call
4926 * has been "dinged" again and we'll notice it then.
4927 */
4928 continue;
4929 }
4930
4931 zone_lock(z);
4932 if (z->z_self && z->z_async_refilling) {
4933 zone_expand_locked(z, Z_WAITOK);
4934 /*
4935 * clearing _after_ we grow is important,
4936 * so that we avoid waking up the thread call
4937 * while we grow and causing it to run a second time.
4938 */
4939 z->z_async_refilling = false;
4940 }
4941 zone_unlock(z);
4942 }
4943 }
4944
4945 #endif /* !ZALLOC_TEST */
4946 #pragma mark zone jetsam integration
4947 #if !ZALLOC_TEST
4948
4949 /*
4950 * We're being very conservative here and picking a value of 95%. We might need to lower this if
4951 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4952 */
4953 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4954
4955 /*
4956 * Threshold above which largest zones should be included in the panic log
4957 */
4958 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4959
4960 /*
4961 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4962 * where X=zone_map_jetsam_limit.
4963 *
4964 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4965 */
4966 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4967 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
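/* e.g. booting with zone_map_jetsam_limit=90 lowers the trigger to 90% */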
4968
4969 kern_return_t
4970 zone_map_jetsam_set_limit(uint32_t value)
4971 {
4972 if (value <= 0 || value > 100) {
4973 return KERN_INVALID_VALUE;
4974 }
4975
4976 zone_map_jetsam_limit = value;
4977 os_atomic_store(&zone_pages_jetsam_threshold,
4978 zone_pages_wired_max * value / 100, relaxed);
4979 return KERN_SUCCESS;
4980 }
4981
4982 void
4983 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4984 {
4985 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4986 *current_size = ptoa_64(phys_pages);
4987 *capacity = ptoa_64(zone_pages_wired_max);
4988 }
4989
4990 void
4991 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4992 {
4993 zone_t largest_zone = zone_find_largest(zone_size);
4994
4995 /*
4996 * Append kalloc heap name to zone name (if zone is used by kalloc)
4997 */
4998 snprintf(zone_name, zone_name_len, "%s%s",
4999 zone_heap_name(largest_zone), largest_zone->z_name);
5000 }
5001
5002 static bool
5003 zone_map_nearing_threshold(unsigned int threshold)
5004 {
5005 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
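/* integer form of: phys_pages / zone_pages_wired_max > threshold / 100 */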
5006 return phys_pages * 100 > zone_pages_wired_max * threshold;
5007 }
5008
5009 bool
5010 zone_map_nearing_exhaustion(void)
5011 {
5012 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5013
5014 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5015 }
5016
5017
5018 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
5019
5020 /*
5021 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5022 * to walk through the jetsam priority bands and kill processes.
5023 */
5024 static zone_t
5025 kill_process_in_largest_zone(void)
5026 {
5027 pid_t pid = -1;
5028 uint64_t zone_size = 0;
5029 zone_t largest_zone = zone_find_largest(&zone_size);
5030
5031 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5032 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5033 ptoa_64(zone_pages_wired_max),
5034 (uint64_t)zone_submaps_approx_size(),
5035 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5036 zone_map_jetsam_limit);
5037 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5038 largest_zone->z_name, (uintptr_t)zone_size);
5039
5040 /*
5041 * We want to make sure we don't call this function from userspace.
5042 * Or we could end up trying to synchronously kill the process
5043 * whose context we're in, causing the system to hang.
5044 */
5045 assert(current_task() == kernel_task);
5046
5047 /*
5048 * If vm_object_zone is the largest, check to see if the number of
5049 * elements in vm_map_entry_zone is comparable.
5050 *
5051 * If so, consider vm_map_entry_zone as the largest. This lets us target
5052 * a specific process to jetsam to quickly recover from the zone map
5053 * bloat.
5054 */
5055 if (largest_zone == vm_object_zone) {
5056 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5057 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5058 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5059 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5060 largest_zone = vm_map_entry_zone;
5061 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5062 (uintptr_t)zone_size_wired(largest_zone));
5063 }
5064 }
5065
5066 /* TODO: Extend this to check for the largest process in other zones as well. */
5067 if (largest_zone == vm_map_entry_zone) {
5068 pid = find_largest_process_vm_map_entries();
5069 } else {
5070 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5071 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5072 largest_zone->z_name);
5073 }
5074 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5075 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5076 }
5077
5078 return largest_zone;
5079 }
5080
5081 #endif /* !ZALLOC_TEST */
5082 #pragma mark zfree
5083 #if !ZALLOC_TEST
5084
5085 /*!
5086 * @defgroup zfree
5087 * @{
5088 *
5089 * @brief
5090 * The codepath for zone frees.
5091 *
5092 * @discussion
5093 * There are 4 major ways to allocate memory that end up in the zone allocator:
5094 * - @c zfree()
5095 * - @c zfree_percpu()
5096 * - @c kfree*()
5097 * - @c zfree_permanent()
5098 *
5099 * While permanent zones have their own allocation scheme, all other codepaths
5100 * will eventually go through the @c zfree_ext() choking point.
5101 */
5102
5103 __header_always_inline void
5104 zfree_drop(zone_t zone, vm_offset_t addr)
5105 {
5106 vm_offset_t esize = zone_elem_outer_size(zone);
5107 struct zone_page_metadata *meta;
5108 vm_offset_t eidx;
5109
5110 meta = zone_element_resolve(zone, addr, &eidx);
5111
5112 if (!zone_meta_mark_free(meta, eidx)) {
5113 zone_meta_double_free_panic(zone, addr, __func__);
5114 }
5115
5116 vm_offset_t old_size = meta->zm_alloc_size;
5117 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5118 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5119
5120 if (new_size == 0) {
5121 /* whether the page was on the intermediate or all_used queue, move it to free */
5122 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5123 zone->z_wired_empty += meta->zm_chunk_len;
5124 } else if (old_size + esize > max_size) {
5125 /* first free element on page, move from all_used */
5126 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5127 }
5128
5129 if (__improbable(zone->z_exhausted_wait)) {
5130 zone_wakeup_exhausted_waiters(zone);
5131 }
5132 }
5133
5134 __attribute__((noinline))
5135 static void
5136 zfree_item(zone_t zone, vm_offset_t addr)
5137 {
5138 /* transfer preemption count to lock */
5139 zone_lock_nopreempt_check_contention(zone);
5140
5141 zfree_drop(zone, addr);
5142 zone->z_elems_free += 1;
5143
5144 zone_unlock(zone);
5145 }
5146
5147 static void
5148 zfree_cached_depot_recirculate(
5149 zone_t zone,
5150 uint32_t depot_max,
5151 zone_cache_t cache)
5152 {
5153 smr_t smr = zone_cache_smr(cache);
5154 smr_seq_t seq;
5155 uint32_t n;
5156
5157 zone_recirc_lock_nopreempt_check_contention(zone);
5158
5159 n = cache->zc_depot.zd_full;
5160 if (n >= depot_max) {
5161 /*
5162 * If SMR is in use, rotate the entire chunk of magazines.
5163 *
5164 * If the head of the recirculation layer is ready to be
5165 * reused, pull them back to refill a little.
5166 */
5167 seq = zone_depot_move_full(&zone->z_recirc,
5168 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5169
5170 if (smr) {
5171 smr_deferred_advance_commit(smr, seq);
5172 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5173 zone_depot_move_full(&cache->zc_depot,
5174 &zone->z_recirc, depot_max / 2, NULL);
5175 }
5176 }
5177 }
5178
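/* refill the per-CPU depot with empty magazines from the recirculation layer */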
5179 n = depot_max - cache->zc_depot.zd_full;
5180 if (n > zone->z_recirc.zd_empty) {
5181 n = zone->z_recirc.zd_empty;
5182 }
5183 if (n) {
5184 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5185 n, zone);
5186 }
5187
5188 zone_recirc_unlock_nopreempt(zone);
5189 }
5190
5191 static zone_cache_t
5192 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5193 {
5194 zone_magazine_t mag = NULL, tmp = NULL;
5195 smr_t smr = zone_cache_smr(cache);
5196 bool wakeup_exhausted = false;
5197
5198 if (zone->z_recirc.zd_empty == 0) {
5199 mag = zone_magazine_alloc(Z_NOWAIT);
5200 }
5201
5202 zone_recirc_lock_nopreempt_check_contention(zone);
5203
5204 if (mag == NULL && zone->z_recirc.zd_empty) {
5205 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5206 __builtin_assume(mag);
5207 }
5208 if (mag) {
5209 tmp = zone_magazine_replace(cache, mag, true);
5210 if (smr) {
5211 smr_deferred_advance_commit(smr, tmp->zm_seq);
5212 }
5213 if (zone_security_array[zone_index(zone)].z_lifo) {
5214 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5215 } else {
5216 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5217 }
5218
5219 wakeup_exhausted = zone->z_exhausted_wait;
5220 }
5221
5222 zone_recirc_unlock_nopreempt(zone);
5223
5224 if (__improbable(wakeup_exhausted)) {
5225 zone_lock_nopreempt(zone);
5226 if (zone->z_exhausted_wait) {
5227 zone_wakeup_exhausted_waiters(zone);
5228 }
5229 zone_unlock_nopreempt(zone);
5230 }
5231
5232 return mag ? cache : NULL;
5233 }
5234
5235 __attribute__((noinline))
5236 static zone_cache_t
5237 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5238 {
5239 zone_magazine_t mag = NULL, tmp = NULL;
5240 uint32_t depot_max;
5241
5242 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5243 if (depot_max) {
5244 zone_depot_lock_nopreempt(cache);
5245
5246 if (cache->zc_depot.zd_empty == 0) {
5247 zfree_cached_depot_recirculate(zone, depot_max, cache);
5248 }
5249
5250 if (__probable(cache->zc_depot.zd_empty)) {
5251 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5252 __builtin_assume(mag);
5253 } else {
5254 mag = zone_magazine_alloc(Z_NOWAIT);
5255 }
5256 if (mag) {
5257 tmp = zone_magazine_replace(cache, mag, true);
5258 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5259 }
5260
5261 zone_depot_unlock_nopreempt(cache);
5262
5263 return mag ? cache : NULL;
5264 }
5265
5266 return zfree_cached_recirculate(zone, cache);
5267 }
5268
5269 __attribute__((always_inline))
5270 static inline zone_cache_t
5271 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5272 {
5273 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5274
5275 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5276 return cache;
5277 }
5278
5279 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5280 zone_cache_swap_magazines(cache);
5281 return cache;
5282 }
5283
5284 return zfree_cached_trim(zone, cache);
5285 }
5286
5287 __attribute__((always_inline))
5288 static inline zone_cache_t
5289 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5290 {
5291 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5292 size_t idx = cache->zc_free_cur;
5293
5294 if (__probable(idx + 1 < zc_mag_size())) {
5295 return cache;
5296 }
5297
5298 /*
5299 * when SMR is in use, the bucket is tagged early with
5300 * @c smr_deferred_advance(), which costs a full barrier,
5301 * but performs no store.
5302 *
5303 * When zones hit the recirculation layer, the advance is committed,
5304 * under the recirculation lock (see zfree_cached_recirculate()).
5305 *
5306 * When done this way, the zone contention detection mechanism
5307 * will adjust the size of the per-cpu depots gracefully, which
5308 * mechanically reduces the pace of these commits as usage increases.
5309 */
5310
5311 if (__probable(idx + 1 == zc_mag_size())) {
5312 zone_magazine_t mag;
5313
5314 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5315 offsetof(struct zone_magazine, zm_elems));
5316 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5317 return cache;
5318 }
5319
5320 return zfree_cached_trim(zone, cache);
5321 }
5322
5323 __attribute__((always_inline))
5324 static inline vm_offset_t
5325 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5326 {
5327 struct zone_page_metadata *meta;
5328 vm_offset_t offs;
5329
5330 #pragma unused(combined_size)
5331
5332 meta = zone_meta_from_addr(elem);
5333 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5334 zone_invalid_element_panic(zone, elem);
5335 }
5336
5337 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5338 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5339 offs += ptoa(meta->zm_page_index);
5340 }
5341
5342 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5343 zone_invalid_element_panic(zone, elem);
5344 }
5345
5346 #if VM_TAG_SIZECLASSES
5347 if (__improbable(zone->z_uses_tags)) {
5348 vm_tag_t *slot;
5349
5350 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5351 Z_FAST_QUO(offs, zone->z_quo_magic));
5352 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5353 -(long)ZFREE_ELEM_SIZE(combined_size));
5354 *slot = VM_KERN_MEMORY_NONE;
5355 }
5356 #endif /* VM_TAG_SIZECLASSES */
5357
5358 #if KASAN_CLASSIC
5359 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5360 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5361 zone->z_percpu, __builtin_frame_address(0));
5362 #endif
5363
5364 elem = (vm_offset_t)zone_tag_free_element(zone, (caddr_t)elem, ZFREE_ELEM_SIZE(combined_size));
5365 return elem;
5366 }
5367
5368 __attribute__((always_inline))
5369 void *
5370 zcache_mark_invalid(zone_t zone, void *elem)
5371 {
5372 vm_size_t esize = zone_elem_inner_size(zone);
5373
5374 ZFREE_LOG(zone, (vm_offset_t)elem, 1);
5375 return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
5376 }
5377
5378 /*
5379 * The function is noinline when zlog can be used so that the backtracing can
5380 * reliably skip the zfree_ext() and zfree_log()
5381 * boring frames.
5382 */
5383 #if ZALLOC_ENABLE_LOGGING
5384 __attribute__((noinline))
5385 #endif /* ZALLOC_ENABLE_LOGGING */
5386 __mockable void
5387 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5388 {
5389 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5390 vm_offset_t elem = (vm_offset_t)addr;
5391 int cpu;
5392
5393 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5394
5395 ZFREE_LOG(zone, elem, 1);
5396 elem = __zcache_mark_invalid(zone, elem, combined_size);
5397
5398 disable_preemption();
5399 cpu = cpu_number();
5400 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5401
5402 #if KASAN_CLASSIC
5403 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5404 struct kasan_quarantine_result kqr;
5405
5406 kqr = kasan_quarantine(elem, esize);
5407 elem = kqr.addr;
5408 zone = kqr.zone;
5409 if (elem == 0) {
5410 return enable_preemption();
5411 }
5412 }
5413 #endif
5414
5415 if (zone->z_pcpu_cache) {
5416 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
5417
5418 if (__probable(cache)) {
5419 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5420 return enable_preemption();
5421 }
5422 }
5423
5424 return zfree_item(zone, elem);
5425 }
5426
5427 __attribute__((always_inline))
5428 static inline zstack_t
5429 zcache_free_stack_to_cpu(
5430 zone_id_t zid,
5431 zone_cache_t cache,
5432 zstack_t stack,
5433 vm_size_t esize,
5434 zone_cache_ops_t ops,
5435 bool zero)
5436 {
5437 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
5438 vm_offset_t *p;
5439
5440 stack.z_count -= n;
5441 cache->zc_free_cur += n;
5442 p = cache->zc_free_elems + cache->zc_free_cur;
5443
5444 do {
5445 void *o = zstack_pop_no_delta(&stack);
5446
5447 if (ops) {
5448 o = ops->zc_op_mark_invalid(zid, o);
5449 } else {
5450 if (zero) {
5451 vm_memtag_bzero_unchecked(o, esize);
5452 }
5453 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
5454 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
5455 }
5456 *--p = (vm_offset_t)o;
5457 } while (--n > 0);
5458
5459 return stack;
5460 }
5461
5462 __attribute__((always_inline))
5463 static inline void
5464 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5465 {
5466 vm_offset_t elem = (vm_offset_t)addr;
5467 zone_cache_t cache;
5468 vm_size_t esize;
5469 zone_t zone = zone_by_id(zid);
5470 int cpu;
5471
5472 ZFREE_LOG(zone, elem, 1);
5473
5474 disable_preemption();
5475 cpu = cpu_number();
5476 esize = zone_elem_inner_size(zone);
5477 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5478 if (!ops) {
5479 addr = (void *)__zcache_mark_invalid(zone, elem,
5480 ZFREE_PACK_SIZE(esize, esize));
5481 }
5482 cache = zfree_cached_get_pcpu_cache(zone, cpu);
5483 if (__probable(cache)) {
5484 if (ops) {
5485 addr = ops->zc_op_mark_invalid(zid, addr);
5486 }
5487 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5488 enable_preemption();
5489 } else if (ops) {
5490 enable_preemption();
5491 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
5492 ops->zc_op_free(zid, addr);
5493 } else {
5494 zfree_item(zone, elem);
5495 }
5496 }
5497
5498 __attribute__((always_inline))
5499 static inline void
5500 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
5501 {
5502 zone_t zone = zone_by_id(zid);
5503 zone_cache_t cache;
5504 vm_size_t esize;
5505 int cpu;
5506
5507 ZFREE_LOG(zone, stack.z_head, stack.z_count);
5508
5509 disable_preemption();
5510 cpu = cpu_number();
5511 esize = zone_elem_inner_size(zone);
5512 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
5513 stack.z_count * esize;
5514
5515 for (;;) {
5516 cache = zfree_cached_get_pcpu_cache(zone, cpu);
5517 if (__probable(cache)) {
5518 stack = zcache_free_stack_to_cpu(zid, cache,
5519 stack, esize, ops, zero);
5520 enable_preemption();
5521 } else if (ops) {
5522 enable_preemption();
5523 os_atomic_dec(&zone->z_elems_avail, relaxed);
5524 ops->zc_op_free(zid, zstack_pop(&stack));
5525 } else {
5526 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
5527
5528 if (zero) {
5529 vm_memtag_bzero_unchecked((void *)addr, esize);
5530 }
5531 addr = __zcache_mark_invalid(zone, addr,
5532 ZFREE_PACK_SIZE(esize, esize));
5533 zfree_item(zone, addr);
5534 }
5535
5536 if (stack.z_count == 0) {
5537 break;
5538 }
5539
5540 disable_preemption();
5541 cpu = cpu_number();
5542 }
5543 }
5544
5545 void
5546 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5547 {
5548 __builtin_assume(ops != NULL);
5549 zcache_free_1_ext(zid, addr, ops);
5550 }
5551
5552 void
5553 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
5554 {
5555 __builtin_assume(ops != NULL);
5556 zcache_free_n_ext(zid, stack, ops, false);
5557 }
5558
5559 void
5560 (zfree_n)(zone_id_t zid, zstack_t stack)
5561 {
5562 zcache_free_n_ext(zid, stack, NULL, true);
5563 }
5564
5565 void
5566 (zfree_nozero)(zone_id_t zid, void *addr)
5567 {
5568 zcache_free_1_ext(zid, addr, NULL);
5569 }
5570
5571 void
5572 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
5573 {
5574 zcache_free_n_ext(zid, stack, NULL, false);
5575 }
5576
5577 void
5578 (zfree)(zone_t zov, void *addr)
5579 {
5580 zone_t zone = zov->z_self;
5581 zone_stats_t zstats = zov->z_stats;
5582 vm_offset_t esize = zone_elem_inner_size(zone);
5583
5584 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
5585 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
5586 vm_memtag_bzero_unchecked(addr, esize);
5587
5588 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
5589 }
5590
5591 __attribute__((noinline))
5592 void
5593 zfree_percpu(union zone_or_view zov, void *addr)
5594 {
5595 zone_t zone = zov.zov_view->zv_zone;
5596 zone_stats_t zstats = zov.zov_view->zv_stats;
5597 vm_offset_t esize = zone_elem_inner_size(zone);
5598
5599 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
5600 assert(zone->z_percpu);
5601 zpercpu_foreach_cpu(i) {
5602 vm_memtag_bzero_unchecked((char *)addr + ptoa(i), esize);
5603 }
5604 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
5605 }
5606
5607 void
5608 (zfree_id)(zone_id_t zid, void *addr)
5609 {
5610 (zfree)(&zone_array[zid], addr);
5611 }
5612
5613 void
5614 (zfree_ro)(zone_id_t zid, void *addr)
5615 {
5616 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
5617 zone_t zone = zone_by_id(zid);
5618 zone_stats_t zstats = zone->z_stats;
5619 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
5620
5621 #if ZSECURITY_CONFIG(READ_ONLY)
5622 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
5623 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
5624 #else
5625 (void)zid;
5626 bzero(addr, esize);
5627 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
5628 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
5629 }
5630
5631 __attribute__((noinline))
5632 static void
5633 zfree_item_smr(zone_t zone, vm_offset_t addr)
5634 {
5635 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
5636 vm_size_t esize = zone_elem_inner_size(zone);
5637
5638 /*
5639 	 * This path should be taken extremely rarely:
5640 	 * it only happens if we failed to allocate an empty bucket.
5641 */
5642 smr_synchronize(zone_cache_smr(cache));
5643
5644 cache->zc_free((void *)addr, esize);
5645 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
5646
5647 zfree_item(zone, addr);
5648 }
5649
5650 void
5651 (zfree_smr)(zone_t zone, void *addr)
5652 {
5653 vm_offset_t elem = (vm_offset_t)addr;
5654 vm_offset_t esize;
5655 zone_cache_t cache;
5656 int cpu;
5657
5658 ZFREE_LOG(zone, elem, 1);
5659
5660 disable_preemption();
5661 cpu = cpu_number();
5662 #if MACH_ASSERT
5663 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5664 assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
5665 #endif
5666 esize = zone_elem_inner_size(zone);
5667 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5668 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
5669 if (__probable(cache)) {
5670 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5671 enable_preemption();
5672 } else {
5673 zfree_item_smr(zone, elem);
5674 }
5675 }
5676
5677 void
5678 (zfree_id_smr)(zone_id_t zid, void *addr)
5679 {
5680 (zfree_smr)(&zone_array[zid], addr);
5681 }
5682
5683 void
5684 kfree_type_impl_internal(
5685 kalloc_type_view_t kt_view,
5686 void *ptr __unsafe_indexable)
5687 {
5688 zone_t zsig = kt_view->kt_zsig;
5689 zone_t z = kt_view->kt_zv.zv_zone;
5690 struct zone_page_metadata *meta;
5691 zone_id_t zidx_meta;
5692 zone_security_flags_t zsflags_meta;
5693 zone_security_flags_t zsflags_z = zone_security_config(z);
5694 zone_security_flags_t zsflags_zsig;
5695
5696 if (NULL == ptr) {
5697 return;
5698 }
5699
5700 meta = zone_meta_from_addr((vm_offset_t) ptr);
5701 zidx_meta = meta->zm_index;
5702 zsflags_meta = zone_security_array[zidx_meta];
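	/*
	 * Choose where to return the element:
	 *  - data-heap views, or views whose zone actually backs the element,
	 *    free through the view itself;
	 *  - otherwise, when the backing zone is in the same signature
	 *    equivalence class as the view's signature zone (kt_zsig),
	 *    free directly to that backing zone;
	 *  - otherwise, fall back to the view's early zone (kt_zearly).
	 */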
5703
5704 if (zone_is_data_kheap(zsflags_z.z_kheap_id) ||
5705 zone_has_index(z, zidx_meta)) {
5706 return (zfree)(&kt_view->kt_zv, ptr);
5707 }
5708 zsflags_zsig = zone_security_config(zsig);
5709 if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
5710 z = zone_array + zidx_meta;
5711 return (zfree)(z, ptr);
5712 }
5713
5714 return (zfree)(kt_view->kt_zearly, ptr);
5715 }
5716
5717 /*! @} */
5718 #endif /* !ZALLOC_TEST */
5719 #pragma mark zalloc
5720 #if !ZALLOC_TEST
5721
5722 /*!
5723 * @defgroup zalloc
5724 * @{
5725 *
5726 * @brief
5727 * The codepath for zone allocations.
5728 *
5729 * @discussion
5730 * There are 4 major ways to allocate memory that end up in the zone allocator:
5731 * - @c zalloc(), @c zalloc_flags(), ...
5732 * - @c zalloc_percpu()
5733 * - @c kalloc*()
5734 * - @c zalloc_permanent()
5735 *
5736 * While permanent zones have their own allocation scheme, all other codepaths
5737 * will eventually go through the @c zalloc_ext() choking point.
5738 *
5739 * @c zalloc_return() is the final function everyone tail calls into,
5740 * which prepares the element for consumption by the caller and deals with
5741 * common treatment (zone logging, tags, kasan, validation, ...).
5742 */
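
/*
 * Illustrative example only: a typical client round-trip that funnels into
 * zalloc_ext() and zfree_ext().  The variable "example_zone" is hypothetical;
 * a real caller gets its zone from zinit() or a ZONE_DEFINE-style declaration
 * elsewhere.
 *
 *	void *elem;
 *
 *	// Block if needed and request zeroed memory.
 *	elem = zalloc_flags(example_zone, Z_WAITOK | Z_ZERO);
 *	if (elem == NULL) {
 *		return;         // Z_WAITOK without Z_NOFAIL may still fail
 *	}
 *
 *	// ... use the element ...
 *
 *	zfree(example_zone, elem);
 */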
5743
5744 /*!
5745 * @function zalloc_import
5746 *
5747 * @brief
5748  * Import @c n elements into the specified array, the opposite of @c zfree_drop().
5749 *
5750 * @param zone The zone to import elements from
5751 * @param elems The array to import into
5752  * @param n             The number of elements to import. Must be non-zero,
5753 * and smaller than @c zone->z_elems_free.
5754 */
5755 __header_always_inline vm_size_t
5756 zalloc_import(
5757 zone_t zone,
5758 vm_offset_t *elems,
5759 zalloc_flags_t flags,
5760 uint32_t n)
5761 {
5762 vm_offset_t esize = zone_elem_outer_size(zone);
5763 vm_offset_t offs = zone_elem_inner_offs(zone);
5764 zone_stats_t zs;
5765 int cpu = cpu_number();
5766 uint32_t i = 0;
5767
5768 zs = zpercpu_get_cpu(zone->z_stats, cpu);
5769
5770 if (__improbable(zone_caching_disabled < 0)) {
5771 /*
5772 * In the first 10s after boot, mess with
5773 * the scan position in order to make early
5774 		 * allocation patterns less predictable.
5775 */
5776 zone_early_scramble_rr(zone, cpu, zs);
5777 }
5778
5779 do {
5780 vm_offset_t page, eidx, size = 0;
5781 struct zone_page_metadata *meta;
5782
5783 if (!zone_pva_is_null(zone->z_pageq_partial)) {
5784 meta = zone_pva_to_meta(zone->z_pageq_partial);
5785 page = zone_pva_to_addr(zone->z_pageq_partial);
5786 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
5787 meta = zone_pva_to_meta(zone->z_pageq_empty);
5788 page = zone_pva_to_addr(zone->z_pageq_empty);
5789 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
5790 } else {
5791 zone_accounting_panic(zone, "z_elems_free corruption");
5792 }
5793
5794 zone_meta_validate(zone, meta, page);
5795
5796 vm_offset_t old_size = meta->zm_alloc_size;
5797 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5798
5799 do {
5800 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
5801 elems[i++] = page + offs + eidx * esize;
5802 size += esize;
5803 } while (i < n && old_size + size + esize <= max_size);
5804
5805 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
5806
5807 if (new_size + esize > max_size) {
5808 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
5809 } else if (old_size == 0) {
5810 /* remove from free, move to intermediate */
5811 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5812 }
5813 } while (i < n);
5814
5815 n = zone_counter_sub(zone, z_elems_free, n);
5816 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
5817 zone->z_elems_free_min = n;
5818 }
5819
5820 return zone_elem_inner_size(zone);
5821 }
5822
5823 __attribute__((always_inline))
5824 static inline vm_offset_t
5825 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
5826 {
5827 #pragma unused(zone, flags)
5828 #if KASAN_CLASSIC || VM_TAG_SIZECLASSES
5829 vm_offset_t esize = zone_elem_inner_size(zone);
5830 #endif
5831
5832 addr = vm_memtag_load_tag(addr);
5833
5834 #if VM_TAG_SIZECLASSES
5835 if (__improbable(zone->z_uses_tags)) {
5836 struct zone_page_metadata *meta;
5837 vm_offset_t offs;
5838 vm_tag_t *slot;
5839 vm_tag_t tag;
5840
5841 tag = zalloc_flags_get_tag(flags);
5842 meta = zone_meta_from_addr(addr);
5843 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
5844 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5845 offs += ptoa(meta->zm_page_index);
5846 }
5847
5848 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5849 Z_FAST_QUO(offs, zone->z_quo_magic));
5850 *slot = tag;
5851
5852 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
5853 (long)esize);
5854 }
5855 #endif /* VM_TAG_SIZECLASSES */
5856
5857 #if KASAN_CLASSIC
5858 /*
5859 	 * KASAN_CLASSIC integration of kalloc heaps is handled by kalloc_ext().
5860 */
5861 if ((flags & Z_SKIP_KASAN) == 0) {
5862 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
5863 (flags & Z_PCPU), __builtin_frame_address(0));
5864 }
5865 #endif /* KASAN_CLASSIC */
5866
5867 return addr;
5868 }
5869
5870 __attribute__((always_inline))
5871 void *
5872 zcache_mark_valid(zone_t zone, void *addr)
5873 {
5874 addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
5875 ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
5876 return addr;
5877 }
5878
5879 /*!
5880 * @function zalloc_return
5881 *
5882 * @brief
5883 * Performs the tail-end of the work required on allocations before the caller
5884 * uses them.
5885 *
5886 * @discussion
5887 * This function is called without any zone lock held,
5888  * and with preemption restored to the state it had when @c zalloc_ext() was called.
5889 *
5890 * @param zone The zone we're allocating from.
5891 * @param addr The element we just allocated.
5892 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
5893 * @param elem_size The element size for this zone.
5894 */
5895 __attribute__((always_inline))
5896 static struct kalloc_result
5897 zalloc_return(
5898 zone_t zone,
5899 vm_offset_t addr,
5900 zalloc_flags_t flags,
5901 vm_offset_t elem_size)
5902 {
5903 addr = __zcache_mark_valid(zone, addr, flags);
5904 #if ZALLOC_ENABLE_ZERO_CHECK
5905 zalloc_validate_element(zone, addr, elem_size, flags);
5906 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
5907 ZALLOC_LOG(zone, addr, 1);
5908
5909 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
5910 return (struct kalloc_result){ (void *)addr, elem_size };
5911 }
5912
5913 static vm_size_t
5914 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
5915 {
5916 if (esize <= 512) {
5917 return zone_early_thres_mul * page_size / 4;
5918 } else if (esize < 2048) {
5919 return zone_early_thres_mul * esize * 8;
5920 }
5921 return zone_early_thres_mul * zone->z_chunk_elems * esize;
5922 }
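
/*
 * Worked example for the thresholds above, assuming 16K pages and
 * zone_early_thres_mul == 1 (both values purely illustrative):
 *   esize = 256   -> threshold = 16384 / 4 = 4096 bytes
 *   esize = 1024  -> threshold = 1024 * 8  = 8192 bytes
 *   esize = 4096  -> threshold = z_chunk_elems * 4096 (a full chunk)
 */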
5923
5924 __attribute__((noinline))
5925 static struct kalloc_result
5926 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
5927 {
5928 vm_offset_t esize, addr;
5929 zone_stats_t zs;
5930
5931 zone_lock_nopreempt_check_contention(zone);
5932
5933 zs = zpercpu_get(zstats);
5934 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
5935 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
5936 zone_expand_async_schedule_if_allowed(zone);
5937 } else {
5938 zone_expand_locked(zone, flags);
5939 }
5940 if (__improbable(zone->z_elems_free == 0)) {
5941 zs->zs_alloc_fail++;
5942 zone_unlock(zone);
5943 if (__improbable(flags & Z_NOFAIL)) {
5944 zone_nofail_panic(zone);
5945 }
5946 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
5947 return (struct kalloc_result){ };
5948 }
5949 }
5950
5951 esize = zalloc_import(zone, &addr, flags, 1);
5952 zs->zs_mem_allocated += esize;
5953
5954 if (__improbable(!zone_share_always &&
5955 !os_atomic_load(&zs->zs_alloc_not_early, relaxed))) {
5956 if (flags & Z_SET_NOTEARLY) {
5957 vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
5958
5959 if (zs->zs_mem_allocated >= shared_threshold) {
5960 zpercpu_foreach(zs_cpu, zstats) {
5961 os_atomic_store(&zs_cpu->zs_alloc_not_early, 1, relaxed);
5962 }
5963 }
5964 }
5965 }
5966 zone_unlock(zone);
5967
5968 return zalloc_return(zone, addr, flags, esize);
5969 }
5970
5971 static void
5972 zalloc_cached_import(
5973 zone_t zone,
5974 zalloc_flags_t flags,
5975 zone_cache_t cache)
5976 {
5977 uint16_t n_elems = zc_mag_size();
5978
5979 zone_lock_nopreempt(zone);
5980
5981 if (__probable(!zone_caching_disabled &&
5982 zone->z_elems_free > zone->z_elems_rsv / 2)) {
5983 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
5984 zone_expand_async_schedule_if_allowed(zone);
5985 }
5986 if (zone->z_elems_free < n_elems) {
5987 n_elems = (uint16_t)zone->z_elems_free;
5988 }
5989 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
5990 cache->zc_alloc_cur = n_elems;
5991 }
5992
5993 zone_unlock_nopreempt(zone);
5994 }
5995
5996 static void
5997 zalloc_cached_depot_recirculate(
5998 zone_t zone,
5999 uint32_t depot_max,
6000 zone_cache_t cache,
6001 smr_t smr)
6002 {
6003 smr_seq_t seq;
6004 uint32_t n;
6005
6006 zone_recirc_lock_nopreempt_check_contention(zone);
6007
6008 n = cache->zc_depot.zd_empty;
6009 if (n >= depot_max) {
6010 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6011 n - depot_max / 2, NULL);
6012 }
6013
6014 n = cache->zc_depot.zd_full;
6015 if (smr && n) {
6016 /*
6017 * if SMR is in use, it means smr_poll() failed,
6018 * so rotate the entire chunk of magazines in order
6019 * to let the sequence numbers age.
6020 */
6021 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6022 n, NULL);
6023 smr_deferred_advance_commit(smr, seq);
6024 }
6025
6026 n = depot_max - cache->zc_depot.zd_empty;
6027 if (n > zone->z_recirc.zd_full) {
6028 n = zone->z_recirc.zd_full;
6029 }
6030
6031 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6032 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6033 n, zone);
6034 }
6035
6036 zone_recirc_unlock_nopreempt(zone);
6037 }
6038
6039 static void
6040 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6041 {
6042 zone_smr_free_cb_t zc_free = cache->zc_free;
6043 vm_size_t esize = zone_elem_inner_size(z);
6044
6045 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6046 vm_offset_t elem = mag->zm_elems[i];
6047
6048 zc_free((void *)elem, zone_elem_inner_size(z));
6049 elem = __zcache_mark_invalid(z, elem,
6050 ZFREE_PACK_SIZE(esize, esize));
6051 mag->zm_elems[i] = elem;
6052 }
6053 }
6054
6055 static void
6056 zalloc_cached_recirculate(
6057 zone_t zone,
6058 zone_cache_t cache)
6059 {
6060 zone_magazine_t mag = NULL;
6061
6062 zone_recirc_lock_nopreempt_check_contention(zone);
6063
6064 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6065 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6066 if (zone_cache_smr(cache)) {
6067 zalloc_cached_reuse_smr(zone, cache, mag);
6068 }
6069 mag = zone_magazine_replace(cache, mag, false);
6070 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6071 }
6072
6073 zone_recirc_unlock_nopreempt(zone);
6074 }
6075
6076 __attribute__((noinline))
6077 static zone_cache_t
6078 zalloc_cached_prime(
6079 zone_t zone,
6080 zone_cache_ops_t ops,
6081 zalloc_flags_t flags,
6082 zone_cache_t cache)
6083 {
6084 zone_magazine_t mag = NULL;
6085 uint32_t depot_max;
6086 smr_t smr;
6087
6088 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6089 if (depot_max) {
6090 smr = zone_cache_smr(cache);
6091
6092 zone_depot_lock_nopreempt(cache);
6093
6094 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6095 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6096 smr);
6097 }
6098
6099 if (__probable(cache->zc_depot.zd_full)) {
6100 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6101 if (zone_cache_smr(cache)) {
6102 zalloc_cached_reuse_smr(zone, cache, mag);
6103 }
6104 mag = zone_magazine_replace(cache, mag, false);
6105 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6106 }
6107
6108 zone_depot_unlock_nopreempt(cache);
6109 } else if (zone->z_recirc.zd_full) {
6110 zalloc_cached_recirculate(zone, cache);
6111 }
6112
6113 if (__probable(cache->zc_alloc_cur)) {
6114 return cache;
6115 }
6116
6117 if (ops == NULL) {
6118 zalloc_cached_import(zone, flags, cache);
6119 if (__probable(cache->zc_alloc_cur)) {
6120 return cache;
6121 }
6122 }
6123
6124 return NULL;
6125 }
6126
6127 __attribute__((always_inline))
6128 static inline zone_cache_t
6129 zalloc_cached_get_pcpu_cache(
6130 zone_t zone,
6131 zone_cache_ops_t ops,
6132 int cpu,
6133 zalloc_flags_t flags)
6134 {
6135 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6136
6137 if (__probable(cache->zc_alloc_cur != 0)) {
6138 return cache;
6139 }
6140
6141 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6142 zone_cache_swap_magazines(cache);
6143 return cache;
6144 }
6145
6146 return zalloc_cached_prime(zone, ops, flags, cache);
6147 }
6148
6149
6150 /*!
6151 * @function zalloc_ext
6152 *
6153 * @brief
6154 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6155 */
6156 __mockable struct kalloc_result
6157 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6158 {
6159 /*
6160 * KASan uses zalloc() for fakestack, which can be called anywhere.
6161 * However, we make sure these calls can never block.
6162 */
6163 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6164 #if KASAN_FAKESTACK
6165 zone->z_kasan_fakestacks ||
6166 #endif /* KASAN_FAKESTACK */
6167 ml_get_interrupts_enabled() ||
6168 ml_is_quiescing() ||
6169 debug_mode_active(),
6170 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6171
6172 /*
6173 * Make sure Z_NOFAIL was not obviously misused
6174 */
6175 if (flags & Z_NOFAIL) {
6176 assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6177 }
6178
6179 #if VM_TAG_SIZECLASSES
6180 if (__improbable(zone->z_uses_tags)) {
6181 vm_tag_t tag = zalloc_flags_get_tag(flags);
6182
6183 if (flags & Z_VM_TAG_BT_BIT) {
6184 tag = vm_tag_bt() ?: tag;
6185 }
6186 if (tag != VM_KERN_MEMORY_NONE) {
6187 tag = vm_tag_will_update_zone(tag,
6188 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6189 }
6190 if (tag == VM_KERN_MEMORY_NONE) {
6191 zone_security_flags_t zsflags = zone_security_config(zone);
6192
6193 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6194 tag = VM_KERN_MEMORY_KALLOC_DATA;
6195 } else if (zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED) {
6196 tag = VM_KERN_MEMORY_KALLOC_SHARED;
6197 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6198 zsflags.z_kalloc_type) {
6199 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6200 } else {
6201 tag = VM_KERN_MEMORY_KALLOC;
6202 }
6203 }
6204 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6205 }
6206 #endif /* VM_TAG_SIZECLASSES */
6207
6208 disable_preemption();
6209
6210 #if ZALLOC_ENABLE_ZERO_CHECK
6211 if (zalloc_skip_zero_check()) {
6212 flags |= Z_NOZZC;
6213 }
6214 #endif
6215
6216 if (zone->z_pcpu_cache) {
6217 zone_cache_t cache;
6218 vm_offset_t index, addr, esize;
6219 int cpu = cpu_number();
6220
6221 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6222 if (__probable(cache)) {
6223 esize = zone_elem_inner_size(zone);
6224 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6225 index = --cache->zc_alloc_cur;
6226 addr = cache->zc_alloc_elems[index];
6227 cache->zc_alloc_elems[index] = 0;
6228 enable_preemption();
6229 return zalloc_return(zone, addr, flags, esize);
6230 }
6231 }
6232
6233 __attribute__((musttail))
6234 return zalloc_item(zone, zstats, flags);
6235 }
6236
6237 __attribute__((always_inline))
6238 static inline zstack_t
6239 zcache_alloc_stack_from_cpu(
6240 zone_id_t zid,
6241 zone_cache_t cache,
6242 zstack_t stack,
6243 uint32_t n,
6244 zone_cache_ops_t ops)
6245 {
6246 vm_offset_t *p;
6247
6248 n = MIN(n, cache->zc_alloc_cur);
6249 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6250 cache->zc_alloc_cur -= n;
6251 stack.z_count += n;
6252
6253 do {
6254 vm_offset_t e = *--p;
6255
6256 *p = 0;
6257 if (ops) {
6258 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6259 } else {
6260 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6261 }
6262 zstack_push_no_delta(&stack, (void *)e);
6263 } while (--n > 0);
6264
6265 return stack;
6266 }
6267
6268 __attribute__((noinline))
6269 static zstack_t
6270 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6271 {
6272 zone_t zone = zone_by_id(zid);
6273 zone_stats_t zstats = zone->z_stats;
6274 int cpu;
6275
6276 count -= stack.z_count;
6277
6278 disable_preemption();
6279 cpu = cpu_number();
6280 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6281 count * zone_elem_inner_size(zone);
6282 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6283 enable_preemption();
6284
6285 return stack;
6286 }
6287
6288 #define ZCACHE_ALLOC_RETRY ((void *)-1)
6289
6290 __attribute__((noinline))
6291 static void *
6292 zcache_alloc_one(
6293 zone_id_t zid,
6294 zalloc_flags_t flags,
6295 zone_cache_ops_t ops)
6296 {
6297 zone_t zone = zone_by_id(zid);
6298 void *o;
6299
6300 /*
6301 * First try to allocate in rudimentary zones without ever going into
6302 * __ZONE_EXHAUSTED_AND_WAITING_HARD__() by clearing Z_NOFAIL.
6303 */
6304 enable_preemption();
6305 o = ops->zc_op_alloc(zid, flags & ~Z_NOFAIL);
6306 if (__probable(o)) {
6307 os_atomic_inc(&zone->z_elems_avail, relaxed);
6308 } else if (__probable(flags & Z_NOFAIL)) {
6309 zone_cache_t cache;
6310 vm_offset_t index;
6311 int cpu;
6312
6313 zone_lock(zone);
6314
6315 cpu = cpu_number();
6316 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6317 o = ZCACHE_ALLOC_RETRY;
6318 if (__probable(cache)) {
6319 index = --cache->zc_alloc_cur;
6320 o = (void *)cache->zc_alloc_elems[index];
6321 cache->zc_alloc_elems[index] = 0;
6322 o = ops->zc_op_mark_valid(zid, o);
6323 } else if (zone->z_elems_free == 0) {
6324 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone);
6325 }
6326
6327 zone_unlock(zone);
6328 }
6329
6330 return o;
6331 }
6332
6333 __attribute__((always_inline))
6334 static zstack_t
6335 zcache_alloc_n_ext(
6336 zone_id_t zid,
6337 uint32_t count,
6338 zalloc_flags_t flags,
6339 zone_cache_ops_t ops)
6340 {
6341 zstack_t stack = { };
6342 zone_cache_t cache;
6343 zone_t zone;
6344 int cpu;
6345
6346 disable_preemption();
6347 cpu = cpu_number();
6348 zone = zone_by_id(zid);
6349 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6350 count * zone_elem_inner_size(zone);
6351
6352 for (;;) {
6353 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6354 if (__probable(cache)) {
6355 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6356 count - stack.z_count, ops);
6357 enable_preemption();
6358 } else {
6359 void *o;
6360
6361 if (ops) {
6362 o = zcache_alloc_one(zid, flags, ops);
6363 } else {
6364 o = zalloc_item(zone, zone->z_stats, flags).addr;
6365 }
6366 if (__improbable(o == NULL)) {
6367 return zcache_alloc_fail(zid, stack, count);
6368 }
6369 if (ops == NULL || o != ZCACHE_ALLOC_RETRY) {
6370 zstack_push(&stack, o);
6371 }
6372 }
6373
6374 if (stack.z_count == count) {
6375 break;
6376 }
6377
6378 disable_preemption();
6379 cpu = cpu_number();
6380 }
6381
6382 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6383
6384 return stack;
6385 }
6386
6387 zstack_t
6388 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6389 {
6390 return zcache_alloc_n_ext(zid, count, flags, NULL);
6391 }
6392
6393 zstack_t
6394 (zcache_alloc_n)(
6395 zone_id_t zid,
6396 uint32_t count,
6397 zalloc_flags_t flags,
6398 zone_cache_ops_t ops)
6399 {
6400 __builtin_assume(ops != NULL);
6401 return zcache_alloc_n_ext(zid, count, flags, ops);
6402 }
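
/*
 * Illustrative sketch of the batched entry points above; the zone id
 * "ZID_EXAMPLE" is hypothetical.  A caller drains the returned stack with
 * zstack_pop(), and can hand leftovers back in bulk with zfree_n().
 *
 *	zstack_t batch = zalloc_n(ZID_EXAMPLE, 8, Z_WAITOK);
 *
 *	while (batch.z_count > 0) {
 *		void *elem = zstack_pop(&batch);
 *		// ... initialize and publish @c elem ...
 *	}
 */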
6403
6404 __attribute__((always_inline))
6405 void *
6406 zalloc(zone_t zov)
6407 {
6408 return zalloc_flags(zov, Z_WAITOK);
6409 }
6410
6411 __attribute__((always_inline))
6412 void *
6413 zalloc_noblock(zone_t zov)
6414 {
6415 return zalloc_flags(zov, Z_NOWAIT);
6416 }
6417
6418 void *
6419 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
6420 {
6421 zone_t zone = zov->z_self;
6422 zone_stats_t zstats = zov->z_stats;
6423
6424 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6425 assert(!zone->z_percpu && !zone->z_permanent);
6426 return zalloc_ext(zone, zstats, flags).addr;
6427 }
6428
6429 __attribute__((always_inline))
6430 void *
6431 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6432 {
6433 return (zalloc_flags)(zone_by_id(zid), flags);
6434 }
6435
6436 void *
6437 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6438 {
6439 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6440 zone_t zone = zone_by_id(zid);
6441 zone_stats_t zstats = zone->z_stats;
6442 struct kalloc_result kr;
6443
6444 kr = zalloc_ext(zone, zstats, flags);
6445 #if ZSECURITY_CONFIG(READ_ONLY) && !__BUILDING_XNU_LIBRARY__ /* zalloc mocks don't create ro memory */
6446 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6447 if (kr.addr) {
6448 zone_require_ro(zid, kr.size, kr.addr);
6449 }
6450 #endif
6451 return kr.addr;
6452 }
6453
6454 #if ZSECURITY_CONFIG(READ_ONLY)
6455
6456 __attribute__((always_inline))
6457 static bool
6458 from_current_stack(vm_offset_t addr, vm_size_t size)
6459 {
6460 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6461 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
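	/*
	 * This relies on kernel stacks being kernel_stack_size-aligned:
	 * rounding the current frame address up to the next such boundary
	 * yields the top of the running stack, so [start, end) bounds it.
	 */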
6462
6463 addr = vm_memtag_canonicalize_kernel(addr);
6464
6465 return (addr >= start) && (addr + size < end);
6466 }
6467
6468 /*
6469  * Check if an address is from const memory, i.e. TEXT or DATA CONST segments,
6470 * or the SECURITY_READ_ONLY_LATE section.
6471 */
6472 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
6473 __attribute__((always_inline))
6474 static bool
6475 from_const_memory(const vm_offset_t addr, vm_size_t size)
6476 {
6477 return rorgn_contains(addr, size, true);
6478 }
6479 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
6480 __attribute__((always_inline))
6481 static bool
6482 from_const_memory(const vm_offset_t addr, vm_size_t size)
6483 {
6484 #pragma unused(addr, size)
6485 return true;
6486 }
6487 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
6488
6489 __abortlike
6490 static void
6491 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6492 const vm_offset_t src, vm_size_t src_size)
6493 {
6494 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
6495 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
6496 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
6497 extern vm_offset_t rorgn_begin;
6498 extern vm_offset_t rorgn_end;
6499 #else
6500 vm_offset_t const rorgn_begin = 0;
6501 vm_offset_t const rorgn_end = 0;
6502 #endif
6503
6504 if (from_ro_map(src, src_size)) {
6505 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6506 zone_t dst_zone = &zone_array[zid];
6507 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
6508 		    " (expected: %s, actual: %s)", (void *)src, elem, src_zone->z_name,
6509 dst_zone->z_name);
6510 }
6511
6512 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
6513 "current stack (%p - %p) or const memory (phys %p - %p)",
6514 (void *)src, (void*)kvtophys(src),
6515 (void *)zone_info.zi_ro_range.min_address,
6516 (void *)zone_info.zi_ro_range.max_address,
6517 (void *)stack_start, (void *)stack_end,
6518 (void *)rorgn_begin, (void *)rorgn_end);
6519 }
6520
6521 __attribute__((always_inline))
6522 static void
6523 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6524 const vm_offset_t src, vm_size_t src_size)
6525 {
6526 if (from_current_stack(src, src_size) ||
6527 (from_ro_map(src, src_size) &&
6528 zid == zone_index_from_ptr((void *)src)) ||
6529 from_const_memory(src, src_size)) {
6530 return;
6531 }
6532 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6533 }
6534
6535 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6536
6537 __mockable __attribute__((noinline))
6538 void
6539 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
6540 const void *new_data, vm_size_t new_data_size)
6541 {
6542 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6543
6544 #if ZSECURITY_CONFIG(READ_ONLY)
6545 bool skip_src_check = false;
6546
6547 /*
6548 	 * The OSEntitlements RO-zone is treated a little differently. For more
6549 * information: rdar://100518485.
6550 */
6551 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
6552 code_signing_config_t cs_config = 0;
6553
6554 code_signing_configuration(NULL, &cs_config);
6555 if (cs_config & CS_CONFIG_CSM_ENABLED) {
6556 skip_src_check = true;
6557 }
6558 }
6559
6560 if (skip_src_check == false) {
6561 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
6562 new_data_size);
6563 }
6564 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
6565 (vm_offset_t) new_data, new_data_size);
6566 #else
6567 (void)zid;
6568 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
6569 #endif
6570 }
6571
6572 __attribute__((noinline))
6573 uint64_t
6574 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
6575 zro_atomic_op_t op, uint64_t value)
6576 {
6577 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6578
6579 #if ZSECURITY_CONFIG(READ_ONLY)
6580 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
6581 #else
6582 (void)zid;
6583 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
6584 #endif
6585 return value;
6586 }
6587
6588 void
6589 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
6590 {
6591 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6592 #if ZSECURITY_CONFIG(READ_ONLY)
6593 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
6594 #else
6595 (void)zid;
6596 bzero((void *)((uintptr_t)elem + offset), size);
6597 #endif
6598 }
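
/*
 * Illustrative sketch of the read-only zone API above.  The zone id
 * "ZONE_ID_EXAMPLE_RO" and struct "ro_blob" are hypothetical; real users
 * own a zone id in the ZONE_ID__FIRST_RO..ZONE_ID__LAST_RO range.
 *
 *	struct ro_blob { uint64_t flags; };
 *	struct ro_blob *blob;
 *	uint64_t new_flags = 1;
 *
 *	blob = zalloc_ro(ZONE_ID_EXAMPLE_RO, Z_WAITOK | Z_NOFAIL);
 *
 *	// Mutations must go through the pmap-backed helpers; direct stores
 *	// to the element fault when ZSECURITY_CONFIG(READ_ONLY) is enabled.
 *	zalloc_ro_mut(ZONE_ID_EXAMPLE_RO, blob,
 *	    offsetof(struct ro_blob, flags), &new_flags, sizeof(new_flags));
 *
 *	zfree_ro(ZONE_ID_EXAMPLE_RO, blob);
 */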
6599
6600 /*
6601 * This function will run in the PPL and needs to be robust
6602 * against an attacker with arbitrary kernel write.
6603 */
6604
6605 #if ZSECURITY_CONFIG(READ_ONLY) && !defined(__BUILDING_XNU_LIBRARY__)
6606
6607 __abortlike
6608 static void
6609 zone_id_require_ro_panic(zone_id_t zid, void *addr)
6610 {
6611 struct zone_size_params p = zone_ro_size_params[zid];
6612 vm_offset_t elem = (vm_offset_t)addr;
6613 uint32_t zindex;
6614 zone_t other;
6615 zone_t zone = &zone_array[zid];
6616
6617 if (!from_ro_map(addr, 1)) {
6618 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
6619 }
6620
6621 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
6622 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
6623 }
6624
6625 zindex = zone_index_from_ptr(addr);
6626 other = &zone_array[zindex];
6627 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
6628 panic("zone_require_ro failed: invalid zone index %d "
6629 "(addr: %p, expected: %s%s)", zindex,
6630 addr, zone_heap_name(zone), zone->z_name);
6631 } else {
6632 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
6633 "(addr: %p, expected: %s%s)",
6634 zindex, zone_heap_name(other), other->z_name,
6635 addr, zone_heap_name(zone), zone->z_name);
6636 }
6637 }
6638
6639 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6640
6641 __attribute__((always_inline))
6642 void
6643 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
6644 {
6645 #if ZSECURITY_CONFIG(READ_ONLY) && !defined(__BUILDING_XNU_LIBRARY__) \
6646 /* can't do this in user-mode because there's no zones submap */
6647 struct zone_size_params p = zone_ro_size_params[zid];
6648 vm_offset_t elem = (vm_offset_t)addr;
6649
6650 if (!from_ro_map(addr, 1) ||
6651 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
6652 zid != zone_meta_from_addr(elem)->zm_index) {
6653 zone_id_require_ro_panic(zid, addr);
6654 }
6655 #else
6656 #pragma unused(zid, addr)
6657 #endif
6658 }
6659
6660 void *
6661 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
6662 {
6663 zone_t zone = zov.zov_view->zv_zone;
6664 zone_stats_t zstats = zov.zov_view->zv_stats;
6665
6666 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6667 assert(zone->z_percpu);
6668 flags |= Z_PCPU;
6669 return zalloc_ext(zone, zstats, flags).addr;
6670 }
6671
6672 static void *
6673 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
6674 {
6675 struct zone_page_metadata *page_meta;
6676 vm_offset_t offs, addr;
6677 zone_pva_t pva;
6678
6679 assert(ml_get_interrupts_enabled() ||
6680 ml_is_quiescing() ||
6681 debug_mode_active() ||
6682 startup_phase < STARTUP_SUB_EARLY_BOOT);
6683
6684 size = (size + mask) & ~mask;
6685 assert(size <= PAGE_SIZE);
6686
6687 zone_lock(zone);
6688 assert(zone->z_self == zone);
6689
6690 for (;;) {
6691 pva = zone->z_pageq_partial;
6692 while (!zone_pva_is_null(pva)) {
6693 page_meta = zone_pva_to_meta(pva);
6694 if (page_meta->zm_bump + size <= PAGE_SIZE) {
6695 goto found;
6696 }
6697 pva = page_meta->zm_page_next;
6698 }
6699
6700 zone_expand_locked(zone, Z_WAITOK);
6701 }
6702
6703 found:
6704 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
6705 page_meta->zm_bump = (uint16_t)(offs + size);
6706 page_meta->zm_alloc_size += size;
6707 zone->z_elems_free -= size;
6708 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
6709
6710 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
6711 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
6712 }
6713
6714 zone_unlock(zone);
6715
6716 if (zone->z_tbi_tag) {
6717 addr = vm_memtag_load_tag(offs + zone_pva_to_addr(pva));
6718 } else {
6719 addr = offs + zone_pva_to_addr(pva);
6720 }
6721
6722 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6723 return (void *)addr;
6724 }
6725
6726 static void *
6727 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
6728 {
6729 vm_offset_t addr;
6730
6731 kernel_memory_allocate(kernel_map, &addr, size, mask,
6732 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
6733
6734 return (void *)addr;
6735 }
6736
6737 __mockable void *
6738 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
6739 {
6740 if (size <= PAGE_SIZE) {
6741 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
6742 return _zalloc_permanent(zone, size, mask);
6743 }
6744 return _zalloc_permanent_large(size, mask, tag);
6745 }
6746
6747 __mockable void *
6748 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
6749 {
6750 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
6751 return _zalloc_permanent(zone, size, mask);
6752 }
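
/*
 * Illustrative sketch, with hypothetical names: allocating a never-freed
 * table through zalloc_permanent_tag().  The mask argument is an alignment
 * mask (alignment - 1), as consumed by _zalloc_permanent() above.
 *
 *	struct example_table *tbl;
 *
 *	tbl = zalloc_permanent_tag(sizeof(struct example_table),
 *	    _Alignof(struct example_table) - 1, VM_KERN_MEMORY_ZONE);
 *	// There is no matching free: permanent allocations last until reboot.
 */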
6753
6754 /*! @} */
6755 #endif /* !ZALLOC_TEST */
6756 #pragma mark zone GC / trimming
6757 #if !ZALLOC_TEST
6758
6759 static thread_call_data_t zone_trim_callout;
6760 EVENT_DEFINE(ZONE_EXHAUSTED);
6761
6762 static void
6763 zone_reclaim_chunk(
6764 zone_t z,
6765 struct zone_page_metadata *meta,
6766 uint32_t free_count)
6767 {
6768 vm_address_t page_addr;
6769 vm_size_t size_to_free;
6770 uint32_t bitmap_ref;
6771 uint32_t page_count;
6772 zone_security_flags_t zsflags = zone_security_config(z);
6773 bool sequester = !z->z_destroyed;
6774 bool oob_guard = false;
6775
6776 if (zone_submap_is_sequestered(zsflags)) {
6777 /*
6778 * If the entire map is sequestered, we can't return the VA.
6779 * It stays pinned to the zone forever.
6780 */
6781 sequester = true;
6782 }
6783
6784 zone_meta_queue_pop(z, &z->z_pageq_empty);
6785
6786 page_addr = zone_meta_to_addr(meta);
6787 page_count = meta->zm_chunk_len;
6788 oob_guard = meta->zm_guarded;
6789
6790 if (meta->zm_alloc_size) {
6791 zone_metadata_corruption(z, meta, "alloc_size");
6792 }
6793 if (z->z_percpu) {
6794 if (page_count != 1) {
6795 zone_metadata_corruption(z, meta, "page_count");
6796 }
6797 size_to_free = ptoa(z->z_chunk_pages);
6798 zone_remove_wired_pages(z, z->z_chunk_pages);
6799 } else {
6800 if (page_count > z->z_chunk_pages) {
6801 zone_metadata_corruption(z, meta, "page_count");
6802 }
6803 if (page_count < z->z_chunk_pages) {
6804 /* Dequeue non populated VA from z_pageq_va */
6805 zone_meta_remqueue(z, meta + page_count);
6806 }
6807 size_to_free = ptoa(page_count);
6808 zone_remove_wired_pages(z, page_count);
6809 }
6810
6811 zone_counter_sub(z, z_elems_free, free_count);
6812 zone_counter_sub(z, z_elems_avail, free_count);
6813 zone_counter_sub(z, z_wired_empty, page_count);
6814 zone_counter_sub(z, z_wired_cur, page_count);
6815
6816 if (z->z_pcpu_cache == NULL) {
6817 if (z->z_elems_free_min < free_count) {
6818 z->z_elems_free_min = 0;
6819 } else {
6820 z->z_elems_free_min -= free_count;
6821 }
6822 }
6823 if (z->z_elems_free_wma < free_count) {
6824 z->z_elems_free_wma = 0;
6825 } else {
6826 z->z_elems_free_wma -= free_count;
6827 }
6828
6829 bitmap_ref = 0;
6830 if (sequester) {
6831 if (meta->zm_inline_bitmap) {
6832 for (int i = 0; i < meta->zm_chunk_len; i++) {
6833 meta[i].zm_bitmap = 0;
6834 }
6835 } else {
6836 bitmap_ref = meta->zm_bitmap;
6837 meta->zm_bitmap = 0;
6838 }
6839 meta->zm_chunk_len = 0;
6840 } else {
6841 if (!meta->zm_inline_bitmap) {
6842 bitmap_ref = meta->zm_bitmap;
6843 }
6844 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
6845 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
6846 }
6847
6848 #if CONFIG_ZLEAKS
6849 if (__improbable(zleak_should_disable_for_zone(z) &&
6850 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
6851 thread_call_enter(&zone_leaks_callout);
6852 }
6853 #endif /* CONFIG_ZLEAKS */
6854
6855 zone_unlock(z);
6856
6857 if (bitmap_ref) {
6858 zone_bits_free(bitmap_ref);
6859 }
6860
6861 /* Free the pages for metadata and account for them */
6862 #if KASAN_CLASSIC
6863 if (z->z_percpu) {
6864 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
6865 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
6866 zone_elem_outer_size(z),
6867 zone_elem_outer_offs(z),
6868 zone_elem_redzone(z));
6869 }
6870 } else {
6871 kasan_zmem_remove(page_addr, size_to_free,
6872 zone_elem_outer_size(z),
6873 zone_elem_outer_offs(z),
6874 zone_elem_redzone(z));
6875 }
6876 #endif /* KASAN_CLASSIC */
6877
6878 if (sequester) {
6879 kma_flags_t flags = zone_kma_flags(z, zsflags, 0) | KMA_KOBJECT;
6880 kernel_memory_depopulate(page_addr, size_to_free,
6881 flags, VM_KERN_MEMORY_ZONE);
6882 } else {
6883 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
6884 kmem_free(zone_submap(zsflags), page_addr,
6885 ptoa(z->z_chunk_pages + oob_guard));
6886 if (oob_guard) {
6887 os_atomic_dec(&zone_guard_pages, relaxed);
6888 }
6889 }
6890
6891 thread_yield_to_preemption();
6892
6893 zone_lock(z);
6894
6895 if (sequester) {
6896 zone_meta_queue_push(z, &z->z_pageq_va, meta);
6897 }
6898 }
6899
6900 static void
6901 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
6902 {
6903 z_debug_assert(n <= zc_mag_size());
6904
6905 for (uint16_t i = 0; i < n; i++) {
6906 vm_offset_t addr = elems[i];
6907 elems[i] = 0;
6908 zfree_drop(z, addr);
6909 }
6910
6911 z->z_elems_free += n;
6912 }
6913
6914 static void
6915 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
6916 {
6917 z_debug_assert(n <= zc_mag_size());
6918 zone_cache_ops_t ops = zcache_ops[zid];
6919
6920 for (uint16_t i = 0; i < n; i++) {
6921 vm_offset_t addr = elems[i];
6922 elems[i] = 0;
6923 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
6924 ops->zc_op_free(zid, (void *)addr);
6925 }
6926
6927 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
6928 }
6929
6930 static void
6931 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
6932 {
6933 zpercpu_foreach(zc, z->z_pcpu_cache) {
6934 zone_depot_lock(zc);
6935
6936 if (zc->zc_depot.zd_full > (target + 1) / 2) {
6937 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
6938 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
6939 }
6940
6941 if (zc->zc_depot.zd_empty > target / 2) {
6942 uint32_t n = zc->zc_depot.zd_empty - target / 2;
6943 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
6944 }
6945
6946 zone_depot_unlock(zc);
6947 }
6948 }
6949
6950 __enum_decl(zone_reclaim_mode_t, uint32_t, {
6951 ZONE_RECLAIM_TRIM,
6952 ZONE_RECLAIM_DRAIN,
6953 ZONE_RECLAIM_DESTROY,
6954 });
6955
6956 static void
6957 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
6958 {
6959 uint32_t depot_max = 0;
6960 bool cleanup = mode != ZONE_RECLAIM_TRIM;
6961
6962 if (z->z_depot_cleanup) {
6963 z->z_depot_cleanup = false;
6964 depot_max = z->z_depot_size;
6965 cleanup = true;
6966 }
6967
6968 if (cleanup) {
6969 zone_depot_trim(z, depot_max, zd);
6970 }
6971
6972 if (mode == ZONE_RECLAIM_DESTROY) {
6973 zpercpu_foreach(zc, z->z_pcpu_cache) {
6974 zone_reclaim_elements(z, zc->zc_alloc_cur,
6975 zc->zc_alloc_elems);
6976 zone_reclaim_elements(z, zc->zc_free_cur,
6977 zc->zc_free_elems);
6978 zc->zc_alloc_cur = zc->zc_free_cur = 0;
6979 }
6980
6981 z->z_recirc_empty_min = 0;
6982 z->z_recirc_empty_wma = 0;
6983 z->z_recirc_full_min = 0;
6984 z->z_recirc_full_wma = 0;
6985 z->z_recirc_cont_cur = 0;
6986 z->z_recirc_cont_wma = 0;
6987 }
6988 }
6989
6990 static void
6991 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
6992 {
6993 assert(zd->zd_empty == 0);
6994 assert(zd->zd_full == 0);
6995
6996 zone_recirc_lock_nopreempt(z);
6997
6998 *zd = z->z_recirc;
6999 if (zd->zd_full == 0) {
7000 zd->zd_tail = &zd->zd_head;
7001 }
7002 zone_depot_init(&z->z_recirc);
7003 z->z_recirc_empty_min = 0;
7004 z->z_recirc_empty_wma = 0;
7005 z->z_recirc_full_min = 0;
7006 z->z_recirc_full_wma = 0;
7007
7008 zone_recirc_unlock_nopreempt(z);
7009 }
7010
7011 static void
7012 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7013 {
7014 for (;;) {
7015 uint64_t maxtime = mach_continuous_speculative_time() +
7016 zc_free_batch_timeout();
7017 uint32_t budget = zc_free_batch_size();
7018 uint32_t count;
7019 bool done = true;
7020
7021 zone_recirc_lock_nopreempt(z);
7022 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7023 z->z_recirc_empty_min);
7024 assert(count <= z->z_recirc.zd_empty);
7025
7026 if (count > budget) {
7027 count = budget;
7028 done = false;
7029 }
7030 if (count) {
7031 budget -= count;
7032 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7033 z->z_recirc_empty_min -= count;
7034 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7035 }
7036
7037 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7038 z->z_recirc_full_min);
7039 assert(count <= z->z_recirc.zd_full);
7040
7041 if (count > budget) {
7042 count = budget;
7043 done = false;
7044 }
7045 if (count) {
7046 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7047 z->z_recirc_full_min -= count;
7048 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7049 }
7050
7051 zone_recirc_unlock_nopreempt(z);
7052
7053 if (done) {
7054 return;
7055 }
7056
7057 if (mach_continuous_speculative_time() < maxtime) {
7058 continue;
7059 }
7060
7061 /*
7062 * We have held preemption disabled for too long. Drop and
7063 * retake the lock to allow a pending preemption to occur.
7064 */
7065 #if SCHED_HYGIENE_DEBUG
7066 abandon_preemption_disable_measurement();
7067 #endif
7068 zone_unlock(z);
7069 zone_lock(z);
7070 maxtime = mach_continuous_speculative_time() +
7071 zc_free_batch_timeout();
7072 }
7073 }
7074
7075 /*!
7076 * @function zone_reclaim
7077 *
7078 * @brief
7079  * Drains or trims the zone.
7080 *
7081 * @discussion
7082  * Draining the zone frees all of its elements.
7083 *
7084 * Trimming the zone tries to respect the working set size, and avoids draining
7085 * the depot when it's not necessary.
7086 *
7087 * @param z The zone to reclaim from
7088 * @param mode The purpose of this reclaim.
7089 */
7090 static void
7091 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7092 {
7093 struct zone_depot zd;
7094
7095 zone_depot_init(&zd);
7096
7097 zone_lock(z);
7098
7099 if (mode == ZONE_RECLAIM_DESTROY) {
7100 if (!z->z_destructible || z->z_elems_rsv) {
7101 panic("zdestroy: Zone %s%s isn't destructible",
7102 zone_heap_name(z), z->z_name);
7103 }
7104
7105 if (!z->z_self || z->z_expander ||
7106 z->z_async_refilling || z->z_expanding_wait) {
7107 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7108 zone_heap_name(z), z->z_name);
7109 }
7110
7111 #if !KASAN_CLASSIC
7112 /*
7113 * Unset the valid bit. We'll hit an assert failure on further
7114 * operations on this zone, until zinit() is called again.
7115 *
7116 * Leave the zone valid for KASan as we will see zfree's on
7117 * quarantined free elements even after the zone is destroyed.
7118 */
7119 z->z_self = NULL;
7120 #endif
7121 z->z_destroyed = true;
7122 } else if (z->z_destroyed) {
7123 return zone_unlock(z);
7124 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7125 /* If the zone is under its reserve level, leave it alone. */
7126 return zone_unlock(z);
7127 }
7128
7129 if (z->z_pcpu_cache) {
7130 zone_magazine_t mag;
7131 uint32_t freed = 0;
7132
7133 /*
7134 * This is all done with the zone lock held on purpose.
7135 * The work here is O(ncpu), which should still be short.
7136 *
7137 * We need to keep the lock held until we have reclaimed
7138 * at least a few magazines, otherwise if the zone has no
7139 * free elements outside of the depot, a thread performing
7140 	 * a concurrent allocation could try to grow the zone
7141 * while we're trying to drain it.
7142 */
7143 if (mode == ZONE_RECLAIM_TRIM) {
7144 zone_reclaim_recirc_trim(z, &zd);
7145 } else {
7146 zone_reclaim_recirc_drain(z, &zd);
7147 }
7148 zone_reclaim_pcpu(z, mode, &zd);
7149
7150 if (z->z_chunk_elems) {
7151 uint64_t maxtime = mach_continuous_speculative_time() +
7152 zc_free_batch_timeout();
7153 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7154 smr_t smr = zone_cache_smr(cache);
7155
7156 while (zd.zd_full) {
7157 mag = zone_depot_pop_head_full(&zd, NULL);
7158 if (smr) {
7159 smr_wait(smr, mag->zm_seq);
7160 zalloc_cached_reuse_smr(z, cache, mag);
7161 freed += zc_mag_size();
7162 }
7163 zone_reclaim_elements(z, zc_mag_size(),
7164 mag->zm_elems);
7165 zone_depot_insert_head_empty(&zd, mag);
7166
7167 freed += zc_mag_size();
7168 if (freed >= zc_free_batch_size() ||
7169 mach_continuous_speculative_time() >= maxtime) {
7170 #if SCHED_HYGIENE_DEBUG
7171 abandon_preemption_disable_measurement();
7172 #endif
7173 zone_unlock(z);
7174 zone_magazine_free_list(&zd);
7175 thread_yield_to_preemption();
7176 zone_lock(z);
7177 freed = 0;
7178 maxtime = mach_continuous_speculative_time() +
7179 zc_free_batch_timeout();
7180 }
7181 }
7182 } else {
7183 zone_id_t zid = zone_index(z);
7184
7185 zone_unlock(z);
7186
7187 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7188
7189 while (zd.zd_full) {
7190 mag = zone_depot_pop_head_full(&zd, NULL);
7191 zcache_reclaim_elements(zid, zc_mag_size(),
7192 mag->zm_elems);
7193 zone_magazine_free(mag);
7194 }
7195
7196 goto cleanup;
7197 }
7198 }
7199
7200 while (!zone_pva_is_null(z->z_pageq_empty)) {
7201 struct zone_page_metadata *meta;
7202 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7203
7204 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7205 limit = MAX(limit, z->z_elems_free -
7206 MIN(z->z_elems_free_min, z->z_elems_free_wma / Z_WMA_UNIT));
7207 }
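/*
 * "limit" is the number of free elements to keep: at least 125% of the
 * reserve, and for a TRIM of an uncached zone, also enough to cover the
 * recently observed working set (the current free count minus the smaller
 * of its recent minimum and moving average).
 */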
7208
7209 meta = zone_pva_to_meta(z->z_pageq_empty);
7210 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7211
7212 if (zone_count_free(z) - count < limit) {
7213 break;
7214 }
7215
7216 zone_reclaim_chunk(z, meta, count);
7217 }
7218
7219 zone_unlock(z);
7220
7221 cleanup:
7222 zone_magazine_free_list(&zd);
7223 }
7224
7225 void
7226 zone_drain(zone_t zone)
7227 {
7228 current_thread()->options |= TH_OPT_ZONE_PRIV;
7229 lck_mtx_lock(&zone_gc_lock);
7230 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7231 lck_mtx_unlock(&zone_gc_lock);
7232 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7233 }
7234
7235 void
7236 zcache_drain(zone_id_t zid)
7237 {
7238 zone_drain(zone_by_id(zid));
7239 }
7240
7241 static void
7242 zone_reclaim_all(zone_reclaim_mode_t mode)
7243 {
7244 /*
7245 * Start with zcaches, so that they flow into the regular zones.
7246 *
7247 * Then the zones with VA sequester since depopulating
7248 * pages will not need to allocate vm map entries for holes,
7249 * which will give memory back to the system faster.
7250 */
7251 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7252 zone_t z = zone_by_id(zid);
7253
7254 if (z->z_self && z->z_chunk_elems == 0) {
7255 zone_reclaim(z, mode);
7256 }
7257 }
7258 zone_index_foreach(zid) {
7259 zone_t z = zone_by_id(zid);
7260
7261 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7262 continue;
7263 }
7264 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7265 z->collectable) {
7266 zone_reclaim(z, mode);
7267 }
7268 }
7269
7270 zone_index_foreach(zid) {
7271 zone_t z = zone_by_id(zid);
7272
7273 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7274 continue;
7275 }
7276 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7277 z->collectable) {
7278 zone_reclaim(z, mode);
7279 }
7280 }
7281
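/*
 * The magazine zone is reclaimed last, once the passes above have returned
 * their magazines to it.
 */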
7282 zone_reclaim(zc_magazine_zone, mode);
7283 }
7284
7285 void
7286 zone_userspace_reboot_checks(void)
7287 {
7288 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7289 if (label_zone_size != 0) {
7290 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7291 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7292 }
7293 }
7294
7295 void
7296 zone_gc(zone_gc_level_t level)
7297 {
7298 zone_reclaim_mode_t mode;
7299 zone_t largest_zone = NULL;
7300
7301 switch (level) {
7302 case ZONE_GC_TRIM:
7303 mode = ZONE_RECLAIM_TRIM;
7304 break;
7305 case ZONE_GC_DRAIN:
7306 mode = ZONE_RECLAIM_DRAIN;
7307 break;
7308 case ZONE_GC_JETSAM:
7309 largest_zone = kill_process_in_largest_zone();
7310 mode = ZONE_RECLAIM_TRIM;
7311 break;
7312 }
7313
7314 current_thread()->options |= TH_OPT_ZONE_PRIV;
7315 lck_mtx_lock(&zone_gc_lock);
7316
7317 zone_reclaim_all(mode);
7318
7319 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7320 /*
7321 * If we possibly killed a process, but we're still critical,
7322 * we need to drain harder.
7323 */
7324 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7325 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7326 }
7327
7328 lck_mtx_unlock(&zone_gc_lock);
7329 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7330 }
7331
7332 void
7333 zone_gc_trim(void)
7334 {
7335 zone_gc(ZONE_GC_TRIM);
7336 }
7337
7338 void
7339 zone_gc_drain(void)
7340 {
7341 zone_gc(ZONE_GC_DRAIN);
7342 }
7343
7344 static bool
7345 zone_trim_needed(zone_t z)
7346 {
7347 if (z->z_depot_cleanup) {
7348 return true;
7349 }
7350
7351 if (z->z_async_refilling) {
7352 /* Don't fight with refill */
7353 return false;
7354 }
7355
7356 if (z->z_pcpu_cache) {
7357 uint32_t e_n, f_n;
7358
7359 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7360 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
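/*
 * e_n / f_n are conservative estimates (scaled by Z_WMA_UNIT) of how many
 * empty / full magazines have been sitting unused in the recirculation depot
 * recently: the smaller of the moving average and the recent minimum.
 */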
7361
7362 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7363 return true;
7364 }
7365
7366 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7367 f_n * zc_mag_size() * zone_elem_inner_size(z) >
7368 zc_autotrim_size() * Z_WMA_UNIT) {
7369 return true;
7370 }
7371
7372 return false;
7373 }
7374
7375 if (!zone_pva_is_null(z->z_pageq_empty)) {
7376 uint32_t n;
7377
7378 n = MIN(z->z_elems_free_wma / Z_WMA_UNIT, z->z_elems_free_min);
7379
7380 return n >= z->z_elems_rsv + z->z_chunk_elems;
7381 }
7382
7383 return false;
7384 }
7385
7386 static void
7387 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7388 {
7389 current_thread()->options |= TH_OPT_ZONE_PRIV;
7390
7391 zone_foreach(z) {
7392 if (!z->collectable || z == zc_magazine_zone) {
7393 continue;
7394 }
7395
7396 if (zone_trim_needed(z)) {
7397 lck_mtx_lock(&zone_gc_lock);
7398 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7399 lck_mtx_unlock(&zone_gc_lock);
7400 }
7401 }
7402
7403 if (zone_trim_needed(zc_magazine_zone)) {
7404 lck_mtx_lock(&zone_gc_lock);
7405 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7406 lck_mtx_unlock(&zone_gc_lock);
7407 }
7408
7409 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7410 }
7411
7412 void
7413 compute_zone_working_set_size(__unused void *param)
7414 {
7415 uint32_t zc_auto = zc_enable_level();
7416 bool needs_trim = false;
7417
7418 /*
7419 * Keep zone caching disabled until the first proc is made.
7420 */
7421 if (__improbable(zone_caching_disabled < 0)) {
7422 return;
7423 }
7424
7425 zone_caching_disabled = vm_pool_low();
7426
7427 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
7428 zc_auto = 0;
7429 }
7430
7431 zone_foreach(z) {
7432 uint32_t old, wma, cur;
7433 bool needs_caching = false;
7434
7435 if (z->z_self != z) {
7436 continue;
7437 }
7438
7439 zone_lock(z);
7440
7441 zone_recirc_lock_nopreempt(z);
7442
7443 if (z->z_pcpu_cache) {
7444 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
7445 z->z_recirc_empty_min = z->z_recirc.zd_empty;
7446 z->z_recirc_empty_wma = wma;
7447 } else {
7448 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
7449 z->z_elems_free_min = z->z_elems_free;
7450 z->z_elems_free_wma = wma;
7451 }
7452
7453 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
7454 z->z_recirc_full_min = z->z_recirc.zd_full;
7455 z->z_recirc_full_wma = wma;
7456
7457 /* fixed point decimal of contentions per second */
7458 old = z->z_recirc_cont_wma;
7459 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
7460 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
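/* exponentially weighted moving average: 3/4 history, 1/4 current sample */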
7461 cur = (3 * old + cur) / 4;
7462 zone_recirc_unlock_nopreempt(z);
7463
7464 if (z->z_pcpu_cache) {
7465 uint16_t size = z->z_depot_size;
7466
7467 if (zone_exhausted(z)) {
7468 if (z->z_depot_size) {
7469 z->z_depot_size = 0;
7470 z->z_depot_cleanup = true;
7471 }
7472 } else if (size < z->z_depot_limit && cur > zc_grow_level()) {
7473 /*
7474 * lose history on purpose now
7475 * that we just grew, to give
7476 * the system time to adjust.
7477 */
7478 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7479 size = size ? (3 * size + 2) / 2 : 2;
7480 z->z_depot_size = MIN(z->z_depot_limit, size);
7481 } else if (size > 0 && cur <= zc_shrink_level()) {
7482 /*
7483 * lose history on purpose now
7484 * that we just shrunk, to give
7485 * the system time to adjust.
7486 */
7487 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7488 z->z_depot_size = size - 1;
7489 z->z_depot_cleanup = true;
7490 }
7491 } else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
7492 old >= zc_auto && cur >= zc_auto) {
7493 needs_caching = true;
7494 }
7495
7496 z->z_recirc_cont_wma = cur;
7497 z->z_recirc_cont_cur = 0;
7498
7499 if (!needs_trim && zone_trim_needed(z)) {
7500 needs_trim = true;
7501 }
7502
7503 zone_unlock(z);
7504
7505 if (needs_caching) {
7506 zone_enable_caching(z);
7507 }
7508 }
7509
7510 if (needs_trim) {
7511 thread_call_enter(&zone_trim_callout);
7512 }
7513 }
7514
7515 #endif /* !ZALLOC_TEST */
7516 #pragma mark vm integration, MIG routines
7517 #if !ZALLOC_TEST
7518
7519 extern unsigned int stack_total;
7520 #if defined (__x86_64__)
7521 extern unsigned int inuse_ptepages_count;
7522 #endif
7523
7524 static const char *
7525 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
7526 bool is_kt_var)
7527 {
7528 if (is_kt_var) {
7529 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
7530 return cur.ktv_var->kt_name;
7531 } else {
7532 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
7533 return cur.ktv_fixed->kt_zv.zv_name;
7534 }
7535 }
7536
7537 static void
7538 panic_print_types_in_zone(zone_t z, const char* debug_str)
7539 {
7540 kalloc_type_views_t kt_cur = {};
7541 const char *prev_type = "";
7542 size_t skip_over_site = sizeof("site.") - 1;
7543 zone_security_flags_t zsflags = zone_security_config(z);
7544 bool is_kt_var = false;
7545
7546 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
7547 uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
7548 kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
7549 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
7550 is_kt_var = true;
7551 } else {
7552 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
7553 }
7554
7555 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
7556 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
7557
7558 while (kt_cur.ktv_fixed) {
7559 kalloc_type_views_t kt_next = {};
7560 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
7561 is_kt_var) + skip_over_site;
7562 if (strcmp(typename, prev_type) != 0) {
7563 paniclog_append_noflush("\t%-50s\n", typename);
7564 prev_type = typename;
7565 }
7566 kt_cur = kt_next;
7567 }
7568 paniclog_append_noflush("\n");
7569 }
7570
7571 static void
7572 panic_display_kalloc_types(void)
7573 {
7574 if (kalloc_type_src_zone) {
7575 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7576 }
7577 if (kalloc_type_dst_zone) {
7578 panic_print_types_in_zone(kalloc_type_dst_zone,
7579 "addr is being freed to");
7580 }
7581 }
7582
7583 static void
7584 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7585 uint64_t *zone_size)
7586 {
7587 zone_index_foreach(zid) {
7588 zone_t z = &zone_array[zid];
7589 vm_offset_t size = zone_size_wired(z);
7590
7591 if (zid == ZONE_ID_VM_PAGES) {
7592 continue;
7593 }
7594 for (uint32_t i = 0; i < n; i++) {
7595 if (size > zone_size[i]) {
7596 largest_zones[i] = z;
7597 zone_size[i] = size;
7598 break;
7599 }
7600 }
7601 }
7602 }
7603
7604 #define NUM_LARGEST_ZONES 5
7605 static void
7606 panic_display_largest_zones(void)
7607 {
7608 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
7609 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
7610
7611 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
7612 (uint64_t *) &largest_size);
7613
7614 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
7615 "Zone Name", "Cur Size", "Free Size");
7616 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
7617 zone_t z = largest_zones[i];
7618 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
7619 zone_heap_name(z), z->z_name,
7620 mach_vm_size_pretty(largest_size[i]),
7621 mach_vm_size_unit(largest_size[i]),
7622 mach_vm_size_pretty(zone_size_free(z)),
7623 mach_vm_size_unit(zone_size_free(z)));
7624 }
7625 }
7626
7627 static void
7628 panic_display_zprint(void)
7629 {
7630 panic_display_largest_zones();
7631 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
7632 (uintptr_t)(kernel_stack_size * stack_total));
7633 #if defined (__x86_64__)
7634 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
7635 (uintptr_t)ptoa(inuse_ptepages_count));
7636 #endif
7637 paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
7638 counter_load(&kalloc_large_total));
7639
7640 if (panic_kext_memory_info) {
7641 mach_memory_info_t *mem_info = panic_kext_memory_info;
7642
7643 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
7644 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
7645 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
7646 continue;
7647 }
7648 if (mem_info[i].size > (1024 * 1024)) {
7649 paniclog_append_noflush("%-5lld %10lld\n",
7650 mem_info[i].site, mem_info[i].size);
7651 }
7652 }
7653 }
7654 }
7655
7656 static void
7657 panic_display_zone_info(void)
7658 {
7659 paniclog_append_noflush("Zone info:\n");
7660 paniclog_append_noflush(" Zone map: %p - %p\n",
7661 (void *)zone_info.zi_map_range.min_address,
7662 (void *)zone_info.zi_map_range.max_address);
7663 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7664 vm_map_t map = zone_submaps[i];
7665
7666 if (map == VM_MAP_NULL) {
7667 continue;
7668 }
7669 paniclog_append_noflush(" . %-6s: %p - %p\n",
7670 zone_submaps_names[i],
7671 (void *)map->min_offset,
7672 (void *)map->max_offset);
7673 }
7674 paniclog_append_noflush(" Metadata: %p - %p\n"
7675 " Bitmaps : %p - %p\n"
7676 " Extra : %p - %p\n"
7677 "\n",
7678 (void *)zone_info.zi_meta_range.min_address,
7679 (void *)zone_info.zi_meta_range.max_address,
7680 (void *)zone_info.zi_bits_range.min_address,
7681 (void *)zone_info.zi_bits_range.max_address,
7682 (void *)zone_info.zi_xtra_range.min_address,
7683 (void *)zone_info.zi_xtra_range.max_address);
7684 }
7685
7686 static void
7687 panic_display_zone_fault(vm_offset_t addr)
7688 {
7689 struct zone_page_metadata meta = { };
7690 vm_map_t map = VM_MAP_NULL;
7691 vm_offset_t oob_offs = 0, size = 0;
7692 int map_idx = -1;
7693 zone_t z = NULL;
7694 const char *kind = "whild deref";
7695 bool oob = false;
7696
7697 /*
7698 * First: look if we bumped into guard pages between submaps
7699 */
7700 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7701 map = zone_submaps[i];
7702 if (map == VM_MAP_NULL) {
7703 continue;
7704 }
7705
7706 if (addr >= map->min_offset && addr < map->max_offset) {
7707 map_idx = i;
7708 break;
7709 }
7710 }
7711
7712 if (map_idx == -1) {
7713 /* this really shouldn't happen, submaps are back to back */
7714 return;
7715 }
7716
7717 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
7718
7719 /*
7720 * Second: look if there's just no metadata at all
7721 */
7722 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
7723 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
7724 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
7725 zone_array[meta.zm_index].z_self == NULL) {
7726 paniclog_append_noflush(" Zone : <unknown>\n");
7727 kind = "wild deref, missing or invalid metadata";
7728 } else {
7729 z = &zone_array[meta.zm_index];
7730 paniclog_append_noflush(" Zone : %s%s\n",
7731 zone_heap_name(z), zone_name(z));
7732 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
7733 kind = "out-of-bounds (high confidence)";
7734 oob = true;
7735 size = zone_element_size((void *)addr,
7736 &z, false, &oob_offs);
7737 } else {
7738 kind = "use-after-free (medium confidence)";
7739 }
7740 }
7741
7742 paniclog_append_noflush(" Address : %p\n", (void *)addr);
7743 if (oob) {
7744 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
7745 (void *)(trunc_page(addr) - (size - oob_offs)),
7746 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
7747 }
7748 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
7749 zone_submaps_names[map_idx],
7750 (void *)map->min_offset, (void *)map->max_offset);
7751 paniclog_append_noflush(" Kind : %s\n", kind);
7752 if (oob) {
7753 paniclog_append_noflush(" Access : %d byte(s) past\n",
7754 (uint32_t)(addr & PAGE_MASK) + 1);
7755 }
7756 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
7757 "0x%04x 0x%08x 0x%08x 0x%08x\n",
7758 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
7759 meta.zm_alloc_size, meta.zm_bitmap,
7760 meta.zm_page_next.packed_address,
7761 meta.zm_page_prev.packed_address);
7762 paniclog_append_noflush("\n");
7763 }
7764
7765 void
7766 panic_display_zalloc(void)
7767 {
7768 bool keepsyms = false;
7769
7770 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
7771
7772 panic_display_zone_info();
7773
7774 if (panic_fault_address) {
7775 if (zone_maps_owned(panic_fault_address, 1)) {
7776 panic_display_zone_fault(panic_fault_address);
7777 }
7778 }
7779
7780 if (panic_include_zprint) {
7781 panic_display_zprint();
7782 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
7783 panic_display_largest_zones();
7784 }
7785 #if CONFIG_ZLEAKS
7786 if (zleak_active) {
7787 panic_display_zleaks(keepsyms);
7788 }
7789 #endif
7790 if (panic_include_kalloc_types) {
7791 panic_display_kalloc_types();
7792 }
7793 }
7794
7795 /*
7796 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
7797 * requesting zone information.
7798 * Frees unused pages towards the end of the region, and zeroes out unused
7799 * space on the last page.
7800 */
7801 static vm_map_copy_t
7802 create_vm_map_copy(
7803 vm_offset_t start_addr,
7804 vm_size_t total_size,
7805 vm_size_t used_size)
7806 {
7807 kern_return_t kr;
7808 vm_offset_t end_addr;
7809 vm_size_t free_size;
7810 vm_map_copy_t copy;
7811
7812 if (used_size != total_size) {
7813 end_addr = start_addr + used_size;
7814 free_size = total_size - (round_page(end_addr) - start_addr);
7815
7816 if (free_size >= PAGE_SIZE) {
7817 kmem_free(ipc_kernel_map,
7818 round_page(end_addr), free_size);
7819 }
7820 bzero((char *) end_addr, round_page(end_addr) - end_addr);
7821 }
7822
7823 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
7824 (vm_map_size_t)used_size, TRUE, &copy);
7825 assert(kr == KERN_SUCCESS);
7826
7827 return copy;
7828 }
7829
7830 static boolean_t
7831 get_zone_info(
7832 zone_t z,
7833 mach_zone_name_t *zn,
7834 mach_zone_info_t *zi)
7835 {
7836 struct zone zcopy;
7837 vm_size_t cached = 0;
7838
7839 assert(z != ZONE_NULL);
7840 zone_lock(z);
7841 if (!z->z_self) {
7842 zone_unlock(z);
7843 return FALSE;
7844 }
7845 zcopy = *z;
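/*
 * Elements sitting in per-CPU caches (and their local depots) are still
 * free; count them so they can be subtracted from the allocated count
 * reported in mzi_count below.
 */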
7846 if (z->z_pcpu_cache) {
7847 zpercpu_foreach(zc, z->z_pcpu_cache) {
7848 cached += zc->zc_alloc_cur + zc->zc_free_cur;
7849 cached += zc->zc_depot.zd_full * zc_mag_size();
7850 }
7851 }
7852 zone_unlock(z);
7853
7854 if (zn != NULL) {
7855 /*
7856 * Append kalloc heap name to zone name (if zone is used by kalloc)
7857 */
7858 char temp_zone_name[MAX_ZONE_NAME] = "";
7859 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
7860 zone_heap_name(z), z->z_name);
7861
7862 /* assuming here the name data is static */
7863 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
7864 strlen(temp_zone_name) + 1);
7865 }
7866
7867 if (zi != NULL) {
7868 *zi = (mach_zone_info_t) {
7869 .mzi_count = zone_count_allocated(&zcopy) - cached,
7870 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
7871 // max_size for zprint is now high-watermark of pages used
7872 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
7873 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
7874 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
7875 .mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
7876 };
7877 if (zcopy.z_chunk_pages == 0) {
7878 /* this is a zcache */
7879 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
7880 }
7881 zpercpu_foreach(zs, zcopy.z_stats) {
7882 zi->mzi_sum_size += zs->zs_mem_allocated;
7883 }
7884 if (zcopy.collectable) {
7885 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
7886 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
7887 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
7888 }
7889 }
7890
7891 return TRUE;
7892 }
7893
7894 /* mach_memory_info entitlement */
7895 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
7896
7897 /* macro needed to rate-limit mach_memory_info */
7898 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
7899
7900 /* declarations necessary to call kauth_cred_issuser() */
7901 struct ucred;
7902 extern int kauth_cred_issuser(struct ucred *);
7903 extern struct ucred *kauth_cred_get(void);
7904
7905 static kern_return_t
7906 mach_memory_info_internal(
7907 host_t host,
7908 mach_zone_name_array_t *namesp,
7909 mach_msg_type_number_t *namesCntp,
7910 mach_zone_info_array_t *infop,
7911 mach_msg_type_number_t *infoCntp,
7912 mach_memory_info_array_t *memoryInfop,
7913 mach_msg_type_number_t *memoryInfoCntp,
7914 bool redact_info);
7915
7916 static kern_return_t
7917 mach_memory_info_security_check(bool redact_info)
7918 {
7919 /* If not root, only allow redacted calls. */
7920 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
7921 return KERN_NO_ACCESS;
7922 }
7923
7924 if (research_mode_state() == true) {
7925 return KERN_SUCCESS;
7926 }
7927
7928 /* If the caller does not have the memory entitlement, fail. */
7929 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
7930 task_t task = current_task();
7931 if (task != kernel_task && !IOTaskHasEntitlement(task, MEMORYINFO_ENTITLEMENT)) {
7932 return KERN_DENIED;
7933 }
7934
7935 /*
7936 * On release non-mac arm devices, allow mach_memory_info
7937 * to be called twice per day per boot. memorymaintenanced
7938 * calls it once per day, which leaves room for a sysdiagnose.
7939 * Allow redacted version to be called without rate limit.
7940 */
7941
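/*
 * Two timestamps implement the limit: the first two unredacted calls in a
 * window are allowed; further calls are denied until a full NSEC_DAY has
 * elapsed since the first call, at which point the window resets.
 */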
7942 if (!redact_info) {
7943 static uint64_t first_call = 0, second_call = 0;
7944 uint64_t now = 0;
7945 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
7946
7947 if (!first_call) {
7948 first_call = now;
7949 } else if (!second_call) {
7950 second_call = now;
7951 } else if (first_call + NSEC_DAY > now) {
7952 return KERN_DENIED;
7953 } else if (first_call + NSEC_DAY < now) {
7954 first_call = now;
7955 second_call = 0;
7956 }
7957 }
7958 #endif
7959
7960 return KERN_SUCCESS;
7961 }
7962
7963 #if DEVELOPMENT || DEBUG
7964
7965 kern_return_t
7966 zone_reset_peak(const char *zonename)
7967 {
7968 unsigned int max_zones;
7969
7970 if (zonename == NULL) {
7971 return KERN_INVALID_ARGUMENT;
7972 }
7973
7974 max_zones = os_atomic_load(&num_zones, relaxed);
7975 for (unsigned int i = 0; i < max_zones; i++) {
7976 zone_t z = &zone_array[i];
7977
7978 if (zone_name(z) &&
7979 track_this_zone(zone_name(z), zonename)) {
7980 /* Found the matching zone */
7981 os_log_info(OS_LOG_DEFAULT,
7982 "zalloc: resetting peak size for zone %s\n", zone_name(z));
7983 zone_lock(z);
7984 z->z_wired_hwm = z->z_wired_cur;
7985 zone_unlock(z);
7986 return KERN_SUCCESS;
7987 }
7988 }
7989 return KERN_NOT_FOUND;
7990 }
7991
7992 kern_return_t
7993 zone_reset_all_peaks(void)
7994 {
7995 unsigned int max_zones;
7996 os_log_info(OS_LOG_DEFAULT, "zalloc: resetting all zone size peaks\n");
7997 max_zones = os_atomic_load(&num_zones, relaxed);
7998 for (unsigned int i = 0; i < max_zones; i++) {
7999 zone_t z = &zone_array[i];
8000 zone_lock(z);
8001 z->z_wired_hwm = z->z_wired_cur;
8002 zone_unlock(z);
8003 }
8004 return KERN_SUCCESS;
8005 }
8006
8007 #endif /* DEVELOPMENT || DEBUG */
8008
8009 kern_return_t
8010 mach_zone_info(
8011 mach_port_t host_port,
8012 mach_zone_name_array_t *namesp,
8013 mach_msg_type_number_t *namesCntp,
8014 mach_zone_info_array_t *infop,
8015 mach_msg_type_number_t *infoCntp)
8016 {
8017 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8018 }
8019
8020 kern_return_t
8021 mach_memory_info(
8022 mach_port_t host_port,
8023 mach_zone_name_array_t *namesp,
8024 mach_msg_type_number_t *namesCntp,
8025 mach_zone_info_array_t *infop,
8026 mach_msg_type_number_t *infoCntp,
8027 mach_memory_info_array_t *memoryInfop,
8028 mach_msg_type_number_t *memoryInfoCntp)
8029 {
8030 bool redact_info = false;
8031 host_t host = HOST_NULL;
8032
8033 host = convert_port_to_host_priv(host_port);
8034 if (host == HOST_NULL) {
8035 redact_info = true;
8036 host = convert_port_to_host(host_port);
8037 }
8038
8039 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8040 }
8041
8042 static void
8043 zone_info_redact(mach_zone_info_t *zi)
8044 {
8045 zi->mzi_cur_size = 0;
8046 zi->mzi_max_size = 0;
8047 zi->mzi_alloc_size = 0;
8048 zi->mzi_sum_size = 0;
8049 zi->mzi_collectable = 0;
8050 }
8051
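/*
 * When info is redacted, kalloc type zones (fixed kalloc_type zones and
 * KHEAP_ID_KT_VAR zones) are not reported individually: zones sharing an
 * element size are coalesced into a single "kalloc.<size>" entry with their
 * element counts summed. The helpers below implement that.
 */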
8052 static bool
8053 zone_info_needs_to_be_coalesced(int zone_index)
8054 {
8055 zone_security_flags_t zsflags = zone_security_array[zone_index];
8056 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8057 return true;
8058 }
8059 return false;
8060 }
8061
8062 static bool
8063 zone_info_find_coalesce_zone(
8064 mach_zone_info_t *zi,
8065 mach_zone_info_t *info,
8066 int *coalesce,
8067 int coalesce_count,
8068 int *coalesce_index)
8069 {
8070 for (int i = 0; i < coalesce_count; i++) {
8071 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8072 *coalesce_index = coalesce[i];
8073 return true;
8074 }
8075 }
8076
8077 return false;
8078 }
8079
8080 static void
8081 zone_info_coalesce(
8082 mach_zone_info_t *info,
8083 int coalesce_index,
8084 mach_zone_info_t *zi)
8085 {
8086 info[coalesce_index].mzi_count += zi->mzi_count;
8087 }
8088
8089 kern_return_t
8090 mach_memory_info_sample(
8091 mach_zone_name_t *names,
8092 mach_zone_info_t *info,
8093 int *coalesce,
8094 unsigned int *zonesCnt,
8095 mach_memory_info_t *memoryInfo,
8096 unsigned int memoryInfoCnt,
8097 bool redact_info)
8098 {
8099 int coalesce_count = 0;
8100 unsigned int max_zones, used_zones = 0;
8101 mach_zone_name_t *zn;
8102 mach_zone_info_t *zi;
8103 kern_return_t kr;
8104
8105 uint64_t zones_collectable_bytes = 0;
8106
8107 kr = mach_memory_info_security_check(redact_info);
8108 if (kr != KERN_SUCCESS) {
8109 return kr;
8110 }
8111
8112 max_zones = *zonesCnt;
8113
8114 bzero(names, max_zones * sizeof(*names));
8115 bzero(info, max_zones * sizeof(*info));
8116 if (redact_info) {
8117 bzero(coalesce, max_zones * sizeof(*coalesce));
8118 }
8119
8120 zn = &names[0];
8121 zi = &info[0];
8122
8123 zone_index_foreach(i) {
8124 if (used_zones > max_zones) {
8125 break;
8126 }
8127
8128 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8129 continue;
8130 }
8131
8132 if (!redact_info) {
8133 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8134 zn++;
8135 zi++;
8136 used_zones++;
8137 continue;
8138 }
8139
8140 zone_info_redact(zi);
8141 if (!zone_info_needs_to_be_coalesced(i)) {
8142 zn++;
8143 zi++;
8144 used_zones++;
8145 continue;
8146 }
8147
8148 int coalesce_index;
8149 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8150 coalesce, coalesce_count, &coalesce_index);
8151
8152 /* Didn't find a zone to coalesce */
8153 if (!found_coalesce_zone) {
8154 /* Updates the zone name */
8155 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8156 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8157 (int)zi->mzi_elem_size);
8158
8159 coalesce[coalesce_count] = used_zones;
8160 coalesce_count++;
8161 zn++;
8162 zi++;
8163 used_zones++;
8164 continue;
8165 }
8166
8167 zone_info_coalesce(info, coalesce_index, zi);
8168 }
8169
8170 *zonesCnt = used_zones;
8171
8172 if (memoryInfo) {
8173 bzero(memoryInfo, memoryInfoCnt * sizeof(*memoryInfo));
8174 kr = vm_page_diagnose(memoryInfo, memoryInfoCnt, zones_collectable_bytes, redact_info);
8175 if (kr != KERN_SUCCESS) {
8176 return kr;
8177 }
8178 }
8179
8180 return kr;
8181 }
8182
8183 static kern_return_t
8184 mach_memory_info_internal(
8185 host_t host,
8186 mach_zone_name_array_t *namesp,
8187 mach_msg_type_number_t *namesCntp,
8188 mach_zone_info_array_t *infop,
8189 mach_msg_type_number_t *infoCntp,
8190 mach_memory_info_array_t *memoryInfop,
8191 mach_msg_type_number_t *memoryInfoCntp,
8192 bool redact_info)
8193 {
8194 mach_zone_name_t *names;
8195 vm_offset_t names_addr;
8196 vm_size_t names_size;
8197
8198 mach_zone_info_t *info;
8199 vm_offset_t info_addr;
8200 vm_size_t info_size;
8201
8202 int *coalesce;
8203 vm_offset_t coalesce_addr;
8204 vm_size_t coalesce_size;
8205
8206 mach_memory_info_t *memory_info = NULL;
8207 vm_offset_t memory_info_addr = 0;
8208 vm_size_t memory_info_size;
8209 vm_size_t memory_info_vmsize;
8210 vm_map_copy_t memory_info_copy;
8211 unsigned int num_info = 0;
8212
8213 unsigned int max_zones, used_zones;
8214 kern_return_t kr;
8215
8216 if (host == HOST_NULL) {
8217 return KERN_INVALID_HOST;
8218 }
8219
8220 /*
8221 * We assume that zones aren't freed once allocated.
8222 * We won't pick up any zones that are allocated later.
8223 */
8224
8225 max_zones = os_atomic_load(&num_zones, relaxed);
8226
8227 names_size = round_page(max_zones * sizeof *names);
8228 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8229 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8230 if (kr != KERN_SUCCESS) {
8231 return kr;
8232 }
8233 names = (mach_zone_name_t *) names_addr;
8234
8235 info_size = round_page(max_zones * sizeof *info);
8236 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8237 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8238 if (kr != KERN_SUCCESS) {
8239 kmem_free(ipc_kernel_map,
8240 names_addr, names_size);
8241 return kr;
8242 }
8243 info = (mach_zone_info_t *) info_addr;
8244
8245 if (redact_info) {
8246 coalesce_size = round_page(max_zones * sizeof *coalesce);
8247 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8248 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8249 if (kr != KERN_SUCCESS) {
8250 kmem_free(ipc_kernel_map,
8251 names_addr, names_size);
8252 kmem_free(ipc_kernel_map,
8253 info_addr, info_size);
8254 return kr;
8255 }
8256 coalesce = (int *)coalesce_addr;
8257 }
8258
8259 if (memoryInfop && memoryInfoCntp) {
8260 num_info = vm_page_diagnose_estimate();
8261 memory_info_size = num_info * sizeof(*memory_info);
8262 memory_info_vmsize = round_page(memory_info_size);
8263 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8264 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8265 if (kr != KERN_SUCCESS) {
8266 return kr;
8267 }
8268
8269 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8270 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8271 assert(kr == KERN_SUCCESS);
8272
8273 memory_info = (mach_memory_info_t *) memory_info_addr;
8274 }
8275
8276 used_zones = max_zones;
8277 mach_memory_info_sample(names, info, coalesce, &used_zones, memory_info, num_info, redact_info);
8278
8279 if (redact_info) {
8280 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8281 }
8282
8283 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8284 *namesCntp = used_zones;
8285
8286 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8287 *infoCntp = used_zones;
8288
8289 if (memoryInfop && memoryInfoCntp) {
8290 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8291 assert(kr == KERN_SUCCESS);
8292
8293 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8294 (vm_map_size_t)memory_info_size, TRUE, &memory_info_copy);
8295 assert(kr == KERN_SUCCESS);
8296
8297 *memoryInfop = (mach_memory_info_t *) memory_info_copy;
8298 *memoryInfoCntp = num_info;
8299 }
8300
8301 return KERN_SUCCESS;
8302 }
8303
8304 kern_return_t
8305 mach_zone_info_for_zone(
8306 host_priv_t host,
8307 mach_zone_name_t name,
8308 mach_zone_info_t *infop)
8309 {
8310 zone_t zone_ptr;
8311
8312 if (host == HOST_NULL) {
8313 return KERN_INVALID_HOST;
8314 }
8315
8316 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8317 if (!PE_i_can_has_debugger(NULL)) {
8318 return KERN_INVALID_HOST;
8319 }
8320 #endif
8321
8322 if (infop == NULL) {
8323 return KERN_INVALID_ARGUMENT;
8324 }
8325
8326 zone_ptr = ZONE_NULL;
8327 zone_foreach(z) {
8328 /*
8329 * Append kalloc heap name to zone name (if zone is used by kalloc)
8330 */
8331 char temp_zone_name[MAX_ZONE_NAME] = "";
8332 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8333 zone_heap_name(z), z->z_name);
8334
8335 /* Find the requested zone by name */
8336 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8337 zone_ptr = z;
8338 break;
8339 }
8340 }
8341
8342 /* No zones found with the requested zone name */
8343 if (zone_ptr == ZONE_NULL) {
8344 return KERN_INVALID_ARGUMENT;
8345 }
8346
8347 if (get_zone_info(zone_ptr, NULL, infop)) {
8348 return KERN_SUCCESS;
8349 }
8350 return KERN_FAILURE;
8351 }
8352
8353 kern_return_t
8354 mach_zone_info_for_largest_zone(
8355 host_priv_t host,
8356 mach_zone_name_t *namep,
8357 mach_zone_info_t *infop)
8358 {
8359 if (host == HOST_NULL) {
8360 return KERN_INVALID_HOST;
8361 }
8362
8363 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8364 if (!PE_i_can_has_debugger(NULL)) {
8365 return KERN_INVALID_HOST;
8366 }
8367 #endif
8368
8369 if (namep == NULL || infop == NULL) {
8370 return KERN_INVALID_ARGUMENT;
8371 }
8372
8373 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8374 return KERN_SUCCESS;
8375 }
8376 return KERN_FAILURE;
8377 }
8378
8379 uint64_t
8380 get_zones_collectable_bytes(void)
8381 {
8382 uint64_t zones_collectable_bytes = 0;
8383 mach_zone_info_t zi;
8384
8385 zone_foreach(z) {
8386 if (get_zone_info(z, NULL, &zi)) {
8387 zones_collectable_bytes +=
8388 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8389 }
8390 }
8391
8392 return zones_collectable_bytes;
8393 }
8394
8395 kern_return_t
8396 mach_zone_get_zlog_zones(
8397 host_priv_t host,
8398 mach_zone_name_array_t *namesp,
8399 mach_msg_type_number_t *namesCntp)
8400 {
8401 #if ZALLOC_ENABLE_LOGGING
8402 unsigned int max_zones, logged_zones, i;
8403 kern_return_t kr;
8404 zone_t zone_ptr;
8405 mach_zone_name_t *names;
8406 vm_offset_t names_addr;
8407 vm_size_t names_size;
8408
8409 if (host == HOST_NULL) {
8410 return KERN_INVALID_HOST;
8411 }
8412
8413 if (namesp == NULL || namesCntp == NULL) {
8414 return KERN_INVALID_ARGUMENT;
8415 }
8416
8417 max_zones = os_atomic_load(&num_zones, relaxed);
8418
8419 names_size = round_page(max_zones * sizeof *names);
8420 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8421 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8422 if (kr != KERN_SUCCESS) {
8423 return kr;
8424 }
8425 names = (mach_zone_name_t *) names_addr;
8426
8427 zone_ptr = ZONE_NULL;
8428 logged_zones = 0;
8429 for (i = 0; i < max_zones; i++) {
8430 zone_t z = &(zone_array[i]);
8431 assert(z != ZONE_NULL);
8432
8433 /* Copy out the zone name if zone logging is enabled */
8434 if (z->z_btlog) {
8435 get_zone_info(z, &names[logged_zones], NULL);
8436 logged_zones++;
8437 }
8438 }
8439
8440 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8441 *namesCntp = logged_zones;
8442
8443 return KERN_SUCCESS;
8444
8445 #else /* ZALLOC_ENABLE_LOGGING */
8446 #pragma unused(host, namesp, namesCntp)
8447 return KERN_FAILURE;
8448 #endif /* ZALLOC_ENABLE_LOGGING */
8449 }
8450
8451 kern_return_t
8452 mach_zone_get_btlog_records(
8453 host_priv_t host,
8454 mach_zone_name_t name,
8455 zone_btrecord_array_t *recsp,
8456 mach_msg_type_number_t *numrecs)
8457 {
8458 #if ZALLOC_ENABLE_LOGGING
8459 zone_btrecord_t *recs;
8460 kern_return_t kr;
8461 vm_address_t addr;
8462 vm_size_t size;
8463 zone_t zone_ptr;
8464 vm_map_copy_t copy;
8465
8466 if (host == HOST_NULL) {
8467 return KERN_INVALID_HOST;
8468 }
8469
8470 if (recsp == NULL || numrecs == NULL) {
8471 return KERN_INVALID_ARGUMENT;
8472 }
8473
8474 zone_ptr = ZONE_NULL;
8475 zone_foreach(z) {
8476 /*
8477 * Append kalloc heap name to zone name (if zone is used by kalloc)
8478 */
8479 char temp_zone_name[MAX_ZONE_NAME] = "";
8480 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8481 zone_heap_name(z), z->z_name);
8482
8483 /* Find the requested zone by name */
8484 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8485 zone_ptr = z;
8486 break;
8487 }
8488 }
8489
8490 /* No zones found with the requested zone name */
8491 if (zone_ptr == ZONE_NULL) {
8492 return KERN_INVALID_ARGUMENT;
8493 }
8494
8495 /* Logging not turned on for the requested zone */
8496 if (!zone_ptr->z_btlog) {
8497 return KERN_FAILURE;
8498 }
8499
8500 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8501 if (kr != KERN_SUCCESS) {
8502 return kr;
8503 }
8504
8505 addr = (vm_address_t)recs;
8506 size = sizeof(zone_btrecord_t) * *numrecs;
8507
8508 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8509 assert(kr == KERN_SUCCESS);
8510
8511 *recsp = (zone_btrecord_t *)copy;
8512 return KERN_SUCCESS;
8513
8514 #else /* !ZALLOC_ENABLE_LOGGING */
8515 #pragma unused(host, name, recsp, numrecs)
8516 return KERN_FAILURE;
8517 #endif /* !ZALLOC_ENABLE_LOGGING */
8518 }
8519
8520
8521 kern_return_t
8522 mach_zone_force_gc(
8523 host_t host)
8524 {
8525 if (host == HOST_NULL) {
8526 return KERN_INVALID_HOST;
8527 }
8528
8529 #if DEBUG || DEVELOPMENT
8530 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8531 /* Callout to buffer cache GC to drop elements in the apfs zones */
8532 if (consider_buffer_cache_collect != NULL) {
8533 (void)(*consider_buffer_cache_collect)(0);
8534 }
8535 zone_gc(ZONE_GC_DRAIN);
8536 #endif /* DEBUG || DEVELOPMENT */
8537 return KERN_SUCCESS;
8538 }
8539
8540 zone_t
8541 zone_find_largest(uint64_t *zone_size)
8542 {
8543 zone_t largest_zone = 0;
8544 uint64_t largest_zone_size = 0;
8545 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8546 if (zone_size) {
8547 *zone_size = largest_zone_size;
8548 }
8549 return largest_zone;
8550 }
8551
8552 void
8553 zone_get_stats(
8554 zone_t zone,
8555 struct zone_basic_stats *stats)
8556 {
8557 stats->zbs_avail = zone->z_elems_avail;
8558
8559 stats->zbs_alloc_fail = 0;
8560 zpercpu_foreach(zs, zone->z_stats) {
8561 stats->zbs_alloc_fail += zs->zs_alloc_fail;
8562 }
8563
8564 stats->zbs_cached = 0;
8565 if (zone->z_pcpu_cache) {
8566 zpercpu_foreach(zc, zone->z_pcpu_cache) {
8567 stats->zbs_cached += zc->zc_alloc_cur +
8568 zc->zc_free_cur +
8569 zc->zc_depot.zd_full * zc_mag_size();
8570 }
8571 }
8572
8573 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
8574
8575 /*
8576 * Since we don't take any locks, deal with possible inconsistencies
8577 * as the counters may have changed.
8578 */
8579 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
8580 &stats->zbs_alloc)) {
8581 stats->zbs_avail = stats->zbs_free;
8582 stats->zbs_alloc = 0;
8583 }
8584 }
8585
8586 #endif /* !ZALLOC_TEST */
8587 #pragma mark zone creation, configuration, destruction
8588 #if !ZALLOC_TEST
8589
8590 static zone_t
8591 zone_init_defaults(zone_id_t zid)
8592 {
8593 zone_t z = &zone_array[zid];
8594
8595 z->z_wired_max = ~0u;
8596 z->collectable = true;
8597
8598 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
8599 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
8600 zone_depot_init(&z->z_recirc);
8601 return z;
8602 }
8603
8604 void
8605 zone_set_exhaustible(zone_t zone, vm_size_t nelems, bool exhausts_by_design)
8606 {
8607 zone_lock(zone);
8608 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8609 zone->z_exhausts = exhausts_by_design;
8610 zone_unlock(zone);
8611 }
8612
8613 void
8614 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
8615 {
8616 zone_t zone = zov.zov_zone;
8617
8618 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
8619 zone = zov.zov_view->zv_zone;
8620 } else {
8621 zone = zov.zov_zone;
8622 }
8623
8624 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
8625 }
8626
8627 /**
8628 * @function zone_create_find
8629 *
8630 * @abstract
8631 * Finds an unused zone for the given name and element size.
8632 *
8633 * @param name the zone name
8634 * @param size the element size (including redzones, ...)
8635 * @param flags the flags passed to @c zone_create*
8636 * @param zid_inout the desired zone ID or ZONE_ID_ANY
8637 *
8638 * @returns a zone to initialize further.
8639 */
8640 static zone_t
8641 zone_create_find(
8642 const char *name,
8643 vm_size_t size,
8644 zone_create_flags_t flags,
8645 zone_id_t *zid_inout)
8646 {
8647 zone_id_t nzones, zid = *zid_inout;
8648 zone_t z;
8649
8650 simple_lock(&all_zones_lock, &zone_locks_grp);
8651
8652 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8653 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8654
8655 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8656 /*
8657 * The first time around, make sure the reserved zone IDs
8658 * have an initialized lock as zone_index_foreach() will
8659 * enumerate them.
8660 */
8661 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8662 zone_init_defaults(nzones++);
8663 }
8664
8665 os_atomic_store(&num_zones, nzones, release);
8666 }
8667
8668 if (zid != ZONE_ID_ANY) {
8669 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8670 panic("zone_create: invalid desired zone ID %d for %s",
8671 zid, name);
8672 }
8673 if (flags & ZC_DESTRUCTIBLE) {
8674 panic("zone_create: ID %d (%s) must be permanent", zid, name);
8675 }
8676 if (zone_array[zid].z_self) {
8677 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
8678 }
8679 z = &zone_array[zid];
8680 } else {
8681 if (flags & ZC_DESTRUCTIBLE) {
8682 /*
8683 * If possible, find a previously zdestroy'ed zone in the
8684 * zone_array that we can reuse.
8685 */
8686 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
8687 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
8688 z = &zone_array[i];
8689
8690 /*
8691 * If the zone name and the element size are the
8692 * same, we can just reuse the old zone struct.
8693 */
8694 if (strcmp(z->z_name, name) ||
8695 zone_elem_outer_size(z) != size) {
8696 continue;
8697 }
8698 bitmap_clear(zone_destroyed_bitmap, i);
8699 z->z_destroyed = false;
8700 z->z_self = z;
8701 zid = (zone_id_t)i;
8702 goto out;
8703 }
8704 }
8705
8706 zid = nzones++;
8707 z = zone_init_defaults(zid);
8708
8709 /*
8710 * The release barrier pairs with the acquire in
8711 * zone_index_foreach() and makes sure that enumeration loops
8712 * always see an initialized zone lock.
8713 */
8714 os_atomic_store(&num_zones, nzones, release);
8715 }
8716
8717 out:
8718 num_zones_in_use++;
8719 simple_unlock(&all_zones_lock);
8720
8721 *zid_inout = zid;
8722 return z;
8723 }
8724
8725 __abortlike
8726 static void
8727 zone_create_panic(const char *name, const char *f1, const char *f2)
8728 {
8729 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
8730 name, f1, f2);
8731 }
8732 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
8733 if ((flags) & forbidden_flag) { \
8734 zone_create_panic(name, #current_flag, #forbidden_flag); \
8735 }
8736
8737 /*
8738 * Adjusts the size of the element based on minimum size, alignment
8739 * and kasan redzones
8740 */
8741 static vm_size_t
8742 zone_elem_adjust_size(
8743 const char *name __unused,
8744 vm_size_t elem_size,
8745 zone_create_flags_t flags __unused,
8746 uint16_t *redzone __unused)
8747 {
8748 vm_size_t size;
8749
8750 /*
8751 * Adjust element size for minimum size and pointer alignment
8752 */
8753 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
8754 if (size < ZONE_MIN_ELEM_SIZE) {
8755 size = ZONE_MIN_ELEM_SIZE;
8756 }
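	/*
	 * For instance, if ZONE_ALIGN_SIZE were 8, a 20-byte request would
	 * round up to 24 bytes here, and would then be raised further if
	 * that is still below ZONE_MIN_ELEM_SIZE.
	 */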
8757
8758 #if KASAN_CLASSIC
8759 /*
8760 * Expand the zone allocation size to include the redzones.
8761 *
8762 * For page-multiple zones add a full guard page because they
8763 * likely require alignment.
8764 */
8765 uint16_t redzone_tmp;
8766 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
8767 redzone_tmp = 0;
8768 } else if ((size & PAGE_MASK) == 0) {
8769 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
8770 panic("zone_create: zone %s can't provide more than PAGE_SIZE"
8771 "alignment", name);
8772 }
8773 redzone_tmp = PAGE_SIZE;
8774 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
8775 redzone_tmp = 0;
8776 } else {
8777 redzone_tmp = KASAN_GUARD_SIZE;
8778 }
8779 size += redzone_tmp;
8780 if (redzone) {
8781 *redzone = redzone_tmp;
8782 }
8783 #endif
8784 return size;
8785 }
8786
8787 /*
8788 * Returns the allocation chunk size that has the least fragmentation
8789 */
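/*
 * For illustration (assuming 4K pages): a 1152-byte element wastes
 * 4096 % 1152 = 640 bytes per page (~15%), while an 8K chunk wastes only
 * 8192 % 1152 = 128 bytes (~1%), so the search below settles on an 8K
 * allocation granule for that element size.
 */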
8790 static vm_size_t
8791 zone_get_min_alloc_granule(
8792 vm_size_t elem_size,
8793 zone_create_flags_t flags)
8794 {
8795 vm_size_t alloc_granule = PAGE_SIZE;
8796 if (flags & ZC_PERCPU) {
8797 alloc_granule = PAGE_SIZE * zpercpu_count();
8798 if (PAGE_SIZE % elem_size > 256) {
8799 panic("zone_create: per-cpu zone has too much fragmentation");
8800 }
8801 } else if (flags & ZC_READONLY) {
8802 alloc_granule = PAGE_SIZE;
8803 } else if ((elem_size & PAGE_MASK) == 0) {
8804 /* zero fragmentation by definition */
8805 alloc_granule = elem_size;
8806 } else if (alloc_granule % elem_size == 0) {
8807 /* zero fragmentation by definition */
8808 } else {
8809 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
8810 vm_size_t alloc_tmp = PAGE_SIZE;
8811 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
8812
8813 #if __arm64__
8814 /*
8815 * Increase chunk size to 48K for sizes larger than 4K on 16k
8816 * machines, so as to reduce internal fragmentation for kalloc
8817 * zones with sizes 12K and 24K.
8818 */
8819 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
8820 max_chunk_size = 48 * 1024;
8821 }
8822 #endif
8823 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
8824 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
8825 if (frag_tmp < frag) {
8826 frag = frag_tmp;
8827 alloc_granule = alloc_tmp;
8828 }
8829 }
8830 }
8831 return alloc_granule;
8832 }
8833
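/*
 * Returns the number of bytes of early (startup) allocation needed to hold
 * at least min_elems elements: the element count rounded up to whole chunks
 * of the granule chosen above, times that granule.
 */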
8834 vm_size_t
8835 zone_get_early_alloc_size(
8836 const char *name __unused,
8837 vm_size_t elem_size,
8838 zone_create_flags_t flags,
8839 vm_size_t min_elems)
8840 {
8841 vm_size_t adjusted_size, alloc_granule, chunk_elems;
8842
8843 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
8844 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
8845 chunk_elems = alloc_granule / adjusted_size;
8846
8847 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
8848 }
8849
8850 zone_t
8851 zone_create_ext(
8852 const char *name,
8853 vm_size_t size,
8854 zone_create_flags_t flags,
8855 zone_id_t zid,
8856 void (^extra_setup)(zone_t))
8857 {
8858 zone_security_flags_t *zsflags;
8859 uint16_t redzone;
8860 zone_t z;
8861
8862 if (size > ZONE_MAX_ALLOC_SIZE) {
8863 panic("zone_create: element size too large: %zd", (size_t)size);
8864 }
8865
8866 if (size < 2 * sizeof(vm_size_t)) {
8867 /* Elements are too small for kasan. */
8868 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
8869 }
8870
8871 size = zone_elem_adjust_size(name, size, flags, &redzone);
8872
8873 /*
8874 * Allocate the zone slot, return early if we found an older match.
8875 */
8876 z = zone_create_find(name, size, flags, &zid);
8877 if (__improbable(z->z_self)) {
8878 /* We found a zone to reuse */
8879 return z;
8880 }
8881 zsflags = &zone_security_array[zid];
8882
8883 /*
8884 * Initialize the zone properly.
8885 */
8886
8887 /*
8888 * If the kernel is post lockdown, copy the zone name passed in.
8889 * Else simply maintain a pointer to the name string as it can only
8890 * be a core XNU zone (no unloadable kext exists before lockdown).
8891 */
8892 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
8893 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
8894 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
8895 strlcpy(buf, name, nsz);
8896 z->z_name = buf;
8897 } else {
8898 z->z_name = name;
8899 }
8900 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
8901 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
8902 } else {
8903 /*
8904 * zone_init() hasn't run yet, use the storage provided by
8905 * zone_stats_startup(), and zone_init() will replace it
8906 * with the final value once the PERCPU zone exists.
8907 */
8908 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
8909 }
8910
8911 if (flags & ZC_OBJ_CACHE) {
8912 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
8913 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
8914 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
8915 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
8916
8917 z->z_elem_size = (uint16_t)size;
8918 z->z_chunk_pages = 0;
8919 z->z_quo_magic = 0;
8920 z->z_align_magic = 0;
8921 z->z_chunk_elems = 0;
8922 z->z_elem_offs = 0;
8923 z->no_callout = true;
8924 zsflags->z_lifo = true;
8925 } else {
8926 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
8927
8928 z->z_elem_size = (uint16_t)(size - redzone);
8929 z->z_chunk_pages = (uint16_t)atop(alloc);
8930 z->z_quo_magic = Z_MAGIC_QUO(size);
8931 z->z_align_magic = Z_MAGIC_ALIGNED(size);
8932 if (flags & ZC_PERCPU) {
8933 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
8934 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
8935 } else {
8936 z->z_chunk_elems = (uint16_t)(alloc / size);
8937 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
8938 }
8939 }
8940
8941 /*
8942 * Handle KPI flags
8943 */
8944
8945 /* ZC_CACHING applied after all configuration is done */
8946 if (flags & ZC_NOCACHING) {
8947 z->z_nocaching = true;
8948 }
8949
8950 if (flags & ZC_READONLY) {
8951 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
8952 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
8953 assert(zid <= ZONE_ID__LAST_RO);
8954 #if ZSECURITY_CONFIG(READ_ONLY)
8955 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
8956 #endif
8957 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
8958 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
8959 assert(size <= PAGE_SIZE);
8960 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
8961 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
8962 (uint32_t)size, zone_heap_name(z), z->z_name);
8963 }
8964 }
8965
8966 if (flags & ZC_PERCPU) {
8967 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
8968 z->z_percpu = true;
8969 }
8970 if (flags & ZC_NOGC) {
8971 z->collectable = false;
8972 }
8973 /*
8974 * Handle ZC_NOENCRYPT from xnu only
8975 */
8976 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
8977 zsflags->z_noencrypt = true;
8978 }
8979 if (flags & ZC_NOCALLOUT) {
8980 z->no_callout = true;
8981 }
8982 if (flags & ZC_DESTRUCTIBLE) {
8983 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
8984 z->z_destructible = true;
8985 }
8986 /*
8987 * Handle Internal flags
8988 */
8989 #if ZSECURITY_CONFIG(ZONE_TAGGING)
8990 if (flags & (ZC_NO_TBI_TAG)) {
8991 zsflags->z_tag = false;
8992 }
8993
8994 #if KASAN_TBI
8995 /*
8996 * Maintain for now the old behavior of not tagging DATA. Remove once
8997 * we move to the new DATA-tagging behavior.
8998 */
8999 if (flags & ZC_DATA || flags & ZC_SHARED_DATA) {
9000 zsflags->z_tag = false;
9001 }
9002 #endif /* KASAN_TBI */
9003
9004
9005 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9006
9007 if (flags & ZC_KALLOC_TYPE) {
9008 zsflags->z_kalloc_type = true;
9009 }
9010 if (flags & ZC_VM) {
9011 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9012 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9013 }
9014 if (flags & ZC_DATA) {
9015 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9016 }
9017 if (flags & ZC_SHARED_DATA) {
9018 zsflags->z_kheap_id = KHEAP_ID_DATA_SHARED;
9019 }
9020
9021 #if KASAN_CLASSIC
9022 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9023 z->z_kasan_quarantine = true;
9024 }
9025 z->z_kasan_redzone = redzone;
9026 #endif /* KASAN_CLASSIC */
9027 #if KASAN_FAKESTACK
9028 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9029 z->z_kasan_fakestacks = true;
9030 }
9031 #endif /* KASAN_FAKESTACK */
9032
9033 /*
9034 * Then if there's extra tuning, do it
9035 */
9036 if (extra_setup) {
9037 extra_setup(z);
9038 }
9039
9040 /*
9041 * Configure debugging features
9042 */
9043 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9044 #if ZALLOC_ENABLE_LOGGING
9045 /*
9046 * Check for and set up zone leak detection
9047 * if requested via boot-args.
9048 */
9049 zone_setup_logging(z);
9050 #endif /* ZALLOC_ENABLE_LOGGING */
9051 #if KASAN_TBI
9052 zone_setup_kasan_logging(z);
9053 #endif /* KASAN_TBI */
9054 }
9055
9056 #if VM_TAG_SIZECLASSES
9057 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9058 static uint16_t sizeclass_idx;
9059
9060 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9061 z->z_uses_tags = true;
9062 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9063 /*
9064 * Note that we don't use zone_is_data_kheap() here because we don't
9065 * want to insert the kheap size classes more than once.
9066 */
9067 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9068 z->z_tags_sizeclass = sizeclass_idx++;
9069 } else {
9070 uint16_t i = 0;
9071 for (; i < sizeclass_idx; i++) {
9072 if (size == zone_tags_sizeclasses[i]) {
9073 z->z_tags_sizeclass = i;
9074 break;
9075 }
9076 }
9077
9078 /*
9079 * Size class wasn't found, add it to zone_tags_sizeclasses
9080 */
9081 if (i == sizeclass_idx) {
9082 assert(i < VM_TAG_SIZECLASSES);
9083 zone_tags_sizeclasses[i] = (uint16_t)size;
9084 z->z_tags_sizeclass = sizeclass_idx++;
9085 }
9086 }
9087 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9088 }
9089 #endif
9090
9091 /*
9092 * Finally, fixup properties based on security policies, boot-args, ...
9093 */
9094 if (zone_is_data_kheap(zsflags->z_kheap_id)) {
9095 /*
9096 * We use LIFO in the data map, because workloads like network
9097 * usage or similar tend to rotate through allocations very
9098	 * quickly, with sometimes exploding working sets, and using
9099	 * a FIFO policy might cause massive TLB thrashing with rather
9100 * dramatic performance impacts.
9101 */
9102 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9103 zsflags->z_lifo = true;
9104 }
9105
9106 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9107 /*
9108 * No zone made before zone_init() can have ZC_CACHING set.
9109 */
9110 assert(zc_magazine_zone);
9111 zone_enable_caching(z);
9112 }
9113
9114 zone_lock(z);
9115 z->z_self = z;
9116 zone_unlock(z);
9117
9118 return z;
9119 }
9120
9121 void
9122 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9123 {
9124 zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9125 }
9126
9127 zone_id_t
9128 zone_get_sig_eq(zone_t zone)
9129 {
9130 return zone_security_array[zone_index(zone)].z_sig_eq;
9131 }
9132
9133 __mockable void
9134 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9135 {
9136 /* moving to SMR must be done before the zone has ever been used */
9137 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9138 assert(!zone_security_array[zone_index(zone)].z_lifo);
9139 assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9140
9141 if (!zone->z_pcpu_cache) {
9142 zone_enable_caching(zone);
9143 }
9144
9145 zone_lock(zone);
9146
9147 zpercpu_foreach(it, zone->z_pcpu_cache) {
9148 it->zc_smr = smr;
9149 it->zc_free = free_cb;
9150 }
9151 zone->z_smr = true;
9152
9153 zone_unlock(zone);
9154 }
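/*
 * Illustrative sketch (not part of the original source): a client that
 * wants SMR-deferred reclamation enables it right after creating the
 * zone and before the first allocation, per the asserts above.  The
 * names "my_smr", "my_obj_free_cb" and "my_obj_zone" are hypothetical.
 *
 *	zone_t my_obj_zone = zone_create("my.objects",
 *	    sizeof(struct my_obj), ZC_ZFREE_CLEARMEM);
 *	zone_smr_free_cb_t my_obj_free_cb = ...;
 *	zone_enable_smr(my_obj_zone, &my_smr, my_obj_free_cb);
 */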
9155
9156 __startup_func
9157 void
9158 zone_create_startup(struct zone_create_startup_spec *spec)
9159 {
9160 zone_t z;
9161
9162 z = zone_create_ext(spec->z_name, spec->z_size,
9163 spec->z_flags, spec->z_zid, spec->z_setup);
9164 if (spec->z_var) {
9165 *spec->z_var = z;
9166 }
9167 }
9168
9169 /*
9170	 * The first 4 fields of a zone_view and a zone alias each other, so that
9171	 * the zone_or_view_t union works. Trust, but verify.
9172 */
9173 #define zalloc_check_zov_alias(f1, f2) \
9174 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9175 zalloc_check_zov_alias(z_self, zv_zone);
9176 zalloc_check_zov_alias(z_stats, zv_stats);
9177 zalloc_check_zov_alias(z_name, zv_name);
9178 zalloc_check_zov_alias(z_views, zv_next);
9179 #undef zalloc_check_zov_alias
9180
9181 __startup_func
9182 void
9183 zone_view_startup_init(struct zone_view_startup_spec *spec)
9184 {
9185 struct kalloc_heap *heap = NULL;
9186 zone_view_t zv = spec->zv_view;
9187 zone_t z;
9188 zone_security_flags_t zsflags;
9189
9190 switch (spec->zv_heapid) {
9191 case KHEAP_ID_DATA_BUFFERS:
9192 heap = KHEAP_DATA_BUFFERS;
9193 break;
9194 case KHEAP_ID_DATA_SHARED:
9195 heap = KHEAP_DATA_SHARED;
9196 break;
9197 default:
9198 heap = NULL;
9199 }
9200
9201 if (heap) {
9202 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9203 } else {
9204 z = *spec->zv_zone;
9205 assert(spec->zv_size <= zone_elem_inner_size(z));
9206 }
9207
9208 assert(z);
9209
9210 zv->zv_zone = z;
9211 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9212 zv->zv_next = z->z_views;
9213 zsflags = zone_security_config(z);
9214 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9215 /*
9216 * count the raw view for zones not in a heap,
9217 * kalloc_heap_init() already counts it for its members.
9218 */
9219 zone_view_count += 2;
9220 } else {
9221 zone_view_count += 1;
9222 }
9223 z->z_views = zv;
9224 }
9225
9226 zone_t
9227 zone_create(
9228 const char *name,
9229 vm_size_t size,
9230 zone_create_flags_t flags)
9231 {
9232 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9233 }
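/*
 * Illustrative sketch (not part of the original source): a typical client
 * creates its zone once at startup, then allocates and frees fixed-size
 * elements from it.  "my_widget" / "my_widget_zone" are hypothetical.
 *
 *	static zone_t my_widget_zone;
 *
 *	my_widget_zone = zone_create("my.widgets",
 *	    sizeof(struct my_widget), ZC_ZFREE_CLEARMEM);
 *
 *	struct my_widget *w = zalloc_flags(my_widget_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(my_widget_zone, w);
 */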
9234
9235 vm_size_t
9236 zone_get_elem_size(zone_t zone)
9237 {
9238 return zone->z_elem_size;
9239 }
9240
9241 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9242
9243 zone_id_t
9244 zone_create_ro(
9245 const char *name,
9246 vm_size_t size,
9247 zone_create_flags_t flags,
9248 zone_create_ro_id_t zc_ro_id)
9249 {
9250 assert(zc_ro_id <= ZC_RO_ID__LAST);
9251 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9252 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9253 return reserved_zid;
9254 }
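/*
 * Illustrative sketch (not part of the original source), mirroring the
 * zone_ro_basic_test below: read-only elements are never written
 * directly, only through zalloc_ro_mut() / zalloc_ro_update_elem().
 * "my_ro_id" stands for one of the reserved zone_create_ro_id_t values.
 *
 *	zone_id_t zid = zone_create_ro("my.ro.data", sizeof(uint32_t),
 *	    0, my_ro_id);
 *	uint32_t *p = zalloc_ro(zid, Z_WAITOK);
 *	uint32_t v = 4;
 *	zalloc_ro_update_elem(zid, p, &v);
 *	zfree_ro(zid, p);
 */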
9255
9256 zone_t
9257 zinit(
9258 vm_size_t size, /* the size of an element */
9259 vm_size_t max __unused, /* maximum memory to use */
9260 vm_size_t alloc __unused, /* allocation size */
9261 const char *name) /* a name for the zone */
9262 {
9263 return zone_create(name, size, ZC_DESTRUCTIBLE);
9264 }
9265
9266 void
9267 zdestroy(zone_t z)
9268 {
9269 unsigned int zindex = zone_index(z);
9270 zone_security_flags_t zsflags = zone_security_array[zindex];
9271
9272 current_thread()->options |= TH_OPT_ZONE_PRIV;
9273 lck_mtx_lock(&zone_gc_lock);
9274
9275 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9276
9277 lck_mtx_unlock(&zone_gc_lock);
9278 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9279
9280 zone_lock(z);
9281
9282 if (!zone_submap_is_sequestered(zsflags)) {
9283 while (!zone_pva_is_null(z->z_pageq_va)) {
9284 struct zone_page_metadata *meta;
9285
9286 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9287 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9288 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9289 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9290 zone_unlock(z);
9291 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9292 ptoa(z->z_chunk_pages));
9293 zone_lock(z);
9294 }
9295 }
9296
9297 #if !KASAN_CLASSIC
9298 /* Assert that all counts are zero */
9299 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9300 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9301 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9302 zone_heap_name(z), z->z_name);
9303 }
9304
9305 /* consistency check: make sure everything is indeed empty */
9306 assert(zone_pva_is_null(z->z_pageq_empty));
9307 assert(zone_pva_is_null(z->z_pageq_partial));
9308 assert(zone_pva_is_null(z->z_pageq_full));
9309 if (!zone_submap_is_sequestered(zsflags)) {
9310 assert(zone_pva_is_null(z->z_pageq_va));
9311 }
9312 #endif
9313
9314 zone_unlock(z);
9315
9316 simple_lock(&all_zones_lock, &zone_locks_grp);
9317
9318 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9319 /* Mark the zone as empty in the bitmap */
9320 bitmap_set(zone_destroyed_bitmap, zindex);
9321 num_zones_in_use--;
9322 assert(num_zones_in_use > 0);
9323
9324 simple_unlock(&all_zones_lock);
9325 }
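/*
 * Illustrative sketch (not part of the original source): only zones
 * created with ZC_DESTRUCTIBLE may be torn down, and they must be empty
 * by the time zdestroy() runs (see the panic above).  Names are
 * hypothetical.
 *
 *	zone_t z = zone_create("my.transient", sizeof(struct my_elem),
 *	    ZC_DESTRUCTIBLE);
 *	void *e = zalloc(z);
 *	zfree(z, e);
 *	zdestroy(z);
 */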
9326
9327 #endif /* !ZALLOC_TEST */
9328 #pragma mark zalloc module init
9329 #if !ZALLOC_TEST
9330
9331 /*
9332 * Initialize the "zone of zones" which uses fixed memory allocated
9333 * earlier in memory initialization. zone_bootstrap is called
9334 * before zone_init.
9335 */
9336 __startup_func
9337 void
9338 zone_bootstrap(void)
9339 {
9340 #if DEBUG || DEVELOPMENT
9341 #if __x86_64__
9342 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9343 /*
9344	 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
9345	 * the fp faulting code gets triggered and deadlocks.
9346 */
9347 zone_caching_disabled = 1;
9348 }
9349 #endif /* __x86_64__ */
9350 #endif /* DEBUG || DEVELOPMENT */
9351
9352 /* Validate struct zone_packed_virtual_address expectations */
9353 #ifndef __BUILDING_XNU_LIBRARY__ /* user-mode addresses are low */
9354 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9355 #endif /* __BUILDING_XNU_LIBRARY__ */
9356 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9357 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9358 }
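/*
 * Worked example (illustrative): with, say, 40 significant kernel
 * pointer bits and 16K pages (PAGE_SHIFT == 14), a page number needs
 * 40 - 14 = 26 bits, which fits in the 31 bits a zone_pva_t can pack;
 * a configuration needing more than 31 bits panics above.
 */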
9359
9360 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9361 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9362 /*
9363 * Scale zc_mag_size() per machine.
9364 *
9365 * - wide machines get 128B magazines to avoid all false sharing
9366	 * - smaller machines that still have enough RAM get slightly
9367	 * bigger buckets (empirically affects networking performance)
9368 */
9369 if (zpercpu_early_count >= 10) {
9370 _zc_mag_size = 14;
9371 } else if ((sane_size >> 30) >= 4) {
9372 _zc_mag_size = 10;
9373 }
9374 }
9375
9376 /*
9377 * Initialize random used to scramble early allocations
9378 */
9379 zpercpu_foreach_cpu(cpu) {
9380 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9381 }
9382
9383 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9384 /*
9385 * Randomly assign zones to one of the 4 general submaps,
9386	 * and pick whether they allocate from the beginning
9387 * or the end of it.
9388 *
9389 * A lot of OOB exploitation relies on precise interleaving
9390 * of specific types in the heap.
9391 *
9392	 * Whoops, you can't guarantee that anymore.
9393 */
9394 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9395 uint32_t r = zalloc_random_uniform32(0,
9396 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9397
9398 zone_security_array[i].z_submap_from_end = (r & 1);
9399 zone_security_array[i].z_submap_idx += (r >> 1);
9400 }
9401 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9402
9403
9404 thread_call_setup_with_options(&zone_expand_callout,
9405 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9406 THREAD_CALL_OPTIONS_ONCE);
9407
9408 thread_call_setup_with_options(&zone_trim_callout,
9409 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9410 THREAD_CALL_OPTIONS_ONCE);
9411 }
9412
9413 #define ZONE_GUARD_SIZE (64UL << 10)
9414
9415 __startup_func
9416 static void
9417 zone_tunables_fixup(void)
9418 {
9419 int wdt = 0;
9420
9421 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9422 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9423 }
9424 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9425 !PE_parse_boot_argn("zet", NULL, 0)) {
9426 zone_exhausted_timeout = -1;
9427 }
9428 }
9429 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9430
9431 /** Get the left zone guard size for the submap at IDX */
9432 __pure2
9433 __startup_func
9434 static vm_map_size_t
9435 zone_submap_left_guard_size(zone_submap_idx_t __unused idx)
9436 {
9437 return ZONE_GUARD_SIZE / 2;
9438 }
9439
9440 /** Get the right zone guard size for the submap at IDX */
9441 __pure2
9442 __startup_func
9443 static vm_map_size_t
9444 zone_submap_right_guard_size(zone_submap_idx_t __unused idx)
9445 {
9446 return ZONE_GUARD_SIZE / 2;
9447 }
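/*
 * Illustrative arithmetic: with ZONE_GUARD_SIZE at 64K, each submap is
 * bracketed by a 32K guard at either end, and zone_init() deducts the
 * full 64K per submap from the usable VA before the ratios are applied.
 */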
9448
9449 __startup_func
9450 static void
9451 zone_submap_init(
9452 mach_vm_offset_t *submap_min,
9453 zone_submap_idx_t idx,
9454 uint64_t zone_sub_map_numer,
9455 uint64_t *remaining_denom,
9456 vm_offset_t *remaining_size)
9457 {
9458 vm_map_create_options_t vmco;
9459 vm_map_address_t addr;
9460 vm_offset_t submap_start, submap_end;
9461 vm_size_t submap_actual_size, submap_usable_size;
9462 vm_map_t submap;
9463 vm_map_size_t left_guard_size = 0, right_guard_size = 0;
9464 vm_prot_t prot = VM_PROT_DEFAULT;
9465 vm_prot_t prot_max = VM_PROT_ALL;
9466 kern_return_t kr;
9467
9468 submap_usable_size =
9469 zone_sub_map_numer * *remaining_size / *remaining_denom;
9470 submap_usable_size = trunc_page(submap_usable_size);
9471
9472 submap_start = *submap_min;
9473
9474 left_guard_size = zone_submap_left_guard_size(idx);
9475 right_guard_size = zone_submap_right_guard_size(idx);
9476
9477 /*
9478 * Compute the final submap size.
9479 *
9480 * The usable size does not include the zone guards, so add them now. This
9481 * VA is paid for in zone_init ahead of time.
9482 */
9483
9484 submap_actual_size =
9485 submap_usable_size + left_guard_size + right_guard_size;
9486
9487 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9488 /*
9489 * The RO zone has special alignment requirements, so snap to the
9490 * required boundary and reflow based on the available space.
9491 *
9492 * This operation only increases the amount of VA used by the submap,
9493 * and so the guards will always still fit.
9494 */
9495 vm_offset_t submap_padding = 0;
9496
9497 submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9498 submap_start += submap_padding;
9499
9500 submap_actual_size = pmap_ro_zone_align(submap_actual_size);
9501 submap_usable_size =
9502 submap_actual_size - left_guard_size - right_guard_size;
9503
9504 assert(*remaining_size >= (submap_padding + submap_usable_size));
9505
9506 *remaining_size -= submap_padding;
9507 *submap_min = submap_start;
9508 }
9509
9510 submap_end = submap_start + submap_actual_size;
9511
9512 if (idx == Z_SUBMAP_IDX_VM) {
9513 vm_packing_verify_range("vm_compressor",
9514 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9515 vm_packing_verify_range("vm_page",
9516 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9517
9518 #if MACH_ASSERT
9519 /*
9520 * vm_submap_restriction_size_debug gives the size passed to the kmem
9521 * claim placer to ensure that the packing behaves correctly. If this
9522 * size is smaller than what we actually end up using for the VM submap,
9523 * the packing may be probabilistically invalid. Assert on this
9524 * condition to catch this type of failure deterministically rather than
9525 * relying on the above assertions catching it when we actually hit that
9526 * rare case and the packing is invalid.
9527 */
9528 assert(submap_actual_size <= vm_submap_restriction_size_debug);
9529 #endif /* MACH_ASSERT */
9530 }
9531
9532 vmco = VM_MAP_CREATE_NEVER_FAULTS;
9533 if (!zone_submap_is_sequestered(idx)) {
9534 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9535 }
9536
9537 vm_map_will_allocate_early_map(&zone_submaps[idx]);
9538 submap = kmem_suballoc(kernel_map, submap_min, submap_actual_size, vmco,
9539 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9540 KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT,
9541 VM_KERN_MEMORY_ZONE).kmr_submap;
9542
9543 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9544 zone_info.zi_ro_range.min_address = submap_start;
9545 zone_info.zi_ro_range.max_address = submap_end;
9546 prot_max = prot = VM_PROT_NONE;
9547 }
9548
9549 addr = submap_start;
9550 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(
9551 .vmkf_no_soft_limit = true,
9552 .vm_tag = VM_KERN_MEMORY_ZONE);
9553 vm_object_t kobject = kernel_object_default;
9554
9555 kr = vm_map_enter(submap, &addr, left_guard_size, 0,
9556 vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9557 if (kr != KERN_SUCCESS) {
9558 panic("ksubmap[%s]: failed to make first entry (%d)",
9559 zone_submaps_names[idx], kr);
9560 }
9561
9562 addr = submap_end - right_guard_size;
9563 kr = vm_map_enter(submap, &addr, right_guard_size, 0,
9564 vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9565 if (kr != KERN_SUCCESS) {
9566 panic("ksubmap[%s]: failed to make last entry (%d)",
9567 zone_submaps_names[idx], kr);
9568 }
9569
9570 #if DEBUG || DEVELOPMENT
9571 printf("zone_init: map %-5s %p:%p (%u%c, %u%c usable)\n",
9572 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9573 mach_vm_size_pretty(submap_actual_size),
9574 mach_vm_size_unit(submap_actual_size),
9575 mach_vm_size_pretty(submap_usable_size),
9576 mach_vm_size_unit(submap_usable_size));
9577 #endif /* DEBUG || DEVELOPMENT */
9578
9579 zone_submaps[idx] = submap;
9580 *submap_min = submap_end;
9581 *remaining_size -= submap_usable_size;
9582 *remaining_denom -= zone_sub_map_numer;
9583 }
9584
9585 static inline void
9586 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9587 {
9588 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9589 pva->packed_address += delta;
9590 }
9591 }
9592
9593 /*
9594 * Allocate metadata array and migrate bootstrap initial metadata and memory.
9595 */
9596 __startup_func
9597 static void
9598 zone_metadata_init(void)
9599 {
9600 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9601 vm_map_entry_t first;
9602
9603 vmlp_api_start(ZONE_METADATA_INIT);
9604
9605 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
9606 vm_size_t early_sz;
9607 vm_offset_t reloc_base;
9608
9609 /*
9610 * Step 1: Allocate the metadata + bitmaps range
9611 *
9612 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
9613 * of physical memory (16M per 1G).
9614 *
9615 * Let's preallocate for the worst to avoid weird panics.
9616 */
9617 vm_map_will_allocate_early_map(&zone_meta_map);
9618 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9619 zone_meta_size + zone_bits_size + zone_xtra_size,
9620 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9621 VM_KERN_MEMORY_ZONE, &zone_meta_map);
9622 meta_r.min_address += ZONE_GUARD_SIZE;
9623 meta_r.max_address -= ZONE_GUARD_SIZE;
9624 if (zone_xtra_size) {
9625 xtra_r.max_address = meta_r.max_address;
9626 meta_r.max_address -= zone_xtra_size;
9627 xtra_r.min_address = meta_r.max_address;
9628 } else {
9629 xtra_r.min_address = xtra_r.max_address = 0;
9630 }
9631 bits_r.max_address = meta_r.max_address;
9632 meta_r.max_address -= zone_bits_size;
9633 bits_r.min_address = meta_r.max_address;
9634
9635 #if DEBUG || DEVELOPMENT
9636 printf("zone_init: metadata %p:%p (%u%c)\n",
9637 (void *)meta_r.min_address, (void *)meta_r.max_address,
9638 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
9639 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
9640 printf("zone_init: metabits %p:%p (%u%c)\n",
9641 (void *)bits_r.min_address, (void *)bits_r.max_address,
9642 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
9643 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
9644 printf("zone_init: extra %p:%p (%u%c)\n",
9645 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
9646 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
9647 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
9648 #endif /* DEBUG || DEVELOPMENT */
9649
9650 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9651 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
9652
9653 /*
9654 * Step 2: Install new ranges.
9655 * Relocate metadata and bits.
9656 */
9657 early_r = zone_info.zi_map_range;
9658 early_sz = mach_vm_range_size(&early_r);
9659
9660 zone_info.zi_map_range = zone_map_range;
9661 zone_info.zi_meta_range = meta_r;
9662 zone_info.zi_bits_range = bits_r;
9663 zone_info.zi_xtra_range = xtra_r;
9664 zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
9665 (struct zone_page_metadata *)meta_r.min_address,
9666 -(ptrdiff_t)zone_pva_from_addr(zone_map_range.min_address).packed_address);
9667
9668 vm_map_lock(vm_map);
9669 first = vm_map_first_entry(vm_map);
9670 reloc_base = first->vme_end;
9671 first->vme_end += early_sz;
9672 vm_map->size += early_sz;
9673 vm_map_unlock(vm_map);
9674
9675 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
9676 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
9677 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
9678 /* this needs to sign extend */
9679 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
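/*
 * The arithmetic shift on a signed intptr_t sign-extends, so a downward
 * relocation produces a "negative" pva_delta whose wrap-around uint32_t
 * addition in zone_pva_relocate() still yields the correct packed page
 * number.
 */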
9680
9681 zone_meta_populate(reloc_base, early_sz);
9682 memcpy(new_meta, early_meta,
9683 atop(early_sz) * sizeof(struct zone_page_metadata));
9684 for (uint32_t i = 0; i < atop(early_sz); i++) {
9685 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
9686 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
9687 }
9688
9689 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
9690 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
9691
9692 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
9693 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
9694 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
9695 }
9696
9697 zba_populate(0, false);
9698 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
9699 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
9700
9701 /*
9702	 * Step 3: Relocate the bootstrap VM structs
9703 * (including rewriting their content).
9704 */
9705 kma_flags_t flags = KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL;
9706
9707 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9708 flags |= KMA_TAG;
9709 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9710
9711
9712 kernel_memory_populate(reloc_base, early_sz, flags,
9713 VM_KERN_MEMORY_OSFMK);
9714
9715 vm_memtag_disable_checking();
9716 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
9717 vm_memtag_enable_checking();
9718
9719 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9720 vm_memtag_relocate_tags(reloc_base, early_r.min_address, early_sz);
9721 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9722
9723 #if KASAN
9724 kasan_notify_address(reloc_base, early_sz);
9725 #endif /* KASAN */
9726
9727 vm_map_relocate_early_maps(reloc_delta);
9728
9729 for (uint32_t i = 0; i < atop(early_sz); i++) {
9730 zone_id_t zid = new_meta[i].zm_index;
9731 zone_t z = &zone_array[zid];
9732 vm_size_t esize = zone_elem_outer_size(z);
9733 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
9734 vm_address_t addr;
9735
9736 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
9737 continue;
9738 }
9739
9740 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
9741 if (zone_meta_is_free(&new_meta[i], eidx)) {
9742 continue;
9743 }
9744
9745 addr = vm_memtag_load_tag(base + eidx * esize);
9746 #if KASAN_CLASSIC
9747 kasan_alloc(addr,
9748 zone_elem_inner_size(z), zone_elem_inner_size(z),
9749 zone_elem_redzone(z), false,
9750 __builtin_frame_address(0));
9751 #endif
9752 vm_map_relocate_early_elem(zid, addr, reloc_delta);
9753 }
9754 }
9755
9756 vmlp_api_end(ZONE_METADATA_INIT, 0);
9757 }
9758
9759
9760 __startup_data
9761 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
9762 #if ZSECURITY_CONFIG(READ_ONLY)
9763 [Z_SUBMAP_IDX_VM] = 15,
9764 [Z_SUBMAP_IDX_READ_ONLY] = 5,
9765 #else
9766 [Z_SUBMAP_IDX_VM] = 20,
9767 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
9768 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9769 [Z_SUBMAP_IDX_GENERAL_0] = 15,
9770 [Z_SUBMAP_IDX_GENERAL_1] = 15,
9771 [Z_SUBMAP_IDX_GENERAL_2] = 15,
9772 [Z_SUBMAP_IDX_GENERAL_3] = 15,
9773 [Z_SUBMAP_IDX_DATA] = 20,
9774 #else
9775 [Z_SUBMAP_IDX_GENERAL_0] = 60,
9776 [Z_SUBMAP_IDX_DATA] = 20,
9777 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9778 };
9779
9780 __startup_func
9781 static inline uint16_t
9782 zone_submap_ratios_denom(void)
9783 {
9784 uint16_t denom = 0;
9785
9786 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9787 denom += submap_ratios[idx];
9788 }
9789
9790 assert(denom == 100);
9791
9792 return denom;
9793 }
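/*
 * Illustrative arithmetic: the ratios above always sum to 100, so for
 * instance the data submap receives 20/100 of the zone map VA (minus
 * the per-submap guards deducted in zone_init()), whichever ZSECURITY
 * configuration is selected.
 */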
9794
9795 __startup_func
9796 static inline vm_offset_t
9797 zone_restricted_va_max(void)
9798 {
9799 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
9800 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
9801
9802 return trunc_page(MIN(compressor_max, vm_page_max));
9803 }
9804
9805 __startup_func
9806 static void
9807 zone_set_map_sizes(void)
9808 {
9809 vm_size_t zsize;
9810 vm_size_t zsizearg;
9811
9812 /*
9813 * Compute the physical limits for the zone map
9814 */
9815
9816 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
9817 zsize = zsizearg * (1024ULL * 1024);
9818 } else {
9819 /* Set target zone size as 1/4 of physical memory */
9820 zsize = (vm_size_t)(sane_size >> 2);
9821 zsize += zsize >> 1;
9822 }
9823
9824 if (zsize < CONFIG_ZONE_MAP_MIN) {
9825 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
9826 }
9827 if (zsize > sane_size >> 1) {
9828 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
9829 }
9830 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
9831 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
9832 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
9833 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
9834 zsize = ZONE_MAP_MAX;
9835 }
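/*
 * Worked example (illustrative): with 8G of physical memory and no
 * "zsize" boot-arg, the target is 8G/4 + (8G/4)/2 = 3G, which is under
 * the half-of-RAM cap (4G) and then only subject to the
 * CONFIG_ZONE_MAP_MIN / ZONE_MAP_MAX clamps above.
 */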
9836
9837 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
9838
9839
9840 /*
9841 * Declare restrictions on zone max
9842 */
9843 vm_offset_t vm_submap_size = round_page(
9844 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
9845 zone_submap_ratios_denom()) +
9846 zone_submap_left_guard_size(Z_SUBMAP_IDX_VM) +
9847 zone_submap_right_guard_size(Z_SUBMAP_IDX_VM);
9848
9849 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
9850 &zone_map_range.min_address)) {
9851 zone_map_range.min_address = 0;
9852 }
9853
9854 #if MACH_ASSERT
9855 vm_submap_restriction_size_debug = vm_submap_size;
9856 #endif /* MACH_ASSERT */
9857
9858 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
9859 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
9860
9861 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
9862 ZBA_PTR_MASK + 1);
9863 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
9864 (CHAR_BIT * KALLOC_MINSIZE));
9865
9866 #if VM_TAG_SIZECLASSES
9867 if (zone_tagging_on) {
9868 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
9869 }
9870 if (zba_xtra_shift) {
9871 /*
9872 * if we need the extra space range, then limit the size of the
9873 * bitmaps to something reasonable instead of a theoretical
9874 * worst case scenario of all zones being for the smallest
9875 * allocation granule, in order to avoid fake VA pressure on
9876 * other parts of the system.
9877 */
9878 zone_bits_size = round_page(zone_bits_size / 8);
9879 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
9880 }
9881 #endif /* VM_TAG_SIZECLASSES */
9882 }
9883 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
9884
9885 /*
9886 * Can't use zone_info.zi_map_range at this point as it is being used to
9887 * store the range of early pmap memory that was stolen to bootstrap the
9888 * necessary VM zones.
9889 */
9890 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
9891 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
9892 return zone_meta_size + zone_bits_size + zone_xtra_size;
9893 });
9894
9895 /*
9896 * Global initialization of Zone Allocator.
9897 * Runs after zone_bootstrap.
9898 */
9899 __startup_func
9900 static void
9901 zone_init(void)
9902 {
9903 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
9904 mach_vm_offset_t submap_min = 0;
9905 uint64_t denom = zone_submap_ratios_denom();
9906 /*
9907 * And now allocate the various pieces of VA and submaps.
9908 */
9909
9910 submap_min = zone_map_range.min_address;
9911
9912 #ifndef __BUILDING_XNU_LIB_UNITTEST__ /* zone submap is not maintained in unit-test */
9913 /*
9914 * Allocate the submaps
9915 */
9916
9917 /*
9918 * In order to prevent us from throwing off the ratios, deduct VA for the
9919 * zone guards ahead of time.
9920 */
9921 for (uint32_t i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
9922 remaining_size -= zone_submap_left_guard_size(i);
9923 remaining_size -= zone_submap_right_guard_size(i);
9924 }
9925
9926 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9927 if (submap_ratios[idx] == 0) {
9928 zone_submaps[idx] = VM_MAP_NULL;
9929 } else {
9930 zone_submap_init(&submap_min, idx, submap_ratios[idx],
9931 &denom, &remaining_size);
9932 }
9933 }
9934
9935 zone_metadata_init();
9936 #else
9937 #pragma unused(denom, remaining_size)
9938 #endif
9939
9940 #if VM_TAG_SIZECLASSES
9941 if (zone_tagging_on) {
9942 vm_allocation_zones_init();
9943 }
9944 #endif /* VM_TAG_SIZECLASSES */
9945
9946 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
9947 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
9948
9949 (void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NO_TBI_TAG,
9950 ZONE_ID_PERMANENT, ^(zone_t z) {
9951 z->z_permanent = true;
9952 z->z_elem_size = 1;
9953 });
9954 (void)zone_create_ext("vm.permanent.percpu", 1,
9955 kma_flags | ZC_PERCPU | ZC_NO_TBI_TAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
9956 z->z_permanent = true;
9957 z->z_elem_size = 1;
9958 });
9959
9960 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
9961 zc_mag_size() * sizeof(vm_offset_t),
9962 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM);
9963 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
9964
9965 /*
9966 * Now migrate the startup statistics into their final storage,
9967 * and enable logging for early zones (that zone_create_ext() skipped).
9968 */
9969 int cpu = cpu_number();
9970 zone_index_foreach(idx) {
9971 zone_t tz = &zone_array[idx];
9972
9973 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
9974 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
9975
9976 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
9977 tz->z_stats = zs;
9978 }
9979 if (tz->z_self == tz) {
9980 #if ZALLOC_ENABLE_LOGGING
9981 zone_setup_logging(tz);
9982 #endif /* ZALLOC_ENABLE_LOGGING */
9983 #if KASAN_TBI
9984 zone_setup_kasan_logging(tz);
9985 #endif /* KASAN_TBI */
9986 }
9987 }
9988 }
9989 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
9990
9991 void
9992 zalloc_iokit_lockdown(void)
9993 {
9994 zone_share_always = false;
9995 }
9996
9997 void
9998 zalloc_first_proc_made(void)
9999 {
10000 zone_caching_disabled = 0;
10001 zone_early_thres_mul = 1;
10002 }
10003
10004 __startup_func
10005 vm_offset_t
10006 zone_early_mem_init(vm_size_t size)
10007 {
10008 vm_offset_t mem;
10009
10010 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10011
10012 /*
10013 * The zone that is used early to bring up the VM is stolen here.
10014 *
10015 * When the zone subsystem is actually initialized,
10016 * zone_metadata_init() will be called, and those pages
10017 * and the elements they contain, will be relocated into
10018	 * the VM submap (even for architectures where those zones
10019 * do not live there).
10020 */
10021 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10022 mem = (vm_offset_t)zone_early_pages_to_cram;
10023
10024
10025 zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
10026 (struct zone_page_metadata *)zone_early_meta_array_startup,
10027 -(ptrdiff_t)zone_pva_from_addr(mem).packed_address);
10028 zone_info.zi_map_range.min_address = mem;
10029 zone_info.zi_map_range.max_address = mem + size;
10030
10031 zone_info.zi_bits_range = (struct mach_vm_range){
10032 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10033 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10034 sizeof(zba_chunk_startup),
10035 };
10036
10037 zba_meta()->zbam_left = 1;
10038 zba_meta()->zbam_right = 1;
10039 zba_init_chunk(0, false);
10040
10041 return mem;
10042 }
10043
10044 #endif /* !ZALLOC_TEST */
10045 #pragma mark - tests
10046 #if DEBUG || DEVELOPMENT
10047
10048 /*
10049 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10050 * thread goes through at a time.
10051 *
10052 * Or we can end up with multiple test zones (if a second zinit() comes through
10053 * before zdestroy()), which could lead us to run out of zones.
10054 */
10055 static bool any_zone_test_running = FALSE;
10056
10057 static uintptr_t *
10058 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10059 {
10060 vm_offset_t elem_size = zone_elem_outer_size(z);
10061 vm_offset_t base;
10062 struct zone_page_metadata *meta;
10063
10064 while (!zone_pva_is_null(page_index)) {
10065 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10066 meta = zone_pva_to_meta(page_index);
10067
10068 if (meta->zm_inline_bitmap) {
10069 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10070 uint32_t map = meta[i].zm_bitmap;
10071
10072 for (; map; map &= map - 1) {
10073 *elems++ = INSTANCE_PUT(base +
10074 elem_size * __builtin_clz(map));
10075 }
10076 base += elem_size * 32;
10077 }
10078 } else {
10079 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10080 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10081 for (size_t i = 0; i < (1u << order); i++) {
10082 uint64_t map = bits[i];
10083
10084 for (; map; map &= map - 1) {
10085 *elems++ = INSTANCE_PUT(base +
10086 elem_size * __builtin_clzll(map));
10087 }
10088 base += elem_size * 64;
10089 }
10090 }
10091
10092 page_index = meta->zm_page_next;
10093 }
10094 return elems;
10095 }
10096
10097 kern_return_t
10098 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10099 {
10100 zone_t zone = NULL;
10101 uintptr_t * array;
10102 uintptr_t * next;
10103 uintptr_t element;
10104 uint32_t idx, count, found;
10105 uint32_t nobtcount;
10106 uint32_t elemSize;
10107 size_t maxElems;
10108
10109 zone_foreach(z) {
10110 if (!z->z_name) {
10111 continue;
10112 }
10113 if (!strncmp(zoneName, z->z_name, nameLen)) {
10114 zone = z;
10115 break;
10116 }
10117 }
10118 if (zone == NULL) {
10119 return KERN_INVALID_NAME;
10120 }
10121
10122 elemSize = (uint32_t)zone_elem_inner_size(zone);
10123 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10124
10125 array = kalloc_type_tag(vm_offset_t, maxElems, Z_WAITOK, VM_KERN_MEMORY_DIAG);
10126 if (array == NULL) {
10127 return KERN_RESOURCE_SHORTAGE;
10128 }
10129
10130 zone_lock(zone);
10131
10132 next = array;
10133 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10134 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10135 count = (uint32_t)(next - array);
10136
10137 zone_unlock(zone);
10138
10139 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10140 assert(found <= count);
10141
10142 for (idx = 0; idx < count; idx++) {
10143 element = array[idx];
10144 if (kInstanceFlagReferenced & element) {
10145 continue;
10146 }
10147 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10148 }
10149
10150 #if ZALLOC_ENABLE_LOGGING
10151 if (zone->z_btlog && !corruption_debug_flag) {
10152 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10153 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10154 btlog_copy_backtraces_for_elements(zone->z_btlog,
10155 (vm_address_t *)array, &count, elemSize, proc);
10156 }
10157 #endif /* ZALLOC_ENABLE_LOGGING */
10158
10159 for (nobtcount = idx = 0; idx < count; idx++) {
10160 element = array[idx];
10161 if (!element) {
10162 continue;
10163 }
10164 if (kInstanceFlagReferenced & element) {
10165 continue;
10166 }
10167 nobtcount++;
10168 }
10169 if (nobtcount) {
10170 proc(nobtcount, elemSize, BTREF_NULL);
10171 }
10172
10173 kfree_type(vm_offset_t, maxElems, array);
10174 return KERN_SUCCESS;
10175 }
10176
10177 static int
10178 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10179 {
10180 zone_security_flags_t zsflags;
10181 uint32_t x = 4;
10182 uint32_t *test_ptr;
10183
10184 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10185 printf("zone_ro_basic_test: Test already running.\n");
10186 return EALREADY;
10187 }
10188
10189 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10190
10191 for (int i = 0; i < 3; i++) {
10192 #if ZSECURITY_CONFIG(READ_ONLY)
10193 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10194 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10195 printf("zone_ro_basic_test: create a sub-page size zone\n");
10196
10197 printf("zone_ro_basic_test: verify flags were set\n");
10198 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10199
10200 printf("zone_ro_basic_test: zalloc an element\n");
10201 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10202 assert(test_ptr);
10203
10204 printf("zone_ro_basic_test: verify we can't write to it\n");
10205 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10206
10207 x = 4;
10208 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10209 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10210 assert(test_ptr);
10211 assert(*(uint32_t*)test_ptr == x);
10212
10213 x = 5;
10214 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10215 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10216 assert(test_ptr);
10217 assert(*(uint32_t*)test_ptr == x);
10218
10219 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10220 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10221
10222 printf("zone_ro_basic_test: free elem\n");
10223 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10224 assert(!test_ptr);
10225 #else
10226 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10227
10228 printf("zone_ro_basic_test: verify flags were set\n");
10229 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10230
10231 printf("zone_ro_basic_test: zalloc an element\n");
10232 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10233 assert(test_ptr);
10234
10235 x = 4;
10236 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10237 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10238 assert(test_ptr);
10239 assert(*(uint32_t*)test_ptr == x);
10240
10241 x = 5;
10242 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10243 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10244 assert(test_ptr);
10245 assert(*(uint32_t*)test_ptr == x);
10246
10247 printf("zone_ro_basic_test: free elem\n");
10248 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10249 assert(!test_ptr);
10250 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10251 }
10252
10253 printf("zone_ro_basic_test: garbage collection\n");
10254 zone_gc(ZONE_GC_DRAIN);
10255
10256 printf("zone_ro_basic_test: Test passed\n");
10257
10258 *out = 1;
10259 os_atomic_store(&any_zone_test_running, false, relaxed);
10260 return 0;
10261 }
10262 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10263
10264 static int
10265 zone_basic_test_run(__unused int64_t in, int64_t *out)
10266 {
10267 static zone_t test_zone_ptr = NULL;
10268
10269 unsigned int i = 0, max_iter = 5;
10270 void * test_ptr;
10271 zone_t test_zone;
10272 int rc = 0;
10273
10274 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10275 printf("zone_basic_test: Test already running.\n");
10276 return EALREADY;
10277 }
10278
10279 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10280
10281 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10282 do {
10283 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10284 assert(test_zone);
10285
10286 #if KASAN_CLASSIC
10287 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10288 #else
10289 if (test_zone->z_elems_free != 0)
10290 #endif
10291 {
10292 printf("zone_basic_test: free count is not zero\n");
10293 rc = EIO;
10294 goto out;
10295 }
10296
10297 if (test_zone_ptr == NULL) {
10298 /* Stash the zone pointer returned on the first zinit */
10299 printf("zone_basic_test: zone created for the first time\n");
10300 test_zone_ptr = test_zone;
10301 } else if (test_zone != test_zone_ptr) {
10302 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10303 rc = EIO;
10304 goto out;
10305 }
10306
10307 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10308 zfree(test_zone, test_ptr);
10309
10310 zdestroy(test_zone);
10311 i++;
10312
10313 printf("zone_basic_test: Iteration %d successful\n", i);
10314 } while (i < max_iter);
10315
10316 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10317 /* test Z_VA_SEQUESTER */
10318 {
10319 zone_t test_pcpu_zone;
10320 kern_return_t kr;
10321 const int num_allocs = 8;
10322 int idx;
10323 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10324 void *allocs[num_allocs];
10325 void **allocs_pcpu;
10326 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10327
10328 test_zone = zone_create("test_zone_sysctl", elem_size,
10329 ZC_DESTRUCTIBLE);
10330 assert(test_zone);
10331
10332 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10333 ZC_DESTRUCTIBLE | ZC_PERCPU);
10334 assert(test_pcpu_zone);
10335
10336 for (idx = 0; idx < num_allocs; idx++) {
10337 allocs[idx] = zalloc(test_zone);
10338 assert(NULL != allocs[idx]);
10339 printf("alloc[%d] %p\n", idx, allocs[idx]);
10340 }
10341 for (idx = 0; idx < num_allocs; idx++) {
10342 zfree(test_zone, allocs[idx]);
10343 }
10344 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10345
10346 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10347 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10348 assert(kr == KERN_SUCCESS);
10349
10350 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10351 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10352 Z_WAITOK | Z_ZERO);
10353 assert(NULL != allocs_pcpu[idx]);
10354 }
10355 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10356 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10357 }
10358 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10359
10360 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10361 vm_page_wire_count, vm_page_free_count,
10362 100L * phys_pages / zone_pages_wired_max);
10363 zone_gc(ZONE_GC_DRAIN);
10364 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10365 vm_page_wire_count, vm_page_free_count,
10366 100L * phys_pages / zone_pages_wired_max);
10367
10368 unsigned int allva = 0;
10369
10370 zone_foreach(z) {
10371 zone_lock(z);
10372 allva += z->z_wired_cur;
10373 if (zone_pva_is_null(z->z_pageq_va)) {
10374 zone_unlock(z);
10375 continue;
10376 }
10377 unsigned count = 0;
10378 uint64_t size;
10379 zone_pva_t pg = z->z_pageq_va;
10380 struct zone_page_metadata *page_meta;
10381 while (pg.packed_address) {
10382 page_meta = zone_pva_to_meta(pg);
10383 count += z->z_percpu ? 1 : z->z_chunk_pages;
10384 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10385 count -= page_meta->zm_page_index;
10386 }
10387 pg = page_meta->zm_page_next;
10388 }
10389 size = zone_size_wired(z);
10390 if (!size) {
10391 size = 1;
10392 }
10393 printf("%s%s: seq %d, res %d, %qd %%\n",
10394 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10395 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10396 zone_unlock(z);
10397 }
10398
10399 printf("total va: %d\n", allva);
10400
10401 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10402 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10403 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10404 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10405 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10406 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10407
10408 for (idx = 0; idx < num_allocs; idx++) {
10409 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10410 }
10411
10412 /* make sure the zone is still usable after a GC */
10413
10414 for (idx = 0; idx < num_allocs; idx++) {
10415 allocs[idx] = zalloc(test_zone);
10416 assert(allocs[idx]);
10417 printf("alloc[%d] %p\n", idx, allocs[idx]);
10418 }
10419 for (idx = 0; idx < num_allocs; idx++) {
10420 zfree(test_zone, allocs[idx]);
10421 }
10422
10423 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10424 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10425 Z_WAITOK | Z_ZERO);
10426 assert(NULL != allocs_pcpu[idx]);
10427 }
10428 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10429 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10430 }
10431
10432 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10433
10434 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10435
10436 zdestroy(test_zone);
10437 zdestroy(test_pcpu_zone);
10438 }
10439 #endif /* KASAN_CLASSIC */
10440
10441 printf("zone_basic_test: Test passed\n");
10442
10443
10444 *out = 1;
10445 out:
10446 os_atomic_store(&any_zone_test_running, false, relaxed);
10447 return rc;
10448 }
10449 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10450
10451 #define N_ALLOCATIONS 100
10452
10453 static int
10454 run_kalloc_guard_insertion_test(int64_t in __unused, int64_t *out)
10455 {
10456 size_t alloc_size = 24576;
10457 uint64_t *ptrs[N_ALLOCATIONS];
10458 uint32_t n_guard_regions = 0;
10459 zalloc_flags_t flags = Z_WAITOK | Z_FULLSIZE;
10460 int retval = 1;
10461
10462 *out = 0;
10463
10464 for (uint i = 0; i < N_ALLOCATIONS; ++i) {
10465 uint64_t *data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size,
10466 flags, &data_ptr).addr;
10467 if (!data_ptr) {
10468 printf("%s: kalloc_ext %zu with owner and Z_FULLSIZE returned null\n",
10469 __func__, alloc_size);
10470 goto cleanup;
10471 }
10472 ptrs[i] = data_ptr;
10473 }
10474
10475 /* We don't know where there are guard regions, but let's try to find one. */
10476 for (uint i = 0; i < N_ALLOCATIONS; i++) {
10477 vm_address_t addr;
10478 zone_t z;
10479 struct zone_page_metadata *meta;
10480 struct zone_page_metadata *gmeta;
10481 uint32_t chunk_pages;
10482
10483 addr = (vm_address_t)ptrs[i];
10484 meta = zone_meta_from_addr(addr);
10485 z = &zone_array[meta->zm_index];
10486 chunk_pages = z->z_chunk_pages;
10487
10488 if (meta->zm_guarded) {
10489 n_guard_regions++;
10490 if (meta->zm_chunk_len == chunk_pages) {
10491 gmeta = meta + chunk_pages;
10492 } else if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10493 gmeta = meta + meta->zm_subchunk_len;
10494 } else if (meta->zm_chunk_len == ZM_PGZ_GUARD) {
10495 printf("%s: kalloc_ext gave us address 0x%lx for a guard region.\n",
10496 __func__, addr);
10497 goto cleanup;
10498 } else if ((meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) && !z->z_percpu) {
10499 printf("%s: zone [%s%s] is not per-CPU.\n",
10500 __func__, zone_heap_name(z), zone_name(z));
10501 goto cleanup;
10502 } else {
10503 printf("%s: zm_chunk_len value not recognized for 0x%lx.\n",
10504 __func__, addr);
10505 goto cleanup;
10506 }
10507
10508 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
10509 /* Now check that we have chunk_len of guard pages. */
10510 for (uint j = 0; j < chunk_pages; j++) {
10511 if (gmeta->zm_chunk_len != ZM_PGZ_GUARD) {
10512 printf("%s: page %u / %u is not a guard page.\n",
10513 __func__, j + 1, chunk_pages);
10514 goto cleanup;
10515 }
10516 gmeta++;
10517 }
10518
10519 /* The metadata following the guard region should not be a guard page. */
10520 if (gmeta->zm_chunk_len == ZM_PGZ_GUARD) {
10521 printf("%s: zone page following guard region is a guard page.\n",
10522 __func__);
10523 goto cleanup;
10524 }
10525 }
10526 }
10527
10528 printf("%s: there were %u guard regions in %d allocations.\n",
10529 __func__, n_guard_regions, N_ALLOCATIONS);
10530
10531 *out = 1;
10532 retval = 0;
10533
10534 cleanup:
10535 for (uint i = 0; i < N_ALLOCATIONS; ++i) {
10536 kfree_ext(KHEAP_DATA_BUFFERS, ptrs[i], alloc_size);
10537 }
10538
10539 return retval;
10540 }
10541 SYSCTL_TEST_REGISTER(kalloc_guard_regions, run_kalloc_guard_insertion_test);
10542
10543
10544 struct zone_stress_obj {
10545 TAILQ_ENTRY(zone_stress_obj) zso_link;
10546 };
10547
10548 struct zone_stress_ctx {
10549 thread_t zsc_leader;
10550 lck_mtx_t zsc_lock;
10551 zone_t zsc_zone;
10552 uint64_t zsc_end;
10553 uint32_t zsc_workers;
10554 };
10555
10556 static void
10557 zone_stress_worker(void *arg, wait_result_t __unused wr)
10558 {
10559 struct zone_stress_ctx *ctx = arg;
10560 bool leader = ctx->zsc_leader == current_thread();
10561 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10562 struct zone_bool_gen bg = { };
10563 struct zone_stress_obj *obj;
10564 uint32_t allocs = 0;
10565
10566 random_bool_init(&bg.zbg_bg);
10567
10568 do {
10569 for (int i = 0; i < 2000; i++) {
10570 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10571 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10572 switch (what) {
10573 case 0:
10574 case 1:
10575 if (allocs < 10000) {
10576 obj = zalloc(ctx->zsc_zone);
10577 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10578 allocs++;
10579 }
10580 break;
10581 case 2:
10582 case 3:
10583 if (allocs < 10000) {
10584 obj = zalloc(ctx->zsc_zone);
10585 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10586 allocs++;
10587 }
10588 break;
10589 case 4:
10590 if (leader) {
10591 zone_gc(ZONE_GC_DRAIN);
10592 }
10593 break;
10594 case 5:
10595 case 6:
10596 if (!TAILQ_EMPTY(&head)) {
10597 obj = TAILQ_FIRST(&head);
10598 TAILQ_REMOVE(&head, obj, zso_link);
10599 zfree(ctx->zsc_zone, obj);
10600 allocs--;
10601 }
10602 break;
10603 case 7:
10604 if (!TAILQ_EMPTY(&head)) {
10605 obj = TAILQ_LAST(&head, zone_stress_head);
10606 TAILQ_REMOVE(&head, obj, zso_link);
10607 zfree(ctx->zsc_zone, obj);
10608 allocs--;
10609 }
10610 break;
10611 }
10612 }
10613 } while (mach_absolute_time() < ctx->zsc_end);
10614
10615 while (!TAILQ_EMPTY(&head)) {
10616 obj = TAILQ_FIRST(&head);
10617 TAILQ_REMOVE(&head, obj, zso_link);
10618 zfree(ctx->zsc_zone, obj);
10619 }
10620
10621 lck_mtx_lock(&ctx->zsc_lock);
10622 if (--ctx->zsc_workers == 0) {
10623 thread_wakeup(ctx);
10624 } else if (leader) {
10625 while (ctx->zsc_workers) {
10626 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10627 THREAD_UNINT);
10628 }
10629 }
10630 lck_mtx_unlock(&ctx->zsc_lock);
10631
10632 if (!leader) {
10633 thread_terminate_self();
10634 __builtin_unreachable();
10635 }
10636 }
10637
10638 static int
10639 zone_stress_test_run(__unused int64_t in, int64_t *out)
10640 {
10641 struct zone_stress_ctx ctx = {
10642 .zsc_leader = current_thread(),
10643 .zsc_workers = 3,
10644 };
10645 kern_return_t kr;
10646 thread_t th;
10647
10648 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10649 printf("zone_stress_test: Test already running.\n");
10650 return EALREADY;
10651 }
10652
10653 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10654 ctx.zsc_zone = zone_create("test_zone_344", 344,
10655 ZC_DESTRUCTIBLE | ZC_NOCACHING);
10656 assert(ctx.zsc_zone->z_chunk_pages > 1);
10657
10658 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10659
10660 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10661
10662 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10663
10664 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10665 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10666 BASEPRI_DEFAULT, &th);
10667 if (kr == KERN_SUCCESS) {
10668 printf("zone_stress_test: thread %d: %p\n", i, th);
10669 thread_deallocate(th);
10670 } else {
10671 ctx.zsc_workers--;
10672 }
10673 }
10674
10675 zone_stress_worker(&ctx, 0);
10676
10677 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10678
10679 zdestroy(ctx.zsc_zone);
10680
10681 printf("zone_stress_test: Done\n");
10682
10683 *out = 1;
10684 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10685 os_atomic_store(&any_zone_test_running, false, relaxed);
10686 return 0;
10687 }
10688 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10689
10690 struct zone_gc_stress_obj {
10691 STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
10692 uintptr_t zgso_pad[63];
10693 };
10694 STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);
10695
10696 #define ZONE_GC_OBJ_PER_PAGE (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))
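/*
 * Illustrative arithmetic: on LP64, struct zone_gc_stress_obj is 64
 * pointer-sized words (the STAILQ link plus 63 words of padding), i.e.
 * 512 bytes, so ZONE_GC_OBJ_PER_PAGE is 8 with 4K pages and 32 with
 * 16K pages.
 */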
10697
10698 KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);
10699
10700 struct zone_gc_stress_ctx {
10701 bool zgsc_done;
10702 lck_mtx_t zgsc_lock;
10703 zone_t zgsc_zone;
10704 uint64_t zgsc_end;
10705 uint32_t zgsc_workers;
10706 };
10707
10708 static void
10709 zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
10710 {
10711 struct zone_gc_stress_obj *obj;
10712
10713 for (size_t i = 0; i < n; i++) {
10714 obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
10715 STAILQ_INSERT_TAIL(head, obj, zgso_link);
10716 }
10717 }
10718
10719 static void
10720 zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
10721 {
10722 struct zone_gc_stress_obj *obj;
10723
10724 while ((obj = STAILQ_FIRST(head))) {
10725 STAILQ_REMOVE_HEAD(head, zgso_link);
10726 zfree(zone_gc_stress_zone, obj);
10727 }
10728 }
10729
10730 __dead2
10731 static void
10732 zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
10733 {
10734 struct zone_gc_stress_ctx *ctx = arg;
10735 struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
10736
10737 while (!ctx->zgsc_done) {
10738 zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
10739 zone_gc_stress_test_free_n(&head);
10740 }
10741
10742 lck_mtx_lock(&ctx->zgsc_lock);
10743 if (--ctx->zgsc_workers == 0) {
10744 thread_wakeup(ctx);
10745 }
10746 lck_mtx_unlock(&ctx->zgsc_lock);
10747
10748 thread_terminate_self();
10749 __builtin_unreachable();
10750 }
10751
10752 static int
10753 zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
10754 {
10755 struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
10756 struct zone_gc_stress_ctx ctx = {
10757 .zgsc_workers = 3,
10758 };
10759 kern_return_t kr;
10760 thread_t th;
10761
10762 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10763 printf("zone_gc_stress_test: Test already running.\n");
10764 return EALREADY;
10765 }
10766
10767 lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10768 lck_mtx_lock(&ctx.zgsc_lock);
10769
10770 printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());
10771
10772 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10773
10774 for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
10775 kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
10776 BASEPRI_DEFAULT, &th);
10777 if (kr == KERN_SUCCESS) {
10778 printf("zone_gc_stress_test: thread %d: %p\n", i, th);
10779 thread_deallocate(th);
10780 } else {
10781 ctx.zgsc_workers--;
10782 }
10783 }
10784
10785 for (uint64_t i = 0; i < in; i++) {
10786 size_t count = zc_mag_size() * zc_free_batch_size() * 20;
10787
10788 if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
10789 count = ZONE_GC_OBJ_PER_PAGE * 20;
10790 }
10791
10792 zone_gc_stress_test_alloc_n(&head, count);
10793 zone_gc_stress_test_free_n(&head);
10794
10795 lck_mtx_lock(&zone_gc_lock);
10796 zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
10797 ZONE_RECLAIM_TRIM);
10798 lck_mtx_unlock(&zone_gc_lock);
10799
10800 printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
10801 }
10802
10803 os_atomic_thread_fence(seq_cst);
10804 ctx.zgsc_done = true;
10805 lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
10806 lck_mtx_unlock(&ctx.zgsc_lock);
10807
10808 lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);
10809
10810 lck_mtx_lock(&zone_gc_lock);
10811 zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
10812 ZONE_RECLAIM_DRAIN);
10813 lck_mtx_unlock(&zone_gc_lock);
10814
10815 printf("zone_gc_stress_test: Done\n");
10816
10817 *out = 1;
10818 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10819 os_atomic_store(&any_zone_test_running, false, relaxed);
10820 return 0;
10821 }
10822 SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);
10823
10824 /*
10825 * Routines to test that zone garbage collection and zone replenish threads
10826 * running at the same time don't cause problems.
10827 */
10828
10829 static int
10830 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
10831 {
10832 zone_gc(ZONE_GC_DRAIN);
10833 *out = 1;
10834 return 0;
10835 }
10836 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
10837
10838 static int
10839 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
10840 {
10841 zone_t z = vm_map_entry_zone;
10842 struct data { struct data *next; } *node, *list = NULL;
10843
10844 if (z == NULL) {
10845 printf("Couldn't find a replenish zone\n");
10846 return EIO;
10847 }
10848
10849 /* big enough to go past replenishment */
10850 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
10851 node = zalloc(z);
10852 node->next = list;
10853 list = node;
10854 }
10855
10856 /*
10857 * release the memory we allocated
10858 */
10859 while (list != NULL) {
10860 node = list;
10861 list = list->next;
10862 zfree(z, node);
10863 }
10864
10865 *out = 1;
10866 return 0;
10867 }
10868 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
10869
10870
10871 #endif /* DEBUG || DEVELOPMENT */
10872