1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/host_statistics.h>
93 #include <kern/misc_protos.h>
94 #include <kern/thread_call.h>
95 #include <kern/zalloc_internal.h>
96 #include <kern/kalloc.h>
97 #include <kern/debug.h>
98 #include <kern/smr.h>
99
100 #include <prng/random.h>
101
102 #include <vm/pmap.h>
103 #include <vm/vm_map_internal.h>
104 #include <vm/vm_memtag.h>
105 #include <vm/vm_kern_internal.h>
106 #include <vm/vm_kern_xnu.h>
107 #include <vm/vm_page_internal.h>
108 #include <vm/vm_pageout_internal.h>
109 #include <vm/vm_compressor_xnu.h> /* C_SLOT_PACKED_PTR* */
110 #include <vm/vm_far.h>
111
112 #include <pexpert/pexpert.h>
113
114 #include <machine/machparam.h>
115 #include <machine/machine_routines.h> /* ml_cpu_get_info */
116
117 #include <os/atomic.h>
118 #include <os/log.h>
119
120 #include <libkern/OSDebug.h>
121 #include <libkern/OSAtomic.h>
122 #include <libkern/section_keywords.h>
123 #include <sys/kdebug.h>
124 #include <sys/kern_memorystatus_xnu.h>
125 #include <sys/code_signing.h>
126
127 #include <san/kasan.h>
128 #include <libsa/stdlib.h>
129 #include <sys/errno.h>
130 #include <sys/code_signing.h>
131
132 #include <IOKit/IOBSD.h>
133 #include <arm64/amcc_rorgn.h>
134
135 #if DEBUG
136 #define z_debug_assert(expr) assert(expr)
137 #else
138 #define z_debug_assert(expr) (void)(expr)
139 #endif
140
141 /* Returns pid of the task with the largest number of VM map entries. */
142 extern pid_t find_largest_process_vm_map_entries(void);
143
144 extern zone_t vm_object_zone;
145 extern zone_t ipc_service_port_label_zone;
146
147 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
148 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
149
150 #if ZSECURITY_CONFIG(ZONE_TAGGING)
151 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
152 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
153 #else /* ZSECURITY_CONFIG_ZONE_TAGGING */
154 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
155 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
156 #endif /* ZSECURITY_CONFIG_ZONE_TAGGING */
157
158 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
159 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
160 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
161 #define ZONE_MAX_CHUNK_ALLOC_NUM (10)
162 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
163
164 #if XNU_PLATFORM_MacOSX
165 #define ZONE_MAP_MAX (32ULL << 30)
166 #define ZONE_MAP_VA_SIZE (128ULL << 30)
167 #else
168 #define ZONE_MAP_MAX (8ULL << 30)
169 #define ZONE_MAP_VA_SIZE (24ULL << 30)
170 #endif
171
172 __enum_closed_decl(zm_len_t, uint16_t, {
173 ZM_CHUNK_FREE = 0x0,
174 /* 1 through 8 are valid lengths */
175 ZM_CHUNK_LEN_MAX = 0x8,
176
177 /* PGZ magical values */
178 ZM_PGZ_GUARD = 0xb, /* oo[b] */
179
180 /* secondary page markers */
181 ZM_SECONDARY_PAGE = 0xe,
182 ZM_SECONDARY_PCPU_PAGE = 0xf,
183 });
184
185 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
186
187 struct zone_page_metadata {
188 union {
189 struct {
190 /* The index of the zone this metadata page belongs to */
191 zone_id_t zm_index : 10;
192
193 /*
194 * This chunk ends with a guard page.
195 */
196 uint16_t zm_guarded : 1;
197
198 /*
199 * Whether `zm_bitmap` is an inline bitmap
200 * or a packed bitmap reference
201 */
202 uint16_t zm_inline_bitmap : 1;
203
204 /*
205 * Zones allocate in "chunks" of zone_t::z_chunk_pages
206 * consecutive pages, or zpercpu_count() pages if the
207 * zone is percpu.
208 *
209 * The first page of it has its metadata set with:
210 * - 0 if none of the pages are currently wired
211 * - the number of wired pages in the chunk
212 * (not scaled for percpu).
213 *
214 * Other pages in the chunk have their zm_chunk_len set
215 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
216 * depending on whether the zone is percpu or not.
217 * For those, zm_page_index holds the index of that page
218 * in the run, and zm_subchunk_len the remaining length
219 * within the chunk.
220 */
221 zm_len_t zm_chunk_len : 4;
222 };
223 uint16_t zm_bits;
224 };
225
226 union {
227 #define ZM_ALLOC_SIZE_LOCK 1u
228 uint16_t zm_alloc_size; /* first page only */
229 struct {
230 uint8_t zm_page_index; /* secondary pages only */
231 uint8_t zm_subchunk_len; /* secondary pages only */
232 };
233 uint16_t zm_oob_offs; /* in guard pages */
234 };
235 union {
236 uint32_t zm_bitmap; /* most zones */
237 uint32_t zm_bump; /* permanent zones */
238 };
239
240 union {
241 struct {
242 zone_pva_t zm_page_next;
243 zone_pva_t zm_page_prev;
244 };
245 };
246 };
247 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
248
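/*
 * Illustrative sketch (not part of the build): walking the chunk layout
 * described above.  Given the metadata for an arbitrary page, the head of
 * the chunk is recovered by stepping back zm_page_index entries, the same
 * way zone_invalid_element_panic() and zone_element_resolve() do further
 * down in this file; zm_alloc_size and zm_bitmap are only meaningful on
 * that head page.
 *
 *	struct zone_page_metadata *m = zone_meta_from_addr(addr);
 *
 *	if (m->zm_chunk_len == ZM_SECONDARY_PAGE) {
 *		m -= m->zm_page_index;		// back to the chunk head
 *	}
 */
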
249 /*!
250 * @typedef zone_magazine_t
251 *
252 * @brief
253 * Magazine of cached allocations.
254 *
255 * @field zm_next linkage used by magazine depots.
256 * @field zm_elems an array of @c zc_mag_size() elements.
257 */
258 struct zone_magazine {
259 zone_magazine_t zm_next;
260 smr_seq_t zm_seq;
261 vm_offset_t zm_elems[0];
262 };
263
264 /*!
265 * @typedef zone_cache_t
266 *
267 * @brief
268  * Per-CPU cache of zone allocations.
269 *
270 * @discussion
271 * Below is a diagram of the caching system. This design is inspired by the
272 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
273 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
274 * zone allocator (itself derived from this seminal work).
275 *
276 * It is divided into 3 layers:
277 * - the per-cpu layer,
278 * - the recirculation depot layer,
279 * - the Zone Allocator.
280 *
281 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
282 * which are stacks of up to @c zc_mag_size() elements.
283 *
284 * <h2>CPU layer</h2>
285 *
286 * The CPU layer (@c zone_cache_t) looks like this:
287 *
288 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
289 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
290 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
291 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
292 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
293 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
294 * ╰─────────┴─────────────────────────────╯
295 *
296 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
297 * or free to. Serialization is achieved through disabling preemption, and only
298 * the current CPU can acces those allocations. This is represented on the left
299 * hand side of the diagram above.
300 *
301 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
302 * full magazines, and is protected by the @c zm_depot_lock for access.
303  * The lock is expected to be almost never contended, as only the local CPU
304 * tends to access the local per-cpu depot in regular operation mode.
305 *
306 * However unlike UMA, our implementation allows for the zone GC to reclaim
307  * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
308 *
309 *
310 * <h2>Recirculation Depot</h2>
311 *
312 * The recirculation depot layer is a list similar to the per-cpu depot,
313 * however it is different in two fundamental ways:
314 *
315 * - it is protected by the regular zone lock,
316 * - elements referenced by the magazines in that layer appear free
317 * to the zone layer.
318 *
319 *
320 * <h2>Magazine circulation and sizing</h2>
321 *
322 * The caching system sizes itself dynamically. Operations that allocate/free
323 * a single element call @c zone_lock_nopreempt_check_contention() which records
324 * contention on the lock by doing a trylock and recording its success.
325 *
326 * This information is stored in the @c z_recirc_cont_cur field of the zone,
327 * and a windowed moving average is maintained in @c z_contention_wma.
328 * The periodically run function @c compute_zone_working_set_size() will then
329 * take this into account to decide to grow the number of buckets allowed
330 * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
331 * thresholds.
332 *
333 * The per-cpu layer will attempt to work with its depot, finding both full and
334 * empty magazines cached there. If it can't get what it needs, then it will
335 * mediate with the zone recirculation layer. Such recirculation is done in
336 * batches in order to amortize lock holds.
337 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
338 *
339  * The recirculation layer keeps track of the minimum number of magazines
340  * it has held over time for each of the full and empty queues. This allows
341  * @c compute_zone_working_set_size() to return memory to the system when a zone
342 * stops being used as much.
343 *
344 * <h2>Security considerations</h2>
345 *
346 * The zone caching layer has been designed to avoid returning elements in
347 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
348 * and @c zfree() free to the (f) magazine, and only swap them when the
349 * requested operation cannot be fulfilled.
350 *
351 * The per-cpu overflow depot or the recirculation depots are similarly used
352 * in FIFO order.
353 *
354 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
355 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
356 * @field zc_free_cur denormalized number of elements in the (f) magazine
357 * @field zc_alloc_elems a pointer to the array of elements in (a)
358 * @field zc_free_elems a pointer to the array of elements in (f)
359 *
360 * @field zc_depot a list of @c zc_depot_cur full magazines
361 */
362 typedef struct zone_cache {
363 hw_lck_ticket_t zc_depot_lock;
364 uint16_t zc_alloc_cur;
365 uint16_t zc_free_cur;
366 vm_offset_t *zc_alloc_elems;
367 vm_offset_t *zc_free_elems;
368 struct zone_depot zc_depot;
369 smr_t zc_smr;
370 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
371 } __attribute__((aligned(64))) * zone_cache_t;
372
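/*
 * Hedged sketch (simplified, not the actual fast path) of the magazine swap
 * described in the "Security considerations" section above: zalloc() pops
 * from the (a) magazine and zfree() pushes onto the (f) magazine; the two
 * are only exchanged when the requested operation cannot be satisfied,
 * which avoids strict LIFO reuse of just-freed elements.
 *
 *	if (cache->zc_alloc_cur == 0 && cache->zc_free_cur != 0) {
 *		// (a) is empty but (f) has elements: swap the two magazines
 *		vm_offset_t *elems = cache->zc_alloc_elems;
 *		uint16_t     cur   = cache->zc_alloc_cur;
 *
 *		cache->zc_alloc_elems = cache->zc_free_elems;
 *		cache->zc_alloc_cur   = cache->zc_free_cur;
 *		cache->zc_free_elems  = elems;
 *		cache->zc_free_cur    = cur;
 *	}
 */
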
373 #if !__x86_64__
374 static
375 #endif
376 __security_const_late struct {
377 struct mach_vm_range zi_map_range; /* all zone submaps */
378 struct mach_vm_range zi_ro_range; /* read-only range */
379 struct mach_vm_range zi_meta_range; /* debugging only */
380 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
381 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
382
383 /*
384 * The metadata lives within the zi_meta_range address range.
385 *
386 * The correct formula to find a metadata index is:
387 * absolute_page_index - page_index(zi_map_range.min_address)
388 *
389 * And then this index is used to dereference zi_meta_range.min_address
390 * as a `struct zone_page_metadata` array.
391 *
392  * To avoid doing that subtraction in the various fast-paths,
393  * zi_meta_base is pre-offset by that minimum page index so that
394  * the math doesn't have to be redone on every lookup.
395 */
396 struct zone_page_metadata *zi_meta_base;
397 } zone_info;
398
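/*
 * Worked example of the formula above (illustrative only):
 *
 *	idx  = atop(addr) - atop(zone_info.zi_map_range.min_address);
 *	meta = &((struct zone_page_metadata *)
 *	    zone_info.zi_meta_range.min_address)[idx];
 *
 * Because zi_meta_base is pre-offset by atop(zi_map_range.min_address),
 * this collapses (conceptually) to the zone_meta_from_addr() fast path
 * defined below:
 *
 *	meta = &zone_info.zi_meta_base[atop(addr)];
 */
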
399 __startup_data static struct mach_vm_range zone_map_range;
400 __startup_data static vm_map_size_t zone_meta_size;
401 __startup_data static vm_map_size_t zone_bits_size;
402 __startup_data static vm_map_size_t zone_xtra_size;
403 #if HAS_MTE
404 __startup_data struct mach_vm_range zone_early_range;
405 #endif
406 #if MACH_ASSERT
407 __startup_data static vm_map_size_t vm_submap_restriction_size_debug;
408 #endif /* MACH_ASSERT */
409
410 /*
411 * Initial array of metadata for stolen memory.
412 *
413 * The numbers here have to be kept in sync with vm_map_steal_memory()
414 * so that we have reserved enough metadata.
415 *
416 * After zone_init() has run (which happens while the kernel is still single
417 * threaded), the metadata is moved to its final dynamic location, and
418 * this array is unmapped with the rest of __startup_data at lockdown.
419 */
420 #define ZONE_EARLY_META_INLINE_COUNT 64
421 __startup_data
422 static struct zone_page_metadata
423 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
424
425
426 __startup_data __attribute__((aligned(PAGE_MAX_SIZE)))
427 static uint8_t zone_early_pages_to_cram[PAGE_MAX_SIZE * 16];
428
429 /*
430 * The zone_locks_grp allows for collecting lock statistics.
431  * All locks are associated with this group in zinit.
432 * Look at tools/lockstat for debugging lock contention.
433 */
434 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
435 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
436
437 /*
438 * The zone metadata lock protects:
439 * - metadata faulting,
440 * - VM submap VA allocations,
441 * - early gap page queue list
442 */
443 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
444 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
445
446 /*
447 * Exclude more than one concurrent garbage collection
448 */
449 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
450 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
451 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
452
453 /*
454 * Panic logging metadata
455 */
456 bool panic_include_zprint = false;
457 bool panic_include_kalloc_types = false;
458 zone_t kalloc_type_src_zone = ZONE_NULL;
459 zone_t kalloc_type_dst_zone = ZONE_NULL;
460 mach_memory_info_t *panic_kext_memory_info = NULL;
461 vm_size_t panic_kext_memory_size = 0;
462 vm_offset_t panic_fault_address = 0;
463
464 /*
465 * Protects zone_array, num_zones, num_zones_in_use, and
466 * zone_destroyed_bitmap
467 */
468 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
469 static zone_id_t num_zones_in_use;
470 zone_id_t _Atomic num_zones;
471 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
472
473 /*
474 * Initial globals for zone stats until we can allocate the real ones.
475 * Those get migrated inside the per-CPU ones during zone_init() and
476 * this array is unmapped with the rest of __startup_data at lockdown.
477 */
478
479 /* zone to allocate zone_magazine structs from */
480 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
481 /*
482  * Zone caching is off until pid 1 has been created and
483  * compute_zone_working_set_size() has run for the first time.
484 *
485 * -1 represents the "never enabled yet" value.
486 */
487 static int8_t zone_caching_disabled = -1;
488
489 __startup_data
490 static struct zone_stats zone_stats_startup[MAX_ZONES];
491 struct zone zone_array[MAX_ZONES];
492 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
493 [0 ... MAX_ZONES - 1] = {
494 .z_kheap_id = KHEAP_ID_NONE,
495 .z_noencrypt = false,
496 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
497 .z_kalloc_type = false,
498 .z_sig_eq = 0,
499 #if ZSECURITY_CONFIG(ZONE_TAGGING)
500 .z_tag = 1,
501 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
502 .z_tag = 0,
503 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
504 },
505 };
506 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
507 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
508
509 #if DEBUG || DEVELOPMENT
510 unsigned int
511 zone_max_zones(void)
512 {
513 return MAX_ZONES;
514 }
515 #endif
516
517 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
518 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
519
520 /* Used to keep track of destroyed slots in the zone_array */
521 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
522
523 /* number of zone mapped pages used by all zones */
524 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
525 size_t zone_pages_wired;
526 size_t zone_guard_pages;
527
528 /* Time (in ms) after which we panic for zone exhaustions */
529 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
530 static bool zone_share_always = true;
531 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
532
533 #if VM_TAG_SIZECLASSES
534 /*
535 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
536 * zones only.
537 *
538 * There are 3 kinds of tags that can be used:
539 * - pre-registered VM_KERN_MEMORY_*
540 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
541 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
542 *
543 * The VM tracks the statistics in lazily allocated structures.
544 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
545 *
546 * If for some reason the requested tag cannot be accounted for,
547 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
548 *
549 * Each allocated element also remembers the tag it was assigned,
550 * which lets zalloc/zfree update statistics correctly.
551 */
552
553 /* enable tags for zones that ask for it */
554 static TUNABLE(bool, zone_tagging_on, "-zt", false);
555
556 /*
557 * Array of all sizeclasses used by kalloc variants so that we can
558 * have accounting per size class for each kalloc callsite
559 */
560 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
561 #endif /* VM_TAG_SIZECLASSES */
562
563 #if DEBUG || DEVELOPMENT
564 static int zalloc_simulate_vm_pressure;
565 #endif /* DEBUG || DEVELOPMENT */
566
567 #define Z_TUNABLE(t, n, d) \
568 TUNABLE(t, _##n, #n, d); \
569 __pure2 static inline t n(void) { return _##n; }
570
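/*
 * For reference, a use such as Z_TUNABLE(uint16_t, zc_mag_size, 8) below
 * expands (roughly) to:
 *
 *	TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *	__pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 *
 * i.e. a boot-arg backed variable plus a constant-foldable accessor.
 */
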
571 /*
572 * Zone caching tunables
573 *
574 * zc_mag_size():
575 * size of magazines, larger to reduce contention at the expense of memory
576 *
577 * zc_enable_level
578 * number of contentions per second after which zone caching engages
579 * automatically.
580 *
581 * 0 to disable.
582 *
583 * zc_grow_level
584 * number of contentions per second x cpu after which the number of magazines
585 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
586 *
587 * zc_shrink_level
588 * number of contentions per second x cpu below which the number of magazines
589 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
590 *
591 * zc_pcpu_max
592 * maximum memory size in bytes that can hang from a CPU,
593 * which will affect how many magazines are allowed in the depot.
594 *
595 * The alloc/free magazines are assumed to be on average half-empty
596 * and to count for "1" unit of magazines.
597 *
598 * zc_autotrim_size
599 * Size allowed to hang extra from the recirculation depot before
600 * auto-trim kicks in.
601 *
602 * zc_autotrim_buckets
603 *
604 * How many buckets in excess of the working-set are allowed
605 * before auto-trim kicks in for empty buckets.
606 *
607 * zc_free_batch_size
608 * The size of batches of frees/reclaim that can be done before we
609 * check if we have kept the zone lock held (and preemption disabled)
610 * for too long.
611 *
612 * zc_free_batch_timeout
613 * The number of mach ticks that may elapse before we will drop and
614  *	reacquire the zone lock.
615 */
616 Z_TUNABLE(uint16_t, zc_mag_size, 8);
617 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
618 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
619 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
620 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
621 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
622 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
623 static Z_TUNABLE(uint32_t, zc_free_batch_size, 64);
624 static Z_TUNABLE(uint64_t, zc_free_batch_timeout, 9600); // 400us
625
626 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
627 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
628 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
629 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
630 [Z_SUBMAP_IDX_VM] = "VM",
631 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
632 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
633 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
634 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
635 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
636 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
637 #else
638 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
639 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
640 [Z_SUBMAP_IDX_DATA] = "DATA",
641 };
642
643 #if __x86_64__
644 #define ZONE_ENTROPY_CNT 8
645 #else
646 #define ZONE_ENTROPY_CNT 2
647 #endif
648 static struct zone_bool_gen {
649 struct bool_gen zbg_bg;
650 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
651 } zone_bool_gen[MAX_CPUS];
652
653 static zone_t zone_find_largest(uint64_t *zone_size);
654
655 #endif /* !ZALLOC_TEST */
656 #pragma mark Zone metadata
657 #if !ZALLOC_TEST
658
659 static inline bool
660 zone_has_index(zone_t z, zone_id_t zid)
661 {
662 return zone_array + zid == z;
663 }
664
665 __abortlike
666 void
667 zone_invalid_panic(zone_t zone)
668 {
669 panic("zone %p isn't in the zone_array", zone);
670 }
671
672 __abortlike
673 static void
674 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
675 const char *kind)
676 {
677 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
678 kind, meta, zone_heap_name(zone), zone->z_name);
679 }
680
681 __abortlike
682 static void
683 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
684 {
685 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
686 (void *)addr, zone_heap_name(zone), zone->z_name);
687 }
688
689 __abortlike
690 static void
691 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
692 struct zone_page_metadata *meta)
693 {
694 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
695 zone_id_t zidx;
696 zone_t src_zone;
697
698 if (zsflags.z_kalloc_type) {
699 panic_include_kalloc_types = true;
700 kalloc_type_dst_zone = zone;
701 }
702
703 zidx = meta->zm_index;
704 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
705 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
706 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
707 zidx);
708 }
709
710 src_zone = &zone_array[zidx];
711 src_zsflags = zone_security_array[zidx];
712 if (src_zsflags.z_kalloc_type) {
713 panic_include_kalloc_types = true;
714 kalloc_type_src_zone = src_zone;
715 }
716
717 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
718 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
719 zone_heap_name(src_zone), src_zone->z_name, zidx);
720 }
721
722 __abortlike
723 static void
724 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
725 {
726 panic("metadata list corruption through element %p detected in zone %s%s",
727 meta, zone_heap_name(zone), zone->z_name);
728 }
729
730 __abortlike
731 static void
732 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
733 const char *kind)
734 {
735 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
736 zone_heap_name(zone), zone->z_name, meta);
737 }
738
739 __abortlike
740 static void
741 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
742 {
743 panic("%s: double free of %p to zone %s%s", caller,
744 (void *)addr, zone_heap_name(zone), zone->z_name);
745 }
746
747 __abortlike
748 static void
749 zone_accounting_panic(zone_t zone, const char *kind)
750 {
751 panic("accounting mismatch (%s) for zone %s%s", kind,
752 zone_heap_name(zone), zone->z_name);
753 }
754
755 #define zone_counter_sub(z, stat, value) ({ \
756 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
757 zone_accounting_panic(z, #stat " wrap-around"); \
758 } \
759 (z)->stat; \
760 })
761
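/*
 * Typical (illustrative) use of zone_counter_sub(): decrement a zone-wide
 * counter and panic on underflow rather than silently wrapping around, e.g.
 *
 *	zone_counter_sub(z, z_wired_empty, len);
 *
 * as done by zone_meta_lock_in_partial() further down.
 */
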
762 static inline uint16_t
763 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
764 vm_offset_t esize)
765 {
766 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
767 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
768 }
769 return m->zm_alloc_size;
770 }
771
772 static inline uint16_t
773 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
774 vm_offset_t esize)
775 {
776 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
777 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
778 }
779 return m->zm_alloc_size;
780 }
781
782 __abortlike
783 static void
784 zone_nofail_panic(zone_t zone)
785 {
786 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
787 zone_heap_name(zone), zone->z_name);
788 }
789
790 __header_always_inline bool
791 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
792 {
793 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
794 struct mach_vm_range r = { addr_start, addr_end };
795
796 return mach_vm_range_intersects(ro_r, &r);
797 }
798
799 #define from_range(r, addr, size) \
800 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
801 mach_vm_range_contains(r, vm_memtag_canonicalize_kernel((mach_vm_offset_t)(addr))), \
802 mach_vm_range_contains(r, vm_memtag_canonicalize_kernel((mach_vm_offset_t)(addr)), size))
803
804 #define from_ro_map(addr, size) \
805 from_range(&zone_info.zi_ro_range, addr, size)
806
807 #define from_zone_map(addr, size) \
808 from_range(&zone_info.zi_map_range, addr, size)
809
810 __header_always_inline bool
811 zone_pva_is_null(zone_pva_t page)
812 {
813 return page.packed_address == 0;
814 }
815
816 __header_always_inline bool
817 zone_pva_is_queue(zone_pva_t page)
818 {
819 // actual kernel pages have the top bit set
820 return (int32_t)page.packed_address > 0;
821 }
822
823 __header_always_inline bool
824 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
825 {
826 return pva1.packed_address == pva2.packed_address;
827 }
828
829 __header_always_inline zone_pva_t *
830 zone_pageq_base(void)
831 {
832 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
833
834 /*
835 * `-1` so that if the first __DATA variable is a page queue,
836 	 * it gets a non-zero index
837 */
838 return data_seg_start - 1;
839 }
840
841 __header_always_inline void
842 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
843 struct zone_page_metadata *meta)
844 {
845 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
846
847 if (!zone_pva_is_equal(*queue_head, oldv)) {
848 zone_page_metadata_list_corruption(z, meta);
849 }
850 *queue_head = meta->zm_page_next;
851 }
852
853 __header_always_inline zone_pva_t
854 zone_queue_encode(zone_pva_t *headp)
855 {
856 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
857 }
858
859 __header_always_inline zone_pva_t
860 zone_pva_from_addr(vm_address_t addr)
861 {
862 // cannot use atop() because we want to maintain the sign bit
863 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
864 }
865
866 __header_always_inline vm_address_t
867 zone_pva_to_addr(zone_pva_t page)
868 {
869 // cause sign extension so that we end up with the right address
870 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
871 }
872
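/*
 * Illustrative round-trip for the packing above: kernel page addresses have
 * their top bit set, so they pack to "negative" 32-bit values (for which
 * zone_pva_is_queue() returns false), while queue heads, which are small
 * offsets from the __DATA segment base, pack to small positive integers.
 *
 *	zone_pva_t   pva  = zone_pva_from_addr(addr);	// arithmetic shift
 *							// keeps the sign bit
 *	vm_address_t back = zone_pva_to_addr(pva);	// sign extension restores
 *							// the high bits, so
 *							// back == addr for a
 *							// page-aligned addr
 */
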
873 __header_always_inline struct zone_page_metadata *
874 zone_pva_to_meta(zone_pva_t page)
875 {
876 return VM_FAR_ADD_PTR_UNBOUNDED(
877 zone_info.zi_meta_base, page.packed_address);
878 }
879
880 __header_always_inline zone_pva_t
881 zone_pva_from_meta(struct zone_page_metadata *meta)
882 {
883 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
884 }
885
886 __header_always_inline struct zone_page_metadata *
887 zone_meta_from_addr(vm_offset_t addr)
888 {
889 return zone_pva_to_meta(zone_pva_from_addr(addr));
890 }
891
892 __header_always_inline zone_id_t
893 zone_index_from_ptr(const void *ptr)
894 {
895 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
896 }
897
898 __header_always_inline vm_offset_t
899 zone_meta_to_addr(struct zone_page_metadata *meta)
900 {
901 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
902 }
903
904 __attribute__((overloadable))
905 __header_always_inline void
906 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
907 {
908 if (!zone_has_index(z, meta->zm_index)) {
909 zone_page_metadata_index_confusion_panic(z, addr, meta);
910 }
911 }
912
913 __attribute__((overloadable))
914 __header_always_inline void
915 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
916 {
917 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
918 }
919
920 __header_always_inline void
921 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
922 struct zone_page_metadata *meta)
923 {
924 zone_pva_t head = *headp;
925 zone_pva_t queue_pva = zone_queue_encode(headp);
926 struct zone_page_metadata *tmp;
927
928 meta->zm_page_next = head;
929 if (!zone_pva_is_null(head)) {
930 tmp = zone_pva_to_meta(head);
931 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
932 zone_page_metadata_list_corruption(z, meta);
933 }
934 tmp->zm_page_prev = zone_pva_from_meta(meta);
935 }
936 meta->zm_page_prev = queue_pva;
937 *headp = zone_pva_from_meta(meta);
938 }
939
940 __header_always_inline struct zone_page_metadata *
941 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
942 {
943 zone_pva_t head = *headp;
944 struct zone_page_metadata *meta = zone_pva_to_meta(head);
945 struct zone_page_metadata *tmp;
946
947 zone_meta_validate(z, meta);
948
949 if (!zone_pva_is_null(meta->zm_page_next)) {
950 tmp = zone_pva_to_meta(meta->zm_page_next);
951 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
952 zone_page_metadata_list_corruption(z, meta);
953 }
954 tmp->zm_page_prev = meta->zm_page_prev;
955 }
956 *headp = meta->zm_page_next;
957
958 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
959
960 return meta;
961 }
962
963 __header_always_inline void
964 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
965 {
966 zone_pva_t meta_pva = zone_pva_from_meta(meta);
967 struct zone_page_metadata *tmp;
968
969 if (!zone_pva_is_null(meta->zm_page_next)) {
970 tmp = zone_pva_to_meta(meta->zm_page_next);
971 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
972 zone_page_metadata_list_corruption(z, meta);
973 }
974 tmp->zm_page_prev = meta->zm_page_prev;
975 }
976 if (zone_pva_is_queue(meta->zm_page_prev)) {
977 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
978 } else {
979 tmp = zone_pva_to_meta(meta->zm_page_prev);
980 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
981 zone_page_metadata_list_corruption(z, meta);
982 }
983 tmp->zm_page_next = meta->zm_page_next;
984 }
985
986 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
987 }
988
989 __header_always_inline void
990 zone_meta_requeue(zone_t z, zone_pva_t *headp,
991 struct zone_page_metadata *meta)
992 {
993 zone_meta_remqueue(z, meta);
994 zone_meta_queue_push(z, headp, meta);
995 }
996
997 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
998 static inline void
999 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1000 {
1001 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1002
1003 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1004 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1005 zone_meta_requeue(z, &z->z_pageq_partial, m);
1006 zone_counter_sub(z, z_wired_empty, len);
1007 }
1008 }
1009
1010 /* allows a given metadata to reach the z_pageq_empty queue again */
1011 static inline void
1012 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1013 {
1014 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1015
1016 assert(new_size % sizeof(vm_offset_t) == 0);
1017 if (new_size == 0) {
1018 zone_meta_requeue(z, &z->z_pageq_empty, m);
1019 z->z_wired_empty += len;
1020 }
1021 }
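
/*
 * Note on the trick above (informal): zm_alloc_size only ever changes in
 * multiples of sizeof(vm_offset_t), so bit 0 is never used by the real
 * accounting.  ZM_ALLOC_SIZE_LOCK (1) borrows that bit as a "pin": while it
 * is set, the page can never look fully free and hence never migrates to
 * z_pageq_empty.  The asserts above check exactly that invariant:
 *
 *	new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK	// pinned
 *	new_size % sizeof(vm_offset_t) == 0			// unpinned
 */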
1022
1023 /*
1024 * Routine to populate a page backing metadata in the zone_metadata_region.
1025 * Must be called without the zone lock held as it might potentially block.
1026 */
1027 static void
1028 zone_meta_populate(vm_offset_t base, vm_size_t size)
1029 {
1030 struct zone_page_metadata *from = zone_meta_from_addr(base);
1031 struct zone_page_metadata *to = from + atop(size);
1032 vm_offset_t page_addr = trunc_page(from);
1033
1034 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1035 #if !KASAN
1036 /*
1037 * This can race with another thread doing a populate on the same metadata
1038 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1039 * fault in the shadow when we first access the metadata page. Avoid this
1040 * by always synchronizing on the zone_metadata_region lock with KASan.
1041 */
1042 if (pmap_find_phys(kernel_pmap, page_addr)) {
1043 continue;
1044 }
1045 #endif
1046
1047 for (;;) {
1048 kern_return_t ret = KERN_SUCCESS;
1049
1050 /*
1051 * All updates to the zone_metadata_region are done
1052 * under the zone_metadata_region_lck
1053 */
1054 zone_meta_lock();
1055 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1056 ret = kernel_memory_populate(page_addr,
1057 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1058 VM_KERN_MEMORY_OSFMK);
1059 }
1060 zone_meta_unlock();
1061
1062 if (ret == KERN_SUCCESS) {
1063 break;
1064 }
1065
1066 /*
1067 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1068 * to bad system deadlocks, so if the allocation failed,
1069 * we need to do the VM_PAGE_WAIT() outside of the lock.
1070 */
1071 VM_PAGE_WAIT();
1072 }
1073 }
1074 }
1075
1076 __abortlike
1077 static void
1078 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1079 {
1080 struct zone_page_metadata *meta;
1081 const char *from_cache = "";
1082 vm_offset_t page;
1083
1084 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1085 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1086 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1087 }
1088 page = trunc_page(addr);
1089 meta = zone_meta_from_addr(addr);
1090
1091 if (!zone_has_index(zone, meta->zm_index)) {
1092 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1093 }
1094
1095 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1096 panic("metadata %p corresponding to addr %p being freed to "
1097 "zone %s%s%s, is marked as secondary per cpu page",
1098 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1099 from_cache);
1100 }
1101 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1102 page -= ptoa(meta->zm_page_index);
1103 meta -= meta->zm_page_index;
1104 }
1105
1106 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1107 panic("metadata %p corresponding to addr %p being freed to "
1108 "zone %s%s%s, has chunk len greater than max",
1109 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1110 from_cache);
1111 }
1112
1113 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1114 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1115 "zone element size", (void *)addr, zone_heap_name(zone),
1116 zone->z_name, from_cache);
1117 }
1118
1119 zone_invalid_element_addr_panic(zone, addr);
1120 }
1121
1122 __attribute__((always_inline))
1123 static struct zone_page_metadata *
1124 zone_element_resolve(
1125 zone_t zone,
1126 vm_offset_t addr,
1127 vm_offset_t *idx)
1128 {
1129 struct zone_page_metadata *meta;
1130 vm_offset_t offs, eidx;
1131
1132 meta = zone_meta_from_addr(addr);
1133 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1134 zone_invalid_element_panic(zone, addr);
1135 }
1136
1137 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1138 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1139 offs += ptoa(meta->zm_page_index);
1140 meta -= meta->zm_page_index;
1141 }
1142
1143 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1144 if (eidx * zone_elem_outer_size(zone) != offs) {
1145 zone_invalid_element_panic(zone, addr);
1146 }
1147
1148 *idx = eidx;
1149 return meta;
1150 }
1151
1152 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1153 void *
1154 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1155 {
1156 vm_offset_t addr = (vm_offset_t)ptr;
1157 vm_offset_t end = addr + elem_size;
1158 vm_offset_t offs;
1159
1160 /*
1161 * 0-sized allocations in a KALLOC_MINSIZE bucket
1162 * would be offset to the next allocation which is incorrect.
1163 */
1164 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1165
1166 /*
1167 * Given how chunks work, for a zone with PGZ guards on,
1168 * there's a single element which ends precisely
1169 * at the page boundary: the last one.
1170 */
1171 if (req_size == elem_size ||
1172 (end & PAGE_MASK) ||
1173 !zone_meta_from_addr(addr)->zm_guarded) {
1174 return ptr;
1175 }
1176
1177 offs = elem_size - req_size;
1178 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1179
1180 return (char *)addr + offs;
1181 }
1182 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1183
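/*
 * Worked example for zone_element_pgz_oob_adjust() above (illustrative
 * numbers): for a 32-byte element whose last byte sits flush against a PGZ
 * guard page, a 24-byte request is returned shifted right by 8 bytes so
 * that a linear overflow of the buffer faults in the guard page:
 *
 *	elem_size = 32, req_size = 24	=>	offs = 32 - 24 = 8
 *
 * The returned pointer is addr + 8, and the guard page metadata remembers
 * zm_oob_offs = 8 so that zone_element_size() below can undo the shift.
 */
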
1184 __abortlike
1185 static void
1186 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1187 {
1188 struct zone_page_metadata *meta;
1189 vm_offset_t offs, size, page;
1190 zone_t zone;
1191
1192 page = trunc_page(addr);
1193 meta = zone_meta_from_addr(addr);
1194 zone = &zone_array[meta->zm_index];
1195
1196 if (zone->z_percpu) {
1197 panic("zone bound checks: address %p is a per-cpu allocation",
1198 (void *)addr);
1199 }
1200
1201 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1202 page -= ptoa(meta->zm_page_index);
1203 meta -= meta->zm_page_index;
1204 }
1205
1206 size = zone_elem_outer_size(zone);
1207 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1208 zone->z_quo_magic, size);
1209 panic("zone bound checks: buffer %p of length %zd overflows "
1210 "object %p of size %zd in zone %p[%s%s]",
1211 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1212 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1213 }
1214
1215 void
1216 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1217 {
1218 struct zone_page_metadata *meta;
1219 vm_offset_t offs, size;
1220 zone_t zone;
1221
1222 if (!from_zone_map(addr, 1)) {
1223 return;
1224 }
1225
1226 meta = zone_meta_from_addr(addr);
1227 zone = zone_by_id(meta->zm_index);
1228
1229 if (zone->z_percpu) {
1230 zone_element_bounds_check_panic(addr, len);
1231 }
1232
1233 if (zone->z_permanent) {
1234 /* We don't know bounds for those */
1235 return;
1236 }
1237
1238 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1239 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1240 offs += ptoa(meta->zm_page_index);
1241 }
1242 size = zone_elem_outer_size(zone);
1243 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1244 if (len + zone_elem_redzone(zone) > size - offs) {
1245 zone_element_bounds_check_panic(addr, len);
1246 }
1247 }
1248
1249 /*
1250 * Routine to get the size of a zone allocated address.
1251 * If the address doesn't belong to the zone maps, returns 0.
1252 */
1253 vm_size_t
1254 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1255 {
1256 vm_address_t addr = (vm_address_t)elem;
1257 struct zone_page_metadata *meta;
1258 vm_size_t esize, offs, end;
1259 zone_t zone;
1260
1261 if (from_zone_map(addr, sizeof(void *))) {
1262 meta = zone_meta_from_addr(addr);
1263 zone = zone_by_id(meta->zm_index);
1264 esize = zone_elem_inner_size(zone);
1265 end = vm_memtag_canonicalize_kernel(addr + esize);
1266 offs = 0;
1267
1268 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1269 /*
1270 * If the chunk uses guards, and that (addr + esize)
1271 * either crosses a page boundary or is at the boundary,
1272 * we need to look harder.
1273 */
1274 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1275 uint32_t chunk_pages = zone->z_chunk_pages;
1276
1277 /*
1278 * Because in the vast majority of cases the element
1279 * size is sub-page, and that meta[1] must be faulted,
1280 * we can quickly peek at whether it's a guard.
1281 *
1282 * For elements larger than a page, finding the guard
1283 * page requires a little more effort.
1284 */
1285 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1286 offs = meta[1].zm_oob_offs;
1287 if (clear_oob) {
1288 meta[1].zm_oob_offs = 0;
1289 }
1290 } else if (esize > PAGE_SIZE) {
1291 struct zone_page_metadata *gmeta;
1292
1293 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1294 gmeta = meta + meta->zm_subchunk_len;
1295 } else {
1296 gmeta = meta + chunk_pages;
1297 }
1298 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1299
1300 if (end >= zone_meta_to_addr(gmeta)) {
1301 offs = gmeta->zm_oob_offs;
1302 if (clear_oob) {
1303 gmeta->zm_oob_offs = 0;
1304 }
1305 }
1306 }
1307 }
1308 #else
1309 #pragma unused(end, clear_oob)
1310 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1311
1312 if (oob_offs) {
1313 *oob_offs = offs;
1314 }
1315 if (z) {
1316 *z = zone;
1317 }
1318 return esize;
1319 }
1320
1321 if (oob_offs) {
1322 *oob_offs = 0;
1323 }
1324
1325 return 0;
1326 }
1327
1328 zone_id_t
1329 zone_id_for_element(void *addr, vm_size_t esize)
1330 {
1331 zone_id_t zid = ZONE_ID_INVALID;
1332 if (from_zone_map(addr, esize)) {
1333 zid = zone_index_from_ptr(addr);
1334 __builtin_assume(zid != ZONE_ID_INVALID);
1335 }
1336 return zid;
1337 }
1338
1339 /* This function just formats the reason for the panics by redoing the checks */
1340 __abortlike
1341 static void
1342 zone_require_panic(zone_t zone, void *addr)
1343 {
1344 uint32_t zindex;
1345 zone_t other;
1346
1347 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1348 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1349 }
1350
1351 zindex = zone_index_from_ptr(addr);
1352 other = &zone_array[zindex];
1353 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1354 panic("zone_require failed: invalid zone index %d "
1355 "(addr: %p, expected: %s%s)", zindex,
1356 addr, zone_heap_name(zone), zone->z_name);
1357 } else {
1358 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1359 "(addr: %p, expected: %s%s)",
1360 zindex, zone_heap_name(other), other->z_name,
1361 addr, zone_heap_name(zone), zone->z_name);
1362 }
1363 }
1364
1365 __abortlike
1366 static void
1367 zone_id_require_panic(zone_id_t zid, void *addr)
1368 {
1369 zone_require_panic(&zone_array[zid], addr);
1370 }
1371
1372 /*
1373 * Routines to panic if a pointer is not mapped to an expected zone.
1374 * This can be used as a means of pinning an object to the zone it is expected
1375 * to be a part of. Causes a panic if the address does not belong to any
1376 * specified zone, does not belong to any zone, has been freed and therefore
1377 * unmapped from the zone, or the pointer contains an uninitialized value that
1378 * does not belong to any zone.
1379 */
1380 __mockable void
1381 zone_require(zone_t zone, void *addr)
1382 {
1383 vm_size_t esize = zone_elem_inner_size(zone);
1384
1385 if (from_zone_map(addr, esize) &&
1386 zone_has_index(zone, zone_index_from_ptr(addr))) {
1387 return;
1388 }
1389 zone_require_panic(zone, addr);
1390 }
1391
1392 __mockable void
1393 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1394 {
1395 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1396 return;
1397 }
1398 zone_id_require_panic(zid, addr);
1399 }
1400
1401 void
1402 zone_id_require_aligned(zone_id_t zid, void *addr)
1403 {
1404 zone_t zone = zone_by_id(zid);
1405 vm_offset_t elem, offs;
1406
1407 elem = (vm_offset_t)addr;
1408 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
1409
1410 if (from_zone_map(addr, 1)) {
1411 struct zone_page_metadata *meta;
1412
1413 meta = zone_meta_from_addr(elem);
1414 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1415 offs += ptoa(meta->zm_page_index);
1416 }
1417
1418 if (zid == meta->zm_index &&
1419 Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
1420 return;
1421 }
1422 }
1423
1424 zone_invalid_element_panic(zone, elem);
1425 }
1426
1427 bool
1428 zone_owns(zone_t zone, void *addr)
1429 {
1430 vm_size_t esize = zone_elem_inner_size(zone);
1431
1432 if (from_zone_map(addr, esize)) {
1433 return zone_has_index(zone, zone_index_from_ptr(addr));
1434 }
1435 return false;
1436 }
1437
1438 static inline struct mach_vm_range
1439 zone_kmem_suballoc(
1440 mach_vm_offset_t addr,
1441 vm_size_t size,
1442 int flags,
1443 vm_tag_t tag,
1444 vm_map_t *new_map)
1445 {
1446 struct mach_vm_range r;
1447 #ifndef __BUILDING_XNU_LIB_UNITTEST__
1448 /* Don't create the zalloc submap, unit-test mock all zalloc functionality */
1449 *new_map = kmem_suballoc(kernel_map, &addr, size,
1450 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1451 flags, KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT, tag).kmr_submap;
1452 #else
1453 #pragma unused(flags, tag, new_map)
1454 #endif
1455 r.min_address = addr;
1456 r.max_address = addr + size;
1457 return r;
1458 }
1459
1460 #endif /* !ZALLOC_TEST */
1461 #pragma mark Zone bits allocator
1462
1463 /*!
1464 * @defgroup Zone Bitmap allocator
1465 * @{
1466 *
1467 * @brief
1468 * Functions implementing the zone bitmap allocator
1469 *
1470 * @discussion
1471 * The zone allocator maintains which elements are allocated or free in bitmaps.
1472 *
1473 * When the number of elements per page is smaller than 32, it is stored inline
1474 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1475 * and @c zm_bitmap used for storage).
1476 *
1477  * When the number of elements is larger, a bitmap is allocated from
1478  * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1479 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1480 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1481 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1482 * cannot be larger than 1024 bytes (8192 bits).
1483 *
1484  * This buddy allocator can actually accommodate allocations as large
1485 * as 8k on 16k systems and 2k on 4k systems.
1486 *
1487 * Note: @c zba_* functions are implementation details not meant to be used
1488 * outside of the allocation of the allocator itself. Interfaces to the rest of
1489 * the zone allocator are documented and not @c zba_* prefixed.
1490 */
1491
1492 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1493 #define ZBA_GRANULE sizeof(uint64_t)
1494 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1495 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1496 #define ZBA_MAX_ALLOC_ORDER 7
1497 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1498 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1499 #define ZBA_PTR_MASK 0x0fffffff
1500 #define ZBA_ORDER_SHIFT 29
1501 #define ZBA_HAS_EXTRA_BIT 0x10000000
1502
1503 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1504 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
1505
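/*
 * Size classes handled by the buddy allocator above, shown informally for
 * a 16K PAGE_MAX_SIZE (so ZBA_MAX_ORDER == 10):
 *
 *	order 0:	   1 granule  =    8 bytes (  64 bits)
 *	order 1:	   2 granules =   16 bytes ( 128 bits)
 *	...
 *	order 7:	 128 granules = 1024 bytes (8192 bits)	ZBA_MAX_ALLOC_ORDER
 *	order 10:	1024 granules = 8192 bytes		half a chunk
 */
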
1506 struct zone_bits_chain {
1507 uint32_t zbc_next;
1508 uint32_t zbc_prev;
1509 } __attribute__((aligned(ZBA_GRANULE)));
1510
1511 struct zone_bits_head {
1512 uint32_t zbh_next;
1513 uint32_t zbh_unused;
1514 } __attribute__((aligned(ZBA_GRANULE)));
1515
1516 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1517 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1518
1519 struct zone_bits_allocator_meta {
1520 uint32_t zbam_left;
1521 uint32_t zbam_right;
1522 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1523 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1524 };
1525
1526 struct zone_bits_allocator_header {
1527 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1528 };
1529
1530 #if ZALLOC_TEST
1531 static struct zalloc_bits_allocator_test_setup {
1532 vm_offset_t zbats_base;
1533 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1534 } zba_test_info;
1535
1536 static struct zone_bits_allocator_header *
1537 zba_base_header(void)
1538 {
1539 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1540 }
1541
1542 static kern_return_t
1543 zba_populate(uint32_t n, bool with_extra __unused)
1544 {
1545 vm_address_t base = zba_test_info.zbats_base;
1546 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1547
1548 return KERN_SUCCESS;
1549 }
1550 #else
1551 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1552 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1553
1554 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1555 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1556
1557 static struct zone_bits_allocator_header *
1558 zba_base_header(void)
1559 {
1560 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1561 }
1562
1563 static void
1564 zba_lock(void)
1565 {
1566 lck_mtx_lock(&zba_mtx);
1567 }
1568
1569 static void
1570 zba_unlock(void)
1571 {
1572 lck_mtx_unlock(&zba_mtx);
1573 }
1574
1575 __abortlike
1576 static void
1577 zba_memory_exhausted(void)
1578 {
1579 uint64_t zsize = 0;
1580 zone_t z = zone_find_largest(&zsize);
1581 panic("zba_populate: out of bitmap space, "
1582 "likely due to memory leak in zone [%s%s] "
1583 "(%u%c, %d elements allocated)",
1584 zone_heap_name(z), zone_name(z),
1585 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1586 zone_count_allocated(z));
1587 }
1588
1589
1590 static kern_return_t
1591 zba_populate(uint32_t n, bool with_extra)
1592 {
1593 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1594 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1595 vm_address_t bits_addr;
1596 vm_address_t xtra_addr;
1597 kern_return_t kr;
1598
1599 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1600 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1601
1602 kr = kernel_memory_populate(bits_addr, bits_size,
1603 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1604 VM_KERN_MEMORY_OSFMK);
1605 if (kr != KERN_SUCCESS) {
1606 return kr;
1607 }
1608
1609
1610 if (with_extra) {
1611 kr = kernel_memory_populate(xtra_addr, xtra_size,
1612 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1613 VM_KERN_MEMORY_OSFMK);
1614 if (kr != KERN_SUCCESS) {
1615 kernel_memory_depopulate(bits_addr, bits_size,
1616 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1617 VM_KERN_MEMORY_OSFMK);
1618 }
1619 }
1620
1621 return kr;
1622 }
1623 #endif
1624
1625 __pure2
1626 static struct zone_bits_allocator_meta *
1627 zba_meta(void)
1628 {
1629 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1630 }
1631
1632 __pure2
1633 static uint64_t *
1634 zba_slot_base(void)
1635 {
1636 return (uint64_t *)zba_base_header();
1637 }
1638
1639 __pure2
1640 static struct zone_bits_head *
1641 zba_head(uint32_t order, bool with_extra)
1642 {
1643 if (with_extra) {
1644 return &zba_meta()->zbam_lists_with_extra[order];
1645 } else {
1646 return &zba_meta()->zbam_lists[order];
1647 }
1648 }
1649
1650 __pure2
1651 static uint32_t
1652 zba_head_index(struct zone_bits_head *hd)
1653 {
1654 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1655 }
1656
1657 __pure2
1658 static struct zone_bits_chain *
1659 zba_chain_for_index(uint32_t index)
1660 {
1661 return (struct zone_bits_chain *)(zba_slot_base() + index);
1662 }
1663
1664 __pure2
1665 static uint32_t
1666 zba_chain_to_index(const struct zone_bits_chain *zbc)
1667 {
1668 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1669 }
1670
1671 __abortlike
1672 static void
1673 zba_head_corruption_panic(uint32_t order, bool with_extra)
1674 {
1675 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1676 order, with_extra, zba_head(order, with_extra));
1677 }
1678
1679 __abortlike
1680 static void
1681 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1682 {
1683 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1684 }
1685
1686 static void
1687 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1688 {
1689 struct zone_bits_head *hd = zba_head(order, with_extra);
1690 uint32_t hd_index = zba_head_index(hd);
1691 uint32_t index = zba_chain_to_index(zbc);
1692 struct zone_bits_chain *next;
1693
1694 if (hd->zbh_next) {
1695 next = zba_chain_for_index(hd->zbh_next);
1696 if (next->zbc_prev != hd_index) {
1697 zba_head_corruption_panic(order, with_extra);
1698 }
1699 next->zbc_prev = index;
1700 }
1701 zbc->zbc_next = hd->zbh_next;
1702 zbc->zbc_prev = hd_index;
1703 hd->zbh_next = index;
1704 }
1705
1706 static void
1707 zba_remove_block(struct zone_bits_chain *zbc)
1708 {
1709 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1710 uint32_t index = zba_chain_to_index(zbc);
1711
1712 if (prev->zbc_next != index) {
1713 zba_chain_corruption_panic(prev, zbc);
1714 }
1715 if ((prev->zbc_next = zbc->zbc_next)) {
1716 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1717 if (next->zbc_prev != index) {
1718 zba_chain_corruption_panic(zbc, next);
1719 }
1720 next->zbc_prev = zbc->zbc_prev;
1721 }
1722 }
1723
1724 static vm_address_t
1725 zba_try_pop_block(uint32_t order, bool with_extra)
1726 {
1727 struct zone_bits_head *hd = zba_head(order, with_extra);
1728 struct zone_bits_chain *zbc;
1729
1730 if (hd->zbh_next == 0) {
1731 return 0;
1732 }
1733
1734 zbc = zba_chain_for_index(hd->zbh_next);
1735 zba_remove_block(zbc);
1736 return (vm_address_t)zbc;
1737 }
1738
1739 static struct zone_bits_allocator_header *
1740 zba_header(vm_offset_t addr)
1741 {
1742 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1743 return (struct zone_bits_allocator_header *)addr;
1744 }
1745
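/*
 * The buddy allocator below overlays an implicit binary tree on each
 * ZBA_CHUNK_SIZE chunk, using heap-style indexing: a node's parent is
 * (node - 1) / 2, its left child is node * 2 + 1, and its buddy is its
 * sibling ((node - 1) ^ 1) + 1.  zba_node() maps a (granule offset, order)
 * pair to its node index; lower orders live deeper in the tree.
 */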
1746 static size_t
1747 zba_node_parent(size_t node)
1748 {
1749 return (node - 1) / 2;
1750 }
1751
1752 static size_t
1753 zba_node_left_child(size_t node)
1754 {
1755 return node * 2 + 1;
1756 }
1757
1758 static size_t
1759 zba_node_buddy(size_t node)
1760 {
1761 return ((node - 1) ^ 1) + 1;
1762 }
1763
1764 static size_t
1765 zba_node(vm_offset_t addr, uint32_t order)
1766 {
1767 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1768 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1769 }
1770
1771 static struct zone_bits_chain *
1772 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1773 {
1774 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1775 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1776 }
1777
1778 static void
1779 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1780 {
1781 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1782 }
1783
1784 static bool
1785 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1786 {
1787 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1788 }
1789
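/*
 * Freeing walks the tree upwards: each level toggles the parent's "split"
 * bit.  If the bit is now set, the buddy is still allocated and the block
 * is pushed on the freelist at the current order.  If the bit cleared,
 * the buddy was free too: it is unlinked from its freelist and the merged
 * block is retried one order higher.
 */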
1790 static void
1791 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1792 {
1793 struct zone_bits_allocator_header *zbah = zba_header(addr);
1794 struct zone_bits_chain *zbc;
1795 size_t node = zba_node(addr, order);
1796
1797 while (node) {
1798 size_t parent = zba_node_parent(node);
1799
1800 zba_node_flip_split(zbah, parent);
1801 if (zba_node_is_split(zbah, parent)) {
1802 break;
1803 }
1804
1805 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1806 zba_remove_block(zbc);
1807 order++;
1808 node = parent;
1809 }
1810
1811 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1812 }
1813
1814 static vm_size_t
1815 zba_chunk_header_size(uint32_t n)
1816 {
1817 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1818 if (n == 0) {
1819 hdr_size += sizeof(struct zone_bits_allocator_meta);
1820 }
1821 return hdr_size;
1822 }
1823
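/*
 * A fresh chunk is carved from its top down: starting from the largest
 * order, blocks are pushed on the freelists as long as they still fit
 * above the chunk header (the first chunk also reserves room for the
 * allocator metadata), so the header area itself is never handed out.
 */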
1824 static void
1825 zba_init_chunk(uint32_t n, bool with_extra)
1826 {
1827 vm_size_t hdr_size = zba_chunk_header_size(n);
1828 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1829 struct zone_bits_allocator_header *zbah = zba_header(page);
1830 vm_size_t size = ZBA_CHUNK_SIZE;
1831 size_t node;
1832
1833 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1834 if (size < hdr_size + (ZBA_GRANULE << o)) {
1835 continue;
1836 }
1837 size -= ZBA_GRANULE << o;
1838 node = zba_node(page + size, o);
1839 zba_node_flip_split(zbah, zba_node_parent(node));
1840 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1841 }
1842 }
1843
1844 __attribute__((noinline))
1845 static void
1846 zba_grow(bool with_extra)
1847 {
1848 struct zone_bits_allocator_meta *meta = zba_meta();
1849 kern_return_t kr = KERN_SUCCESS;
1850 uint32_t chunk;
1851
1852 #if !ZALLOC_TEST
1853 if (meta->zbam_left >= meta->zbam_right) {
1854 zba_memory_exhausted();
1855 }
1856 #endif
1857
1858 if (with_extra) {
1859 chunk = meta->zbam_right - 1;
1860 } else {
1861 chunk = meta->zbam_left;
1862 }
1863
1864 kr = zba_populate(chunk, with_extra);
1865 if (kr == KERN_SUCCESS) {
1866 if (with_extra) {
1867 meta->zbam_right -= 1;
1868 } else {
1869 meta->zbam_left += 1;
1870 }
1871
1872 zba_init_chunk(chunk, with_extra);
1873 #if !ZALLOC_TEST
1874 } else {
1875 /*
1876 * zba_populate() has to be allowed to fail populating:
1877 * since we are under a global lock, we need to do the
1878 * VM_PAGE_WAIT() outside of the lock.
1879 */
1880 assert(kr == KERN_RESOURCE_SHORTAGE);
1881 zba_unlock();
1882 VM_PAGE_WAIT();
1883 zba_lock();
1884 #endif
1885 }
1886 }
1887
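/*
 * Allocation pops the smallest available block of order >= the request,
 * growing the allocator when every freelist is empty.  A larger block is
 * split repeatedly: each split marks the node as split and pushes the
 * right-hand buddy on the freelist of the lower order, until a block of
 * the requested order remains.
 */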
1888 static vm_offset_t
1889 zba_alloc(uint32_t order, bool with_extra)
1890 {
1891 struct zone_bits_allocator_header *zbah;
1892 uint32_t cur = order;
1893 vm_address_t addr;
1894 size_t node;
1895
1896 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1897 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1898 zba_grow(with_extra);
1899 cur = order;
1900 }
1901 }
1902
1903 zbah = zba_header(addr);
1904 node = zba_node(addr, cur);
1905 zba_node_flip_split(zbah, zba_node_parent(node));
1906 while (cur > order) {
1907 cur--;
1908 zba_node_flip_split(zbah, node);
1909 node = zba_node_left_child(node);
1910 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1911 cur, with_extra);
1912 }
1913
1914 return addr;
1915 }
1916
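/*
 * Helpers for addressing bits inside an array of words of a given type:
 * zba_map_index/zba_map_bit locate bit n, while zba_map_mask_lt and
 * zba_map_mask_ge build masks of the bits strictly below, respectively
 * at or above, position n within its word.
 */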
1917 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
1918 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
1919 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
1920 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
1921
1922 #if !ZALLOC_TEST
1923 #if VM_TAG_SIZECLASSES
1924
1925 static void *
1926 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1927 {
1928 vm_offset_t base = zone_info.zi_xtra_range.min_address;
1929 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1930
1931 return (void *)(base + ((offs + idx) << zba_xtra_shift));
1932 }
1933
1934 #endif /* VM_TAG_SIZECLASSES */
1935
1936 static uint32_t
1937 zba_bits_ref_order(uint32_t bref)
1938 {
1939 return bref >> ZBA_ORDER_SHIFT;
1940 }
1941
1942 static bitmap_t *
1943 zba_bits_ref_ptr(uint32_t bref)
1944 {
1945 return zba_slot_base() + (bref & ZBA_PTR_MASK);
1946 }
1947
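/*
 * The two scanners below look for a set bit starting at the round-robin
 * hint eidx: they first finish the word containing the hint, then sweep
 * the remaining words with wrap-around, clear the bit they find and
 * return the corresponding element index, panicking if the bitmaps were
 * unexpectedly empty.
 */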
1948 static vm_offset_t
1949 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1950 zalloc_flags_t flags, vm_offset_t eidx)
1951 {
1952 size_t i = eidx / 32;
1953 uint32_t map;
1954
1955 if (eidx % 32) {
1956 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
1957 if (map) {
1958 eidx = __builtin_ctz(map);
1959 meta[i].zm_bitmap ^= 1u << eidx;
1960 return i * 32 + eidx;
1961 }
1962 i++;
1963 }
1964
1965 uint32_t chunk_len = meta->zm_chunk_len;
1966 if (flags & Z_PCPU) {
1967 chunk_len = zpercpu_count();
1968 }
1969 for (int j = 0; j < chunk_len; j++, i++) {
1970 if (i >= chunk_len) {
1971 i = 0;
1972 }
1973 if (__probable(map = meta[i].zm_bitmap)) {
1974 meta[i].zm_bitmap &= map - 1;
1975 return i * 32 + __builtin_ctz(map);
1976 }
1977 }
1978
1979 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
1980 }
1981
1982 static vm_offset_t
1983 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
1984 vm_offset_t eidx)
1985 {
1986 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
1987 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
1988 size_t i = eidx / 64;
1989 uint64_t map;
1990
1991 if (eidx % 64) {
1992 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
1993 if (map) {
1994 eidx = __builtin_ctzll(map);
1995 bits[i] ^= 1ull << eidx;
1996 return i * 64 + eidx;
1997 }
1998 i++;
1999 }
2000
2001 for (int j = 0; j < bits_size; i++, j++) {
2002 if (i >= bits_size) {
2003 i = 0;
2004 }
2005 if (__probable(map = bits[i])) {
2006 bits[i] &= map - 1;
2007 return i * 64 + __builtin_ctzll(map);
2008 }
2009 }
2010
2011 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2012 }
2013
2014 /*!
2015 * @function zone_meta_find_and_clear_bit
2016 *
2017 * @brief
2018 * The core of the bitmap allocator: find a bit set in the bitmaps.
2019 *
2020 * @discussion
2021 * This method will round-robin through available allocations,
2022 * with a per-core memory of the last allocated element index.
2023 *
2024 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2025 * double-free bugs way too practical.
2026 *
2027 * @param zone The zone we're allocating from.
2028 * @param meta The main metadata for the chunk being allocated from.
2029 * @param flags the alloc flags (for @c Z_PCPU).
2030 */
2031 static vm_offset_t
2032 zone_meta_find_and_clear_bit(
2033 zone_t zone,
2034 zone_stats_t zs,
2035 struct zone_page_metadata *meta,
2036 zalloc_flags_t flags)
2037 {
2038 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2039
2040 if (meta->zm_inline_bitmap) {
2041 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2042 } else {
2043 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2044 }
2045 zs->zs_alloc_rr = (uint16_t)eidx;
2046 return eidx;
2047 }
2048
2049 /*!
2050 * @function zone_meta_bits_init_inline
2051 *
2052 * @brief
2053 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2054 *
2055 * @param meta The main metadata for the initialized chunk.
2056 * @param count The number of elements the chunk can hold
2057 * (which might be partial for partially populated chunks).
2058 */
2059 static void
2060 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2061 {
2062 /*
2063 * We're called with the metadata zm_bitmap fields already zeroed out.
2064 */
2065 for (size_t i = 0; i < count / 32; i++) {
2066 meta[i].zm_bitmap = ~0u;
2067 }
2068 if (count % 32) {
2069 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2070 }
2071 }
2072
2073 /*!
2074 * @function zone_meta_bits_alloc_init
2075 *
2076 * @brief
2077 * Allocates a zm_bitmap field for a newly assigned chunk.
2078 *
2079 * @param count The number of elements the chunk can hold
2080 * (which might be partial for partially populated chunks).
2081 * @param nbits The maximum number of bits that will be used.
2082 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2083 */
2084 static uint32_t
2085 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2086 {
2087 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2088 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2089
2090 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2091 uint64_t *bits;
2092 size_t i = 0;
2093
2094 assert(order <= ZBA_MAX_ALLOC_ORDER);
2095 assert(count <= ZBA_GRANULE_BITS << order);
2096
2097 zba_lock();
2098 bits = (uint64_t *)zba_alloc(order, with_extra);
2099 zba_unlock();
2100
2101 while (i < count / 64) {
2102 bits[i++] = ~0ull;
2103 }
2104 if (count % 64) {
2105 bits[i++] = zba_map_mask_lt(uint64_t, count);
2106 }
2107 while (i < 1u << order) {
2108 bits[i++] = 0;
2109 }
2110
2111 return (uint32_t)(bits - zba_slot_base()) +
2112 (order << ZBA_ORDER_SHIFT) +
2113 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2114 }
2115
2116 /*!
2117 * @function zone_meta_bits_merge
2118 *
2119 * @brief
2120 * Adds elements <code>[start, end)</code> to a chunk being extended.
2121 *
2122 * @param meta The main metadata for the extended chunk.
2123 * @param start The index of the first element to add to the chunk.
2124 * @param end The index of the last (exclusive) element to add.
2125 */
2126 static void
2127 zone_meta_bits_merge(struct zone_page_metadata *meta,
2128 uint32_t start, uint32_t end)
2129 {
2130 if (meta->zm_inline_bitmap) {
2131 while (start < end) {
2132 size_t s_i = start / 32;
2133 size_t s_e = end / 32;
2134
2135 if (s_i == s_e) {
2136 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2137 zba_map_mask_ge(uint32_t, start);
2138 break;
2139 }
2140
2141 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2142 start += 32 - (start % 32);
2143 }
2144 } else {
2145 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2146
2147 while (start < end) {
2148 size_t s_i = start / 64;
2149 size_t s_e = end / 64;
2150
2151 if (s_i == s_e) {
2152 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2153 zba_map_mask_ge(uint64_t, start);
2154 break;
2155 }
2156 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2157 start += 64 - (start % 64);
2158 }
2159 }
2160 }
2161
2162 /*!
2163 * @function zone_bits_free
2164 *
2165 * @brief
2166 * Frees a bitmap to the zone bitmap allocator.
2167 *
2168 * @param bref
2169 * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2170 */
2171 static void
2172 zone_bits_free(uint32_t bref)
2173 {
2174 zba_lock();
2175 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2176 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2177 zba_unlock();
2178 }
2179
2180 /*!
2181 * @function zone_meta_is_free
2182 *
2183 * @brief
2184 * Returns whether a given element appears free.
2185 */
2186 static bool
2187 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2188 {
2189 if (meta->zm_inline_bitmap) {
2190 uint32_t bit = zba_map_bit(uint32_t, eidx);
2191 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2192 } else {
2193 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2194 uint64_t bit = zba_map_bit(uint64_t, eidx);
2195 return bits[zba_map_index(uint64_t, eidx)] & bit;
2196 }
2197 }
2198
2199 /*!
2200 * @function zone_meta_mark_free
2201 *
2202 * @brief
2203 * Marks an element as free and returns whether it was marked as used.
2204 */
2205 static bool
2206 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2207 {
2208 if (meta->zm_inline_bitmap) {
2209 uint32_t bit = zba_map_bit(uint32_t, eidx);
2210 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2211 return false;
2212 }
2213 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2214 } else {
2215 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2216 uint64_t bit = zba_map_bit(uint64_t, eidx);
2217 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2218 return false;
2219 }
2220 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2221 }
2222 return true;
2223 }
2224
2225 #if VM_TAG_SIZECLASSES
2226
2227 __startup_func
2228 void
2229 __zone_site_register(vm_allocation_site_t *site)
2230 {
2231 if (zone_tagging_on) {
2232 vm_tag_alloc(site);
2233 }
2234 }
2235
2236 uint16_t
2237 zone_index_from_tag_index(uint32_t sizeclass_idx)
2238 {
2239 return zone_tags_sizeclasses[sizeclass_idx];
2240 }
2241
2242 #endif /* VM_TAG_SIZECLASSES */
2243 #endif /* !ZALLOC_TEST */
2244 /*! @} */
2245 #pragma mark zalloc helpers
2246 #if !ZALLOC_TEST
2247
2248 static inline void *
2249 zstack_tbi_fix(vm_offset_t elem)
2250 {
2251 elem = vm_memtag_load_tag(elem);
2252 return (void *)elem;
2253 }
2254
2255 static inline vm_offset_t
2256 zstack_tbi_fill(void *addr)
2257 {
2258 vm_offset_t elem = (vm_offset_t)addr;
2259
2260 return vm_memtag_canonicalize_kernel(elem);
2261 }
2262
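/*
 * Free elements are chained through a delta-encoded stack: instead of a
 * raw pointer, each element stores the difference between the previous
 * head and its own (canonicalized) address, which zstack_pop_no_delta()
 * undoes by adding the stored delta back to the head.
 */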
2263 __attribute__((always_inline))
2264 static inline void
2265 zstack_push_no_delta(zstack_t *stack, void *addr)
2266 {
2267 vm_offset_t elem = zstack_tbi_fill(addr);
2268
2269 *(vm_offset_t *)addr = stack->z_head - elem;
2270 stack->z_head = elem;
2271 }
2272
2273 __attribute__((always_inline))
2274 void
2275 zstack_push(zstack_t *stack, void *addr)
2276 {
2277 zstack_push_no_delta(stack, addr);
2278 stack->z_count++;
2279 }
2280
2281 __attribute__((always_inline))
2282 static inline void *
2283 zstack_pop_no_delta(zstack_t *stack)
2284 {
2285 void *addr = zstack_tbi_fix(stack->z_head);
2286
2287 stack->z_head += *(vm_offset_t *)addr;
2288 *(vm_offset_t *)addr = 0;
2289
2290 return addr;
2291 }
2292
2293 __attribute__((always_inline))
2294 void *
2295 zstack_pop(zstack_t *stack)
2296 {
2297 stack->z_count--;
2298 return zstack_pop_no_delta(stack);
2299 }
2300
2301 static inline void
2302 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2303 {
2304 uint32_t ticket;
2305
2306 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2307 &ticket, &zone_locks_grp))) {
2308 return;
2309 }
2310
2311 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2312
2313 /*
2314 * If zone caching has been disabled due to memory pressure,
2315 * then recording contention is not useful, give the system
2316 * time to recover.
2317 */
2318 if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2319 zone->z_recirc_cont_cur++;
2320 }
2321 }
2322
2323 static inline void
2324 zone_recirc_lock_nopreempt(zone_t zone)
2325 {
2326 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2327 }
2328
2329 static inline void
2330 zone_recirc_unlock_nopreempt(zone_t zone)
2331 {
2332 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2333 }
2334
2335 static inline void
2336 zone_lock_nopreempt_check_contention(zone_t zone)
2337 {
2338 uint32_t ticket;
2339 #if KASAN_FAKESTACK
2340 spl_t s = 0;
2341 if (zone->z_kasan_fakestacks) {
2342 s = splsched();
2343 }
2344 #endif /* KASAN_FAKESTACK */
2345
2346 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2347 &zone_locks_grp))) {
2348 #if KASAN_FAKESTACK
2349 zone->z_kasan_spl = s;
2350 #endif /* KASAN_FAKESTACK */
2351 return;
2352 }
2353
2354 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2355 #if KASAN_FAKESTACK
2356 zone->z_kasan_spl = s;
2357 #endif /* KASAN_FAKESTACK */
2358
2359 /*
2360 * If zone caching has been disabled due to memory pressure,
2361 * then recording contention is not useful, give the system
2362 * time to recover.
2363 */
2364 if (__probable(!zone_caching_disabled &&
2365 !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2366 zone->z_recirc_cont_cur++;
2367 }
2368 }
2369
2370 static inline void
2371 zone_lock_nopreempt(zone_t zone)
2372 {
2373 #if KASAN_FAKESTACK
2374 spl_t s = 0;
2375 if (zone->z_kasan_fakestacks) {
2376 s = splsched();
2377 }
2378 #endif /* KASAN_FAKESTACK */
2379 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2380 #if KASAN_FAKESTACK
2381 zone->z_kasan_spl = s;
2382 #endif /* KASAN_FAKESTACK */
2383 }
2384
2385 static inline void
2386 zone_unlock_nopreempt(zone_t zone)
2387 {
2388 #if KASAN_FAKESTACK
2389 spl_t s = zone->z_kasan_spl;
2390 zone->z_kasan_spl = 0;
2391 #endif /* KASAN_FAKESTACK */
2392 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2393 #if KASAN_FAKESTACK
2394 if (zone->z_kasan_fakestacks) {
2395 splx(s);
2396 }
2397 #endif /* KASAN_FAKESTACK */
2398 }
2399
2400 static inline void
2401 zone_depot_lock_nopreempt(zone_cache_t zc)
2402 {
2403 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2404 }
2405
2406 static inline void
2407 zone_depot_unlock_nopreempt(zone_cache_t zc)
2408 {
2409 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2410 }
2411
2412 static inline void
2413 zone_depot_lock(zone_cache_t zc)
2414 {
2415 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2416 }
2417
2418 static inline void
2419 zone_depot_unlock(zone_cache_t zc)
2420 {
2421 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2422 }
2423
2424 zone_t
2425 zone_by_id(size_t zid)
2426 {
2427 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2428 }
2429
2430 static inline bool
2431 zone_supports_vm(zone_t z)
2432 {
2433 /*
2434 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2435 * to overcommit because they're used to reclaim memory
2436 * (VM support).
2437 */
2438 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2439 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2440 }
2441
2442 const char *
2443 zone_name(zone_t z)
2444 {
2445 return z->z_name;
2446 }
2447
2448 const char *
2449 zone_heap_name(zone_t z)
2450 {
2451 zone_security_flags_t zsflags = zone_security_config(z);
2452 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2453 return kalloc_heap_names[zsflags.z_kheap_id];
2454 }
2455 return "invalid";
2456 }
2457
2458 static uint32_t
2459 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2460 {
2461 vm_size_t elem_count, chunks;
2462
2463 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2464 zone_elem_outer_size(z);
2465 chunks = (max_elems + elem_count - 1) / elem_count;
2466
2467 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2468 }
2469
2470 static inline vm_size_t
2471 zone_submaps_approx_size(void)
2472 {
2473 vm_size_t size = 0;
2474
2475 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2476 if (zone_submaps[idx] != VM_MAP_NULL) {
2477 size += zone_submaps[idx]->size;
2478 }
2479 }
2480
2481 return size;
2482 }
2483
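/*
 * A zone depot is a singly linked list of magazines with two logical
 * sections: zd_full magazines at the head, followed by zd_empty ones.
 * zd_tail points at the link of the last full magazine (or at zd_head
 * when there is none), so full magazines can be inserted from either
 * end while empty magazines are only ever queued after the tail.
 */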
2484 static inline void
2485 zone_depot_init(struct zone_depot *zd)
2486 {
2487 *zd = (struct zone_depot){
2488 .zd_tail = &zd->zd_head,
2489 };
2490 }
2491
2492 static inline void
2493 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2494 {
2495 if (zd->zd_full++ == 0) {
2496 zd->zd_tail = &mag->zm_next;
2497 }
2498 mag->zm_next = zd->zd_head;
2499 zd->zd_head = mag;
2500 }
2501
2502 static inline void
2503 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2504 {
2505 zd->zd_full++;
2506 mag->zm_next = *zd->zd_tail;
2507 *zd->zd_tail = mag;
2508 zd->zd_tail = &mag->zm_next;
2509 }
2510
2511 static inline void
2512 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2513 {
2514 zd->zd_empty++;
2515 mag->zm_next = *zd->zd_tail;
2516 *zd->zd_tail = mag;
2517 }
2518
2519 static inline zone_magazine_t
2520 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2521 {
2522 zone_magazine_t mag = zd->zd_head;
2523
2524 assert(zd->zd_full);
2525
2526 zd->zd_full--;
2527 if (z && z->z_recirc_full_min > zd->zd_full) {
2528 z->z_recirc_full_min = zd->zd_full;
2529 }
2530 zd->zd_head = mag->zm_next;
2531 if (zd->zd_full == 0) {
2532 zd->zd_tail = &zd->zd_head;
2533 }
2534
2535 mag->zm_next = NULL;
2536 return mag;
2537 }
2538
2539 static inline zone_magazine_t
2540 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2541 {
2542 zone_magazine_t mag = *zd->zd_tail;
2543
2544 assert(zd->zd_empty);
2545
2546 zd->zd_empty--;
2547 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2548 z->z_recirc_empty_min = zd->zd_empty;
2549 }
2550 *zd->zd_tail = mag->zm_next;
2551
2552 mag->zm_next = NULL;
2553 return mag;
2554 }
2555
2556 static inline smr_seq_t
2557 zone_depot_move_full(
2558 struct zone_depot *dst,
2559 struct zone_depot *src,
2560 uint32_t n,
2561 zone_t z)
2562 {
2563 zone_magazine_t head, last;
2564
2565 assert(n);
2566 assert(src->zd_full >= n);
2567
2568 src->zd_full -= n;
2569 if (z && z->z_recirc_full_min > src->zd_full) {
2570 z->z_recirc_full_min = src->zd_full;
2571 }
2572 head = last = src->zd_head;
2573 for (uint32_t i = n; i-- > 1;) {
2574 last = last->zm_next;
2575 }
2576
2577 src->zd_head = last->zm_next;
2578 if (src->zd_full == 0) {
2579 src->zd_tail = &src->zd_head;
2580 }
2581
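/*
 * When the zone is configured LIFO (z_lifo), the moved magazines are
 * prepended to the destination depot; otherwise they are appended at
 * the tail so the full magazines keep FIFO ordering.
 */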
2582 if (z && zone_security_array[zone_index(z)].z_lifo) {
2583 if (dst->zd_full == 0) {
2584 dst->zd_tail = &last->zm_next;
2585 }
2586 last->zm_next = dst->zd_head;
2587 dst->zd_head = head;
2588 } else {
2589 last->zm_next = *dst->zd_tail;
2590 *dst->zd_tail = head;
2591 dst->zd_tail = &last->zm_next;
2592 }
2593 dst->zd_full += n;
2594
2595 return last->zm_seq;
2596 }
2597
2598 static inline void
2599 zone_depot_move_empty(
2600 struct zone_depot *dst,
2601 struct zone_depot *src,
2602 uint32_t n,
2603 zone_t z)
2604 {
2605 zone_magazine_t head, last;
2606
2607 assert(n);
2608 assert(src->zd_empty >= n);
2609
2610 src->zd_empty -= n;
2611 if (z && z->z_recirc_empty_min > src->zd_empty) {
2612 z->z_recirc_empty_min = src->zd_empty;
2613 }
2614 head = last = *src->zd_tail;
2615 for (uint32_t i = n; i-- > 1;) {
2616 last = last->zm_next;
2617 }
2618
2619 *src->zd_tail = last->zm_next;
2620
2621 dst->zd_empty += n;
2622 last->zm_next = *dst->zd_tail;
2623 *dst->zd_tail = head;
2624 }
2625
2626 static inline bool
2627 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2628 {
2629 if (depot->zd_full == 0) {
2630 return false;
2631 }
2632
2633 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2634 }
2635
2636 static void
2637 zone_cache_swap_magazines(zone_cache_t cache)
2638 {
2639 uint16_t count_a = cache->zc_alloc_cur;
2640 uint16_t count_f = cache->zc_free_cur;
2641 vm_offset_t *elems_a = cache->zc_alloc_elems;
2642 vm_offset_t *elems_f = cache->zc_free_elems;
2643
2644 z_debug_assert(count_a <= zc_mag_size());
2645 z_debug_assert(count_f <= zc_mag_size());
2646
2647 cache->zc_alloc_cur = count_f;
2648 cache->zc_free_cur = count_a;
2649 cache->zc_alloc_elems = elems_f;
2650 cache->zc_free_elems = elems_a;
2651 }
2652
2653 __pure2
2654 static smr_t
2655 zone_cache_smr(zone_cache_t cache)
2656 {
2657 return cache->zc_smr;
2658 }
2659
2660 /*!
2661 * @function zone_magazine_replace
2662 *
2663 * @brief
2664 * Unload a magazine and load a new one instead.
2665 */
2666 static zone_magazine_t
2667 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2668 {
2669 zone_magazine_t old;
2670 vm_offset_t **elems;
2671
2672 mag->zm_seq = SMR_SEQ_INVALID;
2673
2674 if (empty) {
2675 elems = &zc->zc_free_elems;
2676 zc->zc_free_cur = 0;
2677 } else {
2678 elems = &zc->zc_alloc_elems;
2679 zc->zc_alloc_cur = zc_mag_size();
2680 }
2681 old = (zone_magazine_t)((uintptr_t)*elems -
2682 offsetof(struct zone_magazine, zm_elems));
2683 *elems = mag->zm_elems;
2684
2685 return old;
2686 }
2687
2688 static zone_magazine_t
2689 zone_magazine_alloc(zalloc_flags_t flags)
2690 {
2691 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2692 }
2693
2694 static void
2695 zone_magazine_free(zone_magazine_t mag)
2696 {
2697 (zfree)(zc_magazine_zone, mag);
2698 }
2699
2700 static void
2701 zone_magazine_free_list(struct zone_depot *zd)
2702 {
2703 zone_magazine_t tmp, mag = *zd->zd_tail;
2704
2705 while (mag) {
2706 tmp = mag->zm_next;
2707 zone_magazine_free(mag);
2708 mag = tmp;
2709 }
2710
2711 *zd->zd_tail = NULL;
2712 zd->zd_empty = 0;
2713 }
2714
2715 __mockable void
2716 zone_enable_caching(zone_t zone)
2717 {
2718 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2719 zone_cache_t caches;
2720 size_t depot_limit;
2721
2722 depot_limit = zc_pcpu_max() / size_per_mag;
2723 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2724
2725 caches = zalloc_percpu_permanent_type(struct zone_cache);
2726 zpercpu_foreach(zc, caches) {
2727 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2728 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2729 zone_depot_init(&zc->zc_depot);
2730 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2731 }
2732
2733 zone_lock(zone);
2734 assert(zone->z_pcpu_cache == NULL);
2735 zone->z_pcpu_cache = caches;
2736 zone->z_recirc_cont_cur = 0;
2737 zone->z_recirc_cont_wma = 0;
2738 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2739 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2740 zone_unlock(zone);
2741 }
2742
2743 bool
2744 zone_maps_owned(vm_address_t addr, vm_size_t size)
2745 {
2746 return from_zone_map(addr, size);
2747 }
2748
2749 #if KASAN_LIGHT
2750 bool
2751 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2752 {
2753 return from_zone_map(addr, size) ||
2754 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2755 }
2756 #endif /* KASAN_LIGHT */
2757
2758 void
2759 zone_map_sizes(
2760 vm_map_size_t *psize,
2761 vm_map_size_t *pfree,
2762 vm_map_size_t *plargest_free)
2763 {
2764 vm_map_size_t size, free, largest;
2765
2766 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2767
2768 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2769 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2770 *psize += size;
2771 *pfree += free;
2772 *plargest_free = MAX(*plargest_free, largest);
2773 }
2774 }
2775
2776 __attribute__((always_inline))
2777 vm_map_t
2778 zone_submap(zone_security_flags_t zsflags)
2779 {
2780 return zone_submaps[zsflags.z_submap_idx];
2781 }
2782
2783 unsigned
2784 zpercpu_count(void)
2785 {
2786 return zpercpu_early_count;
2787 }
2788
2789 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
2790 /*
2791 * Returns a random number of a given bit-width.
2792 *
2793 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2794 *
2795 * This uses Intel's rdrand because random() uses FP registers,
2796 * which causes FP faults and allocations; that isn't something
2797 * we can do from zalloc itself due to reentrancy problems.
2798 *
2799 * For pre-rdrand machines (which we no longer support),
2800 * we use a bad biased random generator that doesn't use FP.
2801 * Such HW is no longer supported, but VM of newer OSes on older
2802 * bare metal is made to limp along (with reduced security) this way.
2803 */
2804 static uint64_t
2805 zalloc_random_mask64(uint32_t bits)
2806 {
2807 uint64_t mask = ~0ull >> (64 - bits);
2808 uint64_t v;
2809
2810 #if __x86_64__
2811 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2812 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2813 v &= mask;
2814 } else {
2815 disable_preemption();
2816 int cpu = cpu_number();
2817 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2818 zone_bool_gen[cpu].zbg_entropy,
2819 ZONE_ENTROPY_CNT, bits);
2820 enable_preemption();
2821 }
2822 #else
2823 v = early_random() & mask;
2824 #endif
2825
2826 return v;
2827 }
2828
2829 /*
2830 * Returns a random number within [bound_min, bound_max)
2831 *
2832 * This isn't _exactly_ uniform, but the skew is small enough
2833 * not to matter for the consumers of this interface.
2834 *
2835 * Values within [bound_min, 2^64 % (bound_max - bound_min))
2836 * will be returned (bound_max - bound_min) / 2^64 more often
2837 * than values within [2^64 % (bound_max - bound_min), bound_max).
2838 */
2839 static uint32_t
2840 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2841 {
2842 uint64_t delta = bound_max - bound_min;
2843
2844 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2845 }
2846
2847 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
2848 #if ZALLOC_ENABLE_LOGGING
2849 /*
2850 * Track all kalloc zones of specified size for zlog name
2851 * - kalloc.type.var.<size>
2852 * - kalloc.data.<size>
2853 * - kalloc.data_shared.<size>
2854 * - kalloc.type.<size>
2855 * - kalloc.<size>
2856 *
2857 * Additionally track all early kalloc zones with early.kalloc
2858 */
2859 static bool
2860 track_kalloc_zones(zone_t z, const char *logname)
2861 {
2862 const char *prefix;
2863 size_t len;
2864 zone_security_flags_t zsflags = zone_security_config(z);
2865
2866 prefix = "kalloc.type.var.";
2867 len = strlen(prefix);
2868 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2869 strncmp(logname, prefix, len) == 0) {
2870 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2871
2872 return zone_elem_inner_size(z) == sizeclass;
2873 }
2874
2875 prefix = "kalloc.type.";
2876 len = strlen(prefix);
2877 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2878 strncmp(logname, prefix, len) == 0) {
2879 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2880
2881 return zone_elem_inner_size(z) == sizeclass;
2882 }
2883
2884 prefix = "kalloc.data.";
2885 len = strlen(prefix);
2886 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS &&
2887 strncmp(logname, prefix, len) == 0) {
2888 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2889
2890 return zone_elem_inner_size(z) == sizeclass;
2891 }
2892
2893 prefix = "kalloc.data_shared.";
2894 len = strlen(prefix);
2895 if (zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED &&
2896 strncmp(logname, prefix, len) == 0) {
2897 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2898
2899 return zone_elem_inner_size(z) == sizeclass;
2900 }
2901
2902 prefix = "kalloc.";
2903 len = strlen(prefix);
2904 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2905 strncmp(logname, prefix, len) == 0) {
2906 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2907
2908 return zone_elem_inner_size(z) == sizeclass;
2909 }
2910
2911 prefix = "early.kalloc";
2912 if ((zsflags.z_kheap_id == KHEAP_ID_EARLY) &&
2913 (strcmp(logname, prefix) == 0)) {
2914 return true;
2915 }
2916
2917 return false;
2918 }
2919 #endif
2920
2921 int
2922 track_this_zone(const char *zonename, const char *logname)
2923 {
2924 unsigned int len;
2925 const char *zc = zonename;
2926 const char *lc = logname;
2927
2928 /*
2929 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2930 */
2931
2932 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2933 /*
2934 * If the current characters don't match, check for a space
2935 * in the zone name and a corresponding period in the log name.
2936 * If that's not there, then the strings don't match.
2937 */
2938
2939 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2940 break;
2941 }
2942
2943 /*
2944 * The strings are equal so far. If we're at the end, then it's a match.
2945 */
2946
2947 if (*zc == '\0') {
2948 return TRUE;
2949 }
2950 }
2951
2952 return FALSE;
2953 }
2954
2955 #if DEBUG || DEVELOPMENT
2956
2957 vm_size_t
2958 zone_element_info(void *addr, vm_tag_t * ptag)
2959 {
2960 vm_size_t size = 0;
2961 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2962 struct zone *src_zone;
2963
2964 if (from_zone_map(addr, sizeof(void *))) {
2965 src_zone = zone_by_id(zone_index_from_ptr(addr));
2966 size = zone_elem_inner_size(src_zone);
2967 #if VM_TAG_SIZECLASSES
2968 if (__improbable(src_zone->z_uses_tags)) {
2969 struct zone_page_metadata *meta;
2970 vm_offset_t eidx;
2971 vm_tag_t *slot;
2972
2973 meta = zone_element_resolve(src_zone,
2974 (vm_offset_t)addr, &eidx);
2975 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
2976 tag = *slot;
2977 }
2978 #endif /* VM_TAG_SIZECLASSES */
2979 }
2980
2981 *ptag = tag;
2982 return size;
2983 }
2984
2985 #endif /* DEBUG || DEVELOPMENT */
2986 #if KASAN_CLASSIC
2987
2988 vm_size_t
2989 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
2990 {
2991 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
2992
2993 *zonep = zone;
2994 return zone_elem_inner_size(zone);
2995 }
2996
2997 #endif /* KASAN_CLASSIC */
2998 #endif /* !ZALLOC_TEST */
2999 #pragma mark Zone zeroing and early random
3000 #if !ZALLOC_TEST
3001
3002 /*
3003 * Zone zeroing
3004 *
3005 * All allocations from zones are zeroed on free and are additionally
3006 * checked to still be zero on alloc. The check is
3007 * always on for embedded devices. A perf regression was detected
3008 * on Intel as we can't use the vectorized implementation of
3009 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3010 * initialization and allocation. Therefore we perform the check
3011 * on only 20% of the allocations there.
3012 */
3013 #if ZALLOC_ENABLE_ZERO_CHECK
3014 #if defined(__x86_64__)
3015 /*
3016 * Perform zero validation on every 5th allocation
3017 */
3018 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3019 static uint32_t PERCPU_DATA(zzc_decrementer);
3020 #endif /* defined(__x86_64__) */
3021
3022 /*
3023 * Determine if zero validation for allocation should be skipped
3024 */
3025 static bool
3026 zalloc_skip_zero_check(void)
3027 {
3028 #if defined(__x86_64__)
3029 uint32_t *counterp, cnt;
3030
3031 counterp = PERCPU_GET(zzc_decrementer);
3032 cnt = *counterp;
3033 if (__probable(cnt > 0)) {
3034 *counterp = cnt - 1;
3035 return true;
3036 }
3037 *counterp = zzc_rate - 1;
3038 #endif /* defined(__x86_64__) */
3039 return false;
3040 }
3041
3042 __abortlike
3043 static void
3044 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3045 {
3046 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3047 uint32_t first_offs = ~0u;
3048 uintptr_t first_bits = 0, v;
3049 char buf[1024];
3050 int pos = 0;
3051
3052 buf[0] = '\0';
3053
3054 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3055 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3056 continue;
3057 }
3058 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3059 "%5d: 0x%016lx", o, v);
3060 if (first_offs > o) {
3061 first_offs = o;
3062 first_bits = v;
3063 }
3064 }
3065
3066 (panic)("[%s%s]: element modified after free "
3067 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3068 zone_heap_name(z), zone_name(z),
3069 first_offs, first_bits, esize, (void *)elem, buf);
3070 }
3071
3072 static void
3073 zalloc_validate_element(
3074 zone_t zone,
3075 vm_offset_t elem,
3076 vm_size_t size,
3077 zalloc_flags_t flags)
3078 {
3079 if (flags & Z_NOZZC) {
3080 return;
3081 }
3082 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3083 zalloc_uaf_panic(zone, elem, size);
3084 }
3085 if (flags & Z_PCPU) {
3086 for (size_t i = zpercpu_count(); --i > 0;) {
3087 elem += PAGE_SIZE;
3088 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3089 zalloc_uaf_panic(zone, elem, size);
3090 }
3091 }
3092 }
3093 }
3094
3095 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3096
3097 __attribute__((noinline))
3098 static void
3099 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3100 {
3101 #if KASAN_FAKESTACK
3102 /*
3103 * This can cause re-entrancy with kasan fakestacks
3104 */
3105 #pragma unused(zone, cpu, zs)
3106 #else
3107 uint32_t bits;
3108
3109 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3110 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3111
3112 zs->zs_alloc_rr += bits;
3113 zs->zs_alloc_rr %= zone->z_chunk_elems;
3114 #endif
3115 }
3116
3117 #endif /* !ZALLOC_TEST */
3118 #pragma mark Zone Leak Detection
3119 #if !ZALLOC_TEST
3120 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3121
3122 /*
3123 * Zone leak debugging code
3124 *
3125 * When enabled, this code keeps a log to track allocations to a particular
3126 * zone that have not yet been freed.
3127 *
3128 * Examining this log will reveal the source of a zone leak.
3129 *
3130 * The log is allocated only when logging is enabled (it is off by default),
3131 * so there is no effect on the system when it's turned off.
3132 *
3133 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3134 * zone name to log, with n starting at 1.
3135 *
3136 * Leaks debugging utilizes 2 tunables:
3137 * - zlsize (in kB) which describes how much "size" the record covers
3138 * (zones with smaller elements get more records, default is 4M).
3139 *
3140 * - zlfreq (in bytes) which describes a sample rate in cumulative allocation
3141 * size at which automatic leak detection will sample allocations.
3142 * (default is 8k)
3143 *
3144 *
3145 * Zone corruption logging
3146 *
3147 * Logging can also be used to help identify the source of a zone corruption.
3148 *
3149 * First, identify the zone that is being corrupted,
3150 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3151 *
3152 * When -zc is used in conjunction with zlog,
3153 * it changes the logging style to track both allocations and frees to the zone.
3154 *
3155 * When the corruption is detected, examining the log will show you the stack
3156 * traces of the callers who last allocated and freed any particular element in
3157 * the zone.
3158 *
3159 * Corruption debugging logs will have zrecs records
3160 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3161 */
3162
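/*
 * Example (the zone name here is hypothetical): booting with
 *     zlog1=vm.map.entries zlsize=8192
 * enables leak logging for that zone covering about 8MB of allocations,
 * while
 *     -zc zlog1=vm.map.entries zrecs=32768
 * switches that zone to corruption-style alloc/free logging instead.
 */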
3163 #define ZRECORDS_MAX (256u << 10)
3164 #define ZRECORDS_DEFAULT (16u << 10)
3165 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3166 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3167 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3168
3169 __startup_func
3170 static void
3171 zone_leaks_init_zrecs(void)
3172 {
3173 /*
3174 * Don't allow more than ZRECORDS_MAX records,
3175 * even if the user asked for more.
3176 *
3177 * This prevents accidentally hogging too much kernel memory
3178 * and making the system unusable.
3179 */
3180 if (zrecs == 0) {
3181 zrecs = ZRECORDS_DEFAULT *
3182 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3183 }
3184 if (zrecs > ZRECORDS_MAX) {
3185 zrecs = ZRECORDS_MAX;
3186 }
3187 }
3188 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3189
3190 static uint32_t
3191 zone_leaks_record_count(zone_t z)
3192 {
3193 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3194
3195 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3196 }
3197
3198 static uint32_t
3199 zone_leaks_sample_rate(zone_t z)
3200 {
3201 return zlfreq / zone_elem_inner_size(z);
3202 }
3203
3204 #if ZALLOC_ENABLE_LOGGING
3205 /* Log allocations and frees to help debug a zone element corruption */
3206 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3207
3208 /*
3209 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3210 */
3211 #define MAX_ZONES_LOG_REQUESTS 10
3212
3213 static bool
3214 zone_get_zlog_boot_arg(int i, char zlog_val[static MAX_ZONE_NAME])
3215 {
3216 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3217
3218 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3219
3220 if (PE_parse_boot_argn(zlog_name, zlog_val, MAX_ZONE_NAME)) {
3221 return true;
3222 }
3223
3224 return false;
3225 }
3226
3227 /**
3228 * @function zone_setup_logging
3229 *
3230 * @abstract
3231 * Optionally sets up a zone for logging.
3232 *
3233 * @discussion
3234 * We recognize the following boot-args:
3235 *
3236 * zlog=<zone_to_log>
3237 * zrecs=<num_records_in_log>
3238 * zlsize=<memory to cover for leaks>
3239 *
3240 * The zlog arg is used to specify the zone name that should be logged,
3241 * and zrecs/zlsize is used to control the size of the log.
3242 */
3243 static void
3244 zone_setup_logging(zone_t z)
3245 {
3246 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3247 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3248 bool logging_on = false;
3249
3250 /*
3251 * Append kalloc heap name to zone name (if zone is used by kalloc)
3252 */
3253 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3254
3255 /* zlog0 isn't allowed. */
3256 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3257 if (zone_get_zlog_boot_arg(i, zlog_val)) {
3258 if (track_this_zone(zone_name, zlog_val) ||
3259 track_kalloc_zones(z, zlog_val)) {
3260 logging_on = true;
3261 break;
3262 }
3263 }
3264 }
3265
3266 /*
3267 * Backwards compatibility with the old boot-arg used to specify
3268 * single-zone logging, i.e. zlog. This needs to happen after the newer
3269 * zlog<n> checks because the zlog prefix will match all of the zlog<n>
3270 * boot-args.
3271 */
3272 if (!logging_on &&
3273 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3274 if (track_this_zone(zone_name, zlog_val) ||
3275 track_kalloc_zones(z, zlog_val)) {
3276 logging_on = true;
3277 }
3278 }
3279
3280 /*
3281 * If we want to log a zone, see if we need to allocate buffer space for
3282 * the log.
3283 *
3284 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3285 * we have to defer allocation in that case.
3286 *
3287 * zone_init() will finish the job.
3288 *
3289 * If we want to log one of the VM related zones that's set up early on,
3290 * we will skip allocation of the log until zinit is called again later
3291 * on some other zone.
3292 */
3293 if (logging_on) {
3294 if (corruption_debug_flag) {
3295 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3296 } else {
3297 z->z_btlog = btlog_create(BTLOG_HASH,
3298 zone_leaks_record_count(z), 0);
3299 }
3300 if (z->z_btlog) {
3301 z->z_log_on = true;
3302 printf("zone[%s%s]: logging enabled\n",
3303 zone_heap_name(z), z->z_name);
3304 } else {
3305 printf("zone[%s%s]: failed to enable logging\n",
3306 zone_heap_name(z), z->z_name);
3307 }
3308 }
3309 }
3310
3311 #endif /* ZALLOC_ENABLE_LOGGING */
3312 #if KASAN_TBI
3313 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3314
3315 __startup_func
3316 static void
3317 kasan_tbi_init_zrecs(void)
3318 {
3319 /*
3320 * Don't allow more than ZRECORDS_MAX records,
3321 * even if the user asked for more.
3322 *
3323 * This prevents accidentally hogging too much kernel memory
3324 * and making the system unusable.
3325 */
3326 if (kasan_zrecs == 0) {
3327 kasan_zrecs = ZRECORDS_DEFAULT *
3328 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3329 }
3330 if (kasan_zrecs > ZRECORDS_MAX) {
3331 kasan_zrecs = ZRECORDS_MAX;
3332 }
3333 }
3334 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3335
3336 static void
3337 zone_setup_kasan_logging(zone_t z)
3338 {
3339 if (!z->z_tbi_tag) {
3340 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3341 zone_heap_name(z), z->z_name);
3342 return;
3343 }
3344
3345 z->z_log_on = true;
3346 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3347 if (!z->z_btlog) {
3348 printf("zone[%s%s]: failed to enable kasan logging\n",
3349 zone_heap_name(z), z->z_name);
3350 }
3351 }
3352
3353 #endif /* KASAN_TBI */
3354 #if CONFIG_ZLEAKS
3355
3356 static thread_call_data_t zone_leaks_callout;
3357
3358 /*
3359 * The zone leak detector, abbreviated 'zleak', keeps track
3360 * of a subset of the currently outstanding allocations
3361 * made by the zone allocator.
3362 *
3363 * Zones who use more than zleak_pages_per_zone_wired_threshold
3364 * pages will get a BTLOG_HASH btlog with sampling to minimize
3365 * perf impact, yet receive statistical data about the backtrace
3366 * that is the most likely to cause the leak.
3367 *
3368 * If the zone goes under the threshold enough, then the log
3369 * is disabled and backtraces freed. Data can be collected
3370 * from userspace with the zlog(1) command.
3371 */
3372
3373 uint32_t zleak_active;
3374 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3375
3376 /* Size a zone will have before we will collect data on it */
3377 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3378 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3379
3380 static inline bool
3381 zleak_should_enable_for_zone(zone_t z)
3382 {
3383 if (z->z_log_on) {
3384 return false;
3385 }
3386 if (z->z_btlog) {
3387 return false;
3388 }
3389 if (z->z_exhausts) {
3390 return false;
3391 }
3392 if (zone_exhaustible(z)) {
3393 return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3394 }
3395 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3396 }
3397
3398 static inline bool
3399 zleak_should_disable_for_zone(zone_t z)
3400 {
3401 if (z->z_log_on) {
3402 return false;
3403 }
3404 if (!z->z_btlog) {
3405 return false;
3406 }
3407 if (zone_exhaustible(z)) {
3408 return z->z_wired_cur * 8 < z->z_wired_max * 7;
3409 }
3410 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3411 }
3412
3413 static void
3414 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3415 {
3416 btlog_t log;
3417
3418 zone_foreach(z) {
3419 if (zleak_should_disable_for_zone(z)) {
3420 log = z->z_btlog;
3421 z->z_btlog = NULL;
3422 assert(z->z_btlog_disabled == NULL);
3423 btlog_disable(log);
3424 z->z_btlog_disabled = log;
3425 os_atomic_dec(&zleak_active, relaxed);
3426 }
3427
3428 if (zleak_should_enable_for_zone(z)) {
3429 log = z->z_btlog_disabled;
3430 if (log == NULL) {
3431 log = btlog_create(BTLOG_HASH,
3432 zone_leaks_record_count(z),
3433 zone_leaks_sample_rate(z));
3434 } else if (btlog_enable(log) == KERN_SUCCESS) {
3435 z->z_btlog_disabled = NULL;
3436 } else {
3437 log = NULL;
3438 }
3439 os_atomic_store(&z->z_btlog, log, release);
3440 os_atomic_inc(&zleak_active, relaxed);
3441 }
3442 }
3443 }
3444
3445 __startup_func
3446 static void
3447 zleak_init(void)
3448 {
3449 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3450
3451 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3452 zleak_max_zonemap_size / 8);
3453
3454 thread_call_setup_with_options(&zone_leaks_callout,
3455 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3456 THREAD_CALL_OPTIONS_ONCE);
3457 }
3458 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3459
3460 kern_return_t
3461 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3462 {
3463 if (value >= zleak_max_zonemap_size) {
3464 return KERN_INVALID_VALUE;
3465 }
3466
3467 if (arg == &zleak_per_zone_tracking_threshold) {
3468 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3469 zleak_pages_per_zone_wired_threshold = atop(value);
3470 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3471 thread_call_enter(&zone_leaks_callout);
3472 }
3473 return KERN_SUCCESS;
3474 }
3475
3476 return KERN_INVALID_ARGUMENT;
3477 }
3478
3479 static void
3480 panic_display_zleaks(bool has_syms)
3481 {
3482 bool did_header = false;
3483 vm_address_t bt[BTLOG_MAX_DEPTH];
3484 uint32_t len, count;
3485
3486 zone_foreach(z) {
3487 btlog_t log = z->z_btlog;
3488
3489 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3490 continue;
3491 }
3492
3493 count = btlog_guess_top(log, bt, &len);
3494 if (count == 0) {
3495 continue;
3496 }
3497
3498 if (!did_header) {
3499 paniclog_append_noflush("Zone (suspected) leak report:\n");
3500 did_header = true;
3501 }
3502
3503 paniclog_append_noflush(" Zone: %s%s\n",
3504 zone_heap_name(z), zone_name(z));
3505 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3506 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3507 paniclog_append_noflush(" Size: %ld\n",
3508 (long)zone_size_wired(z));
3509 paniclog_append_noflush(" Top backtrace:\n");
3510 for (uint32_t i = 0; i < len; i++) {
3511 if (has_syms) {
3512 paniclog_append_noflush(" %p ", (void *)bt[i]);
3513 panic_print_symbol_name(bt[i]);
3514 paniclog_append_noflush("\n");
3515 } else {
3516 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3517 }
3518 }
3519
3520 kmod_panic_dump(bt, len);
3521 paniclog_append_noflush("\n");
3522 }
3523 }
3524 #endif /* CONFIG_ZLEAKS */
3525
3526 #endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS */
3527 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3528
3529 #if !KASAN_TBI
3530 __cold
3531 #endif
3532 static void
3533 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3534 {
3535 btlog_t log = zone->z_btlog;
3536 btref_get_flags_t flags = 0;
3537 btref_t ref;
3538
3539 #if !KASAN_TBI
3540 if (!log || !btlog_sample(log)) {
3541 return;
3542 }
3543 #endif
3544 if (get_preemption_level() || zone_supports_vm(zone)) {
3545 /*
3546 * VM zones can be used by btlog, avoid reentrancy issues.
3547 */
3548 flags = BTREF_GET_NOWAIT;
3549 }
3550
3551 ref = btref_get(fp, flags);
3552 while (count-- > 0) {
3553 if (count) {
3554 btref_retain(ref);
3555 }
3556 addr = (vm_offset_t)zstack_tbi_fix(addr);
3557 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3558 addr += *(vm_offset_t *)addr;
3559 }
3560 }
3561
3562 #define ZALLOC_LOG(zone, addr, count) ({ \
3563 if ((zone)->z_btlog) { \
3564 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3565 } \
3566 })
3567
3568 #if !KASAN_TBI
3569 __cold
3570 #endif
3571 static void
3572 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3573 {
3574 btlog_t log = zone->z_btlog;
3575 btref_get_flags_t flags = 0;
3576 btref_t ref;
3577
3578 #if !KASAN_TBI
3579 if (!log) {
3580 return;
3581 }
3582 #endif
3583
3584 /*
3585 * See if we're doing logging on this zone.
3586 *
3587 * There are two styles of logging used depending on
3588 * whether we're trying to catch a leak or corruption.
3589 */
3590 #if !KASAN_TBI
3591 if (btlog_get_type(log) == BTLOG_HASH) {
3592 /*
3593 * We're logging to catch a leak.
3594 *
3595 * Remove any record we might have for this element
3596 * since it's being freed. Note that we may not find it
3597 * if the buffer overflowed and that's OK.
3598 *
3599 * Since the log is of a limited size, old records get
3600 * overwritten if there are more zallocs than zfrees.
3601 */
3602 while (count-- > 0) {
3603 addr = (vm_offset_t)zstack_tbi_fix(addr);
3604 btlog_erase(log, (void *)addr);
3605 addr += *(vm_offset_t *)addr;
3606 }
3607 return;
3608 }
3609 #endif /* !KASAN_TBI */
3610
3611 if (get_preemption_level() || zone_supports_vm(zone)) {
3612 /*
3613 * VM zones can be used by btlog, avoid reentrancy issues.
3614 */
3615 flags = BTREF_GET_NOWAIT;
3616 }
3617
3618 ref = btref_get(fp, flags);
3619 while (count-- > 0) {
3620 if (count) {
3621 btref_retain(ref);
3622 }
3623 addr = (vm_offset_t)zstack_tbi_fix(addr);
3624 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3625 addr += *(vm_offset_t *)addr;
3626 }
3627 }
3628
3629 #define ZFREE_LOG(zone, addr, count) ({ \
3630 if ((zone)->z_btlog) { \
3631 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3632 } \
3633 })
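
/*
 * Illustrative usage of the two macros above (hedged sketch: the real call
 * sites live in the allocation/free paths outside this excerpt, and `addr`
 * and `n` here are hypothetical):
 *
 *	vm_offset_t addr = ...;	// first element of a chained batch
 *	uint32_t    n    = 4;	// number of elements in the batch
 *
 *	ZALLOC_LOG(zone, addr, n);	// record a backtrace ref per element
 *	...
 *	ZFREE_LOG(zone, addr, n);	// record the free, or erase leak records
 *
 * When neither zone logging, CONFIG_ZLEAKS nor KASAN_TBI is configured,
 * both macros expand to ((void)0), so call sites need no #if guards.
 */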
3634
3635 #else
3636 #define ZALLOC_LOG(...) ((void)0)
3637 #define ZFREE_LOG(...) ((void)0)
3638 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3639 #endif /* !ZALLOC_TEST */
3640 #pragma mark zone (re)fill
3641 #if !ZALLOC_TEST
3642
3643 /*!
3644 * @defgroup Zone Refill
3645 * @{
3646 *
3647 * @brief
3648 * Functions handling the zone refill machinery.
3649 *
3650 * @discussion
3651 * Zones are refilled through 2 mechanisms: direct expansion and async expansion.
3652 *
3653 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3654 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3655 *
3656 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3657 *
3658 * - wake up the asynchronous expansion thread call if the caller is not allowed
3659 * to block, or if the reserve becomes depleted.
3660 *
3661 *
3662 * <h2>Synchronous expansion</h2>
3663 *
3664 * This mechanism is actually the only one that may refill a zone, and all the
3665 * other ones funnel through this one eventually.
3666 *
3667 * @c zone_expand_locked() implements the core of the expansion mechanism,
3668 * and will do so while a caller-specified predicate is true.
3669 *
3670 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3671 * - one VM privileged thread,
3672 * - one regular thread.
3673 *
3674 * Regular threads that refill will put down their identity in @c z_expander,
3675 * so that priority inversion avoidance can be implemented.
3676 *
3677 * However, VM privileged threads are allowed to use VM page reserves,
3678 * which allows for the system to recover from extreme memory pressure
3679 * situations, allowing for the few allocations that @c zone_gc() or
3680 * killing processes require.
3681 *
3682 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3683 * is set. @c z_expander is not necessarily the identity of this VM privileged
3684 * thread (it is when the VM privileged thread came in first; otherwise it
3685 * wouldn't be, and could even be @c THREAD_NULL).
3686 *
3687 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3688 * spending a whole pointer on priority inheritance for VM privileged threads
3689 * (and other issues related to having two owners), we use the rwlock boost as
3690 * a stop gap to avoid priority inversions.
3691 *
3692 *
3693 * <h2>Chunk wiring policies</h2>
3694 *
3695 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3696 * to try to minimize the fragmentation caused by element sizes that do not
3697 * divide a chunk evenly. However, such chunks can be large and hard to fulfill
3698 * on a system under a lot of memory pressure (chunks can be as large as 8 pages
3699 * on 4k page systems).
3700 *
3701 * This is why, when under memory pressure the system allows chunks to be
3702 * partially populated. The metadata of the first page in the chunk maintains
3703 * the count of actually populated pages.
3704 *
3705 * The metadata for addresses assigned to a zone is kept on 4 queues:
3706 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3707 * elements (those can be targeted by @c zone_gc()),
3708 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3709 * used,
3710 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3711 * left,
3712 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3713 * the zone forever, or the first secondary metadata for a chunk whose
3714 * corresponding page is not populated in the chunk.
3715 *
3716 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3717 * queues are preferred.
3718 *
3719 *
3720 * <h2>Asynchronous expansion</h2>
3721 *
3722 * This mechanism allows for refilling zones mostly used by non-blocking
3723 * callers. It relies on a thread call (@c zone_expand_callout) which will
3724 * iterate all zones and refill the ones marked with @c z_async_refilling.
3725 *
3726 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3727 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3728 *
3729 *
3730 * <h2>Dealing with zone allocations from the mach VM code</h2>
3731 *
3732 * The implementation of the mach VM itself uses the zone allocator
3733 * for things like the vm_map_entry data structure. In order to prevent
3734 * a recursion problem when adding more pages to a zone, the VM zones
3735 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3736 * or any VM map functions to allocate.
3737 *
3738 * Instead, a really simple coalescing first-fit allocator is used
3739 * for this submap, and no one else than zalloc can allocate from it.
3740 *
3741 * Memory is directly populated which doesn't require allocation of
3742 * VM map entries, and avoids recursion. The cost of this scheme however,
3743 * is that `vm_map_lookup_entry` will not function on those addresses
3744 * (nor any API relying on it).
3745 */
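
/*
 * Minimal sketch of the refill decision described above (hedged: the actual
 * policy lives in zalloc_ext() and zalloc_needs_refill() outside or below
 * this point, handles more cases such as exhaustible zones and Z_NOFAIL,
 * and the exact flag tested may differ):
 *
 *	if (z->z_elems_free <= z->z_elems_rsv / 2) {
 *		if (flags & Z_NOWAIT) {
 *			zone_expand_async_schedule_if_allowed(z);
 *		} else {
 *			zone_expand_locked(z, flags);
 *		}
 *	}
 */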
3746
3747 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3748 static void zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd);
3749 static thread_call_data_t zone_expand_callout;
3750
3751 __attribute__((overloadable))
3752 static inline bool
3753 zone_submap_is_sequestered(zone_submap_idx_t idx)
3754 {
3755 return idx != Z_SUBMAP_IDX_DATA;
3756 }
3757
3758 __attribute__((overloadable))
3759 static inline bool
3760 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3761 {
3762 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3763 }
3764
3765 static inline kma_flags_t
3766 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3767 {
3768 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3769
3770 if (zsflags.z_noencrypt) {
3771 kmaflags |= KMA_NOENCRYPT;
3772 }
3773
3774 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
3775 kmaflags |= KMA_DATA;
3776 } else if ((zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED) ||
3777 (zsflags.z_submap_idx == Z_SUBMAP_IDX_DATA)) {
3778 /*
3779 * assume zones which are manually in the data heap,
3780 * like mbufs, are going to be shared somehow.
3781 */
3782 kmaflags |= KMA_DATA_SHARED;
3783 }
3784
3785 if (flags & Z_NOPAGEWAIT) {
3786 kmaflags |= KMA_NOPAGEWAIT;
3787 }
3788 if (z->z_permanent || (!z->z_destructible &&
3789 zone_submap_is_sequestered(zsflags))) {
3790 kmaflags |= KMA_PERMANENT;
3791 }
3792 if (zsflags.z_submap_from_end) {
3793 kmaflags |= KMA_LAST_FREE;
3794 }
3795
3796 #if HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING)
3797 if (zsflags.z_tag) {
3798 kmaflags |= KMA_TAG;
3799 }
3800 #endif /* HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING) */
3801
3802 return kmaflags;
3803 }
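
/*
 * Worked example of the translation above (hedged, assumes the remaining
 * security flags are clear): a destructible zone living in the data heap
 * (z_kheap_id == KHEAP_ID_DATA_BUFFERS, no encryption exemption),
 * allocated with Z_WAITOK, typically resolves to
 *
 *	KMA_KOBJECT | KMA_ZERO | KMA_DATA
 *
 * whereas a non-destructible zone in a sequestered submap additionally
 * gets KMA_PERMANENT, since its backing VA is never returned to the submap.
 */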
3804
3805 static inline void
3806 zone_add_wired_pages(zone_t z, uint32_t pages)
3807 {
3808 os_atomic_add(&zone_pages_wired, pages, relaxed);
3809
3810 #if CONFIG_ZLEAKS
3811 if (__improbable(zleak_should_enable_for_zone(z) &&
3812 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3813 thread_call_enter(&zone_leaks_callout);
3814 }
3815 #else
3816 (void)z;
3817 #endif
3818 }
3819
3820 static inline void
3821 zone_remove_wired_pages(zone_t z, uint32_t pages)
3822 {
3823 os_atomic_sub(&zone_pages_wired, pages, relaxed);
3824
3825 #if CONFIG_ZLEAKS
3826 if (__improbable(zleak_should_disable_for_zone(z) &&
3827 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3828 thread_call_enter(&zone_leaks_callout);
3829 }
3830 #else
3831 (void)z;
3832 #endif
3833 }
3834
3835 #if ZSECURITY_CONFIG(ZONE_TAGGING)
3836
3837 static inline void
3838 zone_tag_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3839 {
3840 if (zone->z_percpu) {
3841 zpercpu_foreach_cpu(index) {
3842 vm_memtag_store_tag(addr + ptoa(index), elem_size);
3843 }
3844 }
3845 }
3846
3847 static inline caddr_t
3848 zone_tag_free_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3849 {
3850 #if HAS_MTE
3851 /*
3852 * We got a tagged address here and we are about to re-tag it.
3853 * Verify that a weird tagged value didn't slip all the way down
3854 * here as that would almost certainly signal malicious action.
3855 */
3856 vm_memtag_verify_tag((vm_map_address_t)addr);
3857
3858 /*
3859 * Tagging policy is to "tag-on-free" and 0xF is banned from
3860 * the kernel versioning space (as it equates to the canonical
3861 * tag value). We can therefore assume that any address that is
3862 * lower than 0xFF00000000000000ULL is actually a tagged address.
3863 * We use that simple trick to avoid storing into the zone data
3864 * whether it has tagging enabled or not, as that could in some
3865 * stretched case become a target for an attacker.
3866 */
3867 #endif /* HAS_MTE */
3868 if (__improbable((uintptr_t)addr > 0xFF00000000000000ULL)) {
3869 return addr;
3870 }
3871
3872 addr = vm_memtag_generate_and_store_tag(addr, elem_size);
3873 zone_tag_element(zone, addr, elem_size);
3874
3875 return addr;
3876 }
3877
3878 static inline void
3879 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3880 {
3881 zone_security_flags_t *zsflags = &zone_security_array[zone_index(zone)];
3882
3883 if (!zsflags->z_tag) {
3884 return;
3885 }
3886
3887 vm_size_t elem_size = zone_elem_outer_size(zone);
3888 vm_size_t oob_offs = zone_elem_outer_offs(zone);
3889
3890 #if HAS_MTE
3891 caddr_t prev_addr = (caddr_t)-1ULL;
3892 #endif /* HAS_MTE */
3893
3894 for (uint32_t i = start; i < end; i++) {
3895 caddr_t elem_addr = (caddr_t)(base + oob_offs + i * elem_size);
3896
3897 #if HAS_MTE
3898 /* To initialize a fresh page, just randomize, remembering the previous tag */
3899 mte_exclude_mask_t mask = GCR_EL1_EXCLUDE_TAGS_KERNEL;
3900 mask = mte_update_exclude_mask(prev_addr, mask);
3901
3902 elem_addr = mte_generate_and_store_tag(elem_addr, elem_size, mask);
3903 prev_addr = elem_addr;
3904 #else /* HAS_MTE */
3905 elem_addr = vm_memtag_generate_and_store_tag(elem_addr, elem_size);
3906 #endif /* HAS_MTE */
3907 zone_tag_element(zone, elem_addr, elem_size);
3908 }
3909 }
3910 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3911 #define zone_tag_free_element(z, a, s) (a)
3912 #define zcram_memtag_init(z, b, s, e) do {} while (0)
3913 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3914
3915 /*!
3916 * @function zcram_and_lock()
3917 *
3918 * @brief
3919 * Prepare some memory for being usable for allocation purposes.
3920 *
3921 * @discussion
3922 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3923 * to be usable in the zone.
3924 *
3925 * This function assumes the metadata is already populated for the range.
3926 *
3927 * Calling this function with @c pg_start being 0 means that the memory
3928 * is either a partial chunk, or a full chunk, that isn't published anywhere
3929 * and the initialization can happen without locks held.
3930 *
3931 * Calling this function with a non zero @c pg_start means that we are extending
3932 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3933 * is already usable and published in the zone, so extending it requires holding
3934 * the zone lock.
3935 *
3936 * @param zone The zone to cram new populated pages into
3937 * @param addr The base address for the chunk(s)
3938 * @param pg_va_new The number of virtual pages newly assigned to the zone
3939 * @param pg_start The first newly populated page relative to @a addr.
3940 * @param pg_end The after-last newly populated page relative to @a addr.
3941 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3942 */
3943 static void
3944 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3945 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3946 {
3947 zone_id_t zindex = zone_index(zone);
3948 vm_offset_t elem_size = zone_elem_outer_size(zone);
3949 uint32_t free_start = 0, free_end = 0;
3950 uint32_t oob_offs = zone_elem_outer_offs(zone);
3951
3952 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3953 uint32_t chunk_pages = zone->z_chunk_pages;
3954 bool guarded = meta->zm_guarded;
3955
3956 assert(pg_start < pg_end && pg_end <= chunk_pages);
3957
3958 if (pg_start == 0) {
3959 uint16_t chunk_len = (uint16_t)pg_end;
3960 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3961 bool inline_bitmap = false;
3962
3963 if (zone->z_percpu) {
3964 chunk_len = 1;
3965 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3966 assert(pg_end == zpercpu_count());
3967 }
3968 if (!zone->z_permanent && !zone->z_uses_tags) {
3969 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3970 }
3971
3972 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3973
3974 meta[0] = (struct zone_page_metadata){
3975 .zm_index = zindex,
3976 .zm_guarded = guarded,
3977 .zm_inline_bitmap = inline_bitmap,
3978 .zm_chunk_len = chunk_len,
3979 .zm_alloc_size = lock,
3980 };
3981
3982 if (!zone->z_permanent && !inline_bitmap) {
3983 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3984 zone->z_chunk_elems, zone->z_uses_tags);
3985 }
3986
3987 for (uint16_t i = 1; i < chunk_pages; i++) {
3988 meta[i] = (struct zone_page_metadata){
3989 .zm_index = zindex,
3990 .zm_guarded = guarded,
3991 .zm_inline_bitmap = inline_bitmap,
3992 .zm_chunk_len = secondary_len,
3993 .zm_page_index = (uint8_t)i,
3994 .zm_bitmap = meta[0].zm_bitmap,
3995 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
3996 };
3997 }
3998
3999 if (inline_bitmap) {
4000 zone_meta_bits_init_inline(meta, free_end);
4001 }
4002 } else {
4003 assert(!zone->z_percpu && !zone->z_permanent);
4004
4005 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
4006 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
4007 }
4008
4009 zcram_memtag_init(zone, addr, free_start, free_end);
4010
4011 #if KASAN_CLASSIC
4012 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
4013 if (zone->z_permanent) {
4014 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
4015 } else if (zone->z_percpu) {
4016 for (uint32_t i = 0; i < pg_end; i++) {
4017 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
4018 zone_elem_outer_size(zone),
4019 zone_elem_outer_offs(zone),
4020 zone_elem_redzone(zone));
4021 }
4022 } else {
4023 kasan_zmem_add(addr, ptoa(pg_end),
4024 zone_elem_outer_size(zone),
4025 zone_elem_outer_offs(zone),
4026 zone_elem_redzone(zone));
4027 }
4028 #endif /* KASAN_CLASSIC */
4029
4030 /*
4031 * Insert the initialized pages / metadatas into the right lists.
4032 */
4033
4034 zone_lock(zone);
4035 assert(zone->z_self == zone);
4036
4037 if (pg_start != 0) {
4038 assert(meta->zm_chunk_len == pg_start);
4039
4040 zone_meta_bits_merge(meta, free_start, free_end);
4041 meta->zm_chunk_len = (uint16_t)pg_end;
4042
4043 /*
4044 * consume the zone_meta_lock_in_partial()
4045 * done in zone_expand_locked()
4046 */
4047 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4048 zone_meta_remqueue(zone, meta);
4049 }
4050
4051 if (zone->z_permanent || meta->zm_alloc_size) {
4052 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4053 } else {
4054 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4055 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4056 }
4057 if (pg_end < chunk_pages) {
4058 /* push any non populated residual VA on z_pageq_va */
4059 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4060 }
4061
4062 zone->z_elems_free += free_end - free_start;
4063 zone->z_elems_avail += free_end - free_start;
4064 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
4065 if (pg_va_new) {
4066 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4067 }
4068 if (zone->z_wired_hwm < zone->z_wired_cur) {
4069 zone->z_wired_hwm = zone->z_wired_cur;
4070 }
4071
4072 #if CONFIG_ZLEAKS
4073 if (__improbable(zleak_should_enable_for_zone(zone) &&
4074 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4075 thread_call_enter(&zone_leaks_callout);
4076 }
4077 #endif /* CONFIG_ZLEAKS */
4078
4079 zone_add_wired_pages(zone, pg_end - pg_start);
4080 }
4081
4082 static void
4083 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4084 {
4085 uint32_t chunk_pages = zone->z_chunk_pages;
4086
4087 assert(pages % chunk_pages == 0);
4088 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4089 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4090 zone_unlock(zone);
4091 }
4092 }
4093
4094 __startup_func
4095 void
4096 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4097 {
4098 uint32_t pages = (uint32_t)atop(size);
4099
4100 assert(from_zone_map(newmem, size));
4101 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4102 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4103
4104 /*
4105 * The early pages we move at the pmap layer can't be "depopulated"
4106 * because there's no vm_page_t for them.
4107 *
4108 * "Lock" them so that they never hit z_pageq_empty.
4109 */
4110 #if HAS_MTE
4111 /*
4112 * This range of memory was obtained by pmap_steal. We are here from
4113 * bootstrap and we'll pre-tag the region next, so we just zero
4114 * the content not bothering about tag state.
4115 */
4116 #endif /* HAS_MTE */
4117 vm_memtag_bzero_unchecked((void *)newmem, size);
4118 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4119 }
4120
4121 /*!
4122 * @function zone_submap_alloc_sequestered_va
4123 *
4124 * @brief
4125 * Allocates VA without using vm_find_space().
4126 *
4127 * @discussion
4128 * Allocate VA quickly without using the slower vm_find_space() for cases
4129 * when the submaps are fully sequestered.
4130 *
4131 * The VM submap is used to implement the VM itself so it is always sequestered,
4132 * as it can't kmem_alloc which needs to always allocate vm entries.
4133 * However, it can use vm_map_enter() which tries to coalesce entries, which
4134 * always works, so the VM map only ever needs 2 entries (one for each end).
4135 *
4136 * The RO submap is similarly always sequestered if it exists (as a non
4137 * sequestered RO submap makes very little sense).
4138 *
4139 * The allocator is a very simple bump-allocator
4140 * that allocates from either end.
4141 */
4142 static kern_return_t
4143 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4144 vm_offset_t *addrp)
4145 {
4146 vm_size_t size = ptoa(pages);
4147 vm_map_t map = zone_submap(zsflags);
4148 vm_map_entry_t first, last;
4149 vm_map_offset_t addr;
4150
4151 vmlp_api_start(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA);
4152
4153 vm_map_lock(map);
4154
4155 first = vm_map_first_entry(map);
4156 last = vm_map_last_entry(map);
4157
4158 if (zsflags.z_submap_from_end) {
4159 vmlp_range_event(map, last->vme_start - size, size);
4160 } else {
4161 vmlp_range_event(map, first->vme_end, size);
4162 }
4163
4164 if (first->vme_end + size > last->vme_start) {
4165 vm_map_unlock(map);
4166 vmlp_api_end(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA, KERN_NO_SPACE);
4167 return KERN_NO_SPACE;
4168 }
4169
4170 if (zsflags.z_submap_from_end) {
4171 last->vme_start -= size;
4172 addr = last->vme_start;
4173 VME_OFFSET_SET(last, addr);
4174 } else {
4175 addr = first->vme_end;
4176 first->vme_end += size;
4177 }
4178 map->size += size;
4179
4180 vm_map_unlock(map);
4181
4182 *addrp = addr;
4183 vmlp_api_end(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA, KERN_SUCCESS);
4184 return KERN_SUCCESS;
4185 }
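
/*
 * Illustration of the bump allocation above (hypothetical addresses):
 * with the two boundary entries of a sequestered submap at
 * first->vme_end == 0xA000_0000 and last->vme_start == 0xB000_0000,
 * a 4-page request with z_submap_from_end clear returns 0xA000_0000 and
 * bumps first->vme_end up by ptoa(4); with z_submap_from_end set it pulls
 * last->vme_start down by ptoa(4) and returns that address instead.
 * The submap is exhausted once first->vme_end + size would cross
 * last->vme_start, in which case KERN_NO_SPACE is returned.
 */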
4186
4187 void
4188 zone_fill_initially(zone_t zone, vm_size_t nelems)
4189 {
4190 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4191 kern_return_t kr;
4192 vm_offset_t addr;
4193 uint32_t pages;
4194 zone_security_flags_t zsflags = zone_security_config(zone);
4195
4196 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4197 assert(zone->z_elems_avail == 0);
4198
4199 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4200 pages = zone_alloc_pages_for_nelems(zone, nelems);
4201 if (zone_submap_is_sequestered(zsflags)) {
4202 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4203 if (kr != KERN_SUCCESS) {
4204 panic("zone_submap_alloc_sequestered_va() "
4205 "of %u pages failed", pages);
4206 }
4207 kernel_memory_populate(addr, ptoa(pages),
4208 kmaflags, VM_KERN_MEMORY_ZONE);
4209 } else {
4210 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4211 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4212 kmaflags, VM_KERN_MEMORY_ZONE);
4213 }
4214
4215 zone_meta_populate(addr, ptoa(pages));
4216 zcram(zone, addr, pages, 0);
4217 }
4218
4219 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4220 __attribute__((noinline))
4221 static void
4222 zone_scramble_va_and_unlock(
4223 zone_t z,
4224 struct zone_page_metadata *meta,
4225 uint32_t runs,
4226 uint32_t pages,
4227 uint32_t chunk_pages,
4228 uint64_t guard_mask)
4229 {
4230 struct zone_page_metadata *arr[ZONE_MAX_CHUNK_ALLOC_NUM];
4231
4232 for (uint32_t run = 0, n = 0; run < runs; run++) {
4233 arr[run] = meta + n;
4234 n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4235 }
4236
4237 /*
4238 * Fisher–Yates shuffle, for an array with indices [0, n)
4239 *
4240 * for i from n−1 downto 1 do
4241 * j ← random integer such that 0 ≤ j ≤ i
4242 * exchange a[j] and a[i]
4243 *
4244 * The point here is that early allocations aren't at a fixed
4245 * distance from each other.
4246 */
4247 for (uint32_t i = runs - 1; i > 0; i--) {
4248 uint32_t j = zalloc_random_uniform32(0, i + 1);
4249
4250 meta = arr[j];
4251 arr[j] = arr[i];
4252 arr[i] = meta;
4253 }
4254
4255 zone_lock(z);
4256
4257 for (uint32_t i = 0; i < runs; i++) {
4258 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4259 }
4260 z->z_va_cur += z->z_percpu ? runs : pages;
4261 }
4262
4263 static inline uint32_t
4264 dist_u32(uint32_t a, uint32_t b)
4265 {
4266 return a < b ? b - a : a - b;
4267 }
4268
4269 static uint64_t
4270 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4271 {
4272 for (; n-- > 0; pop--) {
4273 uint32_t bit = zalloc_random_uniform32(0, pop);
4274 uint64_t m = mask;
4275
4276 for (; bit; bit--) {
4277 m &= m - 1;
4278 }
4279
4280 mask ^= 1ull << __builtin_ctzll(m);
4281 }
4282
4283 return mask;
4284 }
4285
4286 /**
4287 * @function zalloc_random_bits
4288 *
4289 * @brief
4290 * Compute a random number with a specified number of bit set in a given width.
4291 *
4292 * @discussion
4293 * This function generates a "uniform" distribution of bit patterns with @c pop
4294 * bits set within @c width bits, typically using fewer than width/4 calls to random.
4295 *
4296 * @param pop the target number of bits set.
4297 * @param width the number of bits in the random integer to generate.
4298 */
4299 static uint64_t
4300 zalloc_random_bits(uint32_t pop, uint32_t width)
4301 {
4302 uint64_t w_mask = (1ull << width) - 1;
4303 uint64_t mask;
4304 uint32_t cur;
4305
4306 if (3 * width / 4 <= pop) {
4307 mask = w_mask;
4308 cur = width;
4309 } else if (pop <= width / 4) {
4310 mask = 0;
4311 cur = 0;
4312 } else {
4313 /*
4314 * Choosing a random number this way will overwhelmingly
4315 * yield a popcount near `width / 2`, +/- a few.
4316 */
4317 mask = zalloc_random_mask64(width);
4318 cur = __builtin_popcountll(mask);
4319
4320 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4321 /*
4322 * If the opposite mask has a closer popcount,
4323 * then start with that one as the seed.
4324 */
4325 cur = width - cur;
4326 mask ^= w_mask;
4327 }
4328 }
4329
4330 if (cur < pop) {
4331 /*
4332 * Setting `pop - cur` bits is really clearing that many from
4333 * the opposite mask.
4334 */
4335 mask ^= w_mask;
4336 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4337 mask ^= w_mask;
4338 } else if (pop < cur) {
4339 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4340 }
4341
4342 return mask;
4343 }
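
/*
 * Worked example: zalloc_random_bits(3, 10) returns a 10-bit mask with
 * exactly 3 bits set (e.g. 0x121).  Since 3 is neither >= 3*10/4 nor
 * <= 10/4, the function seeds with a random 10-bit mask (popcount ~5),
 * flips it when the complement is closer to the target popcount, and then
 * clears the surplus bits one at a time via zalloc_random_clear_n_bits().
 */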
4344 #endif
4345
4346 static void
4347 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4348 {
4349 zone_security_flags_t zsflags = zone_security_config(z);
4350 struct zone_page_metadata *meta;
4351 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4352 uint32_t chunk_pages = z->z_chunk_pages;
4353 uint32_t runs, pages, guards, guard_pages, rnum;
4354 uint64_t guard_mask = 0;
4355 bool lead_guard = false;
4356 zone_id_t zidx = zone_index(z);
4357 kern_return_t kr;
4358 vm_offset_t addr;
4359
4360 zone_unlock(z);
4361
4362 /*
4363 * A lot of OOB exploitation techniques rely on precise placement
4364 * and interleaving of zone pages. The layout that is sought
4365 * by attackers will be C/P/T types, where:
4366 * - (C)ompromised is the type for which attackers have a bug,
4367 * - (P)adding is used to pad memory,
4368 * - (T)arget is the type that the attacker will attempt to corrupt
4369 * by exploiting (C).
4370 *
4371 * Note that in some cases C==T and P isn't needed.
4372 *
4373 * In order to make those placement games much harder,
4374 * we grow zones by random runs of memory, up to 10 chunks.
4375 * This makes predicting the precise layout of the heap
4376 * considerably more complicated.
4377 *
4378 * Note: this function makes very heavy use of randomness,
4379 * however, it is mostly limited to sequestered zones,
4380 * and eventually the layout will be fixed,
4381 * and the usage of random vastly reduced.
4382 *
4383 * For non sequestered zones, there's a single call
4384 * to random in order to decide whether we want
4385 * a guard page or not.
4386 */
4387 pages = chunk_pages;
4388 guards = 0;
4389 runs = 1;
4390 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4391 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4392 runs = ZONE_MAX_CHUNK_ALLOC_NUM;
4393 runs = zalloc_random_uniform32(1, runs + 1);
4394 pages = runs * chunk_pages;
4395 }
4396 static_assert(ZONE_MAX_CHUNK_ALLOC_NUM <= 10,
4397 "make sure that `runs` will never exceed 10");
4398 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4399
4400 /*
4401 * For zones that are susceptible to OOB,
4402 * guards might be added after each chunk.
4403 *
4404 * Those guard pages are marked with the ZM_PGZ_GUARD
4405 * magical chunk len, and their zm_oob_offs field
4406 * is used to remember an optional shift applied
4407 * to returned elements, in order to right-align them
4408 * as much as possible.
4409 *
4410 * In an adversarial context, while guard pages
4411 * are extremely effective against linear overflow,
4412 * using a predictable frequency of guard pages feels like
4413 * a missed opportunity. Which is why we choose to insert
4414 * one guard region (chunk_pages guard pages) with 25% probability,
4415 * with a goal of having ~20% of the VA allocated consist of guard pages.
4416 */
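
/*
 * Back of the envelope for the ~20%/25% figures above: each run draws
 * rnum uniformly in [0, 512) and requests a guard region when rnum < 128,
 * i.e. with probability 1/4.  A guard region is chunk_pages long, the same
 * size as a data chunk, so on average one guard chunk accompanies every
 * four data chunks and guard pages amount to roughly 1 / (1 + 4) = 20%
 * of the freshly allocated VA.
 */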
4417 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4418 if (!z->z_percpu) {
4419 /*
4420 * Don't bother with adding guard regions for per-CPU zones, as
4421 * they're not interesting to attackers.
4422 */
4423 for (uint32_t run = 0; run < runs; run++) {
4424 rnum = zalloc_random_uniform32(0, 4 * 128);
4425 guards += (rnum < 128);
4426 }
4427 }
4428 assert3u(guards, <=, runs);
4429
4430 guard_mask = 0;
4431
4432 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4433 /*
4434 * Several exploitation strategies rely on a C/T (compromised
4435 * then target types) ordering of pages with a sub-page reach
4436 * from C into T.
4437 *
4438 * We want to reliably thwart such exploitations
4439 * and hence force a guard page between alternating
4440 * memory types.
4441 *
4442 * Note: this counts towards the number of guard pages we want.
4443 */
4444 guard_mask |= 1ull << (runs - 1);
4445
4446 if (guards > 1) {
4447 guard_mask |= zalloc_random_bits(guards - 1, runs - 1);
4448 } else {
4449 guards = 1;
4450 }
4451
4452 /*
4453 * While we randomize the chunks lengths, an attacker with
4454 * precise timing control can guess when overflows happen,
4455 * and "measure" the runs, which gives them an indication
4456 * of where the next run's start offset is.
4457 *
4458 * In order to make this knowledge unusable, add a guard page
4459 * _before_ the new run with a 25% probability, regardless
4460 * of whether we had enough guard pages.
4461 */
4462 if ((rnum & 3) == 0) {
4463 lead_guard = true;
4464 guards++;
4465 }
4466 } else {
4467 assert3u(runs, ==, 1);
4468 assert3u(guards, <=, 1);
4469 guard_mask = guards << (runs - 1);
4470 }
4471 #else
4472 (void)rnum;
4473 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4474
4475 /* We want guards to be at least the size of the chunk. */
4476 guard_pages = guards * chunk_pages;
4477 if (zone_submap_is_sequestered(zsflags)) {
4478 kr = zone_submap_alloc_sequestered_va(zsflags,
4479 pages + guard_pages, &addr);
4480 } else {
4481 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4482 kr = kmem_alloc(zone_submap(zsflags), &addr,
4483 ptoa(pages + guard_pages), kmaflags, VM_KERN_MEMORY_ZONE);
4484 }
4485
4486 if (kr != KERN_SUCCESS) {
4487 uint64_t zone_size = 0;
4488 zone_t zone_largest = zone_find_largest(&zone_size);
4489 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4490 "likely due to memory leak in zone [%s%s] "
4491 "(%u%c, %d elements allocated)",
4492 kr, zone_heap_name(z), zone_name(z),
4493 zone_heap_name(zone_largest), zone_name(zone_largest),
4494 mach_vm_size_pretty(zone_size),
4495 mach_vm_size_unit(zone_size),
4496 zone_count_allocated(zone_largest));
4497 }
4498
4499 meta = zone_meta_from_addr(addr);
4500 zone_meta_populate(addr, ptoa(pages + guard_pages));
4501
4502 /*
4503 * Handle the leading guard page, if any
4504 */
4505 if (lead_guard) {
4506 for (uint32_t i = 0; i < chunk_pages; i++) {
4507 meta[i].zm_index = zidx;
4508 meta[i].zm_chunk_len = ZM_PGZ_GUARD;
4509 meta[i].zm_guarded = true;
4510 meta++;
4511 }
4512 }
4513
4514 for (uint32_t run = 0, n = 0; run < runs; run++) {
4515 bool guarded = (guard_mask >> run) & 1;
4516
4517 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4518 meta[n].zm_index = zidx;
4519 meta[n].zm_guarded = guarded;
4520 }
4521 if (guarded) {
4522 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4523 meta[n].zm_index = zidx;
4524 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4525 }
4526 }
4527 }
4528 if (guards) {
4529 os_atomic_add(&zone_guard_pages, guard_pages, relaxed);
4530 }
4531
4532 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4533 if (__improbable(zone_caching_disabled < 0)) {
4534 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4535 chunk_pages, guard_mask);
4536 }
4537 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4538
4539 zone_lock(z);
4540
4541 for (uint32_t run = 0, n = 0; run < runs; run++) {
4542 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4543 n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4544 }
4545 z->z_va_cur += z->z_percpu ? runs : pages;
4546 }
4547
4548 static inline void
4549 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4550 {
4551 #if DEBUG || DEVELOPMENT
4552 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_START,
4553 size, 0, 0, 0);
4554 #else
4555 (void)size;
4556 #endif
4557 }
4558
4559 static inline void
4560 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4561 {
4562 task_t task = current_task_early();
4563 if (pages) {
4564 if (task) {
4565 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4566 }
4567 counter_add(&vm_page_grab_count_kern, pages);
4568 }
4569 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
4570 pages, 0, 0, 0);
4571 }
4572
4573 __attribute__((noinline))
4574 static void
4575 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4576 {
4577 uint64_t wait_start = 0;
4578 long mapped;
4579
4580 sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4581
4582 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4583 return;
4584 }
4585
4586 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4587
4588 /*
4589 * If the zone map is really exhausted, wait on the GC thread,
4590 * donating our priority (which is important because the GC
4591 * thread is at a rather low priority).
4592 */
4593 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
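/*
 * Triangular backoff: the n-th pass waits n * (n + 1) / 2 ms
 * (1, 3, 6, 10, ... ms), and the check below panics once a single
 * wait would exceed zone_exhausted_timeout.
 */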
4594 uint32_t wait_ms = n * (n + 1) / 2;
4595 uint64_t interval;
4596
4597 if (n == 1) {
4598 wait_start = mach_absolute_time();
4599 } else {
4600 sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4601 }
4602 if (zone_exhausted_timeout > 0 &&
4603 wait_ms > zone_exhausted_timeout) {
4604 panic("zone map exhaustion: waited for %dms "
4605 "(pages: %ld, max: %ld, wanted: %d)",
4606 wait_ms, mapped, zone_pages_wired_max, pgs);
4607 }
4608
4609 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4610 &interval);
4611
4612 lck_spin_lock(&zone_exhausted_lock);
4613 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4614 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4615 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4616
4617 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4618 }
4619 }
4620
4621 static bool
4622 zone_expand_wait_for_pages(bool waited)
4623 {
4624 if (waited) {
4625 return false;
4626 }
4627 #if DEBUG || DEVELOPMENT
4628 if (zalloc_simulate_vm_pressure) {
4629 return false;
4630 }
4631 #endif /* DEBUG || DEVELOPMENT */
4632 return !vm_pool_low();
4633 }
4634
4635 static inline void
4636 zone_expand_async_schedule_if_allowed(zone_t zone)
4637 {
4638 if (zone->z_async_refilling || zone->no_callout) {
4639 return;
4640 }
4641
4642 if (zone_exhausted(zone)) {
4643 return;
4644 }
4645
4646 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4647 return;
4648 }
4649
4650 if (!vm_pool_low() || zone_supports_vm(zone)) {
4651 zone->z_async_refilling = true;
4652 thread_call_enter(&zone_expand_callout);
4653 }
4654 }
4655
4656 __attribute__((noinline))
4657 static bool
4658 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4659 {
4660 struct zone_depot zd;
4661 zone_magazine_t mag = NULL;
4662
4663 if (z->z_depot_size) {
4664 z->z_depot_size = 0;
4665 z->z_depot_cleanup = true;
4666
4667 zone_depot_init(&zd);
4668 zone_depot_trim(z, 0, &zd);
4669
4670 zone_recirc_lock_nopreempt(z);
4671 if (zd.zd_full) {
4672 zone_depot_move_full(&z->z_recirc,
4673 &zd, zd.zd_full, NULL);
4674 }
4675 if (zd.zd_empty) {
4676 zone_depot_move_empty(&z->z_recirc,
4677 &zd, zd.zd_empty, NULL);
4678 }
4679 zone_recirc_unlock_nopreempt(z);
4680 }
4681
4682 zone_recirc_lock_nopreempt(z);
4683 if (z->z_recirc.zd_full) {
4684 mag = zone_depot_pop_head_full(&z->z_recirc, z);
4685 }
4686 zone_recirc_unlock_nopreempt(z);
4687
4688 if (mag) {
4689 zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4690 zone_magazine_free(mag);
4691 }
4692
4693 return mag != NULL;
4694 }
4695
4696 static bool
4697 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4698 {
4699 if (zone->z_elems_free > zone->z_elems_rsv) {
4700 return false;
4701 }
4702 if (!zone_exhausted(zone)) {
4703 return true;
4704 }
4705 if (zone->z_pcpu_cache && zone->z_depot_size) {
4706 if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4707 return false;
4708 }
4709 }
4710 return (flags & Z_NOFAIL) != 0;
4711 }
4712
4713 static void
4714 zone_wakeup_exhausted_waiters(zone_t z)
4715 {
4716 z->z_exhausted_wait = false;
4717 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, false);
4718 thread_wakeup(&z->z_expander);
4719 }
4720
4721 __attribute__((noinline))
4722 static void
4723 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4724 {
4725 if (z->z_pcpu_cache && z->z_depot_size &&
4726 zalloc_expand_drain_exhausted_caches_locked(z)) {
4727 return;
4728 }
4729
4730 if (!z->z_exhausted_wait) {
4731 zone_recirc_lock_nopreempt(z);
4732 z->z_exhausted_wait = true;
4733 zone_recirc_unlock_nopreempt(z);
4734 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, true);
4735 }
4736
4737 assert_wait(&z->z_expander, TH_UNINT);
4738 zone_unlock(z);
4739 thread_block(THREAD_CONTINUE_NULL);
4740 zone_lock(z);
4741 }
4742
4743 static pmap_mapping_type_t
4744 zone_mapping_type(zone_t z)
4745 {
4746 zone_security_flags_t zsflags = zone_security_config(z);
4747
4748 /*
4749 * If the zone's z_submap_idx is neither Z_SUBMAP_IDX_DATA nor
4750 * Z_SUBMAP_IDX_READ_ONLY, mark the corresponding mapping
4751 * type as PMAP_MAPPING_TYPE_RESTRICTED.
4752 */
4753 switch (zsflags.z_submap_idx) {
4754 case Z_SUBMAP_IDX_DATA:
4755 return PMAP_MAPPING_TYPE_DEFAULT;
4756 case Z_SUBMAP_IDX_READ_ONLY:
4757 return PMAP_MAPPING_TYPE_ROZONE;
4758 default:
4759 return PMAP_MAPPING_TYPE_RESTRICTED;
4760 }
4761 }
4762
4763 static vm_prot_t
4764 zone_page_prot(zone_security_flags_t zsflags)
4765 {
4766 switch (zsflags.z_submap_idx) {
4767 case Z_SUBMAP_IDX_READ_ONLY:
4768 return VM_PROT_READ;
4769 default:
4770 return VM_PROT_READ | VM_PROT_WRITE;
4771 }
4772 }
4773
4774 static void
4775 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4776 {
4777 zone_security_flags_t zsflags = zone_security_config(z);
4778 struct zone_expand ze = {
4779 .ze_thread = current_thread(),
4780 };
4781
4782 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4783 ze.ze_thread->options |= TH_OPT_VMPRIV;
4784 ze.ze_clear_priv = true;
4785 }
4786
4787 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4788 /*
4789 * When the thread is VM privileged,
4790 * vm_page_grab() will call VM_PAGE_WAIT()
4791 * without our knowledge, so we unfortunately
4792 * must assume it will be called.
4793 *
4794 * In practice it's not a big deal because
4795 * Z_NOPAGEWAIT is not really used on zones
4796 * that VM privileged threads are going to expand.
4797 */
4798 ze.ze_pg_wait = true;
4799 ze.ze_vm_priv = true;
4800 }
4801
4802 for (;;) {
4803 if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4804 goto out;
4805 }
4806
4807 if (z->z_expander == NULL) {
4808 z->z_expander = &ze;
4809 break;
4810 }
4811
4812 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4813 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4814 ze.ze_next = z->z_expander;
4815 z->z_expander = &ze;
4816 break;
4817 }
4818
4819 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4820 goto out;
4821 }
4822
4823 z->z_expanding_wait = true;
4824 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4825 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4826 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4827 }
4828
4829 do {
4830 struct zone_page_metadata *meta = NULL;
4831 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4832 vm_page_t page_list = NULL;
4833 vm_offset_t addr = 0;
4834 int waited = 0;
4835
4836 if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4837 __ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4838 continue; /* reevaluate if we really need it */
4839 }
4840
4841 /*
4842 * While we hold the zone lock, look if there's VA we can:
4843 * - complete from partial pages,
4844 * - reuse from the sequester list.
4845 *
4846 * When the page is being populated we pretend we allocated
4847 * an extra element so that zone_gc() can't attempt to free
4848 * the chunk (as it could become empty while we wait for pages).
4849 */
4850 if (zone_pva_is_null(z->z_pageq_va)) {
4851 zone_allocate_va_locked(z, flags);
4852 }
4853
4854 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4855 addr = zone_meta_to_addr(meta);
4856 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4857 cur_pages = meta->zm_page_index;
4858 meta -= cur_pages;
4859 addr -= ptoa(cur_pages);
4860 zone_meta_lock_in_partial(z, meta, cur_pages);
4861 }
4862 zone_unlock(z);
4863
4864 /*
4865 * And now allocate pages to populate our VA.
4866 */
4867 min_pages = z->z_chunk_pages;
4868 #if !KASAN_CLASSIC
4869 if (!z->z_percpu) {
4870 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4871 zone_elem_outer_size(z)));
4872 }
4873 #endif /* !KASAN_CLASSIC */
4874
4875 /*
4876 * Trigger jetsams via VM_pageout GC
4877 * if we're running out of zone memory
4878 */
4879 if (__improbable(zone_map_nearing_exhaustion())) {
4880 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4881 }
4882
4883 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4884
4885 while (pages < z->z_chunk_pages - cur_pages) {
4886 vm_grab_options_t grab_options = VM_PAGE_GRAB_NOPAGEWAIT;
4887 vm_page_t m;
4888
4889 #if ZSECURITY_CONFIG(ZONE_TAGGING) && HAS_MTE
4890 if (zsflags.z_tag) {
4891 grab_options |= VM_PAGE_GRAB_MTE;
4892 }
4893 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) && HAS_MTE */
4894 m = vm_page_grab_options(grab_options);
4895
4896 if (m) {
4897 pages++;
4898 m->vmp_snext = page_list;
4899 page_list = m;
4900 vm_page_zero_fill(
4901 m
4902 #if HAS_MTE
4903 , false /* zero_tags */
4904 #endif /* HAS_MTE */
4905 );
4906 continue;
4907 }
4908
4909 if (pages >= min_pages &&
4910 !zone_expand_wait_for_pages(waited)) {
4911 break;
4912 }
4913
4914 if ((flags & Z_NOPAGEWAIT) == 0) {
4915 /*
4916 * The first time we're about to wait for pages,
4917 * mention that to waiters and wake them all.
4918 *
4919 * Set `ze_pg_wait` in our zone_expand context
4920 * so that waiters who care do not wait again.
4921 */
4922 if (!ze.ze_pg_wait) {
4923 zone_lock(z);
4924 if (z->z_expanding_wait) {
4925 z->z_expanding_wait = false;
4926 wakeup_all_with_inheritor(&z->z_expander,
4927 THREAD_AWAKENED);
4928 }
4929 ze.ze_pg_wait = true;
4930 zone_unlock(z);
4931 }
4932
4933 waited++;
4934 VM_PAGE_WAIT();
4935 continue;
4936 }
4937
4938 /*
4939 * Undo everything and bail out:
4940 *
4941 * - free pages
4942 * - undo the fake allocation if any
4943 * - put the VA back on the VA page queue.
4944 */
4945 vm_page_free_list(page_list, FALSE);
4946 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4947
4948 zone_lock(z);
4949
4950 zone_expand_async_schedule_if_allowed(z);
4951
4952 if (cur_pages) {
4953 zone_meta_unlock_from_partial(z, meta, cur_pages);
4954 }
4955 if (meta) {
4956 zone_meta_queue_push(z, &z->z_pageq_va,
4957 meta + cur_pages);
4958 }
4959 goto page_shortage;
4960 }
4961 vm_object_t object;
4962 #if HAS_MTE
4963 object = zsflags.z_tag ? kernel_object_tagged : kernel_object_default;
4964 #else /* HAS_MTE */
4965 object = kernel_object_default;
4966 #endif /* HAS_MTE */
4967 vm_object_lock(object);
4968
4969 kernel_memory_populate_object_and_unlock(object,
4970 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4971 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4972 zone_page_prot(zsflags), zone_mapping_type(z));
4973
4974 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4975
4976 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4977
4978 /*
4979 * permanent zones only try once,
4980 * the retry loop is in the caller
4981 */
4982 } while (!z->z_permanent && zalloc_needs_refill(z, flags));
4983
4984 page_shortage:
4985 if (z->z_expander == &ze) {
4986 z->z_expander = ze.ze_next;
4987 } else {
4988 assert(z->z_expander->ze_next == &ze);
4989 z->z_expander->ze_next = NULL;
4990 }
4991 if (z->z_expanding_wait) {
4992 z->z_expanding_wait = false;
4993 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4994 }
4995 out:
4996 if (ze.ze_clear_priv) {
4997 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4998 }
4999 }
5000
5001 static void
5002 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
5003 {
5004 zone_foreach(z) {
5005 if (z->no_callout) {
5006 /* z_async_refilling will never be set */
5007 continue;
5008 }
5009
5010 if (!z->z_async_refilling) {
5011 /*
5012 * avoid locking all zones, because the one(s)
5013 * we're looking for had their bit set _before_
5014 * thread_call_enter() was called; if we fail
5015 * to observe the bit, it means the thread call
5016 * has been "dinged" again and we'll notice it then.
5017 */
5018 continue;
5019 }
5020
5021 zone_lock(z);
5022 if (z->z_self && z->z_async_refilling) {
5023 zone_expand_locked(z, Z_WAITOK);
5024 /*
5025 * clearing _after_ we grow is important,
5026 * so that we avoid waking up the thread call
5027 * while we grow and cause it to run a second time.
5028 */
5029 z->z_async_refilling = false;
5030 }
5031 zone_unlock(z);
5032 }
5033 }
5034
5035 #endif /* !ZALLOC_TEST */
5036 #pragma mark zone jetsam integration
5037 #if !ZALLOC_TEST
5038
5039 /*
5040 * We're being very conservative here and picking a value of 95%. We might need to lower this if
5041 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
5042 */
5043 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
5044
5045 /*
5046 * Threshold above which largest zones should be included in the panic log
5047 */
5048 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
5049
5050 /*
5051 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
5052 * where X=zone_map_jetsam_limit.
5053 *
5054 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
5055 */
5056 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
5057 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
5058
5059 kern_return_t
5060 zone_map_jetsam_set_limit(uint32_t value)
5061 {
5062 if (value <= 0 || value > 100) {
5063 return KERN_INVALID_VALUE;
5064 }
5065
5066 zone_map_jetsam_limit = value;
5067 os_atomic_store(&zone_pages_jetsam_threshold,
5068 zone_pages_wired_max * value / 100, relaxed);
5069 return KERN_SUCCESS;
5070 }
5071
5072 void
5073 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
5074 {
5075 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5076 *current_size = ptoa_64(phys_pages);
5077 *capacity = ptoa_64(zone_pages_wired_max);
5078 }
5079
5080 void
5081 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
5082 {
5083 zone_t largest_zone = zone_find_largest(zone_size);
5084
5085 /*
5086 * Append kalloc heap name to zone name (if zone is used by kalloc)
5087 */
5088 snprintf(zone_name, zone_name_len, "%s%s",
5089 zone_heap_name(largest_zone), largest_zone->z_name);
5090 }
5091
5092 static bool
5093 zone_map_nearing_threshold(unsigned int threshold)
5094 {
5095 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5096 return phys_pages * 100 > zone_pages_wired_max * threshold;
5097 }
5098
5099 bool
5100 zone_map_nearing_exhaustion(void)
5101 {
5102 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5103
5104 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5105 }
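
/*
 * Worked example (hypothetical sizing): with zone_pages_wired_max at
 * 1,000,000 pages, a zone_map_jetsam_set_limit(95) call stores a
 * zone_pages_jetsam_threshold of 950,000 pages, and
 * zone_map_nearing_exhaustion() starts returning true once
 * zone_pages_wired reaches that count.
 */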
5106
5107
5108 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
5109
5110 /*
5111 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5112 * to walk through the jetsam priority bands and kill processes.
5113 */
5114 static zone_t
5115 kill_process_in_largest_zone(void)
5116 {
5117 pid_t pid = -1;
5118 uint64_t zone_size = 0;
5119 zone_t largest_zone = zone_find_largest(&zone_size);
5120
5121 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5122 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5123 ptoa_64(zone_pages_wired_max),
5124 (uint64_t)zone_submaps_approx_size(),
5125 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5126 zone_map_jetsam_limit);
5127 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5128 largest_zone->z_name, (uintptr_t)zone_size);
5129
5130 /*
5131 * We want to make sure we don't call this function from userspace.
5132 * Or we could end up trying to synchronously kill the process
5133 * whose context we're in, causing the system to hang.
5134 */
5135 assert(current_task() == kernel_task);
5136
5137 /*
5138 * If vm_object_zone is the largest, check to see if the number of
5139 * elements in vm_map_entry_zone is comparable.
5140 *
5141 * If so, consider vm_map_entry_zone as the largest. This lets us target
5142 * a specific process to jetsam to quickly recover from the zone map
5143 * bloat.
5144 */
5145 if (largest_zone == vm_object_zone) {
5146 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5147 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5148 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5149 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5150 largest_zone = vm_map_entry_zone;
5151 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5152 (uintptr_t)zone_size_wired(largest_zone));
5153 }
5154 }
5155
5156 /* TODO: Extend this to check for the largest process in other zones as well. */
5157 if (largest_zone == vm_map_entry_zone) {
5158 pid = find_largest_process_vm_map_entries();
5159 } else {
5160 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5161 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5162 largest_zone->z_name);
5163 }
5164 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5165 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5166 }
5167
5168 return largest_zone;
5169 }
5170
5171 #endif /* !ZALLOC_TEST */
5172 #pragma mark zfree
5173 #if !ZALLOC_TEST
5174
5175 /*!
5176 * @defgroup zfree
5177 * @{
5178 *
5179 * @brief
5180 * The codepath for zone frees.
5181 *
5182 * @discussion
5183 * There are 4 major ways to free memory that end up in the zone allocator:
5184 * - @c zfree()
5185 * - @c zfree_percpu()
5186 * - @c kfree*()
5187 * - @c zfree_permanent()
5188 *
5189 * While permanent zones have their own allocation scheme, all other codepaths
5190 * will eventually go through the @c zfree_ext() choking point.
5191 */
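
/*
 * Rough shape of the free fast path implemented below (hedged sketch;
 * the zfree_ext() entry point itself is outside this excerpt and also
 * handles logging, sanitizer bookkeeping, etc.):
 *
 *	int cpu = cpu_number();
 *	zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
 *
 *	if (__probable(cache)) {
 *		// room left in the per-CPU free magazine
 *		cache->zc_free_elems[cache->zc_free_cur++] = elem;
 *	} else {
 *		// caches, depot and recirculation are all full: take the zone lock
 *		zfree_item(zone, elem);
 *	}
 */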
5192
5193 __header_always_inline void
5194 zfree_drop(zone_t zone, vm_offset_t addr)
5195 {
5196 vm_offset_t esize = zone_elem_outer_size(zone);
5197 struct zone_page_metadata *meta;
5198 vm_offset_t eidx;
5199
5200 meta = zone_element_resolve(zone, addr, &eidx);
5201
5202 if (!zone_meta_mark_free(meta, eidx)) {
5203 zone_meta_double_free_panic(zone, addr, __func__);
5204 }
5205
5206 vm_offset_t old_size = meta->zm_alloc_size;
5207 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5208 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5209
5210 if (new_size == 0) {
5211 /* whether the page was on the intermediate (partial) or all_used queue, move it to the empty queue */
5212 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5213 zone->z_wired_empty += meta->zm_chunk_len;
5214 } else if (old_size + esize > max_size) {
5215 /* first free element on page, move from all_used */
5216 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5217 }
5218
5219 if (__improbable(zone->z_exhausted_wait)) {
5220 zone_wakeup_exhausted_waiters(zone);
5221 }
5222 }
5223
5224 __attribute__((noinline))
5225 static void
5226 zfree_item(zone_t zone, vm_offset_t addr)
5227 {
5228 /* transfer preemption count to lock */
5229 zone_lock_nopreempt_check_contention(zone);
5230
5231 zfree_drop(zone, addr);
5232 zone->z_elems_free += 1;
5233
5234 zone_unlock(zone);
5235 }
5236
5237 static void
5238 zfree_cached_depot_recirculate(
5239 zone_t zone,
5240 uint32_t depot_max,
5241 zone_cache_t cache)
5242 {
5243 smr_t smr = zone_cache_smr(cache);
5244 smr_seq_t seq;
5245 uint32_t n;
5246
5247 zone_recirc_lock_nopreempt_check_contention(zone);
5248
5249 n = cache->zc_depot.zd_full;
5250 if (n >= depot_max) {
5251 /*
5252 * If SMR is in use, rotate the entire chunk of magazines.
5253 *
5254 * If the head of the recirculation layer is ready to be
5255 * reused, pull them back to refill a little.
5256 */
5257 seq = zone_depot_move_full(&zone->z_recirc,
5258 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5259
5260 if (smr) {
5261 smr_deferred_advance_commit(smr, seq);
5262 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5263 zone_depot_move_full(&cache->zc_depot,
5264 &zone->z_recirc, depot_max / 2, NULL);
5265 }
5266 }
5267 }
5268
5269 n = depot_max - cache->zc_depot.zd_full;
5270 if (n > zone->z_recirc.zd_empty) {
5271 n = zone->z_recirc.zd_empty;
5272 }
5273 if (n) {
5274 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5275 n, zone);
5276 }
5277
5278 zone_recirc_unlock_nopreempt(zone);
5279 }
5280
5281 static zone_cache_t
5282 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5283 {
5284 zone_magazine_t mag = NULL, tmp = NULL;
5285 smr_t smr = zone_cache_smr(cache);
5286 bool wakeup_exhausted = false;
5287
5288 if (zone->z_recirc.zd_empty == 0) {
5289 mag = zone_magazine_alloc(Z_NOWAIT);
5290 }
5291
5292 zone_recirc_lock_nopreempt_check_contention(zone);
5293
5294 if (mag == NULL && zone->z_recirc.zd_empty) {
5295 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5296 __builtin_assume(mag);
5297 }
5298 if (mag) {
5299 tmp = zone_magazine_replace(cache, mag, true);
5300 if (smr) {
5301 smr_deferred_advance_commit(smr, tmp->zm_seq);
5302 }
5303 if (zone_security_array[zone_index(zone)].z_lifo) {
5304 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5305 } else {
5306 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5307 }
5308
5309 wakeup_exhausted = zone->z_exhausted_wait;
5310 }
5311
5312 zone_recirc_unlock_nopreempt(zone);
5313
5314 if (__improbable(wakeup_exhausted)) {
5315 zone_lock_nopreempt(zone);
5316 if (zone->z_exhausted_wait) {
5317 zone_wakeup_exhausted_waiters(zone);
5318 }
5319 zone_unlock_nopreempt(zone);
5320 }
5321
5322 return mag ? cache : NULL;
5323 }
5324
5325 __attribute__((noinline))
5326 static zone_cache_t
5327 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5328 {
5329 zone_magazine_t mag = NULL, tmp = NULL;
5330 uint32_t depot_max;
5331
5332 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5333 if (depot_max) {
5334 zone_depot_lock_nopreempt(cache);
5335
5336 if (cache->zc_depot.zd_empty == 0) {
5337 zfree_cached_depot_recirculate(zone, depot_max, cache);
5338 }
5339
5340 if (__probable(cache->zc_depot.zd_empty)) {
5341 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5342 __builtin_assume(mag);
5343 } else {
5344 mag = zone_magazine_alloc(Z_NOWAIT);
5345 }
5346 if (mag) {
5347 tmp = zone_magazine_replace(cache, mag, true);
5348 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5349 }
5350
5351 zone_depot_unlock_nopreempt(cache);
5352
5353 return mag ? cache : NULL;
5354 }
5355
5356 return zfree_cached_recirculate(zone, cache);
5357 }
5358
5359 __attribute__((always_inline))
5360 static inline zone_cache_t
5361 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5362 {
5363 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5364
5365 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5366 return cache;
5367 }
5368
5369 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5370 zone_cache_swap_magazines(cache);
5371 return cache;
5372 }
5373
5374 return zfree_cached_trim(zone, cache);
5375 }
5376
5377 __attribute__((always_inline))
5378 static inline zone_cache_t
5379 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5380 {
5381 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5382 size_t idx = cache->zc_free_cur;
5383
5384 if (__probable(idx + 1 < zc_mag_size())) {
5385 return cache;
5386 }
5387
5388 /*
5389 * When SMR is in use, the bucket is tagged early with
5390 * @c smr_deferred_advance(), which costs a full barrier,
5391 * but performs no store.
5392 *
5393 * When magazines hit the recirculation layer, the advance is committed
5394 * under the recirculation lock (see zfree_cached_recirculate()).
5395 *
5396 * When done this way, the zone contention detection mechanism
5397 * will adjust the size of the per-cpu depots gracefully, which
5398 * mechanically reduces the pace of these commits as usage increases.
5399 */
5400
5401 if (__probable(idx + 1 == zc_mag_size())) {
5402 zone_magazine_t mag;
5403
5404 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5405 offsetof(struct zone_magazine, zm_elems));
5406 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5407 return cache;
5408 }
5409
5410 return zfree_cached_trim(zone, cache);
5411 }
5412
5413 __attribute__((always_inline))
5414 static inline vm_offset_t
5415 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5416 {
5417 struct zone_page_metadata *meta;
5418 vm_offset_t offs;
5419
5420 #pragma unused(combined_size)
5421
5422 meta = zone_meta_from_addr(elem);
5423 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5424 zone_invalid_element_panic(zone, elem);
5425 }
5426
5427 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5428 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5429 offs += ptoa(meta->zm_page_index);
5430 }
5431
5432 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5433 zone_invalid_element_panic(zone, elem);
5434 }
5435
5436 #if VM_TAG_SIZECLASSES
5437 if (__improbable(zone->z_uses_tags)) {
5438 vm_tag_t *slot;
5439
5440 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5441 Z_FAST_QUO(offs, zone->z_quo_magic));
5442 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5443 -(long)ZFREE_ELEM_SIZE(combined_size));
5444 *slot = VM_KERN_MEMORY_NONE;
5445 }
5446 #endif /* VM_TAG_SIZECLASSES */
5447
5448 #if KASAN_CLASSIC
5449 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5450 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5451 zone->z_percpu, __builtin_frame_address(0));
5452 #endif
5453
5454 elem = (vm_offset_t)zone_tag_free_element(zone, (caddr_t)elem, ZFREE_ELEM_SIZE(combined_size));
5455 return elem;
5456 }
5457
5458 __attribute__((always_inline))
5459 void *
5460 zcache_mark_invalid(zone_t zone, void *elem)
5461 {
5462 vm_size_t esize = zone_elem_inner_size(zone);
5463
5464 ZFREE_LOG(zone, (vm_offset_t)elem, 1);
5465 return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
5466 }
5467
5468 /*
5469 * The function is noinline when zlog can be used so that the backtracing
5470 * can reliably skip the zfree_ext() and zfree_log() boring frames.
5472 */
5473 #if ZALLOC_ENABLE_LOGGING
5474 __attribute__((noinline))
5475 #endif /* ZALLOC_ENABLE_LOGGING */
5476 __mockable void
5477 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5478 {
5479 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5480 vm_offset_t elem = (vm_offset_t)addr;
5481 int cpu;
5482
5483 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5484
5485 ZFREE_LOG(zone, elem, 1);
5486 elem = __zcache_mark_invalid(zone, elem, combined_size);
5487
5488 disable_preemption();
5489 cpu = cpu_number();
5490 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5491
5492 #if KASAN_CLASSIC
5493 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5494 struct kasan_quarantine_result kqr;
5495
5496 kqr = kasan_quarantine(elem, esize);
5497 elem = kqr.addr;
5498 zone = kqr.zone;
5499 if (elem == 0) {
5500 return enable_preemption();
5501 }
5502 }
5503 #endif
5504
5505 if (zone->z_pcpu_cache) {
5506 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
5507
5508 if (__probable(cache)) {
5509 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5510 return enable_preemption();
5511 }
5512 }
5513
5514 return zfree_item(zone, elem);
5515 }
5516
5517 __attribute__((always_inline))
5518 static inline zstack_t
5519 zcache_free_stack_to_cpu(
5520 zone_id_t zid,
5521 zone_cache_t cache,
5522 zstack_t stack,
5523 vm_size_t esize,
5524 zone_cache_ops_t ops,
5525 bool zero)
5526 {
5527 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
5528 vm_offset_t *p;
5529
5530 stack.z_count -= n;
5531 cache->zc_free_cur += n;
5532 p = cache->zc_free_elems + cache->zc_free_cur;
5533
5534 do {
5535 void *o = zstack_pop_no_delta(&stack);
5536
5537 if (ops) {
5538 o = ops->zc_op_mark_invalid(zid, o);
5539 } else {
5540 if (zero) {
5541 vm_memtag_bzero_unchecked(o, esize);
5542 }
5543 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
5544 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
5545 }
5546 *--p = (vm_offset_t)o;
5547 } while (--n > 0);
5548
5549 return stack;
5550 }
5551
5552 __attribute__((always_inline))
5553 static inline void
5554 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5555 {
5556 vm_offset_t elem = (vm_offset_t)addr;
5557 zone_cache_t cache;
5558 vm_size_t esize;
5559 zone_t zone = zone_by_id(zid);
5560 int cpu;
5561
5562 ZFREE_LOG(zone, elem, 1);
5563
5564 disable_preemption();
5565 cpu = cpu_number();
5566 esize = zone_elem_inner_size(zone);
5567 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5568 if (!ops) {
5569 addr = (void *)__zcache_mark_invalid(zone, elem,
5570 ZFREE_PACK_SIZE(esize, esize));
5571 }
5572 cache = zfree_cached_get_pcpu_cache(zone, cpu);
5573 if (__probable(cache)) {
5574 if (ops) {
5575 addr = ops->zc_op_mark_invalid(zid, addr);
5576 }
5577 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5578 enable_preemption();
5579 } else if (ops) {
5580 enable_preemption();
5581 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
5582 ops->zc_op_free(zid, addr);
5583 } else {
5584 zfree_item(zone, elem);
5585 }
5586 }
5587
5588 __attribute__((always_inline))
5589 static inline void
5590 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
5591 {
5592 zone_t zone = zone_by_id(zid);
5593 zone_cache_t cache;
5594 vm_size_t esize;
5595 int cpu;
5596
5597 ZFREE_LOG(zone, stack.z_head, stack.z_count);
5598
5599 disable_preemption();
5600 cpu = cpu_number();
5601 esize = zone_elem_inner_size(zone);
5602 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
5603 stack.z_count * esize;
5604
5605 for (;;) {
5606 cache = zfree_cached_get_pcpu_cache(zone, cpu);
5607 if (__probable(cache)) {
5608 stack = zcache_free_stack_to_cpu(zid, cache,
5609 stack, esize, ops, zero);
5610 enable_preemption();
5611 } else if (ops) {
5612 enable_preemption();
5613 os_atomic_dec(&zone->z_elems_avail, relaxed);
5614 ops->zc_op_free(zid, zstack_pop(&stack));
5615 } else {
5616 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
5617
5618 if (zero) {
5619 vm_memtag_bzero_unchecked((void *)addr, esize);
5620 }
5621 addr = __zcache_mark_invalid(zone, addr,
5622 ZFREE_PACK_SIZE(esize, esize));
5623 zfree_item(zone, addr);
5624 }
5625
5626 if (stack.z_count == 0) {
5627 break;
5628 }
5629
5630 disable_preemption();
5631 cpu = cpu_number();
5632 }
5633 }
5634
5635 void
5636 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5637 {
5638 __builtin_assume(ops != NULL);
5639 zcache_free_1_ext(zid, addr, ops);
5640 }
5641
5642 void
5643 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
5644 {
5645 __builtin_assume(ops != NULL);
5646 zcache_free_n_ext(zid, stack, ops, false);
5647 }
5648
5649 void
5650 (zfree_n)(zone_id_t zid, zstack_t stack)
5651 {
5652 zcache_free_n_ext(zid, stack, NULL, true);
5653 }
5654
5655 void
5656 (zfree_nozero)(zone_id_t zid, void *addr)
5657 {
5658 zcache_free_1_ext(zid, addr, NULL);
5659 }
5660
5661 void
5662 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
5663 {
5664 zcache_free_n_ext(zid, stack, NULL, false);
5665 }
5666
5667 void
5668 (zfree)(zone_t zov, void *addr)
5669 {
5670 zone_t zone = zov->z_self;
5671 zone_stats_t zstats = zov->z_stats;
5672 vm_offset_t esize = zone_elem_inner_size(zone);
5673
5674 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
5675 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
5676 vm_memtag_bzero_unchecked(addr, esize);
5677
5678 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
5679 }
5680
5681 __attribute__((noinline))
5682 void
5683 zfree_percpu(union zone_or_view zov, void *addr)
5684 {
5685 zone_t zone = zov.zov_view->zv_zone;
5686 zone_stats_t zstats = zov.zov_view->zv_stats;
5687 vm_offset_t esize = zone_elem_inner_size(zone);
5688
5689 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
5690 assert(zone->z_percpu);
5691 zpercpu_foreach_cpu(i) {
5692 vm_memtag_bzero_unchecked((char *)addr + ptoa(i), esize);
5693 }
5694 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
5695 }
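
/*
 * Illustrative sketch (not part of the allocator): a caller pairing
 * zalloc_percpu() with zfree_percpu().  The view @c my_pcpu_view and the
 * counter type are hypothetical; only the entry points are real.
 *
 *	uint64_t *ctr = zalloc_percpu(my_pcpu_view, Z_WAITOK | Z_ZERO);
 *
 *	zpercpu_foreach_cpu(cpu) {
 *		*zpercpu_get_cpu(ctr, cpu) += 1;
 *	}
 *
 *	zfree_percpu(my_pcpu_view, ctr);
 */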
5696
5697 void
5698 (zfree_id)(zone_id_t zid, void *addr)
5699 {
5700 (zfree)(&zone_array[zid], addr);
5701 }
5702
5703 void
5704 (zfree_ro)(zone_id_t zid, void *addr)
5705 {
5706 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
5707 zone_t zone = zone_by_id(zid);
5708 zone_stats_t zstats = zone->z_stats;
5709 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
5710
5711 #if ZSECURITY_CONFIG(READ_ONLY)
5712 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
5713 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
5714 #else
5715 (void)zid;
5716 bzero(addr, esize);
5717 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
5718 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
5719 }
5720
5721 __attribute__((noinline))
5722 static void
5723 zfree_item_smr(zone_t zone, vm_offset_t addr)
5724 {
5725 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
5726 vm_size_t esize = zone_elem_inner_size(zone);
5727
5728 /*
5729 * This path should be taken extremely rarely:
5730 * it happens only if we failed to allocate an empty bucket.
5731 */
5732 smr_synchronize(zone_cache_smr(cache));
5733
5734 cache->zc_free((void *)addr, esize);
5735 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
5736
5737 zfree_item(zone, addr);
5738 }
5739
5740 void
5741 (zfree_smr)(zone_t zone, void *addr)
5742 {
5743 vm_offset_t elem = (vm_offset_t)addr;
5744 vm_offset_t esize;
5745 zone_cache_t cache;
5746 int cpu;
5747
5748 ZFREE_LOG(zone, elem, 1);
5749
5750 disable_preemption();
5751 cpu = cpu_number();
5752 #if MACH_ASSERT
5753 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5754 assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
5755 #endif
5756 esize = zone_elem_inner_size(zone);
5757 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5758 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
5759 if (__probable(cache)) {
5760 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5761 enable_preemption();
5762 } else {
5763 zfree_item_smr(zone, elem);
5764 }
5765 }
5766
5767 void
5768 (zfree_id_smr)(zone_id_t zid, void *addr)
5769 {
5770 (zfree_smr)(&zone_array[zid], addr);
5771 }
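
/*
 * Illustrative sketch (hypothetical names): the contract zfree_smr()
 * relies on.  @c smr_enter()/@c smr_leave() and the table layout are
 * stand-ins for whatever read-side scheme the client uses; the point is
 * that readers never hold a reference across a grace period, and that
 * the writer never zeroes or reuses the element itself: zfree_smr()
 * defers reuse until the SMR clock has advanced past current readers.
 *
 *	// writer: unpublish, then retire
 *	old = os_atomic_xchg(&table->slot, NULL, release);
 *	zfree_smr(my_smr_zone, old);
 *
 *	// reader: only dereference inside the read-side critical section
 *	smr_enter(my_smr);
 *	e = os_atomic_load(&table->slot, acquire);
 *	if (e != NULL) {
 *		consume(e);
 *	}
 *	smr_leave(my_smr);
 */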
5772
5773 void
5774 kfree_type_impl_internal(
5775 kalloc_type_view_t kt_view,
5776 void *ptr __unsafe_indexable)
5777 {
5778 zone_t zsig = kt_view->kt_zsig;
5779 zone_t z = kt_view->kt_zv.zv_zone;
5780 struct zone_page_metadata *meta;
5781 zone_id_t zidx_meta;
5782 zone_security_flags_t zsflags_meta;
5783 zone_security_flags_t zsflags_z = zone_security_config(z);
5784 zone_security_flags_t zsflags_zsig;
5785
5786 if (NULL == ptr) {
5787 return;
5788 }
5789
5790 meta = zone_meta_from_addr((vm_offset_t) ptr);
5791 zidx_meta = meta->zm_index;
5792 zsflags_meta = zone_security_array[zidx_meta];
5793
5794 if (zone_is_data_kheap(zsflags_z.z_kheap_id) ||
5795 zone_has_index(z, zidx_meta)) {
5796 return (zfree)(&kt_view->kt_zv, ptr);
5797 }
5798 zsflags_zsig = zone_security_config(zsig);
5799 if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
5800 z = zone_array + zidx_meta;
5801 return (zfree)(z, ptr);
5802 }
5803
5804 return (zfree)(kt_view->kt_zearly, ptr);
5805 }
5806
5807 /*! @} */
5808 #endif /* !ZALLOC_TEST */
5809 #pragma mark zalloc
5810 #if !ZALLOC_TEST
5811
5812 /*!
5813 * @defgroup zalloc
5814 * @{
5815 *
5816 * @brief
5817 * The codepath for zone allocations.
5818 *
5819 * @discussion
5820 * There are 4 major ways to allocate memory that end up in the zone allocator:
5821 * - @c zalloc(), @c zalloc_flags(), ...
5822 * - @c zalloc_percpu()
5823 * - @c kalloc*()
5824 * - @c zalloc_permanent()
5825 *
5826 * While permanent zones have their own allocation scheme, all other codepaths
5827 * will eventually go through the @c zalloc_ext() choking point.
5828 *
5829 * @c zalloc_return() is the final function everyone tail calls into,
5830 * which prepares the element for consumption by the caller and deals with
5831 * common treatment (zone logging, tags, kasan, validation, ...).
5832 */
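
/*
 * Illustrative sketch (hypothetical zone @c my_zone and type @c struct foo):
 * the most common way the entry points above are used.  All of these
 * funnel into zalloc_ext() / zfree_ext() internally.
 *
 *	struct foo *f;
 *
 *	f = zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);	// may block, zeroed
 *	...
 *	zfree(my_zone, f);
 *
 *	f = zalloc_flags(my_zone, Z_NOWAIT);		// never blocks
 *	if (f == NULL) {
 *		// Z_NOWAIT allocations can fail
 *	}
 */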
5833
5834 /*!
5835 * @function zalloc_import
5836 *
5837 * @brief
5838 * Import @c n elements into the specified array, opposite of @c zfree_drop().
5839 *
5840 * @param zone The zone to import elements from
5841 * @param elems The array to import into
5842 * @param n The number of elements to import. Must be non-zero,
5843 * and smaller than @c zone->z_elems_free.
5844 */
5845 __header_always_inline vm_size_t
5846 zalloc_import(
5847 zone_t zone,
5848 vm_offset_t *elems,
5849 zalloc_flags_t flags,
5850 uint32_t n)
5851 {
5852 vm_offset_t esize = zone_elem_outer_size(zone);
5853 vm_offset_t offs = zone_elem_inner_offs(zone);
5854 zone_stats_t zs;
5855 int cpu = cpu_number();
5856 uint32_t i = 0;
5857
5858 zs = zpercpu_get_cpu(zone->z_stats, cpu);
5859
5860 if (__improbable(zone_caching_disabled < 0)) {
5861 /*
5862 * In the first 10s after boot, mess with
5863 * the scan position in order to make early
5864 * allocation patterns less predictable.
5865 */
5866 zone_early_scramble_rr(zone, cpu, zs);
5867 }
5868
5869 do {
5870 vm_offset_t page, eidx, size = 0;
5871 struct zone_page_metadata *meta;
5872
5873 if (!zone_pva_is_null(zone->z_pageq_partial)) {
5874 meta = zone_pva_to_meta(zone->z_pageq_partial);
5875 page = zone_pva_to_addr(zone->z_pageq_partial);
5876 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
5877 meta = zone_pva_to_meta(zone->z_pageq_empty);
5878 page = zone_pva_to_addr(zone->z_pageq_empty);
5879 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
5880 } else {
5881 zone_accounting_panic(zone, "z_elems_free corruption");
5882 }
5883
5884 zone_meta_validate(zone, meta, page);
5885
5886 vm_offset_t old_size = meta->zm_alloc_size;
5887 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5888
5889 do {
5890 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
5891 elems[i++] = page + offs + eidx * esize;
5892 size += esize;
5893 } while (i < n && old_size + size + esize <= max_size);
5894
5895 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
5896
5897 if (new_size + esize > max_size) {
5898 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
5899 } else if (old_size == 0) {
5900 /* remove from free, move to intermediate */
5901 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5902 }
5903 } while (i < n);
5904
5905 n = zone_counter_sub(zone, z_elems_free, n);
5906 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
5907 zone->z_elems_free_min = n;
5908 }
5909
5910 return zone_elem_inner_size(zone);
5911 }
5912
5913 __attribute__((always_inline))
5914 static inline vm_offset_t
5915 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
5916 {
5917 #pragma unused(zone, flags)
5918 #if KASAN_CLASSIC || VM_TAG_SIZECLASSES
5919 vm_offset_t esize = zone_elem_inner_size(zone);
5920 #endif
5921
5922 #if HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING)
5923 /*
5924 * Retrieve the memory tag assigned on free and update the pointer
5925 * metadata.
5926 */
5927 #endif /* HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING) */
5928 addr = vm_memtag_load_tag(addr);
5929
5930 #if VM_TAG_SIZECLASSES
5931 if (__improbable(zone->z_uses_tags)) {
5932 struct zone_page_metadata *meta;
5933 vm_offset_t offs;
5934 vm_tag_t *slot;
5935 vm_tag_t tag;
5936
5937 tag = zalloc_flags_get_tag(flags);
5938 meta = zone_meta_from_addr(addr);
5939 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
5940 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5941 offs += ptoa(meta->zm_page_index);
5942 }
5943
5944 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5945 Z_FAST_QUO(offs, zone->z_quo_magic));
5946 *slot = tag;
5947
5948 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
5949 (long)esize);
5950 }
5951 #endif /* VM_TAG_SIZECLASSES */
5952
5953 #if KASAN_CLASSIC
5954 /*
5955 * KASAN_CLASSIC integration of kalloc heaps is handled by kalloc_ext()
5956 */
5957 if ((flags & Z_SKIP_KASAN) == 0) {
5958 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
5959 (flags & Z_PCPU), __builtin_frame_address(0));
5960 }
5961 #endif /* KASAN_CLASSIC */
5962
5963 return addr;
5964 }
5965
5966 __attribute__((always_inline))
5967 void *
5968 zcache_mark_valid(zone_t zone, void *addr)
5969 {
5970 addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
5971 ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
5972 return addr;
5973 }
5974
5975 /*!
5976 * @function zalloc_return
5977 *
5978 * @brief
5979 * Performs the tail-end of the work required on allocations before the caller
5980 * uses them.
5981 *
5982 * @discussion
5983 * This function is called without any zone lock held,
5984 * and with preemption restored to the state it had when @c zalloc_ext() was called.
5985 *
5986 * @param zone The zone we're allocating from.
5987 * @param addr The element we just allocated.
5988 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
5989 * @param elem_size The element size for this zone.
5990 */
5991 __attribute__((always_inline))
5992 static struct kalloc_result
5993 zalloc_return(
5994 zone_t zone,
5995 vm_offset_t addr,
5996 zalloc_flags_t flags,
5997 vm_offset_t elem_size)
5998 {
5999 addr = __zcache_mark_valid(zone, addr, flags);
6000 #if ZALLOC_ENABLE_ZERO_CHECK
6001 zalloc_validate_element(zone, addr, elem_size, flags);
6002 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6003 ZALLOC_LOG(zone, addr, 1);
6004
6005 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6006 return (struct kalloc_result){ (void *)addr, elem_size };
6007 }
6008
6009 static vm_size_t
6010 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6011 {
6012 if (esize <= 512) {
6013 return zone_early_thres_mul * page_size / 4;
6014 } else if (esize < 2048) {
6015 return zone_early_thres_mul * esize * 8;
6016 }
6017 return zone_early_thres_mul * zone->z_chunk_elems * esize;
6018 }
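
/*
 * Worked numbers for the thresholds above (illustrative only: both
 * zone_early_thres_mul and the page size are configuration dependent;
 * assume a multiplier of 1 and 16K pages).  A 256 byte zone stops being
 * treated as "early" (see zs_alloc_not_early in zalloc_item()) once a
 * CPU has allocated page_size / 4 = 4KB from it, a 1KB zone after
 * 8 elements (8KB), and larger zones only after a full chunk worth of
 * elements.
 */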
6019
6020 __attribute__((noinline))
6021 static struct kalloc_result
6022 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6023 {
6024 vm_offset_t esize, addr;
6025 zone_stats_t zs;
6026
6027 zone_lock_nopreempt_check_contention(zone);
6028
6029 zs = zpercpu_get(zstats);
6030 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6031 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6032 zone_expand_async_schedule_if_allowed(zone);
6033 } else {
6034 zone_expand_locked(zone, flags);
6035 }
6036 if (__improbable(zone->z_elems_free == 0)) {
6037 zs->zs_alloc_fail++;
6038 zone_unlock(zone);
6039 if (__improbable(flags & Z_NOFAIL)) {
6040 zone_nofail_panic(zone);
6041 }
6042 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6043 return (struct kalloc_result){ };
6044 }
6045 }
6046
6047 esize = zalloc_import(zone, &addr, flags, 1);
6048 zs->zs_mem_allocated += esize;
6049
6050 if (__improbable(!zone_share_always &&
6051 !os_atomic_load(&zs->zs_alloc_not_early, relaxed))) {
6052 if (flags & Z_SET_NOTEARLY) {
6053 vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6054
6055 if (zs->zs_mem_allocated >= shared_threshold) {
6056 zpercpu_foreach(zs_cpu, zstats) {
6057 os_atomic_store(&zs_cpu->zs_alloc_not_early, 1, relaxed);
6058 }
6059 }
6060 }
6061 }
6062 zone_unlock(zone);
6063
6064 return zalloc_return(zone, addr, flags, esize);
6065 }
6066
6067 static void
6068 zalloc_cached_import(
6069 zone_t zone,
6070 zalloc_flags_t flags,
6071 zone_cache_t cache)
6072 {
6073 uint16_t n_elems = zc_mag_size();
6074
6075 zone_lock_nopreempt(zone);
6076
6077 if (__probable(!zone_caching_disabled &&
6078 zone->z_elems_free > zone->z_elems_rsv / 2)) {
6079 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6080 zone_expand_async_schedule_if_allowed(zone);
6081 }
6082 if (zone->z_elems_free < n_elems) {
6083 n_elems = (uint16_t)zone->z_elems_free;
6084 }
6085 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6086 cache->zc_alloc_cur = n_elems;
6087 }
6088
6089 zone_unlock_nopreempt(zone);
6090 }
6091
6092 static void
6093 zalloc_cached_depot_recirculate(
6094 zone_t zone,
6095 uint32_t depot_max,
6096 zone_cache_t cache,
6097 smr_t smr)
6098 {
6099 smr_seq_t seq;
6100 uint32_t n;
6101
6102 zone_recirc_lock_nopreempt_check_contention(zone);
6103
6104 n = cache->zc_depot.zd_empty;
6105 if (n >= depot_max) {
6106 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6107 n - depot_max / 2, NULL);
6108 }
6109
6110 n = cache->zc_depot.zd_full;
6111 if (smr && n) {
6112 /*
6113 * If SMR is in use, it means smr_poll() failed,
6114 * so rotate the entire chunk of magazines in order
6115 * to let the sequence numbers age.
6116 */
6117 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6118 n, NULL);
6119 smr_deferred_advance_commit(smr, seq);
6120 }
6121
6122 n = depot_max - cache->zc_depot.zd_empty;
6123 if (n > zone->z_recirc.zd_full) {
6124 n = zone->z_recirc.zd_full;
6125 }
6126
6127 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6128 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6129 n, zone);
6130 }
6131
6132 zone_recirc_unlock_nopreempt(zone);
6133 }
6134
6135 static void
6136 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6137 {
6138 zone_smr_free_cb_t zc_free = cache->zc_free;
6139 vm_size_t esize = zone_elem_inner_size(z);
6140
6141 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6142 vm_offset_t elem = mag->zm_elems[i];
6143
6144 zc_free((void *)elem, zone_elem_inner_size(z));
6145 elem = __zcache_mark_invalid(z, elem,
6146 ZFREE_PACK_SIZE(esize, esize));
6147 mag->zm_elems[i] = elem;
6148 }
6149 }
6150
6151 static void
6152 zalloc_cached_recirculate(
6153 zone_t zone,
6154 zone_cache_t cache)
6155 {
6156 zone_magazine_t mag = NULL;
6157
6158 zone_recirc_lock_nopreempt_check_contention(zone);
6159
6160 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6161 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6162 if (zone_cache_smr(cache)) {
6163 zalloc_cached_reuse_smr(zone, cache, mag);
6164 }
6165 mag = zone_magazine_replace(cache, mag, false);
6166 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6167 }
6168
6169 zone_recirc_unlock_nopreempt(zone);
6170 }
6171
6172 __attribute__((noinline))
6173 static zone_cache_t
6174 zalloc_cached_prime(
6175 zone_t zone,
6176 zone_cache_ops_t ops,
6177 zalloc_flags_t flags,
6178 zone_cache_t cache)
6179 {
6180 zone_magazine_t mag = NULL;
6181 uint32_t depot_max;
6182 smr_t smr;
6183
6184 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6185 if (depot_max) {
6186 smr = zone_cache_smr(cache);
6187
6188 zone_depot_lock_nopreempt(cache);
6189
6190 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6191 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6192 smr);
6193 }
6194
6195 if (__probable(cache->zc_depot.zd_full)) {
6196 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6197 if (zone_cache_smr(cache)) {
6198 zalloc_cached_reuse_smr(zone, cache, mag);
6199 }
6200 mag = zone_magazine_replace(cache, mag, false);
6201 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6202 }
6203
6204 zone_depot_unlock_nopreempt(cache);
6205 } else if (zone->z_recirc.zd_full) {
6206 zalloc_cached_recirculate(zone, cache);
6207 }
6208
6209 if (__probable(cache->zc_alloc_cur)) {
6210 return cache;
6211 }
6212
6213 if (ops == NULL) {
6214 zalloc_cached_import(zone, flags, cache);
6215 if (__probable(cache->zc_alloc_cur)) {
6216 return cache;
6217 }
6218 }
6219
6220 return NULL;
6221 }
6222
6223 __attribute__((always_inline))
6224 static inline zone_cache_t
6225 zalloc_cached_get_pcpu_cache(
6226 zone_t zone,
6227 zone_cache_ops_t ops,
6228 int cpu,
6229 zalloc_flags_t flags)
6230 {
6231 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6232
6233 if (__probable(cache->zc_alloc_cur != 0)) {
6234 return cache;
6235 }
6236
6237 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6238 zone_cache_swap_magazines(cache);
6239 return cache;
6240 }
6241
6242 return zalloc_cached_prime(zone, ops, flags, cache);
6243 }
6244
6245
6246 /*!
6247 * @function zalloc_ext
6248 *
6249 * @brief
6250 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6251 */
6252 __mockable struct kalloc_result
6253 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6254 {
6255 /*
6256 * KASan uses zalloc() for fakestack, which can be called anywhere.
6257 * However, we make sure these calls can never block.
6258 */
6259 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6260 #if KASAN_FAKESTACK
6261 zone->z_kasan_fakestacks ||
6262 #endif /* KASAN_FAKESTACK */
6263 ml_get_interrupts_enabled() ||
6264 ml_is_quiescing() ||
6265 debug_mode_active(),
6266 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6267
6268 /*
6269 * Make sure Z_NOFAIL was not obviously misused
6270 */
6271 if (flags & Z_NOFAIL) {
6272 assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6273 }
6274
6275 #if VM_TAG_SIZECLASSES
6276 if (__improbable(zone->z_uses_tags)) {
6277 vm_tag_t tag = zalloc_flags_get_tag(flags);
6278
6279 if (flags & Z_VM_TAG_BT_BIT) {
6280 tag = vm_tag_bt() ?: tag;
6281 }
6282 if (tag != VM_KERN_MEMORY_NONE) {
6283 tag = vm_tag_will_update_zone(tag,
6284 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6285 }
6286 if (tag == VM_KERN_MEMORY_NONE) {
6287 zone_security_flags_t zsflags = zone_security_config(zone);
6288
6289 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6290 tag = VM_KERN_MEMORY_KALLOC_DATA;
6291 } else if (zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED) {
6292 tag = VM_KERN_MEMORY_KALLOC_SHARED;
6293 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6294 zsflags.z_kalloc_type) {
6295 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6296 } else {
6297 tag = VM_KERN_MEMORY_KALLOC;
6298 }
6299 }
6300 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6301 }
6302 #endif /* VM_TAG_SIZECLASSES */
6303
6304 disable_preemption();
6305
6306 #if ZALLOC_ENABLE_ZERO_CHECK
6307 if (zalloc_skip_zero_check()) {
6308 flags |= Z_NOZZC;
6309 }
6310 #endif
6311
6312 if (zone->z_pcpu_cache) {
6313 zone_cache_t cache;
6314 vm_offset_t index, addr, esize;
6315 int cpu = cpu_number();
6316
6317 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6318 if (__probable(cache)) {
6319 esize = zone_elem_inner_size(zone);
6320 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6321 index = --cache->zc_alloc_cur;
6322 addr = cache->zc_alloc_elems[index];
6323 cache->zc_alloc_elems[index] = 0;
6324 enable_preemption();
6325 return zalloc_return(zone, addr, flags, esize);
6326 }
6327 }
6328
6329 __attribute__((musttail))
6330 return zalloc_item(zone, zstats, flags);
6331 }
6332
6333 __attribute__((always_inline))
6334 static inline zstack_t
6335 zcache_alloc_stack_from_cpu(
6336 zone_id_t zid,
6337 zone_cache_t cache,
6338 zstack_t stack,
6339 uint32_t n,
6340 zone_cache_ops_t ops)
6341 {
6342 vm_offset_t *p;
6343
6344 n = MIN(n, cache->zc_alloc_cur);
6345 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6346 cache->zc_alloc_cur -= n;
6347 stack.z_count += n;
6348
6349 do {
6350 vm_offset_t e = *--p;
6351
6352 *p = 0;
6353 if (ops) {
6354 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6355 } else {
6356 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6357 }
6358 zstack_push_no_delta(&stack, (void *)e);
6359 } while (--n > 0);
6360
6361 return stack;
6362 }
6363
6364 __attribute__((noinline))
6365 static zstack_t
6366 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6367 {
6368 zone_t zone = zone_by_id(zid);
6369 zone_stats_t zstats = zone->z_stats;
6370 int cpu;
6371
6372 count -= stack.z_count;
6373
6374 disable_preemption();
6375 cpu = cpu_number();
6376 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6377 count * zone_elem_inner_size(zone);
6378 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6379 enable_preemption();
6380
6381 return stack;
6382 }
6383
6384 #define ZCACHE_ALLOC_RETRY ((void *)-1)
6385
6386 __attribute__((noinline))
6387 static void *
6388 zcache_alloc_one(
6389 zone_id_t zid,
6390 zalloc_flags_t flags,
6391 zone_cache_ops_t ops)
6392 {
6393 zone_t zone = zone_by_id(zid);
6394 void *o;
6395
6396 /*
6397 * First try to allocate in rudimentary zones without ever going into
6398 * __ZONE_EXHAUSTED_AND_WAITING_HARD__() by clearing Z_NOFAIL.
6399 */
6400 enable_preemption();
6401 o = ops->zc_op_alloc(zid, flags & ~Z_NOFAIL);
6402 if (__probable(o)) {
6403 os_atomic_inc(&zone->z_elems_avail, relaxed);
6404 } else if (__probable(flags & Z_NOFAIL)) {
6405 zone_cache_t cache;
6406 vm_offset_t index;
6407 int cpu;
6408
6409 zone_lock(zone);
6410
6411 cpu = cpu_number();
6412 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6413 o = ZCACHE_ALLOC_RETRY;
6414 if (__probable(cache)) {
6415 index = --cache->zc_alloc_cur;
6416 o = (void *)cache->zc_alloc_elems[index];
6417 cache->zc_alloc_elems[index] = 0;
6418 o = ops->zc_op_mark_valid(zid, o);
6419 } else if (zone->z_elems_free == 0) {
6420 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone);
6421 }
6422
6423 zone_unlock(zone);
6424 }
6425
6426 return o;
6427 }
6428
6429 __attribute__((always_inline))
6430 static zstack_t
6431 zcache_alloc_n_ext(
6432 zone_id_t zid,
6433 uint32_t count,
6434 zalloc_flags_t flags,
6435 zone_cache_ops_t ops)
6436 {
6437 zstack_t stack = { };
6438 zone_cache_t cache;
6439 zone_t zone;
6440 int cpu;
6441
6442 disable_preemption();
6443 cpu = cpu_number();
6444 zone = zone_by_id(zid);
6445 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6446 count * zone_elem_inner_size(zone);
6447
6448 for (;;) {
6449 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6450 if (__probable(cache)) {
6451 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6452 count - stack.z_count, ops);
6453 enable_preemption();
6454 } else {
6455 void *o;
6456
6457 if (ops) {
6458 o = zcache_alloc_one(zid, flags, ops);
6459 } else {
6460 o = zalloc_item(zone, zone->z_stats, flags).addr;
6461 }
6462 if (__improbable(o == NULL)) {
6463 return zcache_alloc_fail(zid, stack, count);
6464 }
6465 if (ops == NULL || o != ZCACHE_ALLOC_RETRY) {
6466 zstack_push(&stack, o);
6467 }
6468 }
6469
6470 if (stack.z_count == count) {
6471 break;
6472 }
6473
6474 disable_preemption();
6475 cpu = cpu_number();
6476 }
6477
6478 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6479
6480 return stack;
6481 }
6482
6483 zstack_t
6484 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6485 {
6486 return zcache_alloc_n_ext(zid, count, flags, NULL);
6487 }
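
/*
 * Illustrative sketch (hypothetical zone id @c MY_ZONE_ID): batched
 * allocation with element stacks.  On failure the returned stack may
 * hold fewer than the requested count; @c z_count says how many were
 * actually allocated.
 *
 *	zstack_t batch = zalloc_n(MY_ZONE_ID, 16, Z_WAITOK);
 *
 *	while (batch.z_count) {
 *		consume(zstack_pop(&batch));
 *	}
 *
 * A caller holding a stack of elements can symmetrically return them in
 * one call with zfree_n(MY_ZONE_ID, stack).
 */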
6488
6489 zstack_t
6490 (zcache_alloc_n)(
6491 zone_id_t zid,
6492 uint32_t count,
6493 zalloc_flags_t flags,
6494 zone_cache_ops_t ops)
6495 {
6496 __builtin_assume(ops != NULL);
6497 return zcache_alloc_n_ext(zid, count, flags, ops);
6498 }
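
/*
 * Illustrative sketch (hypothetical backing functions; the exact struct
 * spelling lives in the zalloc headers, only the field names below are
 * taken from this file): the shape of a zone_cache_ops_t backing a
 * custom cache.  The magazine layer calls zc_op_alloc() / zc_op_free()
 * when it misses or overflows, and zc_op_mark_valid() /
 * zc_op_mark_invalid() as elements move in and out of magazines.
 *
 *	static const struct zone_cache_ops my_cache_ops = {
 *		.zc_op_alloc        = my_backing_alloc,
 *		.zc_op_mark_valid   = my_mark_valid,
 *		.zc_op_mark_invalid = my_mark_invalid,
 *		.zc_op_free         = my_backing_free,
 *	};
 */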
6499
6500 __attribute__((always_inline))
6501 void *
6502 zalloc(zone_t zov)
6503 {
6504 return zalloc_flags(zov, Z_WAITOK);
6505 }
6506
6507 __attribute__((always_inline))
6508 void *
6509 zalloc_noblock(zone_t zov)
6510 {
6511 return zalloc_flags(zov, Z_NOWAIT);
6512 }
6513
6514 void *
6515 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
6516 {
6517 zone_t zone = zov->z_self;
6518 zone_stats_t zstats = zov->z_stats;
6519
6520 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6521 assert(!zone->z_percpu && !zone->z_permanent);
6522 return zalloc_ext(zone, zstats, flags).addr;
6523 }
6524
6525 __attribute__((always_inline))
6526 void *
6527 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6528 {
6529 return (zalloc_flags)(zone_by_id(zid), flags);
6530 }
6531
6532 void *
6533 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6534 {
6535 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6536 zone_t zone = zone_by_id(zid);
6537 zone_stats_t zstats = zone->z_stats;
6538 struct kalloc_result kr;
6539
6540 kr = zalloc_ext(zone, zstats, flags);
6541 #if ZSECURITY_CONFIG(READ_ONLY) && !__BUILDING_XNU_LIBRARY__ /* zalloc mocks don't create ro memory */
6542 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6543 if (kr.addr) {
6544 zone_require_ro(zid, kr.size, kr.addr);
6545 }
6546 #endif
6547 return kr.addr;
6548 }
6549
6550 #if ZSECURITY_CONFIG(READ_ONLY)
6551
6552 __attribute__((always_inline))
6553 static bool
6554 from_current_stack(vm_offset_t addr, vm_size_t size)
6555 {
6556 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6557 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6558
6559 addr = vm_memtag_canonicalize_kernel(addr);
6560
6561 return (addr >= start) && (addr + size < end);
6562 }
6563
6564 /*
6565 * Check if an address is from const memory, i.e. TEXT or DATA CONST segments
6566 * or the SECURITY_READ_ONLY_LATE section.
6567 */
6568 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
6569 __attribute__((always_inline))
6570 static bool
6571 from_const_memory(const vm_offset_t addr, vm_size_t size)
6572 {
6573 return rorgn_contains(addr, size, true);
6574 }
6575 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
6576 __attribute__((always_inline))
6577 static bool
6578 from_const_memory(const vm_offset_t addr, vm_size_t size)
6579 {
6580 #pragma unused(addr, size)
6581 return true;
6582 }
6583 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
6584
6585 __abortlike
6586 static void
6587 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6588 const vm_offset_t src, vm_size_t src_size)
6589 {
6590 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
6591 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
6592 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
6593 extern vm_offset_t rorgn_begin;
6594 extern vm_offset_t rorgn_end;
6595 #else
6596 vm_offset_t const rorgn_begin = 0;
6597 vm_offset_t const rorgn_end = 0;
6598 #endif
6599
6600 if (from_ro_map(src, src_size)) {
6601 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6602 zone_t dst_zone = &zone_array[zid];
6603 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
6604 " (expected: %s, actual: %s)", (void *)src, elem, src_zone->z_name,
6605 dst_zone->z_name);
6606 }
6607
6608 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
6609 "current stack (%p - %p) or const memory (phys %p - %p)",
6610 (void *)src, (void*)kvtophys(src),
6611 (void *)zone_info.zi_ro_range.min_address,
6612 (void *)zone_info.zi_ro_range.max_address,
6613 (void *)stack_start, (void *)stack_end,
6614 (void *)rorgn_begin, (void *)rorgn_end);
6615 }
6616
6617 __attribute__((always_inline))
6618 static void
6619 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6620 const vm_offset_t src, vm_size_t src_size)
6621 {
6622 if (from_current_stack(src, src_size) ||
6623 (from_ro_map(src, src_size) &&
6624 zid == zone_index_from_ptr((void *)src)) ||
6625 from_const_memory(src, src_size)) {
6626 return;
6627 }
6628 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6629 }
6630
6631 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6632
6633 __mockable __attribute__((noinline))
6634 void
6635 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
6636 const void *new_data, vm_size_t new_data_size)
6637 {
6638 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6639
6640 #if ZSECURITY_CONFIG(READ_ONLY)
6641 bool skip_src_check = false;
6642
6643 /*
6644 * The OSEntitlements RO-zone is treated a little differently. For more
6645 * information: rdar://100518485.
6646 */
6647 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
6648 code_signing_config_t cs_config = 0;
6649
6650 code_signing_configuration(NULL, &cs_config);
6651 if (cs_config & CS_CONFIG_CSM_ENABLED) {
6652 skip_src_check = true;
6653 }
6654 }
6655
6656 if (skip_src_check == false) {
6657 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
6658 new_data_size);
6659 }
6660 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
6661 (vm_offset_t) new_data, new_data_size);
6662 #else
6663 (void)zid;
6664 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
6665 #endif
6666 }
6667
6668 __attribute__((noinline))
6669 uint64_t
6670 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
6671 zro_atomic_op_t op, uint64_t value)
6672 {
6673 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6674
6675 #if ZSECURITY_CONFIG(READ_ONLY)
6676 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
6677 #else
6678 (void)zid;
6679 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
6680 #endif
6681 return value;
6682 }
6683
6684 void
6685 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
6686 {
6687 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6688 #if ZSECURITY_CONFIG(READ_ONLY)
6689 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
6690 #else
6691 (void)zid;
6692 bzero((void *)((uintptr_t)elem + offset), size);
6693 #endif
6694 }
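
/*
 * Illustrative sketch (hypothetical read-only zone id @c MY_RO_ZID and
 * type @c struct cred_ro): read-only zone elements are never written
 * through the returned pointer; every mutation goes through the helpers
 * above, and the source of a zalloc_ro_mut() lives on the caller's
 * stack so that zalloc_ro_mut_validate_src() accepts it.
 *
 *	struct cred_ro *c = zalloc_ro(MY_RO_ZID, Z_WAITOK | Z_ZERO);
 *
 *	struct cred_ro tmpl = { .cr_flags = 1 };	// stack-built template
 *	zalloc_ro_mut(MY_RO_ZID, c, 0, &tmpl, sizeof(tmpl));
 *
 *	zalloc_ro_clear(MY_RO_ZID, c,
 *	    offsetof(struct cred_ro, cr_flags), sizeof(c->cr_flags));
 *
 *	zfree_ro(MY_RO_ZID, c);
 */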
6695
6696 /*
6697 * This function will run in the PPL and needs to be robust
6698 * against an attacker with arbitrary kernel write.
6699 */
6700
6701 #if ZSECURITY_CONFIG(READ_ONLY) && !defined(__BUILDING_XNU_LIBRARY__)
6702
6703 __abortlike
6704 static void
6705 zone_id_require_ro_panic(zone_id_t zid, void *addr)
6706 {
6707 struct zone_size_params p = zone_ro_size_params[zid];
6708 vm_offset_t elem = (vm_offset_t)addr;
6709 uint32_t zindex;
6710 zone_t other;
6711 zone_t zone = &zone_array[zid];
6712
6713 if (!from_ro_map(addr, 1)) {
6714 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
6715 }
6716
6717 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
6718 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
6719 }
6720
6721 zindex = zone_index_from_ptr(addr);
6722 other = &zone_array[zindex];
6723 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
6724 panic("zone_require_ro failed: invalid zone index %d "
6725 "(addr: %p, expected: %s%s)", zindex,
6726 addr, zone_heap_name(zone), zone->z_name);
6727 } else {
6728 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
6729 "(addr: %p, expected: %s%s)",
6730 zindex, zone_heap_name(other), other->z_name,
6731 addr, zone_heap_name(zone), zone->z_name);
6732 }
6733 }
6734
6735 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6736
6737 __attribute__((always_inline))
6738 void
6739 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
6740 {
6741 #if ZSECURITY_CONFIG(READ_ONLY) && !defined(__BUILDING_XNU_LIBRARY__) \
6742 /* can't do this in user-mode because there's no zones submap */
6743 struct zone_size_params p = zone_ro_size_params[zid];
6744 vm_offset_t elem = (vm_offset_t)addr;
6745
6746 if (!from_ro_map(addr, 1) ||
6747 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
6748 zid != zone_meta_from_addr(elem)->zm_index) {
6749 zone_id_require_ro_panic(zid, addr);
6750 }
6751 #else
6752 #pragma unused(zid, addr)
6753 #endif
6754 }
6755
6756 void *
6757 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
6758 {
6759 zone_t zone = zov.zov_view->zv_zone;
6760 zone_stats_t zstats = zov.zov_view->zv_stats;
6761
6762 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6763 assert(zone->z_percpu);
6764 flags |= Z_PCPU;
6765 return zalloc_ext(zone, zstats, flags).addr;
6766 }
6767
6768 static void *
6769 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
6770 {
6771 struct zone_page_metadata *page_meta;
6772 vm_offset_t offs, addr;
6773 zone_pva_t pva;
6774
6775 assert(ml_get_interrupts_enabled() ||
6776 ml_is_quiescing() ||
6777 debug_mode_active() ||
6778 startup_phase < STARTUP_SUB_EARLY_BOOT);
6779
6780 size = (size + mask) & ~mask;
6781 assert(size <= PAGE_SIZE);
6782
6783 zone_lock(zone);
6784 assert(zone->z_self == zone);
6785
6786 for (;;) {
6787 pva = zone->z_pageq_partial;
6788 while (!zone_pva_is_null(pva)) {
6789 page_meta = zone_pva_to_meta(pva);
6790 if (page_meta->zm_bump + size <= PAGE_SIZE) {
6791 goto found;
6792 }
6793 pva = page_meta->zm_page_next;
6794 }
6795
6796 zone_expand_locked(zone, Z_WAITOK);
6797 }
6798
6799 found:
6800 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
6801 page_meta->zm_bump = (uint16_t)(offs + size);
6802 page_meta->zm_alloc_size += size;
6803 zone->z_elems_free -= size;
6804 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
6805
6806 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
6807 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
6808 }
6809
6810 zone_unlock(zone);
6811
6812 if (zone->z_tbi_tag) {
6813 addr = vm_memtag_load_tag(offs + zone_pva_to_addr(pva));
6814 } else {
6815 addr = offs + zone_pva_to_addr(pva);
6816 }
6817
6818 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6819 return (void *)addr;
6820 }
6821
6822 static void *
6823 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
6824 {
6825 vm_offset_t addr;
6826
6827 kernel_memory_allocate(kernel_map, &addr, size, mask,
6828 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
6829
6830 return (void *)addr;
6831 }
6832
6833 __mockable void *
6834 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
6835 {
6836 if (size <= PAGE_SIZE) {
6837 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
6838 return _zalloc_permanent(zone, size, mask);
6839 }
6840 return _zalloc_permanent_large(size, mask, tag);
6841 }
6842
6843 __mockable void *
6844 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
6845 {
6846 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
6847 return _zalloc_permanent(zone, size, mask);
6848 }
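
/*
 * Illustrative sketch: permanent allocations are never freed, so they
 * are reserved for structures that live for the whole uptime.  The mask
 * is an alignment mask, as in _zalloc_permanent() above (e.g. 63 for
 * 64-byte alignment); @c my_table and its element count are
 * hypothetical.
 *
 *	my_table = zalloc_permanent_tag(n * sizeof(*my_table),
 *	    63, VM_KERN_MEMORY_KALLOC);	// 64-byte aligned, never freed
 */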
6849
6850 /*! @} */
6851 #endif /* !ZALLOC_TEST */
6852 #pragma mark zone GC / trimming
6853 #if !ZALLOC_TEST
6854
6855 static thread_call_data_t zone_trim_callout;
6856 EVENT_DEFINE(ZONE_EXHAUSTED);
6857
6858 static void
6859 zone_reclaim_chunk(
6860 zone_t z,
6861 struct zone_page_metadata *meta,
6862 uint32_t free_count)
6863 {
6864 vm_address_t page_addr;
6865 vm_size_t size_to_free;
6866 uint32_t bitmap_ref;
6867 uint32_t page_count;
6868 zone_security_flags_t zsflags = zone_security_config(z);
6869 bool sequester = !z->z_destroyed;
6870 bool oob_guard = false;
6871
6872 if (zone_submap_is_sequestered(zsflags)) {
6873 /*
6874 * If the entire map is sequestered, we can't return the VA.
6875 * It stays pinned to the zone forever.
6876 */
6877 sequester = true;
6878 }
6879
6880 zone_meta_queue_pop(z, &z->z_pageq_empty);
6881
6882 page_addr = zone_meta_to_addr(meta);
6883 page_count = meta->zm_chunk_len;
6884 oob_guard = meta->zm_guarded;
6885
6886 if (meta->zm_alloc_size) {
6887 zone_metadata_corruption(z, meta, "alloc_size");
6888 }
6889 if (z->z_percpu) {
6890 if (page_count != 1) {
6891 zone_metadata_corruption(z, meta, "page_count");
6892 }
6893 size_to_free = ptoa(z->z_chunk_pages);
6894 zone_remove_wired_pages(z, z->z_chunk_pages);
6895 } else {
6896 if (page_count > z->z_chunk_pages) {
6897 zone_metadata_corruption(z, meta, "page_count");
6898 }
6899 if (page_count < z->z_chunk_pages) {
6900 /* Dequeue non populated VA from z_pageq_va */
6901 zone_meta_remqueue(z, meta + page_count);
6902 }
6903 size_to_free = ptoa(page_count);
6904 zone_remove_wired_pages(z, page_count);
6905 }
6906
6907 zone_counter_sub(z, z_elems_free, free_count);
6908 zone_counter_sub(z, z_elems_avail, free_count);
6909 zone_counter_sub(z, z_wired_empty, page_count);
6910 zone_counter_sub(z, z_wired_cur, page_count);
6911
6912 if (z->z_pcpu_cache == NULL) {
6913 if (z->z_elems_free_min < free_count) {
6914 z->z_elems_free_min = 0;
6915 } else {
6916 z->z_elems_free_min -= free_count;
6917 }
6918 }
6919 if (z->z_elems_free_wma < free_count) {
6920 z->z_elems_free_wma = 0;
6921 } else {
6922 z->z_elems_free_wma -= free_count;
6923 }
6924
6925 bitmap_ref = 0;
6926 if (sequester) {
6927 if (meta->zm_inline_bitmap) {
6928 for (int i = 0; i < meta->zm_chunk_len; i++) {
6929 meta[i].zm_bitmap = 0;
6930 }
6931 } else {
6932 bitmap_ref = meta->zm_bitmap;
6933 meta->zm_bitmap = 0;
6934 }
6935 meta->zm_chunk_len = 0;
6936 } else {
6937 if (!meta->zm_inline_bitmap) {
6938 bitmap_ref = meta->zm_bitmap;
6939 }
6940 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
6941 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
6942 }
6943
6944 #if CONFIG_ZLEAKS
6945 if (__improbable(zleak_should_disable_for_zone(z) &&
6946 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
6947 thread_call_enter(&zone_leaks_callout);
6948 }
6949 #endif /* CONFIG_ZLEAKS */
6950
6951 zone_unlock(z);
6952
6953 if (bitmap_ref) {
6954 zone_bits_free(bitmap_ref);
6955 }
6956
6957 /* Free the pages for metadata and account for them */
6958 #if KASAN_CLASSIC
6959 if (z->z_percpu) {
6960 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
6961 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
6962 zone_elem_outer_size(z),
6963 zone_elem_outer_offs(z),
6964 zone_elem_redzone(z));
6965 }
6966 } else {
6967 kasan_zmem_remove(page_addr, size_to_free,
6968 zone_elem_outer_size(z),
6969 zone_elem_outer_offs(z),
6970 zone_elem_redzone(z));
6971 }
6972 #endif /* KASAN_CLASSIC */
6973
6974 if (sequester) {
6975 kma_flags_t flags = zone_kma_flags(z, zsflags, 0) | KMA_KOBJECT;
6976 kernel_memory_depopulate(page_addr, size_to_free,
6977 flags, VM_KERN_MEMORY_ZONE);
6978 } else {
6979 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
6980 kmem_free(zone_submap(zsflags), page_addr,
6981 ptoa(z->z_chunk_pages + oob_guard));
6982 if (oob_guard) {
6983 os_atomic_dec(&zone_guard_pages, relaxed);
6984 }
6985 }
6986
6987 thread_yield_to_preemption();
6988
6989 zone_lock(z);
6990
6991 if (sequester) {
6992 zone_meta_queue_push(z, &z->z_pageq_va, meta);
6993 }
6994 }
6995
6996 static void
6997 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
6998 {
6999 z_debug_assert(n <= zc_mag_size());
7000
7001 for (uint16_t i = 0; i < n; i++) {
7002 vm_offset_t addr = elems[i];
7003 elems[i] = 0;
7004 zfree_drop(z, addr);
7005 }
7006
7007 z->z_elems_free += n;
7008 }
7009
7010 static void
7011 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7012 {
7013 z_debug_assert(n <= zc_mag_size());
7014 zone_cache_ops_t ops = zcache_ops[zid];
7015
7016 for (uint16_t i = 0; i < n; i++) {
7017 vm_offset_t addr = elems[i];
7018 elems[i] = 0;
7019 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7020 ops->zc_op_free(zid, (void *)addr);
7021 }
7022
7023 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7024 }
7025
7026 static void
7027 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7028 {
7029 zpercpu_foreach(zc, z->z_pcpu_cache) {
7030 zone_depot_lock(zc);
7031
7032 if (zc->zc_depot.zd_full > (target + 1) / 2) {
7033 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7034 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7035 }
7036
7037 if (zc->zc_depot.zd_empty > target / 2) {
7038 uint32_t n = zc->zc_depot.zd_empty - target / 2;
7039 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7040 }
7041
7042 zone_depot_unlock(zc);
7043 }
7044 }
7045
7046 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7047 ZONE_RECLAIM_TRIM,
7048 ZONE_RECLAIM_DRAIN,
7049 ZONE_RECLAIM_DESTROY,
7050 });
7051
7052 static void
7053 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7054 {
7055 uint32_t depot_max = 0;
7056 bool cleanup = mode != ZONE_RECLAIM_TRIM;
7057
7058 if (z->z_depot_cleanup) {
7059 z->z_depot_cleanup = false;
7060 depot_max = z->z_depot_size;
7061 cleanup = true;
7062 }
7063
7064 if (cleanup) {
7065 zone_depot_trim(z, depot_max, zd);
7066 }
7067
7068 if (mode == ZONE_RECLAIM_DESTROY) {
7069 zpercpu_foreach(zc, z->z_pcpu_cache) {
7070 zone_reclaim_elements(z, zc->zc_alloc_cur,
7071 zc->zc_alloc_elems);
7072 zone_reclaim_elements(z, zc->zc_free_cur,
7073 zc->zc_free_elems);
7074 zc->zc_alloc_cur = zc->zc_free_cur = 0;
7075 }
7076
7077 z->z_recirc_empty_min = 0;
7078 z->z_recirc_empty_wma = 0;
7079 z->z_recirc_full_min = 0;
7080 z->z_recirc_full_wma = 0;
7081 z->z_recirc_cont_cur = 0;
7082 z->z_recirc_cont_wma = 0;
7083 }
7084 }
7085
7086 static void
7087 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
7088 {
7089 assert(zd->zd_empty == 0);
7090 assert(zd->zd_full == 0);
7091
7092 zone_recirc_lock_nopreempt(z);
7093
7094 *zd = z->z_recirc;
7095 if (zd->zd_full == 0) {
7096 zd->zd_tail = &zd->zd_head;
7097 }
7098 zone_depot_init(&z->z_recirc);
7099 z->z_recirc_empty_min = 0;
7100 z->z_recirc_empty_wma = 0;
7101 z->z_recirc_full_min = 0;
7102 z->z_recirc_full_wma = 0;
7103
7104 zone_recirc_unlock_nopreempt(z);
7105 }
7106
7107 static void
7108 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7109 {
7110 for (;;) {
7111 uint64_t maxtime = mach_continuous_speculative_time() +
7112 zc_free_batch_timeout();
7113 uint32_t budget = zc_free_batch_size();
7114 uint32_t count;
7115 bool done = true;
7116
7117 zone_recirc_lock_nopreempt(z);
7118 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7119 z->z_recirc_empty_min);
7120 assert(count <= z->z_recirc.zd_empty);
7121
7122 if (count > budget) {
7123 count = budget;
7124 done = false;
7125 }
7126 if (count) {
7127 budget -= count;
7128 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7129 z->z_recirc_empty_min -= count;
7130 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7131 }
7132
7133 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7134 z->z_recirc_full_min);
7135 assert(count <= z->z_recirc.zd_full);
7136
7137 if (count > budget) {
7138 count = budget;
7139 done = false;
7140 }
7141 if (count) {
7142 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7143 z->z_recirc_full_min -= count;
7144 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7145 }
7146
7147 zone_recirc_unlock_nopreempt(z);
7148
7149 if (done) {
7150 return;
7151 }
7152
7153 if (mach_continuous_speculative_time() < maxtime) {
7154 continue;
7155 }
7156
7157 /*
7158 * We have held preemption disabled for too long. Drop and
7159 * retake the lock to allow a pending preemption to occur.
7160 */
7161 #if SCHED_HYGIENE_DEBUG
7162 abandon_preemption_disable_measurement();
7163 #endif
7164 zone_unlock(z);
7165 zone_lock(z);
7166 maxtime = mach_continuous_speculative_time() +
7167 zc_free_batch_timeout();
7168 }
7169 }
7170
7171 /*!
7172 * @function zone_reclaim
7173 *
7174 * @brief
7175 * Drains or trims the zone.
7176 *
7177 * @discussion
7178 * Draining the zone frees all of its elements.
7179 *
7180 * Trimming the zone tries to respect the working set size, and avoids draining
7181 * the depot when it's not necessary.
7182 *
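 * When reclaiming cached magazines, the work is done in bounded batches:
 * the zone lock is dropped and retaken after roughly zc_free_batch_size()
 * elements, or when the batch timeout elapses, so that preemption is not
 * held off for too long.
 *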
7183 * @param z The zone to reclaim from
7184 * @param mode The purpose of this reclaim.
7185 */
7186 static void
7187 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7188 {
7189 struct zone_depot zd;
7190
7191 zone_depot_init(&zd);
7192
7193 zone_lock(z);
7194
7195 if (mode == ZONE_RECLAIM_DESTROY) {
7196 if (!z->z_destructible || z->z_elems_rsv) {
7197 panic("zdestroy: Zone %s%s isn't destructible",
7198 zone_heap_name(z), z->z_name);
7199 }
7200
7201 if (!z->z_self || z->z_expander ||
7202 z->z_async_refilling || z->z_expanding_wait) {
7203 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7204 zone_heap_name(z), z->z_name);
7205 }
7206
7207 #if !KASAN_CLASSIC
7208 /*
7209 * Unset the valid bit. We'll hit an assert failure on further
7210 * operations on this zone, until zinit() is called again.
7211 *
7212 * Leave the zone valid for KASan as we will see zfree's on
7213 * quarantined free elements even after the zone is destroyed.
7214 */
7215 z->z_self = NULL;
7216 #endif
7217 z->z_destroyed = true;
7218 } else if (z->z_destroyed) {
7219 return zone_unlock(z);
7220 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7221 /* If the zone is under its reserve level, leave it alone. */
7222 return zone_unlock(z);
7223 }
7224
7225 if (z->z_pcpu_cache) {
7226 zone_magazine_t mag;
7227 uint32_t freed = 0;
7228
7229 /*
7230 * This is all done with the zone lock held on purpose.
7231 * The work here is O(ncpu), which should still be short.
7232 *
7233 * We need to keep the lock held until we have reclaimed
7234 * at least a few magazines, otherwise if the zone has no
7235 * free elements outside of the depot, a thread performing
7236 * a concurrent allocation could try to grow the zone
7237 * while we're trying to drain it.
7238 */
7239 if (mode == ZONE_RECLAIM_TRIM) {
7240 zone_reclaim_recirc_trim(z, &zd);
7241 } else {
7242 zone_reclaim_recirc_drain(z, &zd);
7243 }
7244 zone_reclaim_pcpu(z, mode, &zd);
7245
7246 if (z->z_chunk_elems) {
7247 uint64_t maxtime = mach_continuous_speculative_time() +
7248 zc_free_batch_timeout();
7249 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7250 smr_t smr = zone_cache_smr(cache);
7251
7252 while (zd.zd_full) {
7253 mag = zone_depot_pop_head_full(&zd, NULL);
7254 if (smr) {
7255 smr_wait(smr, mag->zm_seq);
7256 zalloc_cached_reuse_smr(z, cache, mag);
7257 freed += zc_mag_size();
7258 }
7259 zone_reclaim_elements(z, zc_mag_size(),
7260 mag->zm_elems);
7261 zone_depot_insert_head_empty(&zd, mag);
7262
7263 freed += zc_mag_size();
7264 if (freed >= zc_free_batch_size() ||
7265 mach_continuous_speculative_time() >= maxtime) {
7266 #if SCHED_HYGIENE_DEBUG
7267 abandon_preemption_disable_measurement();
7268 #endif
7269 zone_unlock(z);
7270 zone_magazine_free_list(&zd);
7271 thread_yield_to_preemption();
7272 zone_lock(z);
7273 freed = 0;
7274 maxtime = mach_continuous_speculative_time() +
7275 zc_free_batch_timeout();
7276 }
7277 }
7278 } else {
7279 zone_id_t zid = zone_index(z);
7280
7281 zone_unlock(z);
7282
7283 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7284
7285 while (zd.zd_full) {
7286 mag = zone_depot_pop_head_full(&zd, NULL);
7287 zcache_reclaim_elements(zid, zc_mag_size(),
7288 mag->zm_elems);
7289 zone_magazine_free(mag);
7290 }
7291
7292 goto cleanup;
7293 }
7294 }
7295
7296 while (!zone_pva_is_null(z->z_pageq_empty)) {
7297 struct zone_page_metadata *meta;
7298 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7299
7300 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7301 limit = MAX(limit, z->z_elems_free -
7302 MIN(z->z_elems_free_min, z->z_elems_free_wma / Z_WMA_UNIT));
7303 }
7304
7305 meta = zone_pva_to_meta(z->z_pageq_empty);
7306 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7307
7308 if (zone_count_free(z) - count < limit) {
7309 break;
7310 }
7311
7312 zone_reclaim_chunk(z, meta, count);
7313 }
7314
7315 zone_unlock(z);
7316
7317 cleanup:
7318 zone_magazine_free_list(&zd);
7319 }
7320
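/*
 * zone_drain() serializes with zone_gc() through zone_gc_lock and marks the
 * calling thread TH_OPT_ZONE_PRIV for the duration of the reclaim.
 *
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * subsystem that owns a destructible zone might respond to memory pressure
 * by draining it explicitly:
 *
 *	static zone_t my_zone;		// assumed to be created elsewhere
 *
 *	static void
 *	my_handle_memory_pressure(void)
 *	{
 *		zone_drain(my_zone);	// returns free elements and empty pages
 *	}
 */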
7321 void
7322 zone_drain(zone_t zone)
7323 {
7324 current_thread()->options |= TH_OPT_ZONE_PRIV;
7325 lck_mtx_lock(&zone_gc_lock);
7326 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7327 lck_mtx_unlock(&zone_gc_lock);
7328 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7329 }
7330
7331 void
7332 zcache_drain(zone_id_t zid)
7333 {
7334 zone_drain(zone_by_id(zid));
7335 }
7336
7337 static void
7338 zone_reclaim_all(zone_reclaim_mode_t mode)
7339 {
7340 /*
7341 * Start with zcaches, so that they flow into the regular zones.
7342 *
7343 * Then the zones with VA sequester since depopulating
7344 * pages will not need to allocate vm map entries for holes,
7345 * which will give memory back to the system faster.
7346 */
7347 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7348 zone_t z = zone_by_id(zid);
7349
7350 if (z->z_self && z->z_chunk_elems == 0) {
7351 zone_reclaim(z, mode);
7352 }
7353 }
7354 zone_index_foreach(zid) {
7355 zone_t z = zone_by_id(zid);
7356
7357 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7358 continue;
7359 }
7360 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7361 z->collectable) {
7362 zone_reclaim(z, mode);
7363 }
7364 }
7365
7366 zone_index_foreach(zid) {
7367 zone_t z = zone_by_id(zid);
7368
7369 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7370 continue;
7371 }
7372 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7373 z->collectable) {
7374 zone_reclaim(z, mode);
7375 }
7376 }
7377
7378 zone_reclaim(zc_magazine_zone, mode);
7379 }
7380
7381 void
7382 zone_userspace_reboot_checks(void)
7383 {
7384 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7385 if (label_zone_size != 0) {
7386 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7387 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7388 }
7389 }
7390
7391 void
7392 zone_gc(zone_gc_level_t level)
7393 {
7394 zone_reclaim_mode_t mode;
7395 zone_t largest_zone = NULL;
7396
7397 switch (level) {
7398 case ZONE_GC_TRIM:
7399 mode = ZONE_RECLAIM_TRIM;
7400 break;
7401 case ZONE_GC_DRAIN:
7402 mode = ZONE_RECLAIM_DRAIN;
7403 break;
7404 case ZONE_GC_JETSAM:
7405 largest_zone = kill_process_in_largest_zone();
7406 mode = ZONE_RECLAIM_TRIM;
7407 break;
7408 }
7409
7410 current_thread()->options |= TH_OPT_ZONE_PRIV;
7411 lck_mtx_lock(&zone_gc_lock);
7412
7413 zone_reclaim_all(mode);
7414
7415 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7416 /*
7417 * If we possibly killed a process, but we're still critical,
7418 * we need to drain harder.
7419 */
7420 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7421 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7422 }
7423
7424 lck_mtx_unlock(&zone_gc_lock);
7425 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7426 }
7427
7428 void
7429 zone_gc_trim(void)
7430 {
7431 zone_gc(ZONE_GC_TRIM);
7432 }
7433
7434 void
7435 zone_gc_drain(void)
7436 {
7437 zone_gc(ZONE_GC_DRAIN);
7438 }
7439
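/*
 * Decides whether an asynchronous trim is worthwhile. For cached zones, the
 * persistent excess of empty (e_n) and full (f_n) recirculation magazines is
 * estimated from the min/WMA pair; a trim is requested when more than
 * zc_autotrim_buckets() empty magazines linger, or when the lingering full
 * magazines hold more elements than the zone's reserve and more bytes than
 * zc_autotrim_size(). Uncached zones trim once their persistent free-element
 * count covers the reserve plus a full chunk's worth of elements.
 */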
7440 static bool
7441 zone_trim_needed(zone_t z)
7442 {
7443 if (z->z_depot_cleanup) {
7444 return true;
7445 }
7446
7447 if (z->z_async_refilling) {
7448 /* Don't fight with refill */
7449 return false;
7450 }
7451
7452 if (z->z_pcpu_cache) {
7453 uint32_t e_n, f_n;
7454
7455 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7456 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7457
7458 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7459 return true;
7460 }
7461
7462 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7463 f_n * zc_mag_size() * zone_elem_inner_size(z) >
7464 zc_autotrim_size() * Z_WMA_UNIT) {
7465 return true;
7466 }
7467
7468 return false;
7469 }
7470
7471 if (!zone_pva_is_null(z->z_pageq_empty)) {
7472 uint32_t n;
7473
7474 n = MIN(z->z_elems_free_wma / Z_WMA_UNIT, z->z_elems_free_min);
7475
7476 return n >= z->z_elems_rsv + z->z_chunk_elems;
7477 }
7478
7479 return false;
7480 }
7481
7482 static void
7483 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7484 {
7485 current_thread()->options |= TH_OPT_ZONE_PRIV;
7486
7487 zone_foreach(z) {
7488 if (!z->collectable || z == zc_magazine_zone) {
7489 continue;
7490 }
7491
7492 if (zone_trim_needed(z)) {
7493 lck_mtx_lock(&zone_gc_lock);
7494 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7495 lck_mtx_unlock(&zone_gc_lock);
7496 }
7497 }
7498
7499 if (zone_trim_needed(zc_magazine_zone)) {
7500 lck_mtx_lock(&zone_gc_lock);
7501 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7502 lck_mtx_unlock(&zone_gc_lock);
7503 }
7504
7505 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7506 }
7507
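/*
 * Periodic working-set update (one pass per ZONE_WSS_UPDATE_PERIOD): folds
 * each zone's per-period minima into their weighted moving averages via
 * Z_WMA_MIX, and smooths the depot contention rate with an exponential
 * average, new = (3 * old + sample) / 4, where the sample is contentions
 * per CPU per second in Z_WMA_UNIT fixed point. The smoothed rate drives
 * the decisions below: grow or shrink a cached zone's per-CPU depot between
 * zc_grow_level() and zc_shrink_level(), enable caching once the rate stays
 * above zc_enable_level(), and schedule zone_trim_callout when
 * zone_trim_needed() says so.
 */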
7508 void
7509 compute_zone_working_set_size(__unused void *param)
7510 {
7511 uint32_t zc_auto = zc_enable_level();
7512 bool needs_trim = false;
7513
7514 /*
7515 * Keep zone caching disabled until the first proc is made.
7516 */
7517 if (__improbable(zone_caching_disabled < 0)) {
7518 return;
7519 }
7520
7521 zone_caching_disabled = vm_pool_low();
7522
7523 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
7524 zc_auto = 0;
7525 }
7526
7527 zone_foreach(z) {
7528 uint32_t old, wma, cur;
7529 bool needs_caching = false;
7530
7531 if (z->z_self != z) {
7532 continue;
7533 }
7534
7535 zone_lock(z);
7536
7537 zone_recirc_lock_nopreempt(z);
7538
7539 if (z->z_pcpu_cache) {
7540 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
7541 z->z_recirc_empty_min = z->z_recirc.zd_empty;
7542 z->z_recirc_empty_wma = wma;
7543 } else {
7544 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
7545 z->z_elems_free_min = z->z_elems_free;
7546 z->z_elems_free_wma = wma;
7547 }
7548
7549 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
7550 z->z_recirc_full_min = z->z_recirc.zd_full;
7551 z->z_recirc_full_wma = wma;
7552
7553 /* fixed point decimal of contentions per second */
7554 old = z->z_recirc_cont_wma;
7555 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
7556 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
7557 cur = (3 * old + cur) / 4;
7558 zone_recirc_unlock_nopreempt(z);
7559
7560 if (z->z_pcpu_cache) {
7561 uint16_t size = z->z_depot_size;
7562
7563 if (zone_exhausted(z)) {
7564 if (z->z_depot_size) {
7565 z->z_depot_size = 0;
7566 z->z_depot_cleanup = true;
7567 }
7568 } else if (size < z->z_depot_limit && cur > zc_grow_level()) {
7569 /*
7570 * lose history on purpose now
7571 * that we just grew, to give
7572 * the system time to adjust.
7573 */
7574 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7575 size = size ? (3 * size + 2) / 2 : 2;
7576 z->z_depot_size = MIN(z->z_depot_limit, size);
7577 } else if (size > 0 && cur <= zc_shrink_level()) {
7578 /*
7579 * lose history on purpose now
7580 * that we just shrunk, to give
7581 * the system time to adjust.
7582 */
7583 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7584 z->z_depot_size = size - 1;
7585 z->z_depot_cleanup = true;
7586 }
7587 } else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
7588 old >= zc_auto && cur >= zc_auto) {
7589 needs_caching = true;
7590 }
7591
7592 z->z_recirc_cont_wma = cur;
7593 z->z_recirc_cont_cur = 0;
7594
7595 if (!needs_trim && zone_trim_needed(z)) {
7596 needs_trim = true;
7597 }
7598
7599 zone_unlock(z);
7600
7601 if (needs_caching) {
7602 zone_enable_caching(z);
7603 }
7604 }
7605
7606 if (needs_trim) {
7607 thread_call_enter(&zone_trim_callout);
7608 }
7609 }
7610
7611 #endif /* !ZALLOC_TEST */
7612 #pragma mark vm integration, MIG routines
7613 #if !ZALLOC_TEST
7614
7615 extern unsigned int stack_total;
7616 #if defined (__x86_64__)
7617 extern unsigned int inuse_ptepages_count;
7618 #endif
7619
7620 static const char *
7621 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
7622 bool is_kt_var)
7623 {
7624 if (is_kt_var) {
7625 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
7626 return cur.ktv_var->kt_name;
7627 } else {
7628 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
7629 return cur.ktv_fixed->kt_zv.zv_name;
7630 }
7631 }
7632
7633 static void
7634 panic_print_types_in_zone(zone_t z, const char* debug_str)
7635 {
7636 kalloc_type_views_t kt_cur = {};
7637 const char *prev_type = "";
7638 size_t skip_over_site = sizeof("site.") - 1;
7639 zone_security_flags_t zsflags = zone_security_config(z);
7640 bool is_kt_var = false;
7641
7642 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
7643 uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
7644 kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
7645 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
7646 is_kt_var = true;
7647 } else {
7648 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
7649 }
7650
7651 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
7652 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
7653
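	/*
	 * kalloc_type_views_t is a union of pointers, so checking ktv_fixed
	 * against NULL terminates the walk for both the fixed and the
	 * variable-size view lists.
	 */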
7654 while (kt_cur.ktv_fixed) {
7655 kalloc_type_views_t kt_next = {};
7656 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
7657 is_kt_var) + skip_over_site;
7658 if (strcmp(typename, prev_type) != 0) {
7659 paniclog_append_noflush("\t%-50s\n", typename);
7660 prev_type = typename;
7661 }
7662 kt_cur = kt_next;
7663 }
7664 paniclog_append_noflush("\n");
7665 }
7666
7667 static void
7668 panic_display_kalloc_types(void)
7669 {
7670 if (kalloc_type_src_zone) {
7671 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7672 }
7673 if (kalloc_type_dst_zone) {
7674 panic_print_types_in_zone(kalloc_type_dst_zone,
7675 "addr is being freed to");
7676 }
7677 }
7678
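/*
 * Best-effort selection of the n largest zones for the panic log: each
 * candidate displaces the first slot it beats rather than being
 * insertion-sorted, so the result is approximate for n > 1 (and exact for
 * n == 1, as used by zone_find_largest()).
 */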
7679 static void
7680 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7681 uint64_t *zone_size)
7682 {
7683 zone_index_foreach(zid) {
7684 zone_t z = &zone_array[zid];
7685 vm_offset_t size = zone_size_wired(z);
7686
7687 if (zid == ZONE_ID_VM_PAGES) {
7688 continue;
7689 }
7690 for (uint32_t i = 0; i < n; i++) {
7691 if (size > zone_size[i]) {
7692 largest_zones[i] = z;
7693 zone_size[i] = size;
7694 break;
7695 }
7696 }
7697 }
7698 }
7699
7700 #define NUM_LARGEST_ZONES 5
7701 static void
7702 panic_display_largest_zones(void)
7703 {
7704 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
7705 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
7706
7707 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
7708 (uint64_t *) &largest_size);
7709
7710 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
7711 "Zone Name", "Cur Size", "Free Size");
7712 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
7713 zone_t z = largest_zones[i];
7714 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
7715 zone_heap_name(z), z->z_name,
7716 mach_vm_size_pretty(largest_size[i]),
7717 mach_vm_size_unit(largest_size[i]),
7718 mach_vm_size_pretty(zone_size_free(z)),
7719 mach_vm_size_unit(zone_size_free(z)));
7720 }
7721 }
7722
7723 static void
7724 panic_display_zprint(void)
7725 {
7726 panic_display_largest_zones();
7727 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
7728 (uintptr_t)(kernel_stack_size * stack_total));
7729 #if defined (__x86_64__)
7730 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
7731 (uintptr_t)ptoa(inuse_ptepages_count));
7732 #endif
7733 paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
7734 counter_load(&kalloc_large_total));
7735
7736 if (panic_kext_memory_info) {
7737 mach_memory_info_t *mem_info = panic_kext_memory_info;
7738
7739 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
7740 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
7741 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
7742 continue;
7743 }
7744 if (mem_info[i].size > (1024 * 1024)) {
7745 paniclog_append_noflush("%-5lld %10lld\n",
7746 mem_info[i].site, mem_info[i].size);
7747 }
7748 }
7749 }
7750 }
7751
7752 static void
7753 panic_display_zone_info(void)
7754 {
7755 paniclog_append_noflush("Zone info:\n");
7756 paniclog_append_noflush(" Zone map: %p - %p\n",
7757 (void *)zone_info.zi_map_range.min_address,
7758 (void *)zone_info.zi_map_range.max_address);
7759 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7760 vm_map_t map = zone_submaps[i];
7761
7762 if (map == VM_MAP_NULL) {
7763 continue;
7764 }
7765 paniclog_append_noflush(" . %-6s: %p - %p\n",
7766 zone_submaps_names[i],
7767 (void *)map->min_offset,
7768 (void *)map->max_offset);
7769 }
7770 paniclog_append_noflush(" Metadata: %p - %p\n"
7771 " Bitmaps : %p - %p\n"
7772 " Extra : %p - %p\n"
7773 "\n",
7774 (void *)zone_info.zi_meta_range.min_address,
7775 (void *)zone_info.zi_meta_range.max_address,
7776 (void *)zone_info.zi_bits_range.min_address,
7777 (void *)zone_info.zi_bits_range.max_address,
7778 (void *)zone_info.zi_xtra_range.min_address,
7779 (void *)zone_info.zi_xtra_range.max_address);
7780 }
7781
7782 static void
7783 panic_display_zone_fault(vm_offset_t addr)
7784 {
7785 struct zone_page_metadata meta = { };
7786 vm_map_t map = VM_MAP_NULL;
7787 vm_offset_t oob_offs = 0, size = 0;
7788 int map_idx = -1;
7789 zone_t z = NULL;
7790 const char *kind = "whild deref";
7791 bool oob = false;
7792
7793 /*
7794 * First: look if we bumped into guard pages between submaps
7795 */
7796 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7797 map = zone_submaps[i];
7798 if (map == VM_MAP_NULL) {
7799 continue;
7800 }
7801
7802 if (addr >= map->min_offset && addr < map->max_offset) {
7803 map_idx = i;
7804 break;
7805 }
7806 }
7807
7808 if (map_idx == -1) {
7809 /* this really shouldn't happen, submaps are back to back */
7810 return;
7811 }
7812
7813 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
7814
7815 /*
7816 * Second: look if there's just no metadata at all
7817 */
7818 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
7819 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
7820 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
7821 zone_array[meta.zm_index].z_self == NULL) {
7822 paniclog_append_noflush(" Zone : <unknown>\n");
7823 kind = "wild deref, missing or invalid metadata";
7824 } else {
7825 z = &zone_array[meta.zm_index];
7826 paniclog_append_noflush(" Zone : %s%s\n",
7827 zone_heap_name(z), zone_name(z));
7828 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
7829 kind = "out-of-bounds (high confidence)";
7830 oob = true;
7831 size = zone_element_size((void *)addr,
7832 &z, false, &oob_offs);
7833 } else {
7834 kind = "use-after-free (medium confidence)";
7835 }
7836 }
7837
7838 paniclog_append_noflush(" Address : %p\n", (void *)addr);
7839 if (oob) {
7840 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
7841 (void *)(trunc_page(addr) - (size - oob_offs)),
7842 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
7843 }
7844 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
7845 zone_submaps_names[map_idx],
7846 (void *)map->min_offset, (void *)map->max_offset);
7847 paniclog_append_noflush(" Kind : %s\n", kind);
7848 if (oob) {
7849 paniclog_append_noflush(" Access : %d byte(s) past\n",
7850 (uint32_t)(addr & PAGE_MASK) + 1);
7851 }
7852 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
7853 "0x%04x 0x%08x 0x%08x 0x%08x\n",
7854 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
7855 meta.zm_alloc_size, meta.zm_bitmap,
7856 meta.zm_page_next.packed_address,
7857 meta.zm_page_prev.packed_address);
7858 paniclog_append_noflush("\n");
7859 }
7860
7861 void
7862 panic_display_zalloc(void)
7863 {
7864 bool keepsyms = false;
7865
7866 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
7867
7868 panic_display_zone_info();
7869
7870 if (panic_fault_address) {
7871 if (zone_maps_owned(panic_fault_address, 1)) {
7872 panic_display_zone_fault(panic_fault_address);
7873 }
7874 }
7875
7876 if (panic_include_zprint) {
7877 panic_display_zprint();
7878 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
7879 panic_display_largest_zones();
7880 }
7881 #if CONFIG_ZLEAKS
7882 if (zleak_active) {
7883 panic_display_zleaks(keepsyms);
7884 }
7885 #endif
7886 if (panic_include_kalloc_types) {
7887 panic_display_kalloc_types();
7888 }
7889 }
7890
7891 /*
7892 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
7893 * requesting zone information.
7894 * Frees unused pages towards the end of the region, and zeroes out unused
7895 * space on the last page.
7896 */
7897 static vm_map_copy_t
7898 create_vm_map_copy(
7899 vm_offset_t start_addr,
7900 vm_size_t total_size,
7901 vm_size_t used_size)
7902 {
7903 kern_return_t kr;
7904 vm_offset_t end_addr;
7905 vm_size_t free_size;
7906 vm_map_copy_t copy;
7907
7908 if (used_size != total_size) {
7909 end_addr = start_addr + used_size;
7910 free_size = total_size - (round_page(end_addr) - start_addr);
7911
7912 if (free_size >= PAGE_SIZE) {
7913 kmem_free(ipc_kernel_map,
7914 round_page(end_addr), free_size);
7915 }
7916 bzero((char *) end_addr, round_page(end_addr) - end_addr);
7917 }
7918
7919 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
7920 (vm_map_size_t)used_size, TRUE, &copy);
7921 assert(kr == KERN_SUCCESS);
7922
7923 return copy;
7924 }
7925
7926 static boolean_t
7927 get_zone_info(
7928 zone_t z,
7929 mach_zone_name_t *zn,
7930 mach_zone_info_t *zi)
7931 {
7932 struct zone zcopy;
7933 vm_size_t cached = 0;
7934
7935 assert(z != ZONE_NULL);
7936 zone_lock(z);
7937 if (!z->z_self) {
7938 zone_unlock(z);
7939 return FALSE;
7940 }
7941 zcopy = *z;
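	/*
	 * Count elements sitting in per-CPU caches and their depots: the zone
	 * considers them allocated, but they are reported as free by
	 * subtracting them from mzi_count below.
	 */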
7942 if (z->z_pcpu_cache) {
7943 zpercpu_foreach(zc, z->z_pcpu_cache) {
7944 cached += zc->zc_alloc_cur + zc->zc_free_cur;
7945 cached += zc->zc_depot.zd_full * zc_mag_size();
7946 }
7947 }
7948 zone_unlock(z);
7949
7950 if (zn != NULL) {
7951 /*
7952 * Append kalloc heap name to zone name (if zone is used by kalloc)
7953 */
7954 char temp_zone_name[MAX_ZONE_NAME] = "";
7955 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
7956 zone_heap_name(z), z->z_name);
7957
7958 /* assuming here the name data is static */
7959 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
7960 strlen(temp_zone_name) + 1);
7961 }
7962
7963 if (zi != NULL) {
7964 *zi = (mach_zone_info_t) {
7965 .mzi_count = zone_count_allocated(&zcopy) - cached,
7966 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
7967 // max_size for zprint is now high-watermark of pages used
7968 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
7969 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
7970 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
7971 .mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
7972 };
7973 if (zcopy.z_chunk_pages == 0) {
7974 /* this is a zcache */
7975 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
7976 }
7977 zpercpu_foreach(zs, zcopy.z_stats) {
7978 zi->mzi_sum_size += zs->zs_mem_allocated;
7979 }
7980 if (zcopy.collectable) {
7981 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
7982 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
7983 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
7984 }
7985 }
7986
7987 return TRUE;
7988 }
7989
7990 /* mach_memory_info entitlement */
7991 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
7992
7993 /* macro needed to rate-limit mach_memory_info */
7994 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
7995
7996 /* declarations necessary to call kauth_cred_issuser() */
7997 struct ucred;
7998 extern int kauth_cred_issuser(struct ucred *);
7999 extern struct ucred *kauth_cred_get(void);
8000
8001 static kern_return_t
8002 mach_memory_info_internal(
8003 host_t host,
8004 mach_zone_name_array_t *namesp,
8005 mach_msg_type_number_t *namesCntp,
8006 mach_zone_info_array_t *infop,
8007 mach_msg_type_number_t *infoCntp,
8008 mach_memory_info_array_t *memoryInfop,
8009 mach_msg_type_number_t *memoryInfoCntp,
8010 bool redact_info);
8011
8012 static kern_return_t
8013 mach_memory_info_security_check(bool redact_info)
8014 {
8015 /* If not root, only allow redacted calls. */
8016 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8017 return KERN_NO_ACCESS;
8018 }
8019
8020 if (research_mode_state() == true) {
8021 return KERN_SUCCESS;
8022 }
8023
8024 /* If does not have the memory entitlement, fail. */
8025 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8026 task_t task = current_task();
8027 if (task != kernel_task && !IOTaskHasEntitlement(task, MEMORYINFO_ENTITLEMENT)) {
8028 return KERN_DENIED;
8029 }
8030
8031 /*
8032 * On release non-mac arm devices, allow mach_memory_info
8033 * to be called twice per day per boot. memorymaintenanced
8034 * calls it once per day, which leaves room for a sysdiagnose.
8035 * Allow redacted version to be called without rate limit.
8036 */
8037
8038 if (!redact_info) {
8039 static uint64_t first_call = 0, second_call = 0;
8040 uint64_t now = 0;
8041 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8042
8043 if (!first_call) {
8044 first_call = now;
8045 } else if (!second_call) {
8046 second_call = now;
8047 } else if (first_call + NSEC_DAY > now) {
8048 return KERN_DENIED;
8049 } else if (first_call + NSEC_DAY < now) {
8050 first_call = now;
8051 second_call = 0;
8052 }
8053 }
8054 #endif
8055
8056 return KERN_SUCCESS;
8057 }
8058
8059 #if DEVELOPMENT || DEBUG
8060
8061 kern_return_t
8062 zone_reset_peak(const char *zonename)
8063 {
8064 unsigned int max_zones;
8065
8066 if (zonename == NULL) {
8067 return KERN_INVALID_ARGUMENT;
8068 }
8069
8070 max_zones = os_atomic_load(&num_zones, relaxed);
8071 for (unsigned int i = 0; i < max_zones; i++) {
8072 zone_t z = &zone_array[i];
8073
8074 if (zone_name(z) &&
8075 track_this_zone(zone_name(z), zonename)) {
8076 /* Found the matching zone */
8077 os_log_info(OS_LOG_DEFAULT,
8078 "zalloc: resetting peak size for zone %s\n", zone_name(z));
8079 zone_lock(z);
8080 z->z_wired_hwm = z->z_wired_cur;
8081 zone_unlock(z);
8082 return KERN_SUCCESS;
8083 }
8084 }
8085 return KERN_NOT_FOUND;
8086 }
8087
8088 kern_return_t
8089 zone_reset_all_peaks(void)
8090 {
8091 unsigned int max_zones;
8092 os_log_info(OS_LOG_DEFAULT, "zalloc: resetting all zone size peaks\n");
8093 max_zones = os_atomic_load(&num_zones, relaxed);
8094 for (unsigned int i = 0; i < max_zones; i++) {
8095 zone_t z = &zone_array[i];
8096 zone_lock(z);
8097 z->z_wired_hwm = z->z_wired_cur;
8098 zone_unlock(z);
8099 }
8100 return KERN_SUCCESS;
8101 }
8102
8103 #endif /* DEVELOPMENT || DEBUG */
8104
8105 kern_return_t
8106 mach_zone_info(
8107 mach_port_t host_port,
8108 mach_zone_name_array_t *namesp,
8109 mach_msg_type_number_t *namesCntp,
8110 mach_zone_info_array_t *infop,
8111 mach_msg_type_number_t *infoCntp)
8112 {
8113 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8114 }
8115
8116 kern_return_t
8117 mach_memory_info(
8118 mach_port_t host_port,
8119 mach_zone_name_array_t *namesp,
8120 mach_msg_type_number_t *namesCntp,
8121 mach_zone_info_array_t *infop,
8122 mach_msg_type_number_t *infoCntp,
8123 mach_memory_info_array_t *memoryInfop,
8124 mach_msg_type_number_t *memoryInfoCntp)
8125 {
8126 bool redact_info = false;
8127 host_t host = HOST_NULL;
8128
8129 host = convert_port_to_host_priv(host_port);
8130 if (host == HOST_NULL) {
8131 redact_info = true;
8132 host = convert_port_to_host(host_port);
8133 }
8134
8135 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8136 }
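/*
 * Illustrative userspace sketch (assumed MIG-generated prototype, in the
 * spirit of tools like zprint(1)); names and error handling are hypothetical:
 *
 *	mach_zone_name_array_t names;
 *	mach_zone_info_array_t info;
 *	mach_memory_info_array_t wired;
 *	mach_msg_type_number_t name_cnt, info_cnt, wired_cnt;
 *
 *	kern_return_t kr = mach_memory_info(mach_host_self(),
 *	    &names, &name_cnt, &info, &info_cnt, &wired, &wired_cnt);
 *
 * Callers presenting only the regular host port (not host-priv) get the
 * redacted view: per-zone sizes are zeroed and kalloc type zones are
 * coalesced into anonymous "kalloc.<size>" entries (see below).
 */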
8137
8138 static void
8139 zone_info_redact(mach_zone_info_t *zi)
8140 {
8141 zi->mzi_cur_size = 0;
8142 zi->mzi_max_size = 0;
8143 zi->mzi_alloc_size = 0;
8144 zi->mzi_sum_size = 0;
8145 zi->mzi_collectable = 0;
8146 }
8147
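/*
 * Redacted-info helpers: in the redacted path, kalloc type zones (fixed and
 * variable) are folded into a single "kalloc.<size>" entry per element size
 * so that individual type names are not reported; the helpers below detect
 * such zones, locate the entry to merge into, and accumulate counts.
 */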
8148 static bool
8149 zone_info_needs_to_be_coalesced(int zone_index)
8150 {
8151 zone_security_flags_t zsflags = zone_security_array[zone_index];
8152 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8153 return true;
8154 }
8155 return false;
8156 }
8157
8158 static bool
8159 zone_info_find_coalesce_zone(
8160 mach_zone_info_t *zi,
8161 mach_zone_info_t *info,
8162 int *coalesce,
8163 int coalesce_count,
8164 int *coalesce_index)
8165 {
8166 for (int i = 0; i < coalesce_count; i++) {
8167 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8168 *coalesce_index = coalesce[i];
8169 return true;
8170 }
8171 }
8172
8173 return false;
8174 }
8175
8176 static void
8177 zone_info_coalesce(
8178 mach_zone_info_t *info,
8179 int coalesce_index,
8180 mach_zone_info_t *zi)
8181 {
8182 info[coalesce_index].mzi_count += zi->mzi_count;
8183 }
8184
8185 kern_return_t
8186 mach_memory_info_sample(
8187 mach_zone_name_t *names,
8188 mach_zone_info_t *info,
8189 int *coalesce,
8190 unsigned int *zonesCnt,
8191 mach_memory_info_t *memoryInfo,
8192 unsigned int memoryInfoCnt,
8193 bool redact_info)
8194 {
8195 int coalesce_count = 0;
8196 unsigned int max_zones, used_zones = 0;
8197 mach_zone_name_t *zn;
8198 mach_zone_info_t *zi;
8199 kern_return_t kr;
8200
8201 uint64_t zones_collectable_bytes = 0;
8202
8203 kr = mach_memory_info_security_check(redact_info);
8204 if (kr != KERN_SUCCESS) {
8205 return kr;
8206 }
8207
8208 max_zones = *zonesCnt;
8209
8210 bzero(names, max_zones * sizeof(*names));
8211 bzero(info, max_zones * sizeof(*info));
8212 if (redact_info) {
8213 bzero(coalesce, max_zones * sizeof(*coalesce));
8214 }
8215
8216 zn = &names[0];
8217 zi = &info[0];
8218
8219 zone_index_foreach(i) {
8220 if (used_zones >= max_zones) {
8221 break;
8222 }
8223
8224 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8225 continue;
8226 }
8227
8228 if (!redact_info) {
8229 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8230 zn++;
8231 zi++;
8232 used_zones++;
8233 continue;
8234 }
8235
8236 zone_info_redact(zi);
8237 if (!zone_info_needs_to_be_coalesced(i)) {
8238 zn++;
8239 zi++;
8240 used_zones++;
8241 continue;
8242 }
8243
8244 int coalesce_index;
8245 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8246 coalesce, coalesce_count, &coalesce_index);
8247
8248 /* Didn't find a zone to coalesce */
8249 if (!found_coalesce_zone) {
8250 /* Updates the zone name */
8251 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8252 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8253 (int)zi->mzi_elem_size);
8254
8255 coalesce[coalesce_count] = used_zones;
8256 coalesce_count++;
8257 zn++;
8258 zi++;
8259 used_zones++;
8260 continue;
8261 }
8262
8263 zone_info_coalesce(info, coalesce_index, zi);
8264 }
8265
8266 *zonesCnt = used_zones;
8267
8268 if (memoryInfo) {
8269 bzero(memoryInfo, memoryInfoCnt * sizeof(*memoryInfo));
8270 kr = vm_page_diagnose(memoryInfo, memoryInfoCnt, zones_collectable_bytes, redact_info);
8271 if (kr != KERN_SUCCESS) {
8272 return kr;
8273 }
8274 }
8275
8276 return kr;
8277 }
8278
8279 static kern_return_t
8280 mach_memory_info_internal(
8281 host_t host,
8282 mach_zone_name_array_t *namesp,
8283 mach_msg_type_number_t *namesCntp,
8284 mach_zone_info_array_t *infop,
8285 mach_msg_type_number_t *infoCntp,
8286 mach_memory_info_array_t *memoryInfop,
8287 mach_msg_type_number_t *memoryInfoCntp,
8288 bool redact_info)
8289 {
8290 mach_zone_name_t *names;
8291 vm_offset_t names_addr;
8292 vm_size_t names_size;
8293
8294 mach_zone_info_t *info;
8295 vm_offset_t info_addr;
8296 vm_size_t info_size;
8297
8298 int *coalesce;
8299 vm_offset_t coalesce_addr;
8300 vm_size_t coalesce_size;
8301
8302 mach_memory_info_t *memory_info = NULL;
8303 vm_offset_t memory_info_addr = 0;
8304 vm_size_t memory_info_size;
8305 vm_size_t memory_info_vmsize;
8306 vm_map_copy_t memory_info_copy;
8307 unsigned int num_info = 0;
8308
8309 unsigned int max_zones, used_zones;
8310 kern_return_t kr;
8311
8312 if (host == HOST_NULL) {
8313 return KERN_INVALID_HOST;
8314 }
8315
8316 /*
8317 * We assume that zones aren't freed once allocated.
8318 * We won't pick up any zones that are allocated later.
8319 */
8320
8321 max_zones = os_atomic_load(&num_zones, relaxed);
8322
8323 names_size = round_page(max_zones * sizeof *names);
8324 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8325 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8326 if (kr != KERN_SUCCESS) {
8327 return kr;
8328 }
8329 names = (mach_zone_name_t *) names_addr;
8330
8331 info_size = round_page(max_zones * sizeof *info);
8332 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8333 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8334 if (kr != KERN_SUCCESS) {
8335 kmem_free(ipc_kernel_map,
8336 names_addr, names_size);
8337 return kr;
8338 }
8339 info = (mach_zone_info_t *) info_addr;
8340
8341 if (redact_info) {
8342 coalesce_size = round_page(max_zones * sizeof *coalesce);
8343 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8344 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8345 if (kr != KERN_SUCCESS) {
8346 kmem_free(ipc_kernel_map,
8347 names_addr, names_size);
8348 kmem_free(ipc_kernel_map,
8349 info_addr, info_size);
8350 return kr;
8351 }
8352 coalesce = (int *)coalesce_addr;
8353 }
8354
8355 if (memoryInfop && memoryInfoCntp) {
8356 num_info = vm_page_diagnose_estimate();
8357 memory_info_size = num_info * sizeof(*memory_info);
8358 memory_info_vmsize = round_page(memory_info_size);
8359 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8360 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8361 if (kr != KERN_SUCCESS) {
8362 return kr;
8363 }
8364
8365 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8366 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8367 assert(kr == KERN_SUCCESS);
8368
8369 memory_info = (mach_memory_info_t *) memory_info_addr;
8370 }
8371
8372 used_zones = max_zones;
8373 mach_memory_info_sample(names, info, coalesce, &used_zones, memory_info, num_info, redact_info);
8374
8375 if (redact_info) {
8376 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8377 }
8378
8379 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8380 *namesCntp = used_zones;
8381
8382 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8383 *infoCntp = used_zones;
8384
8385 if (memoryInfop && memoryInfoCntp) {
8386 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8387 assert(kr == KERN_SUCCESS);
8388
8389 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8390 (vm_map_size_t)memory_info_size, TRUE, &memory_info_copy);
8391 assert(kr == KERN_SUCCESS);
8392
8393 *memoryInfop = (mach_memory_info_t *) memory_info_copy;
8394 *memoryInfoCntp = num_info;
8395 }
8396
8397 return KERN_SUCCESS;
8398 }
8399
8400 kern_return_t
8401 mach_zone_info_for_zone(
8402 host_priv_t host,
8403 mach_zone_name_t name,
8404 mach_zone_info_t *infop)
8405 {
8406 zone_t zone_ptr;
8407
8408 if (host == HOST_NULL) {
8409 return KERN_INVALID_HOST;
8410 }
8411
8412 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8413 if (!PE_i_can_has_debugger(NULL)) {
8414 return KERN_INVALID_HOST;
8415 }
8416 #endif
8417
8418 if (infop == NULL) {
8419 return KERN_INVALID_ARGUMENT;
8420 }
8421
8422 zone_ptr = ZONE_NULL;
8423 zone_foreach(z) {
8424 /*
8425 * Append kalloc heap name to zone name (if zone is used by kalloc)
8426 */
8427 char temp_zone_name[MAX_ZONE_NAME] = "";
8428 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8429 zone_heap_name(z), z->z_name);
8430
8431 /* Find the requested zone by name */
8432 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8433 zone_ptr = z;
8434 break;
8435 }
8436 }
8437
8438 /* No zones found with the requested zone name */
8439 if (zone_ptr == ZONE_NULL) {
8440 return KERN_INVALID_ARGUMENT;
8441 }
8442
8443 if (get_zone_info(zone_ptr, NULL, infop)) {
8444 return KERN_SUCCESS;
8445 }
8446 return KERN_FAILURE;
8447 }
8448
8449 kern_return_t
8450 mach_zone_info_for_largest_zone(
8451 host_priv_t host,
8452 mach_zone_name_t *namep,
8453 mach_zone_info_t *infop)
8454 {
8455 if (host == HOST_NULL) {
8456 return KERN_INVALID_HOST;
8457 }
8458
8459 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8460 if (!PE_i_can_has_debugger(NULL)) {
8461 return KERN_INVALID_HOST;
8462 }
8463 #endif
8464
8465 if (namep == NULL || infop == NULL) {
8466 return KERN_INVALID_ARGUMENT;
8467 }
8468
8469 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8470 return KERN_SUCCESS;
8471 }
8472 return KERN_FAILURE;
8473 }
8474
8475 uint64_t
8476 get_zones_collectable_bytes(void)
8477 {
8478 uint64_t zones_collectable_bytes = 0;
8479 mach_zone_info_t zi;
8480
8481 zone_foreach(z) {
8482 if (get_zone_info(z, NULL, &zi)) {
8483 zones_collectable_bytes +=
8484 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8485 }
8486 }
8487
8488 return zones_collectable_bytes;
8489 }
8490
8491 kern_return_t
8492 mach_zone_get_zlog_zones(
8493 host_priv_t host,
8494 mach_zone_name_array_t *namesp,
8495 mach_msg_type_number_t *namesCntp)
8496 {
8497 #if ZALLOC_ENABLE_LOGGING
8498 unsigned int max_zones, logged_zones, i;
8499 kern_return_t kr;
8500 zone_t zone_ptr;
8501 mach_zone_name_t *names;
8502 vm_offset_t names_addr;
8503 vm_size_t names_size;
8504
8505 if (host == HOST_NULL) {
8506 return KERN_INVALID_HOST;
8507 }
8508
8509 if (namesp == NULL || namesCntp == NULL) {
8510 return KERN_INVALID_ARGUMENT;
8511 }
8512
8513 max_zones = os_atomic_load(&num_zones, relaxed);
8514
8515 names_size = round_page(max_zones * sizeof *names);
8516 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8517 KMA_PAGEABLE | KMA_DATA_SHARED, VM_KERN_MEMORY_IPC);
8518 if (kr != KERN_SUCCESS) {
8519 return kr;
8520 }
8521 names = (mach_zone_name_t *) names_addr;
8522
8523 zone_ptr = ZONE_NULL;
8524 logged_zones = 0;
8525 for (i = 0; i < max_zones; i++) {
8526 zone_t z = &(zone_array[i]);
8527 assert(z != ZONE_NULL);
8528
8529 /* Copy out the zone name if zone logging is enabled */
8530 if (z->z_btlog) {
8531 get_zone_info(z, &names[logged_zones], NULL);
8532 logged_zones++;
8533 }
8534 }
8535
8536 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8537 *namesCntp = logged_zones;
8538
8539 return KERN_SUCCESS;
8540
8541 #else /* ZALLOC_ENABLE_LOGGING */
8542 #pragma unused(host, namesp, namesCntp)
8543 return KERN_FAILURE;
8544 #endif /* ZALLOC_ENABLE_LOGGING */
8545 }
8546
8547 kern_return_t
8548 mach_zone_get_btlog_records(
8549 host_priv_t host,
8550 mach_zone_name_t name,
8551 zone_btrecord_array_t *recsp,
8552 mach_msg_type_number_t *numrecs)
8553 {
8554 #if ZALLOC_ENABLE_LOGGING
8555 zone_btrecord_t *recs;
8556 kern_return_t kr;
8557 vm_address_t addr;
8558 vm_size_t size;
8559 zone_t zone_ptr;
8560 vm_map_copy_t copy;
8561
8562 if (host == HOST_NULL) {
8563 return KERN_INVALID_HOST;
8564 }
8565
8566 if (recsp == NULL || numrecs == NULL) {
8567 return KERN_INVALID_ARGUMENT;
8568 }
8569
8570 zone_ptr = ZONE_NULL;
8571 zone_foreach(z) {
8572 /*
8573 * Append kalloc heap name to zone name (if zone is used by kalloc)
8574 */
8575 char temp_zone_name[MAX_ZONE_NAME] = "";
8576 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8577 zone_heap_name(z), z->z_name);
8578
8579 /* Find the requested zone by name */
8580 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8581 zone_ptr = z;
8582 break;
8583 }
8584 }
8585
8586 /* No zones found with the requested zone name */
8587 if (zone_ptr == ZONE_NULL) {
8588 return KERN_INVALID_ARGUMENT;
8589 }
8590
8591 /* Logging not turned on for the requested zone */
8592 if (!zone_ptr->z_btlog) {
8593 return KERN_FAILURE;
8594 }
8595
8596 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8597 if (kr != KERN_SUCCESS) {
8598 return kr;
8599 }
8600
8601 addr = (vm_address_t)recs;
8602 size = sizeof(zone_btrecord_t) * *numrecs;
8603
8604 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8605 assert(kr == KERN_SUCCESS);
8606
8607 *recsp = (zone_btrecord_t *)copy;
8608 return KERN_SUCCESS;
8609
8610 #else /* !ZALLOC_ENABLE_LOGGING */
8611 #pragma unused(host, name, recsp, numrecs)
8612 return KERN_FAILURE;
8613 #endif /* !ZALLOC_ENABLE_LOGGING */
8614 }
8615
8616
8617 kern_return_t
8618 mach_zone_force_gc(
8619 host_t host)
8620 {
8621 if (host == HOST_NULL) {
8622 return KERN_INVALID_HOST;
8623 }
8624
8625 #if DEBUG || DEVELOPMENT
8626 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8627 /* Callout to buffer cache GC to drop elements in the apfs zones */
8628 if (consider_buffer_cache_collect != NULL) {
8629 (void)(*consider_buffer_cache_collect)(0);
8630 }
8631 zone_gc(ZONE_GC_DRAIN);
8632 #endif /* DEBUG || DEVELOPMENT */
8633 return KERN_SUCCESS;
8634 }
8635
8636 zone_t
8637 zone_find_largest(uint64_t *zone_size)
8638 {
8639 zone_t largest_zone = 0;
8640 uint64_t largest_zone_size = 0;
8641 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8642 if (zone_size) {
8643 *zone_size = largest_zone_size;
8644 }
8645 return largest_zone;
8646 }
8647
8648 void
8649 zone_get_stats(
8650 zone_t zone,
8651 struct zone_basic_stats *stats)
8652 {
8653 stats->zbs_avail = zone->z_elems_avail;
8654
8655 stats->zbs_alloc_fail = 0;
8656 zpercpu_foreach(zs, zone->z_stats) {
8657 stats->zbs_alloc_fail += zs->zs_alloc_fail;
8658 }
8659
8660 stats->zbs_cached = 0;
8661 if (zone->z_pcpu_cache) {
8662 zpercpu_foreach(zc, zone->z_pcpu_cache) {
8663 stats->zbs_cached += zc->zc_alloc_cur +
8664 zc->zc_free_cur +
8665 zc->zc_depot.zd_full * zc_mag_size();
8666 }
8667 }
8668
8669 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
8670
8671 /*
8672 * Since we don't take any locks, deal with possible inconsistencies
8673 * as the counters may have changed.
8674 */
8675 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
8676 &stats->zbs_alloc)) {
8677 stats->zbs_avail = stats->zbs_free;
8678 stats->zbs_alloc = 0;
8679 }
8680 }
8681
8682 #endif /* !ZALLOC_TEST */
8683 #pragma mark zone creation, configuration, destruction
8684 #if !ZALLOC_TEST
8685
8686 static zone_t
8687 zone_init_defaults(zone_id_t zid)
8688 {
8689 zone_t z = &zone_array[zid];
8690
8691 z->z_wired_max = ~0u;
8692 z->collectable = true;
8693
8694 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
8695 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
8696 zone_depot_init(&z->z_recirc);
8697 return z;
8698 }
8699
8700 void
8701 zone_set_exhaustible(zone_t zone, vm_size_t nelems, bool exhausts_by_design)
8702 {
8703 zone_lock(zone);
8704 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8705 zone->z_exhausts = exhausts_by_design;
8706 zone_unlock(zone);
8707 }
8708
8709 void
8710 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
8711 {
8712 zone_t zone = zov.zov_zone;
8713
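	/*
	 * zov may carry either a zone or a zone view: a pointer that does not
	 * land inside zone_array is treated as a view and resolved to its
	 * backing zone.
	 */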
8714 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
8715 zone = zov.zov_view->zv_zone;
8716 } else {
8717 zone = zov.zov_zone;
8718 }
8719
8720 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
8721 }
8722
8723 /**
8724 * @function zone_create_find
8725 *
8726 * @abstract
8727 * Finds an unused zone for the given name and element size.
8728 *
8729 * @param name the zone name
8730 * @param size the element size (including redzones, ...)
8731 * @param flags the flags passed to @c zone_create*
8732 * @param zid_inout the desired zone ID or ZONE_ID_ANY
8733 *
8734 * @returns a zone to initialize further.
8735 */
8736 static zone_t
8737 zone_create_find(
8738 const char *name,
8739 vm_size_t size,
8740 zone_create_flags_t flags,
8741 zone_id_t *zid_inout)
8742 {
8743 zone_id_t nzones, zid = *zid_inout;
8744 zone_t z;
8745
8746 simple_lock(&all_zones_lock, &zone_locks_grp);
8747
8748 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8749 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8750
8751 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8752 /*
8753 * The first time around, make sure the reserved zone IDs
8754 * have an initialized lock as zone_index_foreach() will
8755 * enumerate them.
8756 */
8757 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8758 zone_init_defaults(nzones++);
8759 }
8760
8761 os_atomic_store(&num_zones, nzones, release);
8762 }
8763
8764 if (zid != ZONE_ID_ANY) {
8765 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8766 panic("zone_create: invalid desired zone ID %d for %s",
8767 zid, name);
8768 }
8769 if (flags & ZC_DESTRUCTIBLE) {
8770 panic("zone_create: ID %d (%s) must be permanent", zid, name);
8771 }
8772 if (zone_array[zid].z_self) {
8773 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
8774 }
8775 z = &zone_array[zid];
8776 } else {
8777 if (flags & ZC_DESTRUCTIBLE) {
8778 /*
8779 * If possible, find a previously zdestroy'ed zone in the
8780 * zone_array that we can reuse.
8781 */
8782 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
8783 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
8784 z = &zone_array[i];
8785
8786 /*
8787 * If the zone name and the element size are the
8788 * same, we can just reuse the old zone struct.
8789 */
8790 if (strcmp(z->z_name, name) ||
8791 zone_elem_outer_size(z) != size) {
8792 continue;
8793 }
8794 bitmap_clear(zone_destroyed_bitmap, i);
8795 z->z_destroyed = false;
8796 z->z_self = z;
8797 zid = (zone_id_t)i;
8798 goto out;
8799 }
8800 }
8801
8802 zid = nzones++;
8803 z = zone_init_defaults(zid);
8804
8805 /*
8806 * The release barrier pairs with the acquire in
8807 * zone_index_foreach() and makes sure that enumeration loops
8808 * always see an initialized zone lock.
8809 */
8810 os_atomic_store(&num_zones, nzones, release);
8811 }
8812
8813 out:
8814 num_zones_in_use++;
8815 simple_unlock(&all_zones_lock);
8816
8817 *zid_inout = zid;
8818 return z;
8819 }
8820
8821 __abortlike
8822 static void
8823 zone_create_panic(const char *name, const char *f1, const char *f2)
8824 {
8825 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
8826 name, f1, f2);
8827 }
8828 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
8829 if ((flags) & forbidden_flag) { \
8830 zone_create_panic(name, #current_flag, #forbidden_flag); \
8831 }
8832
8833 /*
8834 * Adjusts the size of the element based on minimum size, alignment
8835 * and kasan redzones
8836 */
8837 static vm_size_t
8838 zone_elem_adjust_size(
8839 const char *name __unused,
8840 vm_size_t elem_size,
8841 zone_create_flags_t flags __unused,
8842 uint16_t *redzone __unused)
8843 {
8844 vm_size_t size;
8845
8846 /*
8847 * Adjust element size for minimum size and pointer alignment
8848 */
8849 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
8850 if (size < ZONE_MIN_ELEM_SIZE) {
8851 size = ZONE_MIN_ELEM_SIZE;
8852 }
8853
8854 #if KASAN_CLASSIC
8855 /*
8856 * Expand the zone allocation size to include the redzones.
8857 *
8858 * For page-multiple zones add a full guard page because they
8859 * likely require alignment.
8860 */
8861 uint16_t redzone_tmp;
8862 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
8863 redzone_tmp = 0;
8864 } else if ((size & PAGE_MASK) == 0) {
8865 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
8866 panic("zone_create: zone %s can't provide more than PAGE_SIZE"
8867 "alignment", name);
8868 }
8869 redzone_tmp = PAGE_SIZE;
8870 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
8871 redzone_tmp = 0;
8872 } else {
8873 redzone_tmp = KASAN_GUARD_SIZE;
8874 }
8875 size += redzone_tmp;
8876 if (redzone) {
8877 *redzone = redzone_tmp;
8878 }
8879 #endif
8880 return size;
8881 }
8882
8883 /*
8884 * Returns the allocation chunk size that has the least fragmentation
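 *
 * For example (illustrative numbers): with 4 KiB pages, a 1200-byte element
 * wastes 4096 % 1200 = 496 bytes (~12%) in a single-page chunk, but only
 * 12288 % 1200 = 288 bytes (~2%) in a 3-page chunk, so the search below
 * prefers a multi-page chunk in that case.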
8885 */
8886 static vm_size_t
8887 zone_get_min_alloc_granule(
8888 vm_size_t elem_size,
8889 zone_create_flags_t flags)
8890 {
8891 vm_size_t alloc_granule = PAGE_SIZE;
8892 if (flags & ZC_PERCPU) {
8893 alloc_granule = PAGE_SIZE * zpercpu_count();
8894 if (PAGE_SIZE % elem_size > 256) {
8895 panic("zone_create: per-cpu zone has too much fragmentation");
8896 }
8897 } else if (flags & ZC_READONLY) {
8898 alloc_granule = PAGE_SIZE;
8899 } else if ((elem_size & PAGE_MASK) == 0) {
8900 /* zero fragmentation by definition */
8901 alloc_granule = elem_size;
8902 } else if (alloc_granule % elem_size == 0) {
8903 /* zero fragmentation by definition */
8904 } else {
8905 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
8906 vm_size_t alloc_tmp = PAGE_SIZE;
8907 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
8908
8909 #if __arm64__
8910 /*
8911 * Increase chunk size to 48K for sizes larger than 4K on 16k
8912 * machines, so as to reduce internal fragmentation for kalloc
8913 * zones with sizes 12K and 24K.
8914 */
8915 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
8916 max_chunk_size = 48 * 1024;
8917 }
8918 #endif
8919 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
8920 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
8921 if (frag_tmp < frag) {
8922 frag = frag_tmp;
8923 alloc_granule = alloc_tmp;
8924 }
8925 }
8926 }
8927 return alloc_granule;
8928 }
8929
8930 vm_size_t
8931 zone_get_early_alloc_size(
8932 const char *name __unused,
8933 vm_size_t elem_size,
8934 zone_create_flags_t flags,
8935 vm_size_t min_elems)
8936 {
8937 vm_size_t adjusted_size, alloc_granule, chunk_elems;
8938
8939 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
8940 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
8941 chunk_elems = alloc_granule / adjusted_size;
8942
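	/* Round the element count up to whole chunks, then convert to bytes. */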
8943 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
8944 }
8945
8946 zone_t
8947 zone_create_ext(
8948 const char *name,
8949 vm_size_t size,
8950 zone_create_flags_t flags,
8951 zone_id_t zid,
8952 void (^extra_setup)(zone_t))
8953 {
8954 zone_security_flags_t *zsflags;
8955 uint16_t redzone;
8956 zone_t z;
8957
8958 if (size > ZONE_MAX_ALLOC_SIZE) {
8959 panic("zone_create: element size too large: %zd", (size_t)size);
8960 }
8961
8962 if (size < 2 * sizeof(vm_size_t)) {
8963 /* Elements are too small for kasan. */
8964 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
8965 }
8966
8967 size = zone_elem_adjust_size(name, size, flags, &redzone);
8968
8969 /*
8970 * Allocate the zone slot, return early if we found an older match.
8971 */
8972 z = zone_create_find(name, size, flags, &zid);
8973 if (__improbable(z->z_self)) {
8974 /* We found a zone to reuse */
8975 return z;
8976 }
8977 zsflags = &zone_security_array[zid];
8978
8979 /*
8980 * Initialize the zone properly.
8981 */
8982
8983 /*
8984 * If the kernel is post lockdown, copy the zone name passed in.
8985 * Else simply maintain a pointer to the name string as it can only
8986 * be a core XNU zone (no unloadable kext exists before lockdown).
8987 */
8988 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
8989 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
8990 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
8991 strlcpy(buf, name, nsz);
8992 z->z_name = buf;
8993 } else {
8994 z->z_name = name;
8995 }
8996 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
8997 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
8998 } else {
8999 /*
9000 * zone_init() hasn't run yet, use the storage provided by
9001 * zone_stats_startup(), and zone_init() will replace it
9002 * with the final value once the PERCPU zone exists.
9003 */
9004 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9005 }
9006
9007 if (flags & ZC_OBJ_CACHE) {
9008 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9009 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9010 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9011 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9012
9013 z->z_elem_size = (uint16_t)size;
9014 z->z_chunk_pages = 0;
9015 z->z_quo_magic = 0;
9016 z->z_align_magic = 0;
9017 z->z_chunk_elems = 0;
9018 z->z_elem_offs = 0;
9019 z->no_callout = true;
9020 zsflags->z_lifo = true;
9021 } else {
9022 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9023
9024 z->z_elem_size = (uint16_t)(size - redzone);
9025 z->z_chunk_pages = (uint16_t)atop(alloc);
9026 z->z_quo_magic = Z_MAGIC_QUO(size);
9027 z->z_align_magic = Z_MAGIC_ALIGNED(size);
9028 if (flags & ZC_PERCPU) {
9029 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9030 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9031 } else {
9032 z->z_chunk_elems = (uint16_t)(alloc / size);
9033 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9034 }
9035 }
9036
9037 /*
9038 * Handle KPI flags
9039 */
9040
9041 /* ZC_CACHING applied after all configuration is done */
9042 if (flags & ZC_NOCACHING) {
9043 z->z_nocaching = true;
9044 }
9045
9046 if (flags & ZC_READONLY) {
9047 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9048 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9049 assert(zid <= ZONE_ID__LAST_RO);
9050 #if ZSECURITY_CONFIG(READ_ONLY)
9051 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9052 #endif
9053 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9054 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9055 assert(size <= PAGE_SIZE);
9056 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9057 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9058 (uint32_t)size, zone_heap_name(z), z->z_name);
9059 }
9060 }
9061
9062 if (flags & ZC_PERCPU) {
9063 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9064 z->z_percpu = true;
9065 }
9066 if (flags & ZC_NOGC) {
9067 z->collectable = false;
9068 }
9069 /*
9070 * Handle ZC_NOENCRYPT from xnu only
9071 */
9072 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9073 zsflags->z_noencrypt = true;
9074 }
9075 if (flags & ZC_NOCALLOUT) {
9076 z->no_callout = true;
9077 }
9078 if (flags & ZC_DESTRUCTIBLE) {
9079 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9080 z->z_destructible = true;
9081 }
9082 /*
9083 * Handle Internal flags
9084 */
9085 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9086 if (flags & (ZC_NO_TBI_TAG)) {
9087 zsflags->z_tag = false;
9088 }
9089
9090 #if KASAN_TBI
9091 /*
9092 * For now, maintain the old behavior of not tagging DATA. Remove this once
9093 * we move to the new DATA-tagging behavior.
9094 */
9095 if (flags & ZC_DATA || flags & ZC_SHARED_DATA) {
9096 zsflags->z_tag = false;
9097 }
9098 #endif /* KASAN_TBI */
9099
9100 #if HAS_MTE
9101 /*
9102 * Read-only allocator currently doesn't support MTE.
9103 * While writing the support would not be too complicated
9104 * (STGs must happen while the write window is open), we should instead
9105 * explore retiring the allocator to win back performance and reduce complexity.
9106 */
9107 if (is_mte_enabled && (flags & ZC_READONLY)) {
9108 zsflags->z_tag = false;
9109 }
9110 #endif /* HAS_MTE */
9111
9112 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9113
9114 if (flags & ZC_KALLOC_TYPE) {
9115 zsflags->z_kalloc_type = true;
9116 }
9117 if (flags & ZC_VM) {
9118 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9119 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9120 }
9121 if (flags & ZC_DATA) {
9122 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9123 #if HAS_MTE
9124 if (!mte_kern_data_enabled()) {
9125 zsflags->z_tag = false;
9126 }
9127 #endif /* HAS_MTE */
9128 }
9129 if (flags & ZC_SHARED_DATA) {
9130 zsflags->z_kheap_id = KHEAP_ID_DATA_SHARED;
9131 #if HAS_MTE
9132 zsflags->z_tag = false;
9133 #endif /* HAS_MTE */
9134 }
9135
9136 #if KASAN_CLASSIC
9137 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9138 z->z_kasan_quarantine = true;
9139 }
9140 z->z_kasan_redzone = redzone;
9141 #endif /* KASAN_CLASSIC */
9142 #if KASAN_FAKESTACK
9143 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9144 z->z_kasan_fakestacks = true;
9145 }
9146 #endif /* KASAN_FAKESTACK */
9147
9148 /*
9149 * Then if there's extra tuning, do it
9150 */
9151 if (extra_setup) {
9152 extra_setup(z);
9153 }
9154
9155 /*
9156 * Configure debugging features
9157 */
9158 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9159 #if ZALLOC_ENABLE_LOGGING
9160 /*
9161 * Check for and set up zone leak detection
9162 * if requested via boot-args.
9163 */
9164 zone_setup_logging(z);
9165 #endif /* ZALLOC_ENABLE_LOGGING */
9166 #if KASAN_TBI
9167 zone_setup_kasan_logging(z);
9168 #endif /* KASAN_TBI */
9169 }
9170
9171 #if VM_TAG_SIZECLASSES
9172 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9173 static uint16_t sizeclass_idx;
9174
9175 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9176 z->z_uses_tags = true;
9177 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9178 /*
9179 * Note that we don't use zone_is_data_kheap() here because we don't
9180 * want to insert the kheap size classes more than once.
9181 */
9182 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9183 z->z_tags_sizeclass = sizeclass_idx++;
9184 } else {
9185 uint16_t i = 0;
9186 for (; i < sizeclass_idx; i++) {
9187 if (size == zone_tags_sizeclasses[i]) {
9188 z->z_tags_sizeclass = i;
9189 break;
9190 }
9191 }
9192
9193 /*
9194 * Size class wasn't found, add it to zone_tags_sizeclasses
9195 */
9196 if (i == sizeclass_idx) {
9197 assert(i < VM_TAG_SIZECLASSES);
9198 zone_tags_sizeclasses[i] = (uint16_t)size;
9199 z->z_tags_sizeclass = sizeclass_idx++;
9200 }
9201 }
9202 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9203 }
9204 #endif
9205
9206 /*
9207 * Finally, fixup properties based on security policies, boot-args, ...
9208 */
9209 if (zone_is_data_kheap(zsflags->z_kheap_id)) {
9210 /*
9211 * We use LIFO in the data map, because workloads like network
9212 * usage or similar tend to rotate through allocations very
9213 * quickly, sometimes with exploding working sets, and using
9214 * a FIFO policy might cause massive TLB thrashing with rather
9215 * dramatic performance impacts.
9216 */
9217 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9218 zsflags->z_lifo = true;
9219 }
9220
9221 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9222 /*
9223 * No zone made before zone_init() can have ZC_CACHING set.
9224 */
9225 assert(zc_magazine_zone);
9226 zone_enable_caching(z);
9227 }
9228
9229 zone_lock(z);
9230 z->z_self = z;
9231 zone_unlock(z);
9232
9233 return z;
9234 }
9235
9236 void
9237 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9238 {
9239 zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9240 }
9241
9242 zone_id_t
9243 zone_get_sig_eq(zone_t zone)
9244 {
9245 return zone_security_array[zone_index(zone)].z_sig_eq;
9246 }
9247
9248 __mockable void
9249 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9250 {
9251 /* moving to SMR must be done before the zone has ever been used */
9252 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9253 assert(!zone_security_array[zone_index(zone)].z_lifo);
9254 assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9255
9256 if (!zone->z_pcpu_cache) {
9257 zone_enable_caching(zone);
9258 }
9259
9260 zone_lock(zone);
9261
9262 zpercpu_foreach(it, zone->z_pcpu_cache) {
9263 it->zc_smr = smr;
9264 it->zc_free = free_cb;
9265 }
9266 zone->z_smr = true;
9267
9268 zone_unlock(zone);
9269 }
9270
9271 __startup_func
9272 void
9273 zone_create_startup(struct zone_create_startup_spec *spec)
9274 {
9275 zone_t z;
9276
9277 z = zone_create_ext(spec->z_name, spec->z_size,
9278 spec->z_flags, spec->z_zid, spec->z_setup);
9279 if (spec->z_var) {
9280 *spec->z_var = z;
9281 }
9282 }
9283
9284 /*
9285 * The first 4 fields of a zone_view and a zone alias each other, so that
9286 * the zone_or_view_t union works. Trust but verify.
9287 */
9288 #define zalloc_check_zov_alias(f1, f2) \
9289 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9290 zalloc_check_zov_alias(z_self, zv_zone);
9291 zalloc_check_zov_alias(z_stats, zv_stats);
9292 zalloc_check_zov_alias(z_name, zv_name);
9293 zalloc_check_zov_alias(z_views, zv_next);
9294 #undef zalloc_check_zov_alias
9295
9296 __startup_func
9297 void
9298 zone_view_startup_init(struct zone_view_startup_spec *spec)
9299 {
9300 struct kalloc_heap *heap = NULL;
9301 zone_view_t zv = spec->zv_view;
9302 zone_t z;
9303 zone_security_flags_t zsflags;
9304
9305 switch (spec->zv_heapid) {
9306 case KHEAP_ID_DATA_BUFFERS:
9307 heap = KHEAP_DATA_BUFFERS;
9308 break;
9309 case KHEAP_ID_DATA_SHARED:
9310 heap = KHEAP_DATA_SHARED;
9311 break;
9312 default:
9313 heap = NULL;
9314 }
9315
9316 if (heap) {
9317 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9318 } else {
9319 z = *spec->zv_zone;
9320 assert(spec->zv_size <= zone_elem_inner_size(z));
9321 }
9322
9323 assert(z);
9324
9325 zv->zv_zone = z;
9326 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9327 zv->zv_next = z->z_views;
9328 zsflags = zone_security_config(z);
9329 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9330 /*
9331 * count the raw view for zones not in a heap,
9332 * kalloc_heap_init() already counts it for its members.
9333 */
9334 zone_view_count += 2;
9335 } else {
9336 zone_view_count += 1;
9337 }
9338 z->z_views = zv;
9339 }
9340
9341 zone_t
9342 zone_create(
9343 const char *name,
9344 vm_size_t size,
9345 zone_create_flags_t flags)
9346 {
9347 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9348 }
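/*
 * Illustrative usage sketch for zone_create() (the zone name and the
 * element type are hypothetical, not taken from real callers):
 *
 *	zone_t widget_zone = zone_create("example.widget",
 *	    sizeof(struct widget), ZC_NONE);
 *
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(widget_zone, w);
 */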
9349
9350 vm_size_t
9351 zone_get_elem_size(zone_t zone)
9352 {
9353 return zone->z_elem_size;
9354 }
9355
9356 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9357
9358 zone_id_t
9359 zone_create_ro(
9360 const char *name,
9361 vm_size_t size,
9362 zone_create_flags_t flags,
9363 zone_create_ro_id_t zc_ro_id)
9364 {
9365 assert(zc_ro_id <= ZC_RO_ID__LAST);
9366 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9367 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9368 return reserved_zid;
9369 }
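/*
 * Illustrative usage sketch for read-only zones, mirroring
 * zone_ro_basic_test_run() below (the element type, the template value
 * and ZC_RO_ID_EXAMPLE are hypothetical placeholders; the latter stands
 * for one of the reserved zone_create_ro_id_t values):
 *
 *	zone_id_t zid = zone_create_ro("example.ro_elem",
 *	    sizeof(struct ro_elem), ZC_NONE, ZC_RO_ID_EXAMPLE);
 *	struct ro_elem *p = zalloc_ro(zid, Z_WAITOK);
 *	zalloc_ro_update_elem(zid, p, &template);
 *	zfree_ro(zid, p);
 */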
9370
9371 zone_t
9372 zinit(
9373 vm_size_t size, /* the size of an element */
9374 vm_size_t max __unused, /* maximum memory to use */
9375 vm_size_t alloc __unused, /* allocation size */
9376 const char *name) /* a name for the zone */
9377 {
9378 return zone_create(name, size, ZC_DESTRUCTIBLE);
9379 }
9380
9381 void
9382 zdestroy(zone_t z)
9383 {
9384 unsigned int zindex = zone_index(z);
9385 zone_security_flags_t zsflags = zone_security_array[zindex];
9386
9387 current_thread()->options |= TH_OPT_ZONE_PRIV;
9388 lck_mtx_lock(&zone_gc_lock);
9389
9390 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9391
9392 lck_mtx_unlock(&zone_gc_lock);
9393 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9394
9395 zone_lock(z);
9396
9397 if (!zone_submap_is_sequestered(zsflags)) {
9398 while (!zone_pva_is_null(z->z_pageq_va)) {
9399 struct zone_page_metadata *meta;
9400
9401 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9402 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9403 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9404 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9405 zone_unlock(z);
9406 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9407 ptoa(z->z_chunk_pages));
9408 zone_lock(z);
9409 }
9410 }
9411
9412 #if !KASAN_CLASSIC
9413 /* Assert that all counts are zero */
9414 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9415 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9416 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9417 zone_heap_name(z), z->z_name);
9418 }
9419
9420 /* consistency check: make sure everything is indeed empty */
9421 assert(zone_pva_is_null(z->z_pageq_empty));
9422 assert(zone_pva_is_null(z->z_pageq_partial));
9423 assert(zone_pva_is_null(z->z_pageq_full));
9424 if (!zone_submap_is_sequestered(zsflags)) {
9425 assert(zone_pva_is_null(z->z_pageq_va));
9426 }
9427 #endif
9428
9429 zone_unlock(z);
9430
9431 simple_lock(&all_zones_lock, &zone_locks_grp);
9432
9433 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9434 /* Mark the zone as empty in the bitmap */
9435 bitmap_set(zone_destroyed_bitmap, zindex);
9436 num_zones_in_use--;
9437 assert(num_zones_in_use > 0);
9438
9439 simple_unlock(&all_zones_lock);
9440 }
9441
9442 #endif /* !ZALLOC_TEST */
9443 #pragma mark zalloc module init
9444 #if !ZALLOC_TEST
9445
9446 /*
9447 * Initialize the "zone of zones" which uses fixed memory allocated
9448 * earlier in memory initialization. zone_bootstrap is called
9449 * before zone_init.
9450 */
9451 __startup_func
9452 void
9453 zone_bootstrap(void)
9454 {
9455 #if DEBUG || DEVELOPMENT
9456 #if __x86_64__
9457 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9458 /*
9459 * rdar://79781535: Disable early gaps while running kernPOST on Intel;
9460 * the fp faulting code gets triggered and deadlocks.
9461 */
9462 zone_caching_disabled = 1;
9463 }
9464 #endif /* __x86_64__ */
9465 #endif /* DEBUG || DEVELOPMENT */
9466
9467 /* Validate struct zone_packed_virtual_address expectations */
9468 #ifndef __BUILDING_XNU_LIBRARY__ /* user-mode addresses are low */
9469 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9470 #endif /* __BUILDING_XNU_LIBRARY__ */
9471 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9472 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9473 }
9474
9475 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9476 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9477 /*
9478 * Scale zc_mag_size() per machine.
9479 *
9480 * - wide machines get 128B magazines to avoid all false sharing
9481 * - smaller machines but with enough RAM get a bit bigger
9482 * buckets (empirically affects networking performance)
9483 */
9484 if (zpercpu_early_count >= 10) {
9485 _zc_mag_size = 14;
9486 } else if ((sane_size >> 30) >= 4) {
9487 _zc_mag_size = 10;
9488 }
9489 }
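/*
 * Concrete instances of the heuristic above (machine shapes are
 * hypothetical): a machine with 10 or more CPUs gets 14-element
 * magazines (the "128B magazines" case from the comment), a 2-CPU
 * device with 4 GB or more of RAM gets 10-element magazines, and
 * everything else keeps the default zc_mag_size().
 */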
9490
9491 /*
9492 * Initialize random used to scramble early allocations
9493 */
9494 zpercpu_foreach_cpu(cpu) {
9495 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9496 }
9497
9498 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9499 /*
9500 * Randomly assign zones to one of the 4 general submaps,
9501 * and pick whether they allocate from the beginning
9502 * or the end of it.
9503 *
9504 * A lot of OOB exploitation relies on precise interleaving
9505 * of specific types in the heap.
9506 *
9507 * Woops, you can't guarantee that anymore.
9508 */
9509 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9510 uint32_t r = zalloc_random_uniform32(0,
9511 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9512
9513 zone_security_array[i].z_submap_from_end = (r & 1);
9514 zone_security_array[i].z_submap_idx += (r >> 1);
9515 }
9516 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
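/*
 * Example of a single draw above (values hypothetical): with 4 general
 * submaps, r is uniform in [0, 8); r = 5 yields z_submap_from_end = 1
 * and bumps z_submap_idx by 2, e.g. from Z_SUBMAP_IDX_GENERAL_0 to
 * Z_SUBMAP_IDX_GENERAL_2, allocating from the end of that submap.
 */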
9517
9518 #if HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING)
9519 /*
9520 * If MTE is disabled, we want all zones to have tagging disabled.
9521 * Loop here disabling tagging across the board, ahead of early boot
9522 * zone operations and, of course, lockdown.
9523 */
9524 if (!is_mte_enabled) {
9525 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9526 zone_security_array[i].z_tag = false;
9527 }
9528 }
9529 #endif /* HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING) */
9530
9531 thread_call_setup_with_options(&zone_expand_callout,
9532 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9533 THREAD_CALL_OPTIONS_ONCE);
9534
9535 thread_call_setup_with_options(&zone_trim_callout,
9536 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9537 THREAD_CALL_OPTIONS_ONCE);
9538 }
9539
9540 #define ZONE_GUARD_SIZE (64UL << 10)
9541
9542 __startup_func
9543 static void
9544 zone_tunables_fixup(void)
9545 {
9546 int wdt = 0;
9547
9548 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9549 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9550 }
9551 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9552 !PE_parse_boot_argn("zet", NULL, 0)) {
9553 zone_exhausted_timeout = -1;
9554 }
9555 }
9556 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9557
9558 /** Get the left zone guard size for the submap at IDX */
9559 __pure2
9560 __startup_func
9561 static vm_map_size_t
9562 zone_submap_left_guard_size(zone_submap_idx_t __unused idx)
9563 {
9564 return ZONE_GUARD_SIZE / 2;
9565 }
9566
9567 /** Get the right zone guard size for the submap at IDX */
9568 __pure2
9569 __startup_func
9570 static vm_map_size_t
9571 zone_submap_right_guard_size(zone_submap_idx_t __unused idx)
9572 {
9573 return ZONE_GUARD_SIZE / 2;
9574 }
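/*
 * With ZONE_GUARD_SIZE at 64KB, every submap currently gets a 32KB
 * guard on each side, so zone_submap_init() below ends up with
 * submap_actual_size = submap_usable_size + 64KB (the read-only submap
 * may grow further to satisfy its alignment requirements).
 */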
9575
9576 __startup_func
9577 static void
9578 zone_submap_init(
9579 mach_vm_offset_t *submap_min,
9580 zone_submap_idx_t idx,
9581 uint64_t zone_sub_map_numer,
9582 uint64_t *remaining_denom,
9583 vm_offset_t *remaining_size)
9584 {
9585 vm_map_create_options_t vmco;
9586 vm_map_address_t addr;
9587 vm_offset_t submap_start, submap_end;
9588 vm_size_t submap_actual_size, submap_usable_size;
9589 vm_map_t submap;
9590 vm_map_size_t left_guard_size = 0, right_guard_size = 0;
9591 vm_prot_t prot = VM_PROT_DEFAULT;
9592 vm_prot_t prot_max = VM_PROT_ALL;
9593 kern_return_t kr;
9594
9595 submap_usable_size =
9596 zone_sub_map_numer * *remaining_size / *remaining_denom;
9597 submap_usable_size = trunc_page(submap_usable_size);
9598
9599 submap_start = *submap_min;
9600
9601 left_guard_size = zone_submap_left_guard_size(idx);
9602 right_guard_size = zone_submap_right_guard_size(idx);
9603
9604 /*
9605 * Compute the final submap size.
9606 *
9607 * The usable size does not include the zone guards, so add them now. This
9608 * VA is paid for in zone_init ahead of time.
9609 */
9610
9611 submap_actual_size =
9612 submap_usable_size + left_guard_size + right_guard_size;
9613
9614 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9615 /*
9616 * The RO zone has special alignment requirements, so snap to the
9617 * required boundary and reflow based on the available space.
9618 *
9619 * This operation only increases the amount of VA used by the submap,
9620 * and so the guards will always still fit.
9621 */
9622 vm_offset_t submap_padding = 0;
9623
9624 submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9625 submap_start += submap_padding;
9626
9627 submap_actual_size = pmap_ro_zone_align(submap_actual_size);
9628 submap_usable_size =
9629 submap_actual_size - left_guard_size - right_guard_size;
9630
9631 assert(*remaining_size >= (submap_padding + submap_usable_size));
9632
9633 *remaining_size -= submap_padding;
9634 *submap_min = submap_start;
9635 }
9636
9637 submap_end = submap_start + submap_actual_size;
9638
9639 if (idx == Z_SUBMAP_IDX_VM) {
9640 vm_packing_verify_range("vm_compressor",
9641 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9642 vm_packing_verify_range("vm_page",
9643 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9644
9645 #if MACH_ASSERT
9646 /*
9647 * vm_submap_restriction_size_debug gives the size passed to the kmem
9648 * claim placer to ensure that the packing behaves correctly. If this
9649 * size is smaller than what we actually end up using for the VM submap,
9650 * the packing may be probabilistically invalid. Assert on this
9651 * condition to catch this type of failure deterministically rather than
9652 * relying on the above assertions catching it when we actually hit that
9653 * rare case and the packing is invalid.
9654 */
9655 assert(submap_actual_size <= vm_submap_restriction_size_debug);
9656 #endif /* MACH_ASSERT */
9657 }
9658
9659 vmco = VM_MAP_CREATE_NEVER_FAULTS;
9660 if (!zone_submap_is_sequestered(idx)) {
9661 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9662 }
9663
9664 vm_map_will_allocate_early_map(&zone_submaps[idx]);
9665 submap = kmem_suballoc(kernel_map, submap_min, submap_actual_size, vmco,
9666 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9667 KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT,
9668 VM_KERN_MEMORY_ZONE).kmr_submap;
9669
9670 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9671 zone_info.zi_ro_range.min_address = submap_start;
9672 zone_info.zi_ro_range.max_address = submap_end;
9673 prot_max = prot = VM_PROT_NONE;
9674 }
9675
9676 addr = submap_start;
9677 #if ZSECURITY_CONFIG(ZONE_TAGGING) && HAS_MTE
9678 boolean_t zone_alloc_mte_pages = is_mte_enabled;
9679
9680 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(
9681 .vmkf_no_soft_limit = true,
9682 .vm_tag = VM_KERN_MEMORY_ZONE,
9683 .vmf_mte = zone_alloc_mte_pages ? true : false);
9684 vm_object_t kobject = zone_alloc_mte_pages ? kernel_object_tagged : kernel_object_default;
9685
9686 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) && HAS_MTE */
9687 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(
9688 .vmkf_no_soft_limit = true,
9689 .vm_tag = VM_KERN_MEMORY_ZONE);
9690 vm_object_t kobject = kernel_object_default;
9691 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) && HAS_MTE */
9692
9693 kr = vm_map_enter(submap, &addr, left_guard_size, 0,
9694 vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9695 if (kr != KERN_SUCCESS) {
9696 panic("ksubmap[%s]: failed to make first entry (%d)",
9697 zone_submaps_names[idx], kr);
9698 }
9699
9700 addr = submap_end - right_guard_size;
9701 kr = vm_map_enter(submap, &addr, right_guard_size, 0,
9702 vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9703 if (kr != KERN_SUCCESS) {
9704 panic("ksubmap[%s]: failed to make last entry (%d)",
9705 zone_submaps_names[idx], kr);
9706 }
9707
9708 #if DEBUG || DEVELOPMENT
9709 printf("zone_init: map %-5s %p:%p (%u%c, %u%c usable)\n",
9710 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9711 mach_vm_size_pretty(submap_actual_size),
9712 mach_vm_size_unit(submap_actual_size),
9713 mach_vm_size_pretty(submap_usable_size),
9714 mach_vm_size_unit(submap_usable_size));
9715 #endif /* DEBUG || DEVELOPMENT */
9716
9717 zone_submaps[idx] = submap;
9718 *submap_min = submap_end;
9719 *remaining_size -= submap_usable_size;
9720 *remaining_denom -= zone_sub_map_numer;
9721 }
9722
9723 static inline void
9724 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9725 {
9726 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9727 pva->packed_address += delta;
9728 }
9729 }
9730
9731 /*
9732 * Allocate metadata array and migrate bootstrap initial metadata and memory.
9733 */
9734 __startup_func
9735 static void
9736 zone_metadata_init(void)
9737 {
9738 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9739 vm_map_entry_t first;
9740
9741 vmlp_api_start(ZONE_METADATA_INIT);
9742
9743 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
9744 vm_size_t early_sz;
9745 vm_offset_t reloc_base;
9746
9747 /*
9748 * Step 1: Allocate the metadata + bitmaps range
9749 *
9750 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
9751 * of physical memory (16M per 1G).
9752 *
9753 * Let's preallocate for the worst to avoid weird panics.
9754 */
9755 vm_map_will_allocate_early_map(&zone_meta_map);
9756 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9757 zone_meta_size + zone_bits_size + zone_xtra_size,
9758 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9759 VM_KERN_MEMORY_ZONE, &zone_meta_map);
9760 meta_r.min_address += ZONE_GUARD_SIZE;
9761 meta_r.max_address -= ZONE_GUARD_SIZE;
9762 if (zone_xtra_size) {
9763 xtra_r.max_address = meta_r.max_address;
9764 meta_r.max_address -= zone_xtra_size;
9765 xtra_r.min_address = meta_r.max_address;
9766 } else {
9767 xtra_r.min_address = xtra_r.max_address = 0;
9768 }
9769 bits_r.max_address = meta_r.max_address;
9770 meta_r.max_address -= zone_bits_size;
9771 bits_r.min_address = meta_r.max_address;
9772
9773 #if DEBUG || DEVELOPMENT
9774 printf("zone_init: metadata %p:%p (%u%c)\n",
9775 (void *)meta_r.min_address, (void *)meta_r.max_address,
9776 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
9777 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
9778 printf("zone_init: metabits %p:%p (%u%c)\n",
9779 (void *)bits_r.min_address, (void *)bits_r.max_address,
9780 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
9781 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
9782 printf("zone_init: extra %p:%p (%u%c)\n",
9783 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
9784 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
9785 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
9786 #endif /* DEBUG || DEVELOPMENT */
9787
9788 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9789 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
9790
9791 /*
9792 * Step 2: Install new ranges.
9793 * Relocate metadata and bits.
9794 */
9795 early_r = zone_info.zi_map_range;
9796 early_sz = mach_vm_range_size(&early_r);
9797
9798 zone_info.zi_map_range = zone_map_range;
9799 zone_info.zi_meta_range = meta_r;
9800 zone_info.zi_bits_range = bits_r;
9801 zone_info.zi_xtra_range = xtra_r;
9802 zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
9803 (struct zone_page_metadata *)meta_r.min_address,
9804 -(ptrdiff_t)zone_pva_from_addr(zone_map_range.min_address).packed_address);
9805
9806 vm_map_lock(vm_map);
9807 first = vm_map_first_entry(vm_map);
9808 reloc_base = first->vme_end;
9809 first->vme_end += early_sz;
9810 vm_map->size += early_sz;
9811 vm_map_unlock(vm_map);
9812
9813 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
9814 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
9815 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
9816 /* this needs to sign extend */
9817 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
9818
9819 zone_meta_populate(reloc_base, early_sz);
9820 memcpy(new_meta, early_meta,
9821 atop(early_sz) * sizeof(struct zone_page_metadata));
9822 for (uint32_t i = 0; i < atop(early_sz); i++) {
9823 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
9824 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
9825 }
9826
9827 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
9828 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
9829
9830 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
9831 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
9832 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
9833 }
9834
9835 zba_populate(0, false);
9836 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
9837 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
9838
9839 /*
9840 * Step 3: Relocate the bootstrap VM structs
9841 * (including rewriting their content).
9842 */
9843 kma_flags_t flags = KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL;
9844
9845 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9846 flags |= KMA_TAG;
9847 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9848
9849 #if HAS_MTE
9850 /* This is temporary for testing/bringup: fully disable MTE */
9851 if (!is_mte_enabled) {
9852 flags &= ~KMA_TAG;
9853 }
9854 #endif /* HAS_MTE */
9855
9856 kernel_memory_populate(reloc_base, early_sz, flags,
9857 VM_KERN_MEMORY_OSFMK);
9858
9859 vm_memtag_disable_checking();
9860 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
9861 vm_memtag_enable_checking();
9862
9863 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9864 vm_memtag_relocate_tags(reloc_base, early_r.min_address, early_sz);
9865 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9866
9867 #if KASAN
9868 kasan_notify_address(reloc_base, early_sz);
9869 #endif /* KASAN */
9870
9871 vm_map_relocate_early_maps(reloc_delta);
9872
9873 for (uint32_t i = 0; i < atop(early_sz); i++) {
9874 zone_id_t zid = new_meta[i].zm_index;
9875 zone_t z = &zone_array[zid];
9876 vm_size_t esize = zone_elem_outer_size(z);
9877 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
9878 vm_address_t addr;
9879
9880 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
9881 continue;
9882 }
9883
9884 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
9885 if (zone_meta_is_free(&new_meta[i], eidx)) {
9886 continue;
9887 }
9888
9889 addr = vm_memtag_load_tag(base + eidx * esize);
9890 #if KASAN_CLASSIC
9891 kasan_alloc(addr,
9892 zone_elem_inner_size(z), zone_elem_inner_size(z),
9893 zone_elem_redzone(z), false,
9894 __builtin_frame_address(0));
9895 #endif
9896 vm_map_relocate_early_elem(zid, addr, reloc_delta);
9897 }
9898 }
9899
9900 #if HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING)
9901 if (is_mte_enabled) {
9902 zone_early_range = early_r;
9903 }
9904 #endif /* HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING) */
9905 vmlp_api_end(ZONE_METADATA_INIT, 0);
9906 }
9907
9908 #if HAS_MTE
9909 static void
9910 zone_early_alloc_cleanup(void)
9911 {
9912 vm_size_t early_sz = mach_vm_range_size(&zone_early_range);
9913
9914 if (early_sz) {
9915 ml_static_mfree(zone_early_range.min_address, early_sz);
9916 }
9917 }
9918 STARTUP(ZALLOC, STARTUP_RANK_LAST, zone_early_alloc_cleanup);
9919 #endif /* HAS_MTE */
9920
9921 __startup_data
9922 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
9923 #if ZSECURITY_CONFIG(READ_ONLY)
9924 [Z_SUBMAP_IDX_VM] = 15,
9925 [Z_SUBMAP_IDX_READ_ONLY] = 5,
9926 #else
9927 [Z_SUBMAP_IDX_VM] = 20,
9928 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
9929 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9930 [Z_SUBMAP_IDX_GENERAL_0] = 15,
9931 [Z_SUBMAP_IDX_GENERAL_1] = 15,
9932 [Z_SUBMAP_IDX_GENERAL_2] = 15,
9933 [Z_SUBMAP_IDX_GENERAL_3] = 15,
9934 [Z_SUBMAP_IDX_DATA] = 20,
9935 #else
9936 [Z_SUBMAP_IDX_GENERAL_0] = 60,
9937 [Z_SUBMAP_IDX_DATA] = 20,
9938 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9939 };
9940
9941 __startup_func
9942 static inline uint16_t
9943 zone_submap_ratios_denom(void)
9944 {
9945 uint16_t denom = 0;
9946
9947 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9948 denom += submap_ratios[idx];
9949 }
9950
9951 assert(denom == 100);
9952
9953 return denom;
9954 }
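/*
 * Worked example of the split (the total VA budget is hypothetical):
 * with the SAD_FENG_SHUI + READ_ONLY table above and a 64 GB
 * ZONE_MAP_VA_SIZE, the denominator is 100, so the VM submap gets
 * ~9.6 GB (15%), the read-only submap ~3.2 GB (5%), each of the four
 * general submaps ~9.6 GB (15%) and the data submap ~12.8 GB (20%),
 * before the guards are carved out in zone_init().
 */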
9955
9956 __startup_func
9957 static inline vm_offset_t
9958 zone_restricted_va_max(void)
9959 {
9960 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
9961 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
9962
9963 return trunc_page(MIN(compressor_max, vm_page_max));
9964 }
9965
9966 __startup_func
9967 static void
9968 zone_set_map_sizes(void)
9969 {
9970 vm_size_t zsize;
9971 vm_size_t zsizearg;
9972
9973 /*
9974 * Compute the physical limits for the zone map
9975 */
9976
9977 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
9978 zsize = zsizearg * (1024ULL * 1024);
9979 } else {
9980 /* Set target zone size as 1/4 of physical memory */
9981 zsize = (vm_size_t)(sane_size >> 2);
9982 zsize += zsize >> 1;
9983 }
9984
9985 if (zsize < CONFIG_ZONE_MAP_MIN) {
9986 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
9987 }
9988 if (zsize > sane_size >> 1) {
9989 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
9990 }
9991 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
9992 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
9993 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
9994 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
9995 zsize = ZONE_MAP_MAX;
9996 }
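/*
 * Worked example of the sizing above (hypothetical machine, no "zsize"
 * boot-arg): with sane_size = 8 GB, zsize starts at 2 GB (a quarter of
 * RAM) and grows by half to 3 GB; that is below the half-of-RAM clamp
 * (4 GB), so 3 GB stands, subject to the CONFIG_ZONE_MAP_MIN floor and
 * the ZONE_MAP_MAX ceiling for the platform.
 */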
9997
9998 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
9999
10000
10001 /*
10002 * Declare restrictions on zone max
10003 */
10004 vm_offset_t vm_submap_size = round_page(
10005 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10006 zone_submap_ratios_denom()) +
10007 zone_submap_left_guard_size(Z_SUBMAP_IDX_VM) +
10008 zone_submap_right_guard_size(Z_SUBMAP_IDX_VM);
10009
10010 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10011 &zone_map_range.min_address)) {
10012 zone_map_range.min_address = 0;
10013 }
10014
10015 #if MACH_ASSERT
10016 vm_submap_restriction_size_debug = vm_submap_size;
10017 #endif /* MACH_ASSERT */
10018
10019 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10020 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10021
10022 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10023 ZBA_PTR_MASK + 1);
10024 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10025 (CHAR_BIT * KALLOC_MINSIZE));
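/*
 * Worked example for zone_bits_size (the constants are
 * platform-dependent; the values here are hypothetical): with
 * KALLOC_MINSIZE = 16 and a 4 GB wired cap, the free-element bitmaps
 * need at most 4 GB / (8 * 16) = 32 MB of VA.
 */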
10026
10027 #if VM_TAG_SIZECLASSES
10028 if (zone_tagging_on) {
10029 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10030 }
10031 if (zba_xtra_shift) {
10032 /*
10033 * if we need the extra space range, then limit the size of the
10034 * bitmaps to something reasonable instead of a theoretical
10035 * worst case scenario of all zones being for the smallest
10036 * allocation granule, in order to avoid fake VA pressure on
10037 * other parts of the system.
10038 */
10039 zone_bits_size = round_page(zone_bits_size / 8);
10040 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10041 }
10042 #endif /* VM_TAG_SIZECLASSES */
10043 }
10044 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10045
10046 /*
10047 * Can't use zone_info.zi_map_range at this point as it is being used to
10048 * store the range of early pmap memory that was stolen to bootstrap the
10049 * necessary VM zones.
10050 */
10051 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10052 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10053 return zone_meta_size + zone_bits_size + zone_xtra_size;
10054 });
10055
10056 /*
10057 * Global initialization of Zone Allocator.
10058 * Runs after zone_bootstrap.
10059 */
10060 __startup_func
10061 static void
10062 zone_init(void)
10063 {
10064 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
10065 mach_vm_offset_t submap_min = 0;
10066 uint64_t denom = zone_submap_ratios_denom();
10067 /*
10068 * And now allocate the various pieces of VA and submaps.
10069 */
10070
10071 submap_min = zone_map_range.min_address;
10072
10073 #ifndef __BUILDING_XNU_LIB_UNITTEST__ /* zone submap is not maintained in unit-test */
10074 /*
10075 * Allocate the submaps
10076 */
10077
10078 /*
10079 * In order to prevent us from throwing off the ratios, deduct VA for the
10080 * zone guards ahead of time.
10081 */
10082 for (uint32_t i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
10083 remaining_size -= zone_submap_left_guard_size(i);
10084 remaining_size -= zone_submap_right_guard_size(i);
10085 }
10086
10087 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10088 if (submap_ratios[idx] == 0) {
10089 zone_submaps[idx] = VM_MAP_NULL;
10090 } else {
10091 zone_submap_init(&submap_min, idx, submap_ratios[idx],
10092 &denom, &remaining_size);
10093 }
10094 }
10095
10096 zone_metadata_init();
10097 #else
10098 #pragma unused(denom, remaining_size)
10099 #endif
10100
10101 #if VM_TAG_SIZECLASSES
10102 if (zone_tagging_on) {
10103 vm_allocation_zones_init();
10104 }
10105 #endif /* VM_TAG_SIZECLASSES */
10106
10107 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10108 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10109
10110 (void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NO_TBI_TAG,
10111 ZONE_ID_PERMANENT, ^(zone_t z) {
10112 z->z_permanent = true;
10113 z->z_elem_size = 1;
10114 });
10115 (void)zone_create_ext("vm.permanent.percpu", 1,
10116 kma_flags | ZC_PERCPU | ZC_NO_TBI_TAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10117 z->z_permanent = true;
10118 z->z_elem_size = 1;
10119 });
10120
10121 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10122 zc_mag_size() * sizeof(vm_offset_t),
10123 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM);
10124 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10125
10126 /*
10127 * Now migrate the startup statistics into their final storage,
10128 * and enable logging for early zones (that zone_create_ext() skipped).
10129 */
10130 int cpu = cpu_number();
10131 zone_index_foreach(idx) {
10132 zone_t tz = &zone_array[idx];
10133
10134 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10135 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10136
10137 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10138 tz->z_stats = zs;
10139 }
10140 if (tz->z_self == tz) {
10141 #if ZALLOC_ENABLE_LOGGING
10142 zone_setup_logging(tz);
10143 #endif /* ZALLOC_ENABLE_LOGGING */
10144 #if KASAN_TBI
10145 zone_setup_kasan_logging(tz);
10146 #endif /* KASAN_TBI */
10147 }
10148 }
10149 }
10150 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10151
10152 void
10153 zalloc_iokit_lockdown(void)
10154 {
10155 zone_share_always = false;
10156 }
10157
10158 void
10159 zalloc_first_proc_made(void)
10160 {
10161 zone_caching_disabled = 0;
10162 zone_early_thres_mul = 1;
10163 }
10164
10165 __startup_func
10166 vm_offset_t
10167 zone_early_mem_init(vm_size_t size)
10168 {
10169 vm_offset_t mem;
10170
10171 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10172
10173 /*
10174 * The memory for the zones used early to bring up the VM is stolen here.
10175 *
10176 * When the zone subsystem is actually initialized,
10177 * zone_metadata_init() will be called, and those pages,
10178 * and the elements they contain, will be relocated into
10179 * the VM submap (even on architectures where those zones
10180 * do not live there).
10181 */
10182 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10183 mem = (vm_offset_t)zone_early_pages_to_cram;
10184
10185 #if HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING)
10186 /*
10187 * zone_early_pages_to_cram is in the DATA segment, so it is canonically
10188 * tagged. We need regularly taggable memory, so we have to allocate it
10189 * specially with the MTE MAIR set.
10190 */
10191 if (is_mte_enabled) {
10192 mem = (vm_offset_t)pmap_steal_zone_memory(size, PAGE_SIZE);
10193 }
10194 #endif /* HAS_MTE && ZSECURITY_CONFIG(ZONE_TAGGING) */
10195
10196 zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
10197 (struct zone_page_metadata *)zone_early_meta_array_startup,
10198 -(ptrdiff_t)zone_pva_from_addr(mem).packed_address);
10199 zone_info.zi_map_range.min_address = mem;
10200 zone_info.zi_map_range.max_address = mem + size;
10201
10202 zone_info.zi_bits_range = (struct mach_vm_range){
10203 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10204 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10205 sizeof(zba_chunk_startup),
10206 };
10207
10208 zba_meta()->zbam_left = 1;
10209 zba_meta()->zbam_right = 1;
10210 zba_init_chunk(0, false);
10211
10212 return mem;
10213 }
10214
10215 #endif /* !ZALLOC_TEST */
10216 #pragma mark - tests
10217 #if DEBUG || DEVELOPMENT
10218
10219 /*
10220 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10221 * thread goes through at a time.
10222 *
10223 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10224 * before zdestroy()), which could lead us to run out of zones.
10225 */
10226 static bool any_zone_test_running = FALSE;
10227
10228 static uintptr_t *
10229 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10230 {
10231 vm_offset_t elem_size = zone_elem_outer_size(z);
10232 vm_offset_t base;
10233 struct zone_page_metadata *meta;
10234
10235 while (!zone_pva_is_null(page_index)) {
10236 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10237 meta = zone_pva_to_meta(page_index);
10238
10239 if (meta->zm_inline_bitmap) {
10240 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10241 uint32_t map = meta[i].zm_bitmap;
10242
10243 for (; map; map &= map - 1) {
10244 *elems++ = INSTANCE_PUT(base +
10245 elem_size * __builtin_clz(map));
10246 }
10247 base += elem_size * 32;
10248 }
10249 } else {
10250 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10251 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10252 for (size_t i = 0; i < (1u << order); i++) {
10253 uint64_t map = bits[i];
10254
10255 for (; map; map &= map - 1) {
10256 *elems++ = INSTANCE_PUT(base +
10257 elem_size * __builtin_clzll(map));
10258 }
10259 base += elem_size * 64;
10260 }
10261 }
10262
10263 page_index = meta->zm_page_next;
10264 }
10265 return elems;
10266 }
10267
10268 kern_return_t
10269 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10270 {
10271 zone_t zone = NULL;
10272 uintptr_t * array;
10273 uintptr_t * next;
10274 uintptr_t element;
10275 uint32_t idx, count, found;
10276 uint32_t nobtcount;
10277 uint32_t elemSize;
10278 size_t maxElems;
10279
10280 zone_foreach(z) {
10281 if (!z->z_name) {
10282 continue;
10283 }
10284 if (!strncmp(zoneName, z->z_name, nameLen)) {
10285 zone = z;
10286 break;
10287 }
10288 }
10289 if (zone == NULL) {
10290 return KERN_INVALID_NAME;
10291 }
10292
10293 elemSize = (uint32_t)zone_elem_inner_size(zone);
10294 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10295
10296 array = kalloc_type_tag(vm_offset_t, maxElems, Z_WAITOK, VM_KERN_MEMORY_DIAG);
10297 if (array == NULL) {
10298 return KERN_RESOURCE_SHORTAGE;
10299 }
10300
10301 zone_lock(zone);
10302
10303 next = array;
10304 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10305 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10306 count = (uint32_t)(next - array);
10307
10308 zone_unlock(zone);
10309
10310 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10311 assert(found <= count);
10312
10313 for (idx = 0; idx < count; idx++) {
10314 element = array[idx];
10315 if (kInstanceFlagReferenced & element) {
10316 continue;
10317 }
10318 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10319 }
10320
10321 #if ZALLOC_ENABLE_LOGGING
10322 if (zone->z_btlog && !corruption_debug_flag) {
10323 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10324 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10325 btlog_copy_backtraces_for_elements(zone->z_btlog,
10326 (vm_address_t *)array, &count, elemSize, proc);
10327 }
10328 #endif /* ZALLOC_ENABLE_LOGGING */
10329
10330 for (nobtcount = idx = 0; idx < count; idx++) {
10331 element = array[idx];
10332 if (!element) {
10333 continue;
10334 }
10335 if (kInstanceFlagReferenced & element) {
10336 continue;
10337 }
10338 nobtcount++;
10339 }
10340 if (nobtcount) {
10341 proc(nobtcount, elemSize, BTREF_NULL);
10342 }
10343
10344 kfree_type(vm_offset_t, maxElems, array);
10345 return KERN_SUCCESS;
10346 }
10347
10348 static int
10349 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10350 {
10351 zone_security_flags_t zsflags;
10352 uint32_t x = 4;
10353 uint32_t *test_ptr;
10354
10355 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10356 printf("zone_ro_basic_test: Test already running.\n");
10357 return EALREADY;
10358 }
10359
10360 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10361
10362 for (int i = 0; i < 3; i++) {
10363 #if ZSECURITY_CONFIG(READ_ONLY)
10364 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10365 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10366 printf("zone_ro_basic_test: create a sub-page size zone\n");
10367
10368 printf("zone_ro_basic_test: verify flags were set\n");
10369 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10370
10371 printf("zone_ro_basic_test: zalloc an element\n");
10372 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10373 assert(test_ptr);
10374
10375 printf("zone_ro_basic_test: verify we can't write to it\n");
10376 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10377
10378 x = 4;
10379 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10380 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10381 assert(test_ptr);
10382 assert(*(uint32_t*)test_ptr == x);
10383
10384 x = 5;
10385 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10386 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10387 assert(test_ptr);
10388 assert(*(uint32_t*)test_ptr == x);
10389
10390 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10391 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10392
10393 printf("zone_ro_basic_test: free elem\n");
10394 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10395 assert(!test_ptr);
10396 #else
10397 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10398
10399 printf("zone_ro_basic_test: verify flags were set\n");
10400 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10401
10402 printf("zone_ro_basic_test: zalloc an element\n");
10403 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10404 assert(test_ptr);
10405
10406 x = 4;
10407 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10408 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10409 assert(test_ptr);
10410 assert(*(uint32_t*)test_ptr == x);
10411
10412 x = 5;
10413 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10414 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10415 assert(test_ptr);
10416 assert(*(uint32_t*)test_ptr == x);
10417
10418 printf("zone_ro_basic_test: free elem\n");
10419 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10420 assert(!test_ptr);
10421 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10422 }
10423
10424 printf("zone_ro_basic_test: garbage collection\n");
10425 zone_gc(ZONE_GC_DRAIN);
10426
10427 printf("zone_ro_basic_test: Test passed\n");
10428
10429 *out = 1;
10430 os_atomic_store(&any_zone_test_running, false, relaxed);
10431 return 0;
10432 }
10433 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10434
10435 static int
10436 zone_basic_test_run(__unused int64_t in, int64_t *out)
10437 {
10438 static zone_t test_zone_ptr = NULL;
10439
10440 unsigned int i = 0, max_iter = 5;
10441 void * test_ptr;
10442 zone_t test_zone;
10443 int rc = 0;
10444
10445 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10446 printf("zone_basic_test: Test already running.\n");
10447 return EALREADY;
10448 }
10449
10450 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10451
10452 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10453 do {
10454 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10455 assert(test_zone);
10456
10457 #if KASAN_CLASSIC
10458 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10459 #else
10460 if (test_zone->z_elems_free != 0)
10461 #endif
10462 {
10463 printf("zone_basic_test: free count is not zero\n");
10464 rc = EIO;
10465 goto out;
10466 }
10467
10468 if (test_zone_ptr == NULL) {
10469 /* Stash the zone pointer returned on the first zinit */
10470 printf("zone_basic_test: zone created for the first time\n");
10471 test_zone_ptr = test_zone;
10472 } else if (test_zone != test_zone_ptr) {
10473 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10474 rc = EIO;
10475 goto out;
10476 }
10477
10478 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10479 zfree(test_zone, test_ptr);
10480
10481 zdestroy(test_zone);
10482 i++;
10483
10484 printf("zone_basic_test: Iteration %d successful\n", i);
10485 } while (i < max_iter);
10486
10487 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10488 /* test Z_VA_SEQUESTER */
10489 {
10490 zone_t test_pcpu_zone;
10491 kern_return_t kr;
10492 const int num_allocs = 8;
10493 int idx;
10494 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10495 void *allocs[num_allocs];
10496 void **allocs_pcpu;
10497 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10498
10499 test_zone = zone_create("test_zone_sysctl", elem_size,
10500 ZC_DESTRUCTIBLE);
10501 assert(test_zone);
10502
10503 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10504 ZC_DESTRUCTIBLE | ZC_PERCPU);
10505 assert(test_pcpu_zone);
10506
10507 for (idx = 0; idx < num_allocs; idx++) {
10508 allocs[idx] = zalloc(test_zone);
10509 assert(NULL != allocs[idx]);
10510 printf("alloc[%d] %p\n", idx, allocs[idx]);
10511 }
10512 for (idx = 0; idx < num_allocs; idx++) {
10513 zfree(test_zone, allocs[idx]);
10514 }
10515 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10516
10517 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10518 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10519 assert(kr == KERN_SUCCESS);
10520
10521 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10522 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10523 Z_WAITOK | Z_ZERO);
10524 assert(NULL != allocs_pcpu[idx]);
10525 }
10526 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10527 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10528 }
10529 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10530
10531 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10532 vm_page_wire_count, vm_page_free_count,
10533 100L * phys_pages / zone_pages_wired_max);
10534 zone_gc(ZONE_GC_DRAIN);
10535 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10536 vm_page_wire_count, vm_page_free_count,
10537 100L * phys_pages / zone_pages_wired_max);
10538
10539 unsigned int allva = 0;
10540
10541 zone_foreach(z) {
10542 zone_lock(z);
10543 allva += z->z_wired_cur;
10544 if (zone_pva_is_null(z->z_pageq_va)) {
10545 zone_unlock(z);
10546 continue;
10547 }
10548 unsigned count = 0;
10549 uint64_t size;
10550 zone_pva_t pg = z->z_pageq_va;
10551 struct zone_page_metadata *page_meta;
10552 while (pg.packed_address) {
10553 page_meta = zone_pva_to_meta(pg);
10554 count += z->z_percpu ? 1 : z->z_chunk_pages;
10555 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10556 count -= page_meta->zm_page_index;
10557 }
10558 pg = page_meta->zm_page_next;
10559 }
10560 size = zone_size_wired(z);
10561 if (!size) {
10562 size = 1;
10563 }
10564 printf("%s%s: seq %d, res %d, %qd %%\n",
10565 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10566 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10567 zone_unlock(z);
10568 }
10569
10570 printf("total va: %d\n", allva);
10571
10572 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10573 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10574 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10575 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10576 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10577 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10578
10579 for (idx = 0; idx < num_allocs; idx++) {
10580 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10581 }
10582
10583 /* make sure the zone is still usable after a GC */
10584
10585 for (idx = 0; idx < num_allocs; idx++) {
10586 allocs[idx] = zalloc(test_zone);
10587 assert(allocs[idx]);
10588 printf("alloc[%d] %p\n", idx, allocs[idx]);
10589 }
10590 for (idx = 0; idx < num_allocs; idx++) {
10591 zfree(test_zone, allocs[idx]);
10592 }
10593
10594 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10595 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10596 Z_WAITOK | Z_ZERO);
10597 assert(NULL != allocs_pcpu[idx]);
10598 }
10599 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10600 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10601 }
10602
10603 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10604
10605 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10606
10607 zdestroy(test_zone);
10608 zdestroy(test_pcpu_zone);
10609 }
10610 #endif /* KASAN_CLASSIC */
10611
10612 printf("zone_basic_test: Test passed\n");
10613
10614
10615 *out = 1;
10616 out:
10617 os_atomic_store(&any_zone_test_running, false, relaxed);
10618 return rc;
10619 }
10620 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10621
10622 #define N_ALLOCATIONS 100
10623
10624 static int
10625 run_kalloc_guard_insertion_test(int64_t in __unused, int64_t *out)
10626 {
10627 size_t alloc_size = 24576;
10628 uint64_t *ptrs[N_ALLOCATIONS];
10629 uint32_t n_guard_regions = 0;
10630 zalloc_flags_t flags = Z_WAITOK | Z_FULLSIZE;
10631 int retval = 1;
10632
10633 *out = 0;
10634
10635 for (uint i = 0; i < N_ALLOCATIONS; ++i) {
10636 uint64_t *data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size,
10637 flags, &data_ptr).addr;
10638 if (!data_ptr) {
10639 printf("%s: kalloc_ext %zu with owner and Z_FULLSIZE returned null\n",
10640 __func__, alloc_size);
10641 goto cleanup;
10642 }
10643 ptrs[i] = data_ptr;
10644 }
10645
10646 /* We don't know where there are guard regions, but let's try to find one. */
10647 for (uint i = 0; i < N_ALLOCATIONS; i++) {
10648 vm_address_t addr;
10649 zone_t z;
10650 struct zone_page_metadata *meta;
10651 struct zone_page_metadata *gmeta;
10652 uint32_t chunk_pages;
10653
10654 addr = (vm_address_t)ptrs[i];
10655 meta = zone_meta_from_addr(addr);
10656 z = &zone_array[meta->zm_index];
10657 chunk_pages = z->z_chunk_pages;
10658
10659 if (meta->zm_guarded) {
10660 n_guard_regions++;
10661 if (meta->zm_chunk_len == chunk_pages) {
10662 gmeta = meta + chunk_pages;
10663 } else if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10664 gmeta = meta + meta->zm_subchunk_len;
10665 } else if (meta->zm_chunk_len == ZM_PGZ_GUARD) {
10666 printf("%s: kalloc_ext gave us address 0x%lx for a guard region.\n",
10667 __func__, addr);
10668 goto cleanup;
10669 } else if ((meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) && !z->z_percpu) {
10670 printf("%s: zone [%s%s] is not per-CPU.\n",
10671 __func__, zone_heap_name(z), zone_name(z));
10672 goto cleanup;
10673 } else {
10674 printf("%s: zm_chunk_len value not recognized for 0x%lx.\n",
10675 __func__, addr);
10676 goto cleanup;
10677 }
10678
10679 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
10680 /* Now check that we have chunk_len of guard pages. */
10681 for (uint j = 0; j < chunk_pages; j++) {
10682 if (gmeta->zm_chunk_len != ZM_PGZ_GUARD) {
10683 printf("%s: page %u / %u is not a guard page.\n",
10684 __func__, j + 1, chunk_pages);
10685 goto cleanup;
10686 }
10687 gmeta++;
10688 }
10689
10690 /* The metadata following the guard region should not be a guard page. */
10691 if (gmeta->zm_chunk_len == ZM_PGZ_GUARD) {
10692 printf("%s: zone page following guard region is a guard page.\n",
10693 __func__);
10694 goto cleanup;
10695 }
10696 }
10697 }
10698
10699 printf("%s: there were %u guard regions in %d allocations.\n",
10700 __func__, n_guard_regions, N_ALLOCATIONS);
10701
10702 *out = 1;
10703 retval = 0;
10704
10705 cleanup:
10706 for (uint i = 0; i < N_ALLOCATIONS; ++i) {
10707 kfree_ext(KHEAP_DATA_BUFFERS, ptrs[i], alloc_size);
10708 }
10709
10710 return retval;
10711 }
10712 SYSCTL_TEST_REGISTER(kalloc_guard_regions, run_kalloc_guard_insertion_test);
10713
10714
10715 struct zone_stress_obj {
10716 TAILQ_ENTRY(zone_stress_obj) zso_link;
10717 };
10718
10719 struct zone_stress_ctx {
10720 thread_t zsc_leader;
10721 lck_mtx_t zsc_lock;
10722 zone_t zsc_zone;
10723 uint64_t zsc_end;
10724 uint32_t zsc_workers;
10725 };
10726
10727 static void
10728 zone_stress_worker(void *arg, wait_result_t __unused wr)
10729 {
10730 struct zone_stress_ctx *ctx = arg;
10731 bool leader = ctx->zsc_leader == current_thread();
10732 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10733 struct zone_bool_gen bg = { };
10734 struct zone_stress_obj *obj;
10735 uint32_t allocs = 0;
10736
10737 random_bool_init(&bg.zbg_bg);
10738
10739 do {
10740 for (int i = 0; i < 2000; i++) {
10741 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10742 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10743 switch (what) {
10744 case 0:
10745 case 1:
10746 if (allocs < 10000) {
10747 obj = zalloc(ctx->zsc_zone);
10748 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10749 allocs++;
10750 }
10751 break;
10752 case 2:
10753 case 3:
10754 if (allocs < 10000) {
10755 obj = zalloc(ctx->zsc_zone);
10756 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10757 allocs++;
10758 }
10759 break;
10760 case 4:
10761 if (leader) {
10762 zone_gc(ZONE_GC_DRAIN);
10763 }
10764 break;
10765 case 5:
10766 case 6:
10767 if (!TAILQ_EMPTY(&head)) {
10768 obj = TAILQ_FIRST(&head);
10769 TAILQ_REMOVE(&head, obj, zso_link);
10770 zfree(ctx->zsc_zone, obj);
10771 allocs--;
10772 }
10773 break;
10774 case 7:
10775 if (!TAILQ_EMPTY(&head)) {
10776 obj = TAILQ_LAST(&head, zone_stress_head);
10777 TAILQ_REMOVE(&head, obj, zso_link);
10778 zfree(ctx->zsc_zone, obj);
10779 allocs--;
10780 }
10781 break;
10782 }
10783 }
10784 } while (mach_absolute_time() < ctx->zsc_end);
10785
10786 while (!TAILQ_EMPTY(&head)) {
10787 obj = TAILQ_FIRST(&head);
10788 TAILQ_REMOVE(&head, obj, zso_link);
10789 zfree(ctx->zsc_zone, obj);
10790 }
10791
10792 lck_mtx_lock(&ctx->zsc_lock);
10793 if (--ctx->zsc_workers == 0) {
10794 thread_wakeup(ctx);
10795 } else if (leader) {
10796 while (ctx->zsc_workers) {
10797 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10798 THREAD_UNINT);
10799 }
10800 }
10801 lck_mtx_unlock(&ctx->zsc_lock);
10802
10803 if (!leader) {
10804 thread_terminate_self();
10805 __builtin_unreachable();
10806 }
10807 }
10808
10809 static int
10810 zone_stress_test_run(__unused int64_t in, int64_t *out)
10811 {
10812 struct zone_stress_ctx ctx = {
10813 .zsc_leader = current_thread(),
10814 .zsc_workers = 3,
10815 };
10816 kern_return_t kr;
10817 thread_t th;
10818
10819 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10820 printf("zone_stress_test: Test already running.\n");
10821 return EALREADY;
10822 }
10823
10824 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10825 ctx.zsc_zone = zone_create("test_zone_344", 344,
10826 ZC_DESTRUCTIBLE | ZC_NOCACHING);
10827 assert(ctx.zsc_zone->z_chunk_pages > 1);
10828
10829 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10830
10831 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10832
10833 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10834
10835 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10836 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10837 BASEPRI_DEFAULT, &th);
10838 if (kr == KERN_SUCCESS) {
10839 printf("zone_stress_test: thread %d: %p\n", i, th);
10840 thread_deallocate(th);
10841 } else {
10842 ctx.zsc_workers--;
10843 }
10844 }
10845
10846 zone_stress_worker(&ctx, 0);
10847
10848 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10849
10850 zdestroy(ctx.zsc_zone);
10851
10852 printf("zone_stress_test: Done\n");
10853
10854 *out = 1;
10855 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10856 os_atomic_store(&any_zone_test_running, false, relaxed);
10857 return 0;
10858 }
10859 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10860
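/*
 * zone_gc_stress_test: objects are padded so that only a few fit per page,
 * which makes it cheap to grow and shrink the zone page by page. Workers
 * churn page-sized batches of allocations while the driver repeatedly trims
 * the zone under the zone GC lock.
 */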
struct zone_gc_stress_obj {
	STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
	uintptr_t zgso_pad[63];
};
STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);

#define ZONE_GC_OBJ_PER_PAGE (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))

KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);

struct zone_gc_stress_ctx {
	bool zgsc_done;
	lck_mtx_t zgsc_lock;
	zone_t zgsc_zone;
	uint64_t zgsc_end;
	uint32_t zgsc_workers;
};

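/* Helpers: allocate a batch of n objects onto a queue, and free a whole queue. */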
static void
zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
{
	struct zone_gc_stress_obj *obj;

	for (size_t i = 0; i < n; i++) {
		obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
		STAILQ_INSERT_TAIL(head, obj, zgso_link);
	}
}

static void
zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
{
	struct zone_gc_stress_obj *obj;

	while ((obj = STAILQ_FIRST(head))) {
		STAILQ_REMOVE_HEAD(head, zgso_link);
		zfree(zone_gc_stress_zone, obj);
	}
}

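/*
 * Worker: allocates and frees four pages worth of objects in a tight loop
 * until the driver sets zgsc_done, then wakes the driver once the last
 * worker is done.
 */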
__dead2
static void
zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
{
	struct zone_gc_stress_ctx *ctx = arg;
	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);

	while (!ctx->zgsc_done) {
		zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
		zone_gc_stress_test_free_n(&head);
	}

	lck_mtx_lock(&ctx->zgsc_lock);
	if (--ctx->zgsc_workers == 0) {
		thread_wakeup(ctx);
	}
	lck_mtx_unlock(&ctx->zgsc_lock);

	thread_terminate_self();
	__builtin_unreachable();
}

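/*
 * Test driver: starts the workers, then performs the requested number of
 * rounds, each allocating and freeing a large batch and trimming the test
 * zone so that reclaim races with the workers' churn; once the workers have
 * been told to stop and have exited, it drains the zone completely.
 */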
static int
zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
{
	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
	struct zone_gc_stress_ctx ctx = {
		.zgsc_workers = 3,
	};
	kern_return_t kr;
	thread_t th;

	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
		printf("zone_gc_stress_test: Test already running.\n");
		return EALREADY;
	}

	lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
	lck_mtx_lock(&ctx.zgsc_lock);

	printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());

	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);

	for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
		kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
		    BASEPRI_DEFAULT, &th);
		if (kr == KERN_SUCCESS) {
			printf("zone_gc_stress_test: thread %d: %p\n", i, th);
			thread_deallocate(th);
		} else {
			ctx.zgsc_workers--;
		}
	}

	for (uint64_t i = 0; i < in; i++) {
		size_t count = zc_mag_size() * zc_free_batch_size() * 20;

		if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
			count = ZONE_GC_OBJ_PER_PAGE * 20;
		}

		zone_gc_stress_test_alloc_n(&head, count);
		zone_gc_stress_test_free_n(&head);

		lck_mtx_lock(&zone_gc_lock);
		zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
		    ZONE_RECLAIM_TRIM);
		lck_mtx_unlock(&zone_gc_lock);

		printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
	}

	os_atomic_thread_fence(seq_cst);
	ctx.zgsc_done = true;
	lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
	lck_mtx_unlock(&ctx.zgsc_lock);

	lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);

	lck_mtx_lock(&zone_gc_lock);
	zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
	    ZONE_RECLAIM_DRAIN);
	lck_mtx_unlock(&zone_gc_lock);

	printf("zone_gc_stress_test: Done\n");

	*out = 1;
	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return 0;
}
SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);

/*
 * Routines to test that zone garbage collection and zone replenish threads
 * running at the same time don't cause problems.
 */

static int
zone_gc_replenish_test(__unused int64_t in, int64_t *out)
{
	zone_gc(ZONE_GC_DRAIN);
	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);

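/*
 * Allocate ten times the zone's reserve from vm_map_entry_zone to push
 * allocations past the replenishment threshold, then free everything.
 */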
static int
zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
{
	zone_t z = vm_map_entry_zone;
	struct data { struct data *next; } *node, *list = NULL;

	if (z == NULL) {
		printf("Couldn't find a replenish zone\n");
		return EIO;
	}

	/* big enough to go past replenishment */
	for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
		node = zalloc(z);
		node->next = list;
		list = node;
	}

	/*
	 * release the memory we allocated
	 */
	while (list != NULL) {
		node = list;
		list = list->next;
		zfree(z, node);
	}

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);


#endif /* DEBUG || DEVELOPMENT */